Chromium Code Reviews

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 1405363003: Move Hydrogen and Lithium to src/crankshaft/ (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: rebased | Created 5 years, 2 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_IA32
6
7 #include "src/base/bits.h"
8 #include "src/code-factory.h"
9 #include "src/code-stubs.h"
10 #include "src/codegen.h"
11 #include "src/deoptimizer.h"
12 #include "src/hydrogen-osr.h"
13 #include "src/ia32/frames-ia32.h"
14 #include "src/ia32/lithium-codegen-ia32.h"
15 #include "src/ic/ic.h"
16 #include "src/ic/stub-cache.h"
17 #include "src/profiler/cpu-profiler.h"
18
19 namespace v8 {
20 namespace internal {
21
22 // When invoking builtins, we need to record the safepoint in the middle of
23 // the invoke instruction sequence generated by the macro assembler.
24 class SafepointGenerator final : public CallWrapper {
25 public:
26 SafepointGenerator(LCodeGen* codegen,
27 LPointerMap* pointers,
28 Safepoint::DeoptMode mode)
29 : codegen_(codegen),
30 pointers_(pointers),
31 deopt_mode_(mode) {}
32 virtual ~SafepointGenerator() {}
33
34 void BeforeCall(int call_size) const override {}
35
36 void AfterCall() const override {
37 codegen_->RecordSafepoint(pointers_, deopt_mode_);
38 }
39
40 private:
41 LCodeGen* codegen_;
42 LPointerMap* pointers_;
43 Safepoint::DeoptMode deopt_mode_;
44 };
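// Illustrative use (a sketch, not taken from this section): call sites later
// in this file typically construct a SafepointGenerator over the
// instruction's pointer map, e.g. roughly
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
// and pass it as the CallWrapper of an invoke helper, so that AfterCall()
// records the safepoint immediately after the generated call instruction.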
45
46
47 #define __ masm()->
48
49 bool LCodeGen::GenerateCode() {
50 LPhase phase("Z_Code generation", chunk());
51 DCHECK(is_unused());
52 status_ = GENERATING;
53
54 // Open a frame scope to indicate that there is a frame on the stack. The
55 // MANUAL indicates that the scope shouldn't actually generate code to set up
56 // the frame (that is done in GeneratePrologue).
57 FrameScope frame_scope(masm_, StackFrame::MANUAL);
58
59 support_aligned_spilled_doubles_ = info()->IsOptimizing();
60
61 dynamic_frame_alignment_ = info()->IsOptimizing() &&
62 ((chunk()->num_double_slots() > 2 &&
63 !chunk()->graph()->is_recursive()) ||
64 !info()->osr_ast_id().IsNone());
65
66 return GeneratePrologue() &&
67 GenerateBody() &&
68 GenerateDeferredCode() &&
69 GenerateJumpTable() &&
70 GenerateSafepointTable();
71 }
72
73
74 void LCodeGen::FinishCode(Handle<Code> code) {
75 DCHECK(is_done());
76 code->set_stack_slots(GetStackSlotCount());
77 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
78 PopulateDeoptimizationData(code);
79 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
80 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
81 }
82 }
83
84
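// Windows grows the stack one guard page at a time, so after a large esp
// adjustment every 4KB page of the newly reserved area has to be touched in
// order; otherwise the first access beyond the guard page would fault.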
85 #ifdef _MSC_VER
86 void LCodeGen::MakeSureStackPagesMapped(int offset) {
87 const int kPageSize = 4 * KB;
88 for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
89 __ mov(Operand(esp, offset), eax);
90 }
91 }
92 #endif
93
94
95 void LCodeGen::SaveCallerDoubles() {
96 DCHECK(info()->saves_caller_doubles());
97 DCHECK(NeedsEagerFrame());
98 Comment(";;; Save clobbered callee double registers");
99 int count = 0;
100 BitVector* doubles = chunk()->allocated_double_registers();
101 BitVector::Iterator save_iterator(doubles);
102 while (!save_iterator.Done()) {
103 __ movsd(MemOperand(esp, count * kDoubleSize),
104 XMMRegister::from_code(save_iterator.Current()));
105 save_iterator.Advance();
106 count++;
107 }
108 }
109
110
111 void LCodeGen::RestoreCallerDoubles() {
112 DCHECK(info()->saves_caller_doubles());
113 DCHECK(NeedsEagerFrame());
114 Comment(";;; Restore clobbered callee double registers");
115 BitVector* doubles = chunk()->allocated_double_registers();
116 BitVector::Iterator save_iterator(doubles);
117 int count = 0;
118 while (!save_iterator.Done()) {
119 __ movsd(XMMRegister::from_code(save_iterator.Current()),
120 MemOperand(esp, count * kDoubleSize));
121 save_iterator.Advance();
122 count++;
123 }
124 }
125
126
127 bool LCodeGen::GeneratePrologue() {
128 DCHECK(is_generating());
129
130 if (info()->IsOptimizing()) {
131 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
132
133 #ifdef DEBUG
134 if (strlen(FLAG_stop_at) > 0 &&
135 info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
136 __ int3();
137 }
138 #endif
139
140 // Sloppy mode functions and builtins need to replace the receiver with the
141 // global proxy when called as functions (without an explicit receiver
142 // object).
143 if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
144 Label ok;
145 // +1 for return address.
146 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
147 __ mov(ecx, Operand(esp, receiver_offset));
148
149 __ cmp(ecx, isolate()->factory()->undefined_value());
150 __ j(not_equal, &ok, Label::kNear);
151
152 __ mov(ecx, GlobalObjectOperand());
153 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
154
155 __ mov(Operand(esp, receiver_offset), ecx);
156
157 __ bind(&ok);
158 }
159
160 if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
161 // Move state of dynamic frame alignment into edx.
162 __ Move(edx, Immediate(kNoAlignmentPadding));
163
164 Label do_not_pad, align_loop;
165 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
166 // Align esp + 4 to a multiple of 2 * kPointerSize.
167 __ test(esp, Immediate(kPointerSize));
168 __ j(not_zero, &do_not_pad, Label::kNear);
169 __ push(Immediate(0));
170 __ mov(ebx, esp);
171 __ mov(edx, Immediate(kAlignmentPaddingPushed));
172 // Copy arguments, receiver, and return address.
173 __ mov(ecx, Immediate(scope()->num_parameters() + 2));
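// (+2 covers the receiver and the return address in addition to the
// arguments.)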
174
175 __ bind(&align_loop);
176 __ mov(eax, Operand(ebx, 1 * kPointerSize));
177 __ mov(Operand(ebx, 0), eax);
178 __ add(Operand(ebx), Immediate(kPointerSize));
179 __ dec(ecx);
180 __ j(not_zero, &align_loop, Label::kNear);
181 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
182 __ bind(&do_not_pad);
183 }
184 }
185
186 info()->set_prologue_offset(masm_->pc_offset());
187 if (NeedsEagerFrame()) {
188 DCHECK(!frame_is_built_);
189 frame_is_built_ = true;
190 if (info()->IsStub()) {
191 __ StubPrologue();
192 } else {
193 __ Prologue(info()->IsCodePreAgingActive());
194 }
195 }
196
197 if (info()->IsOptimizing() &&
198 dynamic_frame_alignment_ &&
199 FLAG_debug_code) {
200 __ test(esp, Immediate(kPointerSize));
201 __ Assert(zero, kFrameIsExpectedToBeAligned);
202 }
203
204 // Reserve space for the stack slots needed by the code.
205 int slots = GetStackSlotCount();
206 DCHECK(slots != 0 || !info()->IsOptimizing());
207 if (slots > 0) {
208 if (slots == 1) {
209 if (dynamic_frame_alignment_) {
210 __ push(edx);
211 } else {
212 __ push(Immediate(kNoAlignmentPadding));
213 }
214 } else {
215 if (FLAG_debug_code) {
216 __ sub(Operand(esp), Immediate(slots * kPointerSize));
217 #ifdef _MSC_VER
218 MakeSureStackPagesMapped(slots * kPointerSize);
219 #endif
220 __ push(eax);
221 __ mov(Operand(eax), Immediate(slots));
222 Label loop;
223 __ bind(&loop);
224 __ mov(MemOperand(esp, eax, times_4, 0),
225 Immediate(kSlotsZapValue));
226 __ dec(eax);
227 __ j(not_zero, &loop);
228 __ pop(eax);
229 } else {
230 __ sub(Operand(esp), Immediate(slots * kPointerSize));
231 #ifdef _MSC_VER
232 MakeSureStackPagesMapped(slots * kPointerSize);
233 #endif
234 }
235
236 if (support_aligned_spilled_doubles_) {
237 Comment(";;; Store dynamic frame alignment tag for spilled doubles");
238 // Store dynamic frame alignment state in the first local.
239 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
240 if (dynamic_frame_alignment_) {
241 __ mov(Operand(ebp, offset), edx);
242 } else {
243 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
244 }
245 }
246 }
247
248 if (info()->saves_caller_doubles()) SaveCallerDoubles();
249 }
250 return !is_aborted();
251 }
252
253
254 void LCodeGen::DoPrologue(LPrologue* instr) {
255 Comment(";;; Prologue begin");
256
257 // Possibly allocate a local context.
258 if (info_->num_heap_slots() > 0) {
259 Comment(";;; Allocate local context");
260 bool need_write_barrier = true;
261 // Argument to NewContext is the function, which is still in edi.
262 int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
263 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
264 if (info()->scope()->is_script_scope()) {
265 __ push(edi);
266 __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
267 __ CallRuntime(Runtime::kNewScriptContext, 2);
268 deopt_mode = Safepoint::kLazyDeopt;
269 } else if (slots <= FastNewContextStub::kMaximumSlots) {
270 FastNewContextStub stub(isolate(), slots);
271 __ CallStub(&stub);
272 // Result of FastNewContextStub is always in new space.
273 need_write_barrier = false;
274 } else {
275 __ push(edi);
276 __ CallRuntime(Runtime::kNewFunctionContext, 1);
277 }
278 RecordSafepoint(deopt_mode);
279
280 // Context is returned in eax. It replaces the context passed to us.
281 // It's saved in the stack and kept live in esi.
282 __ mov(esi, eax);
283 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
284
285 // Copy parameters into context if necessary.
286 int num_parameters = scope()->num_parameters();
287 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
288 for (int i = first_parameter; i < num_parameters; i++) {
289 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
290 if (var->IsContextSlot()) {
291 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
292 (num_parameters - 1 - i) * kPointerSize;
293 // Load parameter from stack.
294 __ mov(eax, Operand(ebp, parameter_offset));
295 // Store it in the context.
296 int context_offset = Context::SlotOffset(var->index());
297 __ mov(Operand(esi, context_offset), eax);
298 // Update the write barrier. This clobbers eax and ebx.
299 if (need_write_barrier) {
300 __ RecordWriteContextSlot(esi,
301 context_offset,
302 eax,
303 ebx,
304 kDontSaveFPRegs);
305 } else if (FLAG_debug_code) {
306 Label done;
307 __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
308 __ Abort(kExpectedNewSpaceObject);
309 __ bind(&done);
310 }
311 }
312 }
313 Comment(";;; End allocate local context");
314 }
315
316 Comment(";;; Prologue end");
317 }
318
319
320 void LCodeGen::GenerateOsrPrologue() {
321 // Generate the OSR entry prologue at the first unknown OSR value, or if there
322 // are none, at the OSR entrypoint instruction.
323 if (osr_pc_offset_ >= 0) return;
324
325 osr_pc_offset_ = masm()->pc_offset();
326
327 // Move state of dynamic frame alignment into edx.
328 __ Move(edx, Immediate(kNoAlignmentPadding));
329
330 if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
331 Label do_not_pad, align_loop;
332 // Align ebp + 4 to a multiple of 2 * kPointerSize.
333 __ test(ebp, Immediate(kPointerSize));
334 __ j(zero, &do_not_pad, Label::kNear);
335 __ push(Immediate(0));
336 __ mov(ebx, esp);
337 __ mov(edx, Immediate(kAlignmentPaddingPushed));
338
339 // Move all parts of the frame over one word. The frame consists of:
340 // unoptimized frame slots, alignment state, context, frame pointer, return
341 // address, receiver, and the arguments.
342 __ mov(ecx, Immediate(scope()->num_parameters() +
343 5 + graph()->osr()->UnoptimizedFrameSlots()));
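// (The constant 5 counts the alignment state, context, frame pointer,
// return address and receiver slots listed in the comment above.)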
344
345 __ bind(&align_loop);
346 __ mov(eax, Operand(ebx, 1 * kPointerSize));
347 __ mov(Operand(ebx, 0), eax);
348 __ add(Operand(ebx), Immediate(kPointerSize));
349 __ dec(ecx);
350 __ j(not_zero, &align_loop, Label::kNear);
351 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
352 __ sub(Operand(ebp), Immediate(kPointerSize));
353 __ bind(&do_not_pad);
354 }
355
356 // Save the first local, which is overwritten by the alignment state.
357 Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
358 __ push(alignment_loc);
359
360 // Set the dynamic frame alignment state.
361 __ mov(alignment_loc, edx);
362
363 // Adjust the frame size, subsuming the unoptimized frame into the
364 // optimized frame.
365 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
366 DCHECK(slots >= 1);
367 __ sub(esp, Immediate((slots - 1) * kPointerSize));
368 }
369
370
371 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
372 if (instr->IsCall()) {
373 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
374 }
375 if (!instr->IsLazyBailout() && !instr->IsGap()) {
376 safepoints_.BumpLastLazySafepointIndex();
377 }
378 }
379
380
381 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
382
383
384 bool LCodeGen::GenerateJumpTable() {
385 if (!jump_table_.length()) return !is_aborted();
386
387 Label needs_frame;
388 Comment(";;; -------------------- Jump table --------------------");
389
390 for (int i = 0; i < jump_table_.length(); i++) {
391 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
392 __ bind(&table_entry->label);
393 Address entry = table_entry->address;
394 DeoptComment(table_entry->deopt_info);
395 if (table_entry->needs_frame) {
396 DCHECK(!info()->saves_caller_doubles());
397 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
398 __ call(&needs_frame);
399 } else {
400 if (info()->saves_caller_doubles()) RestoreCallerDoubles();
401 __ call(entry, RelocInfo::RUNTIME_ENTRY);
402 }
403 info()->LogDeoptCallPosition(masm()->pc_offset(),
404 table_entry->deopt_info.inlining_id);
405 }
406 if (needs_frame.is_linked()) {
407 __ bind(&needs_frame);
408 /* stack layout
409 4: entry address
410 3: return address <-- esp
411 2: garbage
412 1: garbage
413 0: garbage
414 */
415 __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker.
416 __ push(MemOperand(esp, kPointerSize)); // Copy return address.
417 __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address.
418
419 /* stack layout
420 4: entry address
421 3: return address
422 2: garbage
423 1: return address
424 0: entry address <-- esp
425 */
426 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp.
427 // Copy context.
428 __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
429 __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
430 // Fill ebp with the right stack frame address.
431 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
432 // This variant of deopt can only be used with stubs. Since we don't
433 // have a function pointer to install in the stack frame that we're
434 // building, install a special marker there instead.
435 DCHECK(info()->IsStub());
436 __ mov(MemOperand(esp, 2 * kPointerSize),
437 Immediate(Smi::FromInt(StackFrame::STUB)));
438
439 /* stack layout
440 4: old ebp
441 3: context pointer
442 2: stub marker
443 1: return address
444 0: entry address <-- esp
445 */
446 __ ret(0); // Call the continuation without clobbering registers.
447 }
448 return !is_aborted();
449 }
450
451
452 bool LCodeGen::GenerateDeferredCode() {
453 DCHECK(is_generating());
454 if (deferred_.length() > 0) {
455 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
456 LDeferredCode* code = deferred_[i];
457
458 HValue* value =
459 instructions_->at(code->instruction_index())->hydrogen_value();
460 RecordAndWritePosition(
461 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
462
463 Comment(";;; <@%d,#%d> "
464 "-------------------- Deferred %s --------------------",
465 code->instruction_index(),
466 code->instr()->hydrogen_value()->id(),
467 code->instr()->Mnemonic());
468 __ bind(code->entry());
469 if (NeedsDeferredFrame()) {
470 Comment(";;; Build frame");
471 DCHECK(!frame_is_built_);
472 DCHECK(info()->IsStub());
473 frame_is_built_ = true;
474 // Build the frame in such a way that esi isn't trashed.
475 __ push(ebp); // Caller's frame pointer.
476 __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
477 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
478 __ lea(ebp, Operand(esp, 2 * kPointerSize));
479 Comment(";;; Deferred code");
480 }
481 code->Generate();
482 if (NeedsDeferredFrame()) {
483 __ bind(code->done());
484 Comment(";;; Destroy frame");
485 DCHECK(frame_is_built_);
486 frame_is_built_ = false;
487 __ mov(esp, ebp);
488 __ pop(ebp);
489 }
490 __ jmp(code->exit());
491 }
492 }
493
494 // Deferred code is the last part of the instruction sequence. Mark
495 // the generated code as done unless we bailed out.
496 if (!is_aborted()) status_ = DONE;
497 return !is_aborted();
498 }
499
500
501 bool LCodeGen::GenerateSafepointTable() {
502 DCHECK(is_done());
503 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
504 // For lazy deoptimization we need space to patch a call after every call.
505 // Ensure there is always space for such patching, even if the code ends
506 // in a call.
507 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
508 while (masm()->pc_offset() < target_offset) {
509 masm()->nop();
510 }
511 }
512 safepoints_.Emit(masm(), GetStackSlotCount());
513 return !is_aborted();
514 }
515
516
517 Register LCodeGen::ToRegister(int code) const {
518 return Register::from_code(code);
519 }
520
521
522 XMMRegister LCodeGen::ToDoubleRegister(int code) const {
523 return XMMRegister::from_code(code);
524 }
525
526
527 Register LCodeGen::ToRegister(LOperand* op) const {
528 DCHECK(op->IsRegister());
529 return ToRegister(op->index());
530 }
531
532
533 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
534 DCHECK(op->IsDoubleRegister());
535 return ToDoubleRegister(op->index());
536 }
537
538
539 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
540 return ToRepresentation(op, Representation::Integer32());
541 }
542
543
544 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
545 const Representation& r) const {
546 HConstant* constant = chunk_->LookupConstant(op);
547 if (r.IsExternal()) {
548 return reinterpret_cast<int32_t>(
549 constant->ExternalReferenceValue().address());
550 }
551 int32_t value = constant->Integer32Value();
552 if (r.IsInteger32()) return value;
553 DCHECK(r.IsSmiOrTagged());
554 return reinterpret_cast<int32_t>(Smi::FromInt(value));
555 }
556
557
558 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
559 HConstant* constant = chunk_->LookupConstant(op);
560 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
561 return constant->handle(isolate());
562 }
563
564
565 double LCodeGen::ToDouble(LConstantOperand* op) const {
566 HConstant* constant = chunk_->LookupConstant(op);
567 DCHECK(constant->HasDoubleValue());
568 return constant->DoubleValue();
569 }
570
571
572 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
573 HConstant* constant = chunk_->LookupConstant(op);
574 DCHECK(constant->HasExternalReferenceValue());
575 return constant->ExternalReferenceValue();
576 }
577
578
579 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
580 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
581 }
582
583
584 bool LCodeGen::IsSmi(LConstantOperand* op) const {
585 return chunk_->LookupLiteralRepresentation(op).IsSmi();
586 }
587
588
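// Without an eager frame, slots with negative indices address the incoming
// arguments relative to esp; e.g. index -1 maps to Operand(esp,
// kPCOnStackSize), the slot just above the return address.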
589 static int ArgumentsOffsetWithoutFrame(int index) {
590 DCHECK(index < 0);
591 return -(index + 1) * kPointerSize + kPCOnStackSize;
592 }
593
594
595 Operand LCodeGen::ToOperand(LOperand* op) const {
596 if (op->IsRegister()) return Operand(ToRegister(op));
597 if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
598 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
599 if (NeedsEagerFrame()) {
600 return Operand(ebp, StackSlotOffset(op->index()));
601 } else {
602 // Retrieve parameter without eager stack-frame relative to the
603 // stack-pointer.
604 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
605 }
606 }
607
608
609 Operand LCodeGen::HighOperand(LOperand* op) {
610 DCHECK(op->IsDoubleStackSlot());
611 if (NeedsEagerFrame()) {
612 return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
613 } else {
614 // Retrieve parameter without eager stack-frame relative to the
615 // stack-pointer.
616 return Operand(
617 esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
618 }
619 }
620
621
622 void LCodeGen::WriteTranslation(LEnvironment* environment,
623 Translation* translation) {
624 if (environment == NULL) return;
625
626 // The translation includes one command per value in the environment.
627 int translation_size = environment->translation_size();
628
629 WriteTranslation(environment->outer(), translation);
630 WriteTranslationFrame(environment, translation);
631
632 int object_index = 0;
633 int dematerialized_index = 0;
634 for (int i = 0; i < translation_size; ++i) {
635 LOperand* value = environment->values()->at(i);
636 AddToTranslation(
637 environment, translation, value, environment->HasTaggedValueAt(i),
638 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
639 }
640 }
641
642
643 void LCodeGen::AddToTranslation(LEnvironment* environment,
644 Translation* translation,
645 LOperand* op,
646 bool is_tagged,
647 bool is_uint32,
648 int* object_index_pointer,
649 int* dematerialized_index_pointer) {
650 if (op == LEnvironment::materialization_marker()) {
651 int object_index = (*object_index_pointer)++;
652 if (environment->ObjectIsDuplicateAt(object_index)) {
653 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
654 translation->DuplicateObject(dupe_of);
655 return;
656 }
657 int object_length = environment->ObjectLengthAt(object_index);
658 if (environment->ObjectIsArgumentsAt(object_index)) {
659 translation->BeginArgumentsObject(object_length);
660 } else {
661 translation->BeginCapturedObject(object_length);
662 }
663 int dematerialized_index = *dematerialized_index_pointer;
664 int env_offset = environment->translation_size() + dematerialized_index;
665 *dematerialized_index_pointer += object_length;
666 for (int i = 0; i < object_length; ++i) {
667 LOperand* value = environment->values()->at(env_offset + i);
668 AddToTranslation(environment,
669 translation,
670 value,
671 environment->HasTaggedValueAt(env_offset + i),
672 environment->HasUint32ValueAt(env_offset + i),
673 object_index_pointer,
674 dematerialized_index_pointer);
675 }
676 return;
677 }
678
679 if (op->IsStackSlot()) {
680 int index = op->index();
681 if (index >= 0) {
682 index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
683 }
684 if (is_tagged) {
685 translation->StoreStackSlot(index);
686 } else if (is_uint32) {
687 translation->StoreUint32StackSlot(index);
688 } else {
689 translation->StoreInt32StackSlot(index);
690 }
691 } else if (op->IsDoubleStackSlot()) {
692 int index = op->index();
693 if (index >= 0) {
694 index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
695 }
696 translation->StoreDoubleStackSlot(index);
697 } else if (op->IsRegister()) {
698 Register reg = ToRegister(op);
699 if (is_tagged) {
700 translation->StoreRegister(reg);
701 } else if (is_uint32) {
702 translation->StoreUint32Register(reg);
703 } else {
704 translation->StoreInt32Register(reg);
705 }
706 } else if (op->IsDoubleRegister()) {
707 XMMRegister reg = ToDoubleRegister(op);
708 translation->StoreDoubleRegister(reg);
709 } else if (op->IsConstantOperand()) {
710 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
711 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
712 translation->StoreLiteral(src_index);
713 } else {
714 UNREACHABLE();
715 }
716 }
717
718
719 void LCodeGen::CallCodeGeneric(Handle<Code> code,
720 RelocInfo::Mode mode,
721 LInstruction* instr,
722 SafepointMode safepoint_mode) {
723 DCHECK(instr != NULL);
724 __ call(code, mode);
725 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
726
727 // Signal that we don't inline smi code before these stubs in the
728 // optimizing code generator.
729 if (code->kind() == Code::BINARY_OP_IC ||
730 code->kind() == Code::COMPARE_IC) {
731 __ nop();
732 }
733 }
734
735
736 void LCodeGen::CallCode(Handle<Code> code,
737 RelocInfo::Mode mode,
738 LInstruction* instr) {
739 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
740 }
741
742
743 void LCodeGen::CallRuntime(const Runtime::Function* fun,
744 int argc,
745 LInstruction* instr,
746 SaveFPRegsMode save_doubles) {
747 DCHECK(instr != NULL);
748 DCHECK(instr->HasPointerMap());
749
750 __ CallRuntime(fun, argc, save_doubles);
751
752 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
753
754 DCHECK(info()->is_calling());
755 }
756
757
758 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
759 if (context->IsRegister()) {
760 if (!ToRegister(context).is(esi)) {
761 __ mov(esi, ToRegister(context));
762 }
763 } else if (context->IsStackSlot()) {
764 __ mov(esi, ToOperand(context));
765 } else if (context->IsConstantOperand()) {
766 HConstant* constant =
767 chunk_->LookupConstant(LConstantOperand::cast(context));
768 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
769 } else {
770 UNREACHABLE();
771 }
772 }
773
774 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
775 int argc,
776 LInstruction* instr,
777 LOperand* context) {
778 LoadContextFromDeferred(context);
779
780 __ CallRuntimeSaveDoubles(id);
781 RecordSafepointWithRegisters(
782 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
783
784 DCHECK(info()->is_calling());
785 }
786
787
788 void LCodeGen::RegisterEnvironmentForDeoptimization(
789 LEnvironment* environment, Safepoint::DeoptMode mode) {
790 environment->set_has_been_used();
791 if (!environment->HasBeenRegistered()) {
792 // Physical stack frame layout:
793 // -x ............. -4 0 ..................................... y
794 // [incoming arguments] [spill slots] [pushed outgoing arguments]
795
796 // Layout of the environment:
797 // 0 ..................................................... size-1
798 // [parameters] [locals] [expression stack including arguments]
799
800 // Layout of the translation:
801 // 0 ........................................................ size - 1 + 4
802 // [expression stack including arguments] [locals] [4 words] [parameters]
803 // |>------------ translation_size ------------<|
804
805 int frame_count = 0;
806 int jsframe_count = 0;
807 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
808 ++frame_count;
809 if (e->frame_type() == JS_FUNCTION) {
810 ++jsframe_count;
811 }
812 }
813 Translation translation(&translations_, frame_count, jsframe_count, zone());
814 WriteTranslation(environment, &translation);
815 int deoptimization_index = deoptimizations_.length();
816 int pc_offset = masm()->pc_offset();
817 environment->Register(deoptimization_index,
818 translation.index(),
819 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
820 deoptimizations_.Add(environment, zone());
821 }
822 }
823
824
825 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
826 Deoptimizer::DeoptReason deopt_reason,
827 Deoptimizer::BailoutType bailout_type) {
828 LEnvironment* environment = instr->environment();
829 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
830 DCHECK(environment->HasBeenRegistered());
831 int id = environment->deoptimization_index();
832 Address entry =
833 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
834 if (entry == NULL) {
835 Abort(kBailoutWasNotPrepared);
836 return;
837 }
838
839 if (DeoptEveryNTimes()) {
840 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
841 Label no_deopt;
842 __ pushfd();
843 __ push(eax);
844 __ mov(eax, Operand::StaticVariable(count));
845 __ sub(eax, Immediate(1));
846 __ j(not_zero, &no_deopt, Label::kNear);
847 if (FLAG_trap_on_deopt) __ int3();
848 __ mov(eax, Immediate(FLAG_deopt_every_n_times));
849 __ mov(Operand::StaticVariable(count), eax);
850 __ pop(eax);
851 __ popfd();
852 DCHECK(frame_is_built_);
853 __ call(entry, RelocInfo::RUNTIME_ENTRY);
854 __ bind(&no_deopt);
855 __ mov(Operand::StaticVariable(count), eax);
856 __ pop(eax);
857 __ popfd();
858 }
859
860 if (info()->ShouldTrapOnDeopt()) {
861 Label done;
862 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
863 __ int3();
864 __ bind(&done);
865 }
866
867 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
868
869 DCHECK(info()->IsStub() || frame_is_built_);
870 if (cc == no_condition && frame_is_built_) {
871 DeoptComment(deopt_info);
872 __ call(entry, RelocInfo::RUNTIME_ENTRY);
873 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
874 } else {
875 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
876 !frame_is_built_);
877 // We often have several deopts to the same entry, reuse the last
878 // jump entry if this is the case.
879 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
880 jump_table_.is_empty() ||
881 !table_entry.IsEquivalentTo(jump_table_.last())) {
882 jump_table_.Add(table_entry, zone());
883 }
884 if (cc == no_condition) {
885 __ jmp(&jump_table_.last().label);
886 } else {
887 __ j(cc, &jump_table_.last().label);
888 }
889 }
890 }
891
892
893 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
894 Deoptimizer::DeoptReason deopt_reason) {
895 Deoptimizer::BailoutType bailout_type = info()->IsStub()
896 ? Deoptimizer::LAZY
897 : Deoptimizer::EAGER;
898 DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
899 }
900
901
902 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
903 int length = deoptimizations_.length();
904 if (length == 0) return;
905 Handle<DeoptimizationInputData> data =
906 DeoptimizationInputData::New(isolate(), length, TENURED);
907
908 Handle<ByteArray> translations =
909 translations_.CreateByteArray(isolate()->factory());
910 data->SetTranslationByteArray(*translations);
911 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
912 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
913 if (info_->IsOptimizing()) {
914 // Reference to shared function info does not change between phases.
915 AllowDeferredHandleDereference allow_handle_dereference;
916 data->SetSharedFunctionInfo(*info_->shared_info());
917 } else {
918 data->SetSharedFunctionInfo(Smi::FromInt(0));
919 }
920 data->SetWeakCellCache(Smi::FromInt(0));
921
922 Handle<FixedArray> literals =
923 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
924 { AllowDeferredHandleDereference copy_handles;
925 for (int i = 0; i < deoptimization_literals_.length(); i++) {
926 literals->set(i, *deoptimization_literals_[i]);
927 }
928 data->SetLiteralArray(*literals);
929 }
930
931 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
932 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
933
934 // Populate the deoptimization entries.
935 for (int i = 0; i < length; i++) {
936 LEnvironment* env = deoptimizations_[i];
937 data->SetAstId(i, env->ast_id());
938 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
939 data->SetArgumentsStackHeight(i,
940 Smi::FromInt(env->arguments_stack_height()));
941 data->SetPc(i, Smi::FromInt(env->pc_offset()));
942 }
943 code->set_deoptimization_data(*data);
944 }
945
946
947 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
948 DCHECK_EQ(0, deoptimization_literals_.length());
949 for (auto function : chunk()->inlined_functions()) {
950 DefineDeoptimizationLiteral(function);
951 }
952 inlined_function_count_ = deoptimization_literals_.length();
953 }
954
955
956 void LCodeGen::RecordSafepointWithLazyDeopt(
957 LInstruction* instr, SafepointMode safepoint_mode) {
958 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
959 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
960 } else {
961 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
962 RecordSafepointWithRegisters(
963 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
964 }
965 }
966
967
968 void LCodeGen::RecordSafepoint(
969 LPointerMap* pointers,
970 Safepoint::Kind kind,
971 int arguments,
972 Safepoint::DeoptMode deopt_mode) {
973 DCHECK(kind == expected_safepoint_kind_);
974 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
975 Safepoint safepoint =
976 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
977 for (int i = 0; i < operands->length(); i++) {
978 LOperand* pointer = operands->at(i);
979 if (pointer->IsStackSlot()) {
980 safepoint.DefinePointerSlot(pointer->index(), zone());
981 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
982 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
983 }
984 }
985 }
986
987
988 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
989 Safepoint::DeoptMode mode) {
990 RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
991 }
992
993
994 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
995 LPointerMap empty_pointers(zone());
996 RecordSafepoint(&empty_pointers, mode);
997 }
998
999
1000 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1001 int arguments,
1002 Safepoint::DeoptMode mode) {
1003 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
1004 }
1005
1006
1007 void LCodeGen::RecordAndWritePosition(int position) {
1008 if (position == RelocInfo::kNoPosition) return;
1009 masm()->positions_recorder()->RecordPosition(position);
1010 masm()->positions_recorder()->WriteRecordedPositions();
1011 }
1012
1013
1014 static const char* LabelType(LLabel* label) {
1015 if (label->is_loop_header()) return " (loop header)";
1016 if (label->is_osr_entry()) return " (OSR entry)";
1017 return "";
1018 }
1019
1020
1021 void LCodeGen::DoLabel(LLabel* label) {
1022 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1023 current_instruction_,
1024 label->hydrogen_value()->id(),
1025 label->block_id(),
1026 LabelType(label));
1027 __ bind(label->label());
1028 current_block_ = label->block_id();
1029 DoGap(label);
1030 }
1031
1032
1033 void LCodeGen::DoParallelMove(LParallelMove* move) {
1034 resolver_.Resolve(move);
1035 }
1036
1037
1038 void LCodeGen::DoGap(LGap* gap) {
1039 for (int i = LGap::FIRST_INNER_POSITION;
1040 i <= LGap::LAST_INNER_POSITION;
1041 i++) {
1042 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1043 LParallelMove* move = gap->GetParallelMove(inner_pos);
1044 if (move != NULL) DoParallelMove(move);
1045 }
1046 }
1047
1048
1049 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1050 DoGap(instr);
1051 }
1052
1053
1054 void LCodeGen::DoParameter(LParameter* instr) {
1055 // Nothing to do.
1056 }
1057
1058
1059 void LCodeGen::DoCallStub(LCallStub* instr) {
1060 DCHECK(ToRegister(instr->context()).is(esi));
1061 DCHECK(ToRegister(instr->result()).is(eax));
1062 switch (instr->hydrogen()->major_key()) {
1063 case CodeStub::RegExpExec: {
1064 RegExpExecStub stub(isolate());
1065 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1066 break;
1067 }
1068 case CodeStub::SubString: {
1069 SubStringStub stub(isolate());
1070 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1071 break;
1072 }
1073 default:
1074 UNREACHABLE();
1075 }
1076 }
1077
1078
1079 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1080 GenerateOsrPrologue();
1081 }
1082
1083
1084 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1085 Register dividend = ToRegister(instr->dividend());
1086 int32_t divisor = instr->divisor();
1087 DCHECK(dividend.is(ToRegister(instr->result())));
1088
1089 // Theoretically, a variation of the branch-free code for integer division by
1090 // a power of 2 (calculating the remainder via an additional multiplication
1091 // (which gets simplified to an 'and') and subtraction) should be faster, and
1092 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1093 // indicate that positive dividends are heavily favored, so the branching
1094 // version performs better.
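// Example: for divisor 8 (mask 7) and dividend -13, the negative path
// computes neg(-13) = 13, 13 & 7 = 5, neg(5) = -5, matching JavaScript's
// -13 % 8 == -5.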
1095 HMod* hmod = instr->hydrogen();
1096 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1097 Label dividend_is_not_negative, done;
1098 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1099 __ test(dividend, dividend);
1100 __ j(not_sign, &dividend_is_not_negative, Label::kNear);
1101 // Note that this is correct even for kMinInt operands.
1102 __ neg(dividend);
1103 __ and_(dividend, mask);
1104 __ neg(dividend);
1105 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1106 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1107 }
1108 __ jmp(&done, Label::kNear);
1109 }
1110
1111 __ bind(&dividend_is_not_negative);
1112 __ and_(dividend, mask);
1113 __ bind(&done);
1114 }
1115
1116
1117 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1118 Register dividend = ToRegister(instr->dividend());
1119 int32_t divisor = instr->divisor();
1120 DCHECK(ToRegister(instr->result()).is(eax));
1121
1122 if (divisor == 0) {
1123 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1124 return;
1125 }
1126
1127 __ TruncatingDiv(dividend, Abs(divisor));
1128 __ imul(edx, edx, Abs(divisor));
1129 __ mov(eax, dividend);
1130 __ sub(eax, edx);
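// eax now holds dividend - (dividend / divisor) * divisor, i.e. the
// remainder, carrying the sign of the dividend (or zero).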
1131
1132 // Check for negative zero.
1133 HMod* hmod = instr->hydrogen();
1134 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1135 Label remainder_not_zero;
1136 __ j(not_zero, &remainder_not_zero, Label::kNear);
1137 __ cmp(dividend, Immediate(0));
1138 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1139 __ bind(&remainder_not_zero);
1140 }
1141 }
1142
1143
1144 void LCodeGen::DoModI(LModI* instr) {
1145 HMod* hmod = instr->hydrogen();
1146
1147 Register left_reg = ToRegister(instr->left());
1148 DCHECK(left_reg.is(eax));
1149 Register right_reg = ToRegister(instr->right());
1150 DCHECK(!right_reg.is(eax));
1151 DCHECK(!right_reg.is(edx));
1152 Register result_reg = ToRegister(instr->result());
1153 DCHECK(result_reg.is(edx));
1154
1155 Label done;
1156 // Check for x % 0, idiv would signal a divide error. We have to
1157 // deopt in this case because we can't return a NaN.
1158 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1159 __ test(right_reg, Operand(right_reg));
1160 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1161 }
1162
1163 // Check for kMinInt % -1, idiv would signal a divide error. We
1164 // have to deopt if we care about -0, because we can't return that.
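// (idiv faults because the quotient kMinInt / -1 = 2^31 does not fit in a
// 32-bit register, even though the remainder itself is simply 0.)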
1165 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1166 Label no_overflow_possible;
1167 __ cmp(left_reg, kMinInt);
1168 __ j(not_equal, &no_overflow_possible, Label::kNear);
1169 __ cmp(right_reg, -1);
1170 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1171 DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
1172 } else {
1173 __ j(not_equal, &no_overflow_possible, Label::kNear);
1174 __ Move(result_reg, Immediate(0));
1175 __ jmp(&done, Label::kNear);
1176 }
1177 __ bind(&no_overflow_possible);
1178 }
1179
1180 // Sign extend dividend in eax into edx:eax.
1181 __ cdq();
1182
1183 // If we care about -0, test if the dividend is <0 and the result is 0.
1184 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1185 Label positive_left;
1186 __ test(left_reg, Operand(left_reg));
1187 __ j(not_sign, &positive_left, Label::kNear);
1188 __ idiv(right_reg);
1189 __ test(result_reg, Operand(result_reg));
1190 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1191 __ jmp(&done, Label::kNear);
1192 __ bind(&positive_left);
1193 }
1194 __ idiv(right_reg);
1195 __ bind(&done);
1196 }
1197
1198
1199 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1200 Register dividend = ToRegister(instr->dividend());
1201 int32_t divisor = instr->divisor();
1202 Register result = ToRegister(instr->result());
1203 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1204 DCHECK(!result.is(dividend));
1205
1206 // Check for (0 / -x) that will produce negative zero.
1207 HDiv* hdiv = instr->hydrogen();
1208 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1209 __ test(dividend, dividend);
1210 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1211 }
1212 // Check for (kMinInt / -1).
1213 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1214 __ cmp(dividend, kMinInt);
1215 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1216 }
1217 // Deoptimize if remainder will not be 0.
1218 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1219 divisor != 1 && divisor != -1) {
1220 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1221 __ test(dividend, Immediate(mask));
1222 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
1223 }
1224 __ Move(result, dividend);
1225 int32_t shift = WhichPowerOf2Abs(divisor);
1226 if (shift > 0) {
1227 // The arithmetic shift is always OK, the 'if' is an optimization only.
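// The sar/shr/add sequence adds (2^shift - 1) to negative dividends only,
// so the final sar rounds toward zero; e.g. dividend -7, divisor 4:
// sar 31 -> -1, shr 30 -> 3, add dividend -> -4, sar 2 -> -1.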
1228 if (shift > 1) __ sar(result, 31);
1229 __ shr(result, 32 - shift);
1230 __ add(result, dividend);
1231 __ sar(result, shift);
1232 }
1233 if (divisor < 0) __ neg(result);
1234 }
1235
1236
1237 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1238 Register dividend = ToRegister(instr->dividend());
1239 int32_t divisor = instr->divisor();
1240 DCHECK(ToRegister(instr->result()).is(edx));
1241
1242 if (divisor == 0) {
1243 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1244 return;
1245 }
1246
1247 // Check for (0 / -x) that will produce negative zero.
1248 HDiv* hdiv = instr->hydrogen();
1249 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1250 __ test(dividend, dividend);
1251 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1252 }
1253
1254 __ TruncatingDiv(dividend, Abs(divisor));
1255 if (divisor < 0) __ neg(edx);
1256
1257 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1258 __ mov(eax, edx);
1259 __ imul(eax, eax, divisor);
1260 __ sub(eax, dividend);
1261 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
1262 }
1263 }
1264
1265
1266 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1267 void LCodeGen::DoDivI(LDivI* instr) {
1268 HBinaryOperation* hdiv = instr->hydrogen();
1269 Register dividend = ToRegister(instr->dividend());
1270 Register divisor = ToRegister(instr->divisor());
1271 Register remainder = ToRegister(instr->temp());
1272 DCHECK(dividend.is(eax));
1273 DCHECK(remainder.is(edx));
1274 DCHECK(ToRegister(instr->result()).is(eax));
1275 DCHECK(!divisor.is(eax));
1276 DCHECK(!divisor.is(edx));
1277
1278 // Check for x / 0.
1279 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1280 __ test(divisor, divisor);
1281 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1282 }
1283
1284 // Check for (0 / -x) that will produce negative zero.
1285 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1286 Label dividend_not_zero;
1287 __ test(dividend, dividend);
1288 __ j(not_zero, &dividend_not_zero, Label::kNear);
1289 __ test(divisor, divisor);
1290 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1291 __ bind(&dividend_not_zero);
1292 }
1293
1294 // Check for (kMinInt / -1).
1295 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1296 Label dividend_not_min_int;
1297 __ cmp(dividend, kMinInt);
1298 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1299 __ cmp(divisor, -1);
1300 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1301 __ bind(&dividend_not_min_int);
1302 }
1303
1304 // Sign extend to edx (= remainder).
1305 __ cdq();
1306 __ idiv(divisor);
1307
1308 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1309 // Deoptimize if remainder is not 0.
1310 __ test(remainder, remainder);
1311 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
1312 }
1313 }
1314
1315
1316 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1317 Register dividend = ToRegister(instr->dividend());
1318 int32_t divisor = instr->divisor();
1319 DCHECK(dividend.is(ToRegister(instr->result())));
1320
1321 // If the divisor is positive, things are easy: There can be no deopts and we
1322 // can simply do an arithmetic right shift.
1323 if (divisor == 1) return;
1324 int32_t shift = WhichPowerOf2Abs(divisor);
1325 if (divisor > 1) {
1326 __ sar(dividend, shift);
1327 return;
1328 }
1329
1330 // If the divisor is negative, we have to negate and handle edge cases.
1331 __ neg(dividend);
1332 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1333 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1334 }
1335
1336 // Dividing by -1 is basically negation, unless we overflow.
1337 if (divisor == -1) {
1338 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1339 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1340 }
1341 return;
1342 }
1343
1344 // If the negation could not overflow, simply shifting is OK.
1345 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1346 __ sar(dividend, shift);
1347 return;
1348 }
1349
1350 Label not_kmin_int, done;
1351 __ j(no_overflow, &not_kmin_int, Label::kNear);
1352 __ mov(dividend, Immediate(kMinInt / divisor));
1353 __ jmp(&done, Label::kNear);
1354 __ bind(&not_kmin_int);
1355 __ sar(dividend, shift);
1356 __ bind(&done);
1357 }
1358
1359
1360 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1361 Register dividend = ToRegister(instr->dividend());
1362 int32_t divisor = instr->divisor();
1363 DCHECK(ToRegister(instr->result()).is(edx));
1364
1365 if (divisor == 0) {
1366 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
1367 return;
1368 }
1369
1370 // Check for (0 / -x) that will produce negative zero.
1371 HMathFloorOfDiv* hdiv = instr->hydrogen();
1372 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1373 __ test(dividend, dividend);
1374 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
1375 }
1376
1377 // Easy case: We need no dynamic check for the dividend and the flooring
1378 // division is the same as the truncating division.
1379 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1380 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1381 __ TruncatingDiv(dividend, Abs(divisor));
1382 if (divisor < 0) __ neg(edx);
1383 return;
1384 }
1385
1386 // In the general case we may need to adjust before and after the truncating
1387 // division to get a flooring division.
1388 Register temp = ToRegister(instr->temp3());
1389 DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
1390 Label needs_adjustment, done;
1391 __ cmp(dividend, Immediate(0));
1392 __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1393 __ TruncatingDiv(dividend, Abs(divisor));
1394 if (divisor < 0) __ neg(edx);
1395 __ jmp(&done, Label::kNear);
1396 __ bind(&needs_adjustment);
1397 __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1398 __ TruncatingDiv(temp, Abs(divisor));
1399 if (divisor < 0) __ neg(edx);
1400 __ dec(edx);
1401 __ bind(&done);
1402 }
1403
1404
1405 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1406 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1407 HBinaryOperation* hdiv = instr->hydrogen();
1408 Register dividend = ToRegister(instr->dividend());
1409 Register divisor = ToRegister(instr->divisor());
1410 Register remainder = ToRegister(instr->temp());
1411 Register result = ToRegister(instr->result());
1412 DCHECK(dividend.is(eax));
1413 DCHECK(remainder.is(edx));
1414 DCHECK(result.is(eax));
1415 DCHECK(!divisor.is(eax));
1416 DCHECK(!divisor.is(edx));
1417
1418 // Check for x / 0.
1419 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1420 __ test(divisor, divisor);
1421 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
1422 }
1423
1424 // Check for (0 / -x) that will produce negative zero.
1425 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1426 Label dividend_not_zero;
1427 __ test(dividend, dividend);
1428 __ j(not_zero, &dividend_not_zero, Label::kNear);
1429 __ test(divisor, divisor);
1430 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1431 __ bind(&dividend_not_zero);
1432 }
1433
1434 // Check for (kMinInt / -1).
1435 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1436 Label dividend_not_min_int;
1437 __ cmp(dividend, kMinInt);
1438 __ j(not_zero, &dividend_not_min_int, Label::kNear);
1439 __ cmp(divisor, -1);
1440 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
1441 __ bind(&dividend_not_min_int);
1442 }
1443
1444 // Sign extend to edx (= remainder).
1445 __ cdq();
1446 __ idiv(divisor);
1447
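// Fix up the truncated quotient to a flooring one: if the remainder is
// non-zero and its sign differs from the divisor's, xor/sar yields -1 and
// the add subtracts one from the result; otherwise it adds zero.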
1448 Label done;
1449 __ test(remainder, remainder);
1450 __ j(zero, &done, Label::kNear);
1451 __ xor_(remainder, divisor);
1452 __ sar(remainder, 31);
1453 __ add(result, remainder);
1454 __ bind(&done);
1455 }
1456
1457
1458 void LCodeGen::DoMulI(LMulI* instr) {
1459 Register left = ToRegister(instr->left());
1460 LOperand* right = instr->right();
1461
1462 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1463 __ mov(ToRegister(instr->temp()), left);
1464 }
1465
1466 if (right->IsConstantOperand()) {
1467 // Try strength reductions on the multiplication.
1468 // All replacement instructions are at most as long as the imul
1469 // and have better latency.
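// e.g. a constant of 5 becomes lea(left, [left + left*4]) below, and small
// powers of two become single shifts, avoiding the multiplier entirely.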
1470 int constant = ToInteger32(LConstantOperand::cast(right));
1471 if (constant == -1) {
1472 __ neg(left);
1473 } else if (constant == 0) {
1474 __ xor_(left, Operand(left));
1475 } else if (constant == 2) {
1476 __ add(left, Operand(left));
1477 } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1478 // If we know that the multiplication can't overflow, it's safe to
1479 // use instructions that don't set the overflow flag for the
1480 // multiplication.
1481 switch (constant) {
1482 case 1:
1483 // Do nothing.
1484 break;
1485 case 3:
1486 __ lea(left, Operand(left, left, times_2, 0));
1487 break;
1488 case 4:
1489 __ shl(left, 2);
1490 break;
1491 case 5:
1492 __ lea(left, Operand(left, left, times_4, 0));
1493 break;
1494 case 8:
1495 __ shl(left, 3);
1496 break;
1497 case 9:
1498 __ lea(left, Operand(left, left, times_8, 0));
1499 break;
1500 case 16:
1501 __ shl(left, 4);
1502 break;
1503 default:
1504 __ imul(left, left, constant);
1505 break;
1506 }
1507 } else {
1508 __ imul(left, left, constant);
1509 }
1510 } else {
1511 if (instr->hydrogen()->representation().IsSmi()) {
1512 __ SmiUntag(left);
1513 }
1514 __ imul(left, ToOperand(right));
1515 }
1516
1517 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1518 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1519 }
1520
1521 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1522 // Bail out if the result is supposed to be negative zero.
1523 Label done;
1524 __ test(left, Operand(left));
1525 __ j(not_zero, &done, Label::kNear);
1526 if (right->IsConstantOperand()) {
1527 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1528 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
1529 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1530 __ cmp(ToRegister(instr->temp()), Immediate(0));
1531 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1532 }
1533 } else {
1534 // Test the non-zero operand for negative sign.
1535 __ or_(ToRegister(instr->temp()), ToOperand(right));
1536 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
1537 }
1538 __ bind(&done);
1539 }
1540 }
1541
1542
1543 void LCodeGen::DoBitI(LBitI* instr) {
1544 LOperand* left = instr->left();
1545 LOperand* right = instr->right();
1546 DCHECK(left->Equals(instr->result()));
1547 DCHECK(left->IsRegister());
1548
1549 if (right->IsConstantOperand()) {
1550 int32_t right_operand =
1551 ToRepresentation(LConstantOperand::cast(right),
1552 instr->hydrogen()->representation());
1553 switch (instr->op()) {
1554 case Token::BIT_AND:
1555 __ and_(ToRegister(left), right_operand);
1556 break;
1557 case Token::BIT_OR:
1558 __ or_(ToRegister(left), right_operand);
1559 break;
1560 case Token::BIT_XOR:
1561 if (right_operand == int32_t(~0)) {
1562 __ not_(ToRegister(left));
1563 } else {
1564 __ xor_(ToRegister(left), right_operand);
1565 }
1566 break;
1567 default:
1568 UNREACHABLE();
1569 break;
1570 }
1571 } else {
1572 switch (instr->op()) {
1573 case Token::BIT_AND:
1574 __ and_(ToRegister(left), ToOperand(right));
1575 break;
1576 case Token::BIT_OR:
1577 __ or_(ToRegister(left), ToOperand(right));
1578 break;
1579 case Token::BIT_XOR:
1580 __ xor_(ToRegister(left), ToOperand(right));
1581 break;
1582 default:
1583 UNREACHABLE();
1584 break;
1585 }
1586 }
1587 }
1588
1589
1590 void LCodeGen::DoShiftI(LShiftI* instr) {
1591 LOperand* left = instr->left();
1592 LOperand* right = instr->right();
1593 DCHECK(left->Equals(instr->result()));
1594 DCHECK(left->IsRegister());
1595 if (right->IsRegister()) {
1596 DCHECK(ToRegister(right).is(ecx));
1597
1598 switch (instr->op()) {
1599 case Token::ROR:
1600 __ ror_cl(ToRegister(left));
1601 break;
1602 case Token::SAR:
1603 __ sar_cl(ToRegister(left));
1604 break;
1605 case Token::SHR:
1606 __ shr_cl(ToRegister(left));
1607 if (instr->can_deopt()) {
1608 __ test(ToRegister(left), ToRegister(left));
1609 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
1610 }
1611 break;
1612 case Token::SHL:
1613 __ shl_cl(ToRegister(left));
1614 break;
1615 default:
1616 UNREACHABLE();
1617 break;
1618 }
1619 } else {
1620 int value = ToInteger32(LConstantOperand::cast(right));
1621 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1622 switch (instr->op()) {
1623 case Token::ROR:
1624 if (shift_count == 0 && instr->can_deopt()) {
1625 __ test(ToRegister(left), ToRegister(left));
1626 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
1627 } else {
1628 __ ror(ToRegister(left), shift_count);
1629 }
1630 break;
1631 case Token::SAR:
1632 if (shift_count != 0) {
1633 __ sar(ToRegister(left), shift_count);
1634 }
1635 break;
1636 case Token::SHR:
1637 if (shift_count != 0) {
1638 __ shr(ToRegister(left), shift_count);
1639 } else if (instr->can_deopt()) {
1640 __ test(ToRegister(left), ToRegister(left));
1641 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
1642 }
1643 break;
1644 case Token::SHL:
1645 if (shift_count != 0) {
1646 if (instr->hydrogen_value()->representation().IsSmi() &&
1647 instr->can_deopt()) {
1648 if (shift_count != 1) {
1649 __ shl(ToRegister(left), shift_count - 1);
1650 }
1651 __ SmiTag(ToRegister(left));
1652 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1653 } else {
1654 __ shl(ToRegister(left), shift_count);
1655 }
1656 }
1657 break;
1658 default:
1659 UNREACHABLE();
1660 break;
1661 }
1662 }
1663 }
1664
1665
1666 void LCodeGen::DoSubI(LSubI* instr) {
1667 LOperand* left = instr->left();
1668 LOperand* right = instr->right();
1669 DCHECK(left->Equals(instr->result()));
1670
1671 if (right->IsConstantOperand()) {
1672 __ sub(ToOperand(left),
1673 ToImmediate(right, instr->hydrogen()->representation()));
1674 } else {
1675 __ sub(ToRegister(left), ToOperand(right));
1676 }
1677 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1678 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1679 }
1680 }
1681
1682
1683 void LCodeGen::DoConstantI(LConstantI* instr) {
1684 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1685 }
1686
1687
1688 void LCodeGen::DoConstantS(LConstantS* instr) {
1689 __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1690 }
1691
1692
1693 void LCodeGen::DoConstantD(LConstantD* instr) {
1694 uint64_t const bits = instr->bits();
1695 uint32_t const lower = static_cast<uint32_t>(bits);
1696 uint32_t const upper = static_cast<uint32_t>(bits >> 32);
1697 DCHECK(instr->result()->IsDoubleRegister());
1698
1699 XMMRegister result = ToDoubleRegister(instr->result());
1700 if (bits == 0u) {
1701 __ xorps(result, result);
1702 } else {
1703 Register temp = ToRegister(instr->temp());
1704 if (CpuFeatures::IsSupported(SSE4_1)) {
1705 CpuFeatureScope scope2(masm(), SSE4_1);
1706 if (lower != 0) {
1707 __ Move(temp, Immediate(lower));
1708 __ movd(result, Operand(temp));
1709 __ Move(temp, Immediate(upper));
1710 __ pinsrd(result, Operand(temp), 1);
1711 } else {
1712 __ xorps(result, result);
1713 __ Move(temp, Immediate(upper));
1714 __ pinsrd(result, Operand(temp), 1);
1715 }
1716 } else {
1717 __ Move(temp, Immediate(upper));
1718 __ movd(result, Operand(temp));
1719 __ psllq(result, 32);
1720 if (lower != 0u) {
1721 XMMRegister xmm_scratch = double_scratch0();
1722 __ Move(temp, Immediate(lower));
1723 __ movd(xmm_scratch, Operand(temp));
1724 __ orps(result, xmm_scratch);
1725 }
1726 }
1727 }
1728 }
1729
1730
1731 void LCodeGen::DoConstantE(LConstantE* instr) {
1732 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
1733 }
1734
1735
1736 void LCodeGen::DoConstantT(LConstantT* instr) {
1737 Register reg = ToRegister(instr->result());
1738 Handle<Object> object = instr->value(isolate());
1739 AllowDeferredHandleDereference smi_check;
1740 __ LoadObject(reg, object);
1741 }
1742
1743
1744 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1745 Register result = ToRegister(instr->result());
1746 Register map = ToRegister(instr->value());
1747 __ EnumLength(result, map);
1748 }
1749
1750
1751 void LCodeGen::DoDateField(LDateField* instr) {
1752 Register object = ToRegister(instr->date());
1753 Register result = ToRegister(instr->result());
1754 Register scratch = ToRegister(instr->temp());
1755 Smi* index = instr->index();
1756 DCHECK(object.is(result));
1757 DCHECK(object.is(eax));
1758
1759 if (index->value() == 0) {
1760 __ mov(result, FieldOperand(object, JSDate::kValueOffset));
1761 } else {
1762 Label runtime, done;
1763 if (index->value() < JSDate::kFirstUncachedField) {
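// Cached date fields are valid only while the object's cache stamp matches
// the isolate's current date cache stamp; otherwise fall through to the
// runtime call.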
1764 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1765 __ mov(scratch, Operand::StaticVariable(stamp));
1766 __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
1767 __ j(not_equal, &runtime, Label::kNear);
1768 __ mov(result, FieldOperand(object, JSDate::kValueOffset +
1769 kPointerSize * index->value()));
1770 __ jmp(&done, Label::kNear);
1771 }
1772 __ bind(&runtime);
1773 __ PrepareCallCFunction(2, scratch);
1774 __ mov(Operand(esp, 0), object);
1775 __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
1776 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1777 __ bind(&done);
1778 }
1779 }
1780
1781
1782 Operand LCodeGen::BuildSeqStringOperand(Register string,
1783 LOperand* index,
1784 String::Encoding encoding) {
1785 if (index->IsConstantOperand()) {
1786 int offset = ToRepresentation(LConstantOperand::cast(index),
1787 Representation::Integer32());
1788 if (encoding == String::TWO_BYTE_ENCODING) {
1789 offset *= kUC16Size;
1790 }
1791 STATIC_ASSERT(kCharSize == 1);
1792 return FieldOperand(string, SeqString::kHeaderSize + offset);
1793 }
1794 return FieldOperand(
1795 string, ToRegister(index),
1796 encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1797 SeqString::kHeaderSize);
1798 }
1799
1800
1801 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1802 String::Encoding encoding = instr->hydrogen()->encoding();
1803 Register result = ToRegister(instr->result());
1804 Register string = ToRegister(instr->string());
1805
1806 if (FLAG_debug_code) {
1807 __ push(string);
1808 __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
1809 __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
1810
1811 __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1812 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1813 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1814 __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1815 ? one_byte_seq_type : two_byte_seq_type));
1816 __ Check(equal, kUnexpectedStringType);
1817 __ pop(string);
1818 }
1819
1820 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1821 if (encoding == String::ONE_BYTE_ENCODING) {
1822 __ movzx_b(result, operand);
1823 } else {
1824 __ movzx_w(result, operand);
1825 }
1826 }
1827
1828
1829 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1830 String::Encoding encoding = instr->hydrogen()->encoding();
1831 Register string = ToRegister(instr->string());
1832
1833 if (FLAG_debug_code) {
1834 Register value = ToRegister(instr->value());
1835 Register index = ToRegister(instr->index());
1836 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1837 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1838 int encoding_mask =
1839 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1840 ? one_byte_seq_type : two_byte_seq_type;
1841 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1842 }
1843
1844 Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1845 if (instr->value()->IsConstantOperand()) {
1846 int value = ToRepresentation(LConstantOperand::cast(instr->value()),
1847 Representation::Integer32());
1848 DCHECK_LE(0, value);
1849 if (encoding == String::ONE_BYTE_ENCODING) {
1850 DCHECK_LE(value, String::kMaxOneByteCharCode);
1851 __ mov_b(operand, static_cast<int8_t>(value));
1852 } else {
1853 DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1854 __ mov_w(operand, static_cast<int16_t>(value));
1855 }
1856 } else {
1857 Register value = ToRegister(instr->value());
1858 if (encoding == String::ONE_BYTE_ENCODING) {
1859 __ mov_b(operand, value);
1860 } else {
1861 __ mov_w(operand, value);
1862 }
1863 }
1864 }
1865
1866
1867 void LCodeGen::DoAddI(LAddI* instr) {
1868 LOperand* left = instr->left();
1869 LOperand* right = instr->right();
1870
1871 if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1872 if (right->IsConstantOperand()) {
1873 int32_t offset = ToRepresentation(LConstantOperand::cast(right),
1874 instr->hydrogen()->representation());
1875 __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
1876 } else {
1877 Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1878 __ lea(ToRegister(instr->result()), address);
1879 }
1880 } else {
1881 if (right->IsConstantOperand()) {
1882 __ add(ToOperand(left),
1883 ToImmediate(right, instr->hydrogen()->representation()));
1884 } else {
1885 __ add(ToRegister(left), ToOperand(right));
1886 }
1887 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1888 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1889 }
1890 }
1891 }
1892
1893
1894 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1895 LOperand* left = instr->left();
1896 LOperand* right = instr->right();
1897 DCHECK(left->Equals(instr->result()));
1898 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1899 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1900 Label return_left;
1901 Condition condition = (operation == HMathMinMax::kMathMin)
1902 ? less_equal
1903 : greater_equal;
1904 if (right->IsConstantOperand()) {
1905 Operand left_op = ToOperand(left);
1906 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
1907 instr->hydrogen()->representation());
1908 __ cmp(left_op, immediate);
1909 __ j(condition, &return_left, Label::kNear);
1910 __ mov(left_op, immediate);
1911 } else {
1912 Register left_reg = ToRegister(left);
1913 Operand right_op = ToOperand(right);
1914 __ cmp(left_reg, right_op);
1915 __ j(condition, &return_left, Label::kNear);
1916 __ mov(left_reg, right_op);
1917 }
1918 __ bind(&return_left);
1919 } else {
1920 DCHECK(instr->hydrogen()->representation().IsDouble());
1921 Label check_nan_left, check_zero, return_left, return_right;
1922 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1923 XMMRegister left_reg = ToDoubleRegister(left);
1924 XMMRegister right_reg = ToDoubleRegister(right);
1925 __ ucomisd(left_reg, right_reg);
1926 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1927 __ j(equal, &check_zero, Label::kNear); // left == right.
1928 __ j(condition, &return_left, Label::kNear);
1929 __ jmp(&return_right, Label::kNear);
1930
1931 __ bind(&check_zero);
1932 XMMRegister xmm_scratch = double_scratch0();
1933 __ xorps(xmm_scratch, xmm_scratch);
1934 __ ucomisd(left_reg, xmm_scratch);
1935 __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1936 // At this point, both left and right are either 0 or -0.
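// For min, ORing the sign bits yields -0 whenever either input is -0;
// for max, adding two signed zeros yields +0 unless both are -0.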
1937 if (operation == HMathMinMax::kMathMin) {
1938 __ orpd(left_reg, right_reg);
1939 } else {
1940 // Since we operate on +0 and/or -0, addsd and andpd have the same effect.
1941 __ addsd(left_reg, right_reg);
1942 }
1943 __ jmp(&return_left, Label::kNear);
1944
1945 __ bind(&check_nan_left);
1946 __ ucomisd(left_reg, left_reg); // NaN check.
1947 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
1948 __ bind(&return_right);
1949 __ movaps(left_reg, right_reg);
1950
1951 __ bind(&return_left);
1952 }
1953 }
1954
1955
1956 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1957 XMMRegister left = ToDoubleRegister(instr->left());
1958 XMMRegister right = ToDoubleRegister(instr->right());
1959 XMMRegister result = ToDoubleRegister(instr->result());
1960 switch (instr->op()) {
1961 case Token::ADD:
1962 if (CpuFeatures::IsSupported(AVX)) {
1963 CpuFeatureScope scope(masm(), AVX);
1964 __ vaddsd(result, left, right);
1965 } else {
1966 DCHECK(result.is(left));
1967 __ addsd(left, right);
1968 }
1969 break;
1970 case Token::SUB:
1971 if (CpuFeatures::IsSupported(AVX)) {
1972 CpuFeatureScope scope(masm(), AVX);
1973 __ vsubsd(result, left, right);
1974 } else {
1975 DCHECK(result.is(left));
1976 __ subsd(left, right);
1977 }
1978 break;
1979 case Token::MUL:
1980 if (CpuFeatures::IsSupported(AVX)) {
1981 CpuFeatureScope scope(masm(), AVX);
1982 __ vmulsd(result, left, right);
1983 } else {
1984 DCHECK(result.is(left));
1985 __ mulsd(left, right);
1986 }
1987 break;
1988 case Token::DIV:
1989 if (CpuFeatures::IsSupported(AVX)) {
1990 CpuFeatureScope scope(masm(), AVX);
1991 __ vdivsd(result, left, right);
1992 } else {
1993 DCHECK(result.is(left));
1994 __ divsd(left, right);
1995 }
1996 // Don't delete this mov. It may improve performance on some CPUs,
1997 // when there is a (v)mulsd depending on the result.
1998 __ movaps(result, result);
1999 break;
2000 case Token::MOD: {
2001 // Pass two doubles as arguments on the stack.
2002 __ PrepareCallCFunction(4, eax);
2003 __ movsd(Operand(esp, 0 * kDoubleSize), left);
2004 __ movsd(Operand(esp, 1 * kDoubleSize), right);
2005 __ CallCFunction(
2006 ExternalReference::mod_two_doubles_operation(isolate()),
2007 4);
2008
2009 // Return value is in st(0) on ia32.
2010 // Store it into the result register.
2011 __ sub(Operand(esp), Immediate(kDoubleSize));
2012 __ fstp_d(Operand(esp, 0));
2013 __ movsd(result, Operand(esp, 0));
2014 __ add(Operand(esp), Immediate(kDoubleSize));
2015 break;
2016 }
2017 default:
2018 UNREACHABLE();
2019 break;
2020 }
2021 }
2022
2023
2024 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2025 DCHECK(ToRegister(instr->context()).is(esi));
2026 DCHECK(ToRegister(instr->left()).is(edx));
2027 DCHECK(ToRegister(instr->right()).is(eax));
2028 DCHECK(ToRegister(instr->result()).is(eax));
2029
2030 Handle<Code> code =
2031 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
2032 CallCode(code, RelocInfo::CODE_TARGET, instr);
2033 }
2034
2035
2036 template<class InstrType>
2037 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2038 int left_block = instr->TrueDestination(chunk_);
2039 int right_block = instr->FalseDestination(chunk_);
2040
2041 int next_block = GetNextEmittedBlock();
2042
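// Fall through to whichever destination is the next emitted block when
// possible, emitting at most one jump; otherwise emit both jumps.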
2043 if (right_block == left_block || cc == no_condition) {
2044 EmitGoto(left_block);
2045 } else if (left_block == next_block) {
2046 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2047 } else if (right_block == next_block) {
2048 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2049 } else {
2050 __ j(cc, chunk_->GetAssemblyLabel(left_block));
2051 __ jmp(chunk_->GetAssemblyLabel(right_block));
2052 }
2053 }
2054
2055
2056 template <class InstrType>
2057 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
2058 int true_block = instr->TrueDestination(chunk_);
2059 if (cc == no_condition) {
2060 __ jmp(chunk_->GetAssemblyLabel(true_block));
2061 } else {
2062 __ j(cc, chunk_->GetAssemblyLabel(true_block));
2063 }
2064 }
2065
2066
2067 template<class InstrType>
2068 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2069 int false_block = instr->FalseDestination(chunk_);
2070 if (cc == no_condition) {
2071 __ jmp(chunk_->GetAssemblyLabel(false_block));
2072 } else {
2073 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2074 }
2075 }
2076
2077
2078 void LCodeGen::DoBranch(LBranch* instr) {
2079 Representation r = instr->hydrogen()->value()->representation();
2080 if (r.IsSmiOrInteger32()) {
2081 Register reg = ToRegister(instr->value());
2082 __ test(reg, Operand(reg));
2083 EmitBranch(instr, not_zero);
2084 } else if (r.IsDouble()) {
2085 DCHECK(!info()->IsStub());
2086 XMMRegister reg = ToDoubleRegister(instr->value());
2087 XMMRegister xmm_scratch = double_scratch0();
2088 __ xorps(xmm_scratch, xmm_scratch);
2089 __ ucomisd(reg, xmm_scratch);
2090 EmitBranch(instr, not_equal);
2091 } else {
2092 DCHECK(r.IsTagged());
2093 Register reg = ToRegister(instr->value());
2094 HType type = instr->hydrogen()->value()->type();
2095 if (type.IsBoolean()) {
2096 DCHECK(!info()->IsStub());
2097 __ cmp(reg, factory()->true_value());
2098 EmitBranch(instr, equal);
2099 } else if (type.IsSmi()) {
2100 DCHECK(!info()->IsStub());
2101 __ test(reg, Operand(reg));
2102 EmitBranch(instr, not_equal);
2103 } else if (type.IsJSArray()) {
2104 DCHECK(!info()->IsStub());
2105 EmitBranch(instr, no_condition);
2106 } else if (type.IsHeapNumber()) {
2107 DCHECK(!info()->IsStub());
2108 XMMRegister xmm_scratch = double_scratch0();
2109 __ xorps(xmm_scratch, xmm_scratch);
2110 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2111 EmitBranch(instr, not_equal);
2112 } else if (type.IsString()) {
2113 DCHECK(!info()->IsStub());
2114 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2115 EmitBranch(instr, not_equal);
2116 } else {
2117 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2118 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2119
2120 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2121 // undefined -> false.
2122 __ cmp(reg, factory()->undefined_value());
2123 __ j(equal, instr->FalseLabel(chunk_));
2124 }
2125 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2126 // true -> true.
2127 __ cmp(reg, factory()->true_value());
2128 __ j(equal, instr->TrueLabel(chunk_));
2129 // false -> false.
2130 __ cmp(reg, factory()->false_value());
2131 __ j(equal, instr->FalseLabel(chunk_));
2132 }
2133 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2134 // 'null' -> false.
2135 __ cmp(reg, factory()->null_value());
2136 __ j(equal, instr->FalseLabel(chunk_));
2137 }
2138
2139 if (expected.Contains(ToBooleanStub::SMI)) {
2140 // Smis: 0 -> false, all other -> true.
2141 __ test(reg, Operand(reg));
2142 __ j(equal, instr->FalseLabel(chunk_));
2143 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2144 } else if (expected.NeedsMap()) {
2145 // If we need a map later and have a Smi -> deopt.
2146 __ test(reg, Immediate(kSmiTagMask));
2147 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
2148 }
2149
2150 Register map = no_reg; // Keep the compiler happy.
2151 if (expected.NeedsMap()) {
2152 map = ToRegister(instr->temp());
2153 DCHECK(!map.is(reg));
2154 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2155
2156 if (expected.CanBeUndetectable()) {
2157 // Undetectable -> false.
2158 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2159 1 << Map::kIsUndetectable);
2160 __ j(not_zero, instr->FalseLabel(chunk_));
2161 }
2162 }
2163
2164 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2165 // spec object -> true.
2166 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2167 __ j(above_equal, instr->TrueLabel(chunk_));
2168 }
2169
2170 if (expected.Contains(ToBooleanStub::STRING)) {
2171 // String value -> false iff empty.
2172 Label not_string;
2173 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2174 __ j(above_equal, &not_string, Label::kNear);
2175 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2176 __ j(not_zero, instr->TrueLabel(chunk_));
2177 __ jmp(instr->FalseLabel(chunk_));
2178 __ bind(&not_string);
2179 }
2180
2181 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2182 // Symbol value -> true.
2183 __ CmpInstanceType(map, SYMBOL_TYPE);
2184 __ j(equal, instr->TrueLabel(chunk_));
2185 }
2186
2187 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2188 // SIMD value -> true.
2189 __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
2190 __ j(equal, instr->TrueLabel(chunk_));
2191 }
2192
2193 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2194 // heap number -> false iff +0, -0, or NaN.
2195 Label not_heap_number;
2196 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2197 factory()->heap_number_map());
2198 __ j(not_equal, &not_heap_number, Label::kNear);
2199 XMMRegister xmm_scratch = double_scratch0();
2200 __ xorps(xmm_scratch, xmm_scratch);
2201 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2202 __ j(zero, instr->FalseLabel(chunk_));
2203 __ jmp(instr->TrueLabel(chunk_));
2204 __ bind(&not_heap_number);
2205 }
2206
2207 if (!expected.IsGeneric()) {
2208 // We've seen something for the first time -> deopt.
2209 // This can only happen if we are not generic already.
2210 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
2211 }
2212 }
2213 }
2214 }
2215
2216
2217 void LCodeGen::EmitGoto(int block) {
2218 if (!IsNextEmittedBlock(block)) {
2219 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2220 }
2221 }
2222
2223
2224 void LCodeGen::DoGoto(LGoto* instr) {
2225 EmitGoto(instr->block_id());
2226 }
2227
2228
2229 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2230 Condition cond = no_condition;
2231 switch (op) {
2232 case Token::EQ:
2233 case Token::EQ_STRICT:
2234 cond = equal;
2235 break;
2236 case Token::NE:
2237 case Token::NE_STRICT:
2238 cond = not_equal;
2239 break;
2240 case Token::LT:
2241 cond = is_unsigned ? below : less;
2242 break;
2243 case Token::GT:
2244 cond = is_unsigned ? above : greater;
2245 break;
2246 case Token::LTE:
2247 cond = is_unsigned ? below_equal : less_equal;
2248 break;
2249 case Token::GTE:
2250 cond = is_unsigned ? above_equal : greater_equal;
2251 break;
2252 case Token::IN:
2253 case Token::INSTANCEOF:
2254 default:
2255 UNREACHABLE();
2256 }
2257 return cond;
2258 }
2259
2260
2261 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2262 LOperand* left = instr->left();
2263 LOperand* right = instr->right();
2264 bool is_unsigned =
2265 instr->is_double() ||
2266 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2267 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2268 Condition cc = TokenToCondition(instr->op(), is_unsigned);
2269
2270 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2271 // We can statically evaluate the comparison.
2272 double left_val = ToDouble(LConstantOperand::cast(left));
2273 double right_val = ToDouble(LConstantOperand::cast(right));
2274 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2275 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2276 EmitGoto(next_block);
2277 } else {
2278 if (instr->is_double()) {
2279 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2280 // Don't base result on EFLAGS when a NaN is involved. Instead
2281 // jump to the false block.
2282 __ j(parity_even, instr->FalseLabel(chunk_));
2283 } else {
2284 if (right->IsConstantOperand()) {
2285 __ cmp(ToOperand(left),
2286 ToImmediate(right, instr->hydrogen()->representation()));
2287 } else if (left->IsConstantOperand()) {
2288 __ cmp(ToOperand(right),
2289 ToImmediate(left, instr->hydrogen()->representation()));
2290 // We commuted the operands, so commute the condition.
2291 cc = CommuteCondition(cc);
2292 } else {
2293 __ cmp(ToRegister(left), ToOperand(right));
2294 }
2295 }
2296 EmitBranch(instr, cc);
2297 }
2298 }
2299
2300
2301 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2302 Register left = ToRegister(instr->left());
2303
2304 if (instr->right()->IsConstantOperand()) {
2305 Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2306 __ CmpObject(left, right);
2307 } else {
2308 Operand right = ToOperand(instr->right());
2309 __ cmp(left, right);
2310 }
2311 EmitBranch(instr, equal);
2312 }
2313
2314
2315 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2316 if (instr->hydrogen()->representation().IsTagged()) {
2317 Register input_reg = ToRegister(instr->object());
2318 __ cmp(input_reg, factory()->the_hole_value());
2319 EmitBranch(instr, equal);
2320 return;
2321 }
2322
2323 XMMRegister input_reg = ToDoubleRegister(instr->object());
2324 __ ucomisd(input_reg, input_reg);
2325 EmitFalseBranch(instr, parity_odd);
2326
2327 __ sub(esp, Immediate(kDoubleSize));
2328 __ movsd(MemOperand(esp, 0), input_reg);
2329
2330 __ add(esp, Immediate(kDoubleSize));
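// The spilled double still sits just below esp; compare its upper 32 bits
// against the hole NaN pattern to distinguish the hole from other NaNs.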
2331 int offset = sizeof(kHoleNanUpper32);
2332 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2333 EmitBranch(instr, equal);
2334 }
2335
2336
2337 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2338 Representation rep = instr->hydrogen()->value()->representation();
2339 DCHECK(!rep.IsInteger32());
2340 Register scratch = ToRegister(instr->temp());
2341
2342 if (rep.IsDouble()) {
2343 XMMRegister value = ToDoubleRegister(instr->value());
2344 XMMRegister xmm_scratch = double_scratch0();
2345 __ xorps(xmm_scratch, xmm_scratch);
2346 __ ucomisd(xmm_scratch, value);
2347 EmitFalseBranch(instr, not_equal);
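// The value compares equal to zero here; movmskpd copies the sign bit into
// bit 0 of scratch, which is set only for -0.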
2348 __ movmskpd(scratch, value);
2349 __ test(scratch, Immediate(1));
2350 EmitBranch(instr, not_zero);
2351 } else {
2352 Register value = ToRegister(instr->value());
2353 Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2354 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2355 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2356 Immediate(0x1));
2357 EmitFalseBranch(instr, no_overflow);
2358 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2359 Immediate(0x00000000));
2360 EmitBranch(instr, equal);
2361 }
2362 }
2363
2364
2365 Condition LCodeGen::EmitIsString(Register input,
2366 Register temp1,
2367 Label* is_not_string,
2368 SmiCheck check_needed = INLINE_SMI_CHECK) {
2369 if (check_needed == INLINE_SMI_CHECK) {
2370 __ JumpIfSmi(input, is_not_string);
2371 }
2372
2373 Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2374
2375 return cond;
2376 }
2377
2378
2379 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2380 Register reg = ToRegister(instr->value());
2381 Register temp = ToRegister(instr->temp());
2382
2383 SmiCheck check_needed =
2384 instr->hydrogen()->value()->type().IsHeapObject()
2385 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2386
2387 Condition true_cond = EmitIsString(
2388 reg, temp, instr->FalseLabel(chunk_), check_needed);
2389
2390 EmitBranch(instr, true_cond);
2391 }
2392
2393
2394 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2395 Operand input = ToOperand(instr->value());
2396
2397 __ test(input, Immediate(kSmiTagMask));
2398 EmitBranch(instr, zero);
2399 }
2400
2401
2402 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2403 Register input = ToRegister(instr->value());
2404 Register temp = ToRegister(instr->temp());
2405
2406 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2407 STATIC_ASSERT(kSmiTag == 0);
2408 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2409 }
2410 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2411 __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2412 1 << Map::kIsUndetectable);
2413 EmitBranch(instr, not_zero);
2414 }
2415
2416
2417 static Condition ComputeCompareCondition(Token::Value op) {
2418 switch (op) {
2419 case Token::EQ_STRICT:
2420 case Token::EQ:
2421 return equal;
2422 case Token::LT:
2423 return less;
2424 case Token::GT:
2425 return greater;
2426 case Token::LTE:
2427 return less_equal;
2428 case Token::GTE:
2429 return greater_equal;
2430 default:
2431 UNREACHABLE();
2432 return no_condition;
2433 }
2434 }
2435
2436
2437 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2438 DCHECK(ToRegister(instr->context()).is(esi));
2439 DCHECK(ToRegister(instr->left()).is(edx));
2440 DCHECK(ToRegister(instr->right()).is(eax));
2441
2442 Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
2443 CallCode(code, RelocInfo::CODE_TARGET, instr);
2444 __ test(eax, eax);
2445
2446 EmitBranch(instr, ComputeCompareCondition(instr->op()));
2447 }
2448
2449
2450 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2451 InstanceType from = instr->from();
2452 InstanceType to = instr->to();
2453 if (from == FIRST_TYPE) return to;
2454 DCHECK(from == to || to == LAST_TYPE);
2455 return from;
2456 }
2457
2458
2459 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2460 InstanceType from = instr->from();
2461 InstanceType to = instr->to();
2462 if (from == to) return equal;
2463 if (to == LAST_TYPE) return above_equal;
2464 if (from == FIRST_TYPE) return below_equal;
2465 UNREACHABLE();
2466 return equal;
2467 }
2468
2469
2470 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2471 Register input = ToRegister(instr->value());
2472 Register temp = ToRegister(instr->temp());
2473
2474 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2475 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2476 }
2477
2478 __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2479 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2480 }
2481
2482
2483 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2484 Register input = ToRegister(instr->value());
2485 Register result = ToRegister(instr->result());
2486
2487 __ AssertString(input);
2488
2489 __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2490 __ IndexFromHash(result, result);
2491 }
2492
2493
2494 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2495 LHasCachedArrayIndexAndBranch* instr) {
2496 Register input = ToRegister(instr->value());
2497
2498 __ test(FieldOperand(input, String::kHashFieldOffset),
2499 Immediate(String::kContainsCachedArrayIndexMask));
2500 EmitBranch(instr, equal);
2501 }
2502
2503
2504 // Branches to a label or falls through with the answer in the z flag. Trashes
2505 // the temp registers, but not the input.
2506 void LCodeGen::EmitClassOfTest(Label* is_true,
2507 Label* is_false,
2508 Handle<String> class_name,
2509 Register input,
2510 Register temp,
2511 Register temp2) {
2512 DCHECK(!input.is(temp));
2513 DCHECK(!input.is(temp2));
2514 DCHECK(!temp.is(temp2));
2515 __ JumpIfSmi(input, is_false);
2516
2517 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2518 // Assuming the following assertions, we can use the same compares to test
2519 // for both being a function type and being in the object type range.
2520 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2521 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2522 FIRST_SPEC_OBJECT_TYPE + 1);
2523 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2524 LAST_SPEC_OBJECT_TYPE - 1);
2525 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2526 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2527 __ j(below, is_false);
2528 __ j(equal, is_true);
2529 __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2530 __ j(equal, is_true);
2531 } else {
2532 // Faster code path to avoid two compares: subtract lower bound from the
2533 // actual type and do a signed compare with the width of the type range.
2534 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2535 __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2536 __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2537 __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2538 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2539 __ j(above, is_false);
2540 }
2541
2542 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2543 // Check if the constructor in the map is a function.
2544 __ GetMapConstructor(temp, temp, temp2);
2545 // Objects with a non-function constructor have class 'Object'.
2546 __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
2547 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2548 __ j(not_equal, is_true);
2549 } else {
2550 __ j(not_equal, is_false);
2551 }
2552
2553 // temp now contains the constructor function. Grab the
2554 // instance class name from there.
2555 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2556 __ mov(temp, FieldOperand(temp,
2557 SharedFunctionInfo::kInstanceClassNameOffset));
2558 // The class name we are testing against is internalized since it's a literal.
2559 // The name in the constructor is internalized because of the way the context
2560 // is booted. This routine isn't expected to work for random API-created
2561 // classes and it doesn't have to because you can't access it with natives
2562 // syntax. Since both sides are internalized it is sufficient to use an
2563 // identity comparison.
2564 __ cmp(temp, class_name);
2565 // End with the answer in the z flag.
2566 }
2567
2568
2569 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2570 Register input = ToRegister(instr->value());
2571 Register temp = ToRegister(instr->temp());
2572 Register temp2 = ToRegister(instr->temp2());
2573
2574 Handle<String> class_name = instr->hydrogen()->class_name();
2575
2576 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2577 class_name, input, temp, temp2);
2578
2579 EmitBranch(instr, equal);
2580 }
2581
2582
2583 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2584 Register reg = ToRegister(instr->value());
2585 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2586 EmitBranch(instr, equal);
2587 }
2588
2589
2590 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2591 DCHECK(ToRegister(instr->context()).is(esi));
2592 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2593 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2594 DCHECK(ToRegister(instr->result()).is(eax));
2595 InstanceOfStub stub(isolate());
2596 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2597 }
2598
2599
2600 void LCodeGen::DoHasInPrototypeChainAndBranch(
2601 LHasInPrototypeChainAndBranch* instr) {
2602 Register const object = ToRegister(instr->object());
2603 Register const object_map = ToRegister(instr->scratch());
2604 Register const object_prototype = object_map;
2605 Register const prototype = ToRegister(instr->prototype());
2606
2607 // The {object} must be a spec object. It's sufficient to know that {object}
2608 // is not a smi, since all other non-spec objects have {null} prototypes and
2609 // will be ruled out below.
2610 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2611 __ test(object, Immediate(kSmiTagMask));
2612 EmitFalseBranch(instr, zero);
2613 }
2614
2615 // Loop through the {object}'s prototype chain looking for the {prototype}.
2616 __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
2617 Label loop;
2618 __ bind(&loop);
2619 __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
2620 __ cmp(object_prototype, prototype);
2621 EmitTrueBranch(instr, equal);
2622 __ cmp(object_prototype, factory()->null_value());
2623 EmitFalseBranch(instr, equal);
2624 __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
2625 __ jmp(&loop);
2626 }
2627
2628
2629 void LCodeGen::DoCmpT(LCmpT* instr) {
2630 Token::Value op = instr->op();
2631
2632 Handle<Code> ic =
2633 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2634 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2635
2636 Condition condition = ComputeCompareCondition(op);
2637 Label true_value, done;
2638 __ test(eax, Operand(eax));
2639 __ j(condition, &true_value, Label::kNear);
2640 __ mov(ToRegister(instr->result()), factory()->false_value());
2641 __ jmp(&done, Label::kNear);
2642 __ bind(&true_value);
2643 __ mov(ToRegister(instr->result()), factory()->true_value());
2644 __ bind(&done);
2645 }
2646
2647
2648 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
2649 int extra_value_count = dynamic_frame_alignment ? 2 : 1;
2650
2651 if (instr->has_constant_parameter_count()) {
2652 int parameter_count = ToInteger32(instr->constant_parameter_count());
2653 if (dynamic_frame_alignment && FLAG_debug_code) {
2654 __ cmp(Operand(esp,
2655 (parameter_count + extra_value_count) * kPointerSize),
2656 Immediate(kAlignmentZapValue));
2657 __ Assert(equal, kExpectedAlignmentMarker);
2658 }
2659 __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
2660 } else {
2661 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2662 Register reg = ToRegister(instr->parameter_count());
2663 // The argument count parameter is a smi.
2664 __ SmiUntag(reg);
2665 Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
2666 if (dynamic_frame_alignment && FLAG_debug_code) {
2667 DCHECK(extra_value_count == 2);
2668 __ cmp(Operand(esp, reg, times_pointer_size,
2669 extra_value_count * kPointerSize),
2670 Immediate(kAlignmentZapValue));
2671 __ Assert(equal, kExpectedAlignmentMarker);
2672 }
2673
2674 // Emit code to restore the stack based on instr->parameter_count().
2675 __ pop(return_addr_reg); // Save the return address.
2676 if (dynamic_frame_alignment) {
2677 __ inc(reg); // 1 more for alignment
2678 }
2679
2680 __ shl(reg, kPointerSizeLog2);
2681 __ add(esp, reg);
2682 __ jmp(return_addr_reg);
2683 }
2684 }
2685
2686
2687 void LCodeGen::DoReturn(LReturn* instr) {
2688 if (FLAG_trace && info()->IsOptimizing()) {
2689 // Preserve the return value on the stack and rely on the runtime call
2690 // to return the value in the same register. We're leaving the code
2691 // managed by the register allocator and tearing down the frame, it's
2692 // safe to write to the context register.
2693 __ push(eax);
2694 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2695 __ CallRuntime(Runtime::kTraceExit, 1);
2696 }
2697 if (info()->saves_caller_doubles()) RestoreCallerDoubles();
2698 if (dynamic_frame_alignment_) {
2699 // Fetch the state of the dynamic frame alignment.
2700 __ mov(edx, Operand(ebp,
2701 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2702 }
2703 if (NeedsEagerFrame()) {
2704 __ mov(esp, ebp);
2705 __ pop(ebp);
2706 }
2707 if (dynamic_frame_alignment_) {
2708 Label no_padding;
2709 __ cmp(edx, Immediate(kNoAlignmentPadding));
2710 __ j(equal, &no_padding, Label::kNear);
2711
2712 EmitReturn(instr, true);
2713 __ bind(&no_padding);
2714 }
2715
2716 EmitReturn(instr, false);
2717 }
2718
2719
2720 template <class T>
2721 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2722 Register vector_register = ToRegister(instr->temp_vector());
2723 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2724 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2725 DCHECK(slot_register.is(eax));
2726
2727 AllowDeferredHandleDereference vector_structure_check;
2728 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2729 __ mov(vector_register, vector);
2730 // No need to allocate this register.
2731 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2732 int index = vector->GetIndex(slot);
2733 __ mov(slot_register, Immediate(Smi::FromInt(index)));
2734 }
2735
2736
2737 template <class T>
2738 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2739 Register vector_register = ToRegister(instr->temp_vector());
2740 Register slot_register = ToRegister(instr->temp_slot());
2741
2742 AllowDeferredHandleDereference vector_structure_check;
2743 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2744 __ mov(vector_register, vector);
2745 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2746 int index = vector->GetIndex(slot);
2747 __ mov(slot_register, Immediate(Smi::FromInt(index)));
2748 }
2749
2750
2751 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2752 DCHECK(ToRegister(instr->context()).is(esi));
2753 DCHECK(ToRegister(instr->global_object())
2754 .is(LoadDescriptor::ReceiverRegister()));
2755 DCHECK(ToRegister(instr->result()).is(eax));
2756
2757 __ mov(LoadDescriptor::NameRegister(), instr->name());
2758 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2759 Handle<Code> ic =
2760 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
2761 SLOPPY, PREMONOMORPHIC).code();
2762 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2763 }
2764
2765
2766 void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
2767 DCHECK(ToRegister(instr->context()).is(esi));
2768 DCHECK(ToRegister(instr->result()).is(eax));
2769
2770 int const slot = instr->slot_index();
2771 int const depth = instr->depth();
2772 if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
2773 __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
2774 Handle<Code> stub =
2775 CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
2776 CallCode(stub, RelocInfo::CODE_TARGET, instr);
2777 } else {
2778 __ Push(Smi::FromInt(slot));
2779 __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
2780 }
2781 }
2782
2783
2784 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2785 Register context = ToRegister(instr->context());
2786 Register result = ToRegister(instr->result());
2787 __ mov(result, ContextOperand(context, instr->slot_index()));
2788
2789 if (instr->hydrogen()->RequiresHoleCheck()) {
2790 __ cmp(result, factory()->the_hole_value());
2791 if (instr->hydrogen()->DeoptimizesOnHole()) {
2792 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2793 } else {
2794 Label is_not_hole;
2795 __ j(not_equal, &is_not_hole, Label::kNear);
2796 __ mov(result, factory()->undefined_value());
2797 __ bind(&is_not_hole);
2798 }
2799 }
2800 }
2801
2802
2803 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2804 Register context = ToRegister(instr->context());
2805 Register value = ToRegister(instr->value());
2806
2807 Label skip_assignment;
2808
2809 Operand target = ContextOperand(context, instr->slot_index());
2810 if (instr->hydrogen()->RequiresHoleCheck()) {
2811 __ cmp(target, factory()->the_hole_value());
2812 if (instr->hydrogen()->DeoptimizesOnHole()) {
2813 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2814 } else {
2815 __ j(not_equal, &skip_assignment, Label::kNear);
2816 }
2817 }
2818
2819 __ mov(target, value);
2820 if (instr->hydrogen()->NeedsWriteBarrier()) {
2821 SmiCheck check_needed =
2822 instr->hydrogen()->value()->type().IsHeapObject()
2823 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2824 Register temp = ToRegister(instr->temp());
2825 int offset = Context::SlotOffset(instr->slot_index());
2826 __ RecordWriteContextSlot(context,
2827 offset,
2828 value,
2829 temp,
2830 kSaveFPRegs,
2831 EMIT_REMEMBERED_SET,
2832 check_needed);
2833 }
2834
2835 __ bind(&skip_assignment);
2836 }
2837
2838
2839 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2840 HObjectAccess access = instr->hydrogen()->access();
2841 int offset = access.offset();
2842
2843 if (access.IsExternalMemory()) {
2844 Register result = ToRegister(instr->result());
2845 MemOperand operand = instr->object()->IsConstantOperand()
2846 ? MemOperand::StaticVariable(ToExternalReference(
2847 LConstantOperand::cast(instr->object())))
2848 : MemOperand(ToRegister(instr->object()), offset);
2849 __ Load(result, operand, access.representation());
2850 return;
2851 }
2852
2853 Register object = ToRegister(instr->object());
2854 if (instr->hydrogen()->representation().IsDouble()) {
2855 XMMRegister result = ToDoubleRegister(instr->result());
2856 __ movsd(result, FieldOperand(object, offset));
2857 return;
2858 }
2859
2860 Register result = ToRegister(instr->result());
2861 if (!access.IsInobject()) {
2862 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2863 object = result;
2864 }
2865 __ Load(result, FieldOperand(object, offset), access.representation());
2866 }
2867
2868
2869 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
2870 DCHECK(!operand->IsDoubleRegister());
2871 if (operand->IsConstantOperand()) {
2872 Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
2873 AllowDeferredHandleDereference smi_check;
2874 if (object->IsSmi()) {
2875 __ Push(Handle<Smi>::cast(object));
2876 } else {
2877 __ PushHeapObject(Handle<HeapObject>::cast(object));
2878 }
2879 } else if (operand->IsRegister()) {
2880 __ push(ToRegister(operand));
2881 } else {
2882 __ push(ToOperand(operand));
2883 }
2884 }
2885
2886
2887 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2888 DCHECK(ToRegister(instr->context()).is(esi));
2889 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2890 DCHECK(ToRegister(instr->result()).is(eax));
2891
2892 __ mov(LoadDescriptor::NameRegister(), instr->name());
2893 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2894 Handle<Code> ic =
2895 CodeFactory::LoadICInOptimizedCode(
2896 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
2897 instr->hydrogen()->initialization_state()).code();
2898 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2899 }
2900
2901
2902 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2903 Register function = ToRegister(instr->function());
2904 Register temp = ToRegister(instr->temp());
2905 Register result = ToRegister(instr->result());
2906
2907 // Get the prototype or initial map from the function.
2908 __ mov(result,
2909 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2910
2911 // Check that the function has a prototype or an initial map.
2912 __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
2913 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
2914
2915 // If the function does not have an initial map, we're done.
2916 Label done;
2917 __ CmpObjectType(result, MAP_TYPE, temp);
2918 __ j(not_equal, &done, Label::kNear);
2919
2920 // Get the prototype from the initial map.
2921 __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
2922
2923 // All done.
2924 __ bind(&done);
2925 }
2926
2927
2928 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2929 Register result = ToRegister(instr->result());
2930 __ LoadRoot(result, instr->index());
2931 }
2932
2933
2934 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2935 Register arguments = ToRegister(instr->arguments());
2936 Register result = ToRegister(instr->result());
2937 if (instr->length()->IsConstantOperand() &&
2938 instr->index()->IsConstantOperand()) {
2939 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2940 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2941 int index = (const_length - const_index) + 1;
2942 __ mov(result, Operand(arguments, index * kPointerSize));
2943 } else {
2944 Register length = ToRegister(instr->length());
2945 Operand index = ToOperand(instr->index());
2946 // There are two words between the frame pointer and the last argument.
2947 // Subtracting the index from the length accounts for one of them; kPointerSize below adds the other.
2948 __ sub(length, index);
2949 __ mov(result, Operand(arguments, length, times_4, kPointerSize));
2950 }
2951 }
2952
2953
2954 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2955 ElementsKind elements_kind = instr->elements_kind();
2956 LOperand* key = instr->key();
2957 if (!key->IsConstantOperand() &&
2958 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
2959 elements_kind)) {
2960 __ SmiUntag(ToRegister(key));
2961 }
2962 Operand operand(BuildFastArrayOperand(
2963 instr->elements(),
2964 key,
2965 instr->hydrogen()->key()->representation(),
2966 elements_kind,
2967 instr->base_offset()));
2968 if (elements_kind == FLOAT32_ELEMENTS) {
2969 XMMRegister result(ToDoubleRegister(instr->result()));
2970 __ movss(result, operand);
2971 __ cvtss2sd(result, result);
2972 } else if (elements_kind == FLOAT64_ELEMENTS) {
2973 __ movsd(ToDoubleRegister(instr->result()), operand);
2974 } else {
2975 Register result(ToRegister(instr->result()));
2976 switch (elements_kind) {
2977 case INT8_ELEMENTS:
2978 __ movsx_b(result, operand);
2979 break;
2980 case UINT8_ELEMENTS:
2981 case UINT8_CLAMPED_ELEMENTS:
2982 __ movzx_b(result, operand);
2983 break;
2984 case INT16_ELEMENTS:
2985 __ movsx_w(result, operand);
2986 break;
2987 case UINT16_ELEMENTS:
2988 __ movzx_w(result, operand);
2989 break;
2990 case INT32_ELEMENTS:
2991 __ mov(result, operand);
2992 break;
2993 case UINT32_ELEMENTS:
2994 __ mov(result, operand);
2995 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
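// Without the Uint32 flag the element is used as a signed int32, so a set
// sign bit means the unsigned value does not fit and we must deoptimize.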
2996 __ test(result, Operand(result));
2997 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
2998 }
2999 break;
3000 case FLOAT32_ELEMENTS:
3001 case FLOAT64_ELEMENTS:
3002 case FAST_SMI_ELEMENTS:
3003 case FAST_ELEMENTS:
3004 case FAST_DOUBLE_ELEMENTS:
3005 case FAST_HOLEY_SMI_ELEMENTS:
3006 case FAST_HOLEY_ELEMENTS:
3007 case FAST_HOLEY_DOUBLE_ELEMENTS:
3008 case DICTIONARY_ELEMENTS:
3009 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3010 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3011 UNREACHABLE();
3012 break;
3013 }
3014 }
3015 }
3016
3017
3018 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3019 if (instr->hydrogen()->RequiresHoleCheck()) {
3020 Operand hole_check_operand = BuildFastArrayOperand(
3021 instr->elements(), instr->key(),
3022 instr->hydrogen()->key()->representation(),
3023 FAST_DOUBLE_ELEMENTS,
3024 instr->base_offset() + sizeof(kHoleNanLower32));
3025 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3026 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3027 }
3028
3029 Operand double_load_operand = BuildFastArrayOperand(
3030 instr->elements(),
3031 instr->key(),
3032 instr->hydrogen()->key()->representation(),
3033 FAST_DOUBLE_ELEMENTS,
3034 instr->base_offset());
3035 XMMRegister result = ToDoubleRegister(instr->result());
3036 __ movsd(result, double_load_operand);
3037 }
3038
3039
3040 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3041 Register result = ToRegister(instr->result());
3042
3043 // Load the result.
3044 __ mov(result,
3045 BuildFastArrayOperand(instr->elements(), instr->key(),
3046 instr->hydrogen()->key()->representation(),
3047 FAST_ELEMENTS, instr->base_offset()));
3048
3049 // Check for the hole value.
3050 if (instr->hydrogen()->RequiresHoleCheck()) {
3051 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3052 __ test(result, Immediate(kSmiTagMask));
3053 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
3054 } else {
3055 __ cmp(result, factory()->the_hole_value());
3056 DeoptimizeIf(equal, instr, Deoptimizer::kHole);
3057 }
3058 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3059 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3060 Label done;
3061 __ cmp(result, factory()->the_hole_value());
3062 __ j(not_equal, &done);
3063 if (info()->IsStub()) {
3064 // A stub can safely convert the hole to undefined only if the array
3065 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3066 // it needs to bail out.
3067 __ mov(result, isolate()->factory()->array_protector());
3068 __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
3069 Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
3070 DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
3071 }
3072 __ mov(result, isolate()->factory()->undefined_value());
3073 __ bind(&done);
3074 }
3075 }
3076
3077
3078 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3079 if (instr->is_fixed_typed_array()) {
3080 DoLoadKeyedExternalArray(instr);
3081 } else if (instr->hydrogen()->representation().IsDouble()) {
3082 DoLoadKeyedFixedDoubleArray(instr);
3083 } else {
3084 DoLoadKeyedFixedArray(instr);
3085 }
3086 }
3087
3088
3089 Operand LCodeGen::BuildFastArrayOperand(
3090 LOperand* elements_pointer,
3091 LOperand* key,
3092 Representation key_representation,
3093 ElementsKind elements_kind,
3094 uint32_t base_offset) {
3095 Register elements_pointer_reg = ToRegister(elements_pointer);
3096 int element_shift_size = ElementsKindToShiftSize(elements_kind);
3097 int shift_size = element_shift_size;
3098 if (key->IsConstantOperand()) {
3099 int constant_value = ToInteger32(LConstantOperand::cast(key));
3100 if (constant_value & 0xF0000000) {
3101 Abort(kArrayIndexConstantValueTooBig);
3102 }
3103 return Operand(elements_pointer_reg,
3104 ((constant_value) << shift_size)
3105 + base_offset);
3106 } else {
3107 // Take the tag bit into account while computing the shift size.
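// A smi key is already shifted left by the tag, so one factor of two comes
// for free and the scale factor can be reduced by one.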
3108 if (key_representation.IsSmi() && (shift_size >= 1)) {
3109 shift_size -= kSmiTagSize;
3110 }
3111 ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3112 return Operand(elements_pointer_reg,
3113 ToRegister(key),
3114 scale_factor,
3115 base_offset);
3116 }
3117 }
3118
3119
3120 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3121 DCHECK(ToRegister(instr->context()).is(esi));
3122 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3123 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3124
3125 if (instr->hydrogen()->HasVectorAndSlot()) {
3126 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3127 }
3128
3129 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3130 isolate(), instr->hydrogen()->language_mode(),
3131 instr->hydrogen()->initialization_state()).code();
3132 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3133 }
3134
3135
3136 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3137 Register result = ToRegister(instr->result());
3138
3139 if (instr->hydrogen()->from_inlined()) {
3140 __ lea(result, Operand(esp, -2 * kPointerSize));
3141 } else {
3142 // Check for arguments adapter frame.
3143 Label done, adapted;
3144 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3145 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3146 __ cmp(Operand(result),
3147 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3148 __ j(equal, &adapted, Label::kNear);
3149
3150 // No arguments adaptor frame.
3151 __ mov(result, Operand(ebp));
3152 __ jmp(&done, Label::kNear);
3153
3154 // Arguments adaptor frame present.
3155 __ bind(&adapted);
3156 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3157
3158 // Result is the frame pointer for the frame if not adapted and for the real
3159 // frame below the adaptor frame if adapted.
3160 __ bind(&done);
3161 }
3162 }
3163
3164
3165 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3166 Operand elem = ToOperand(instr->elements());
3167 Register result = ToRegister(instr->result());
3168
3169 Label done;
3170
3171 // If there is no arguments adaptor frame, the number of arguments is fixed.
3172 __ cmp(ebp, elem);
3173 __ mov(result, Immediate(scope()->num_parameters()));
3174 __ j(equal, &done, Label::kNear);
3175
3176 // Arguments adaptor frame present. Get argument length from there.
3177 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3178 __ mov(result, Operand(result,
3179 ArgumentsAdaptorFrameConstants::kLengthOffset));
3180 __ SmiUntag(result);
3181
3182 // Argument length is in result register.
3183 __ bind(&done);
3184 }
3185
3186
3187 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3188 Register receiver = ToRegister(instr->receiver());
3189 Register function = ToRegister(instr->function());
3190
3191 // If the receiver is null or undefined, we have to pass the global
3192 // object as a receiver to normal functions. Values have to be
3193 // passed unchanged to builtins and strict-mode functions.
3194 Label receiver_ok, global_object;
3195 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3196 Register scratch = ToRegister(instr->temp());
3197
3198 if (!instr->hydrogen()->known_function()) {
3199 // Do not transform the receiver to object for strict mode
3200 // functions.
3201 __ mov(scratch,
3202 FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3203 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3204 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3205 __ j(not_equal, &receiver_ok, dist);
3206
3207 // Do not transform the receiver to object for builtins.
3208 __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3209 1 << SharedFunctionInfo::kNativeBitWithinByte);
3210 __ j(not_equal, &receiver_ok, dist);
3211 }
3212
3213 // Normal function. Replace undefined or null with global receiver.
3214 __ cmp(receiver, factory()->null_value());
3215 __ j(equal, &global_object, Label::kNear);
3216 __ cmp(receiver, factory()->undefined_value());
3217 __ j(equal, &global_object, Label::kNear);
3218
3219 // The receiver should be a JS object.
3220 __ test(receiver, Immediate(kSmiTagMask));
3221 DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
3222 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
3223 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
3224
3225 __ jmp(&receiver_ok, Label::kNear);
3226 __ bind(&global_object);
3227 __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3228 const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
3229 __ mov(receiver, Operand(receiver, global_offset));
3230 const int proxy_offset = GlobalObject::kGlobalProxyOffset;
3231 __ mov(receiver, FieldOperand(receiver, proxy_offset));
3232 __ bind(&receiver_ok);
3233 }
3234
3235
3236 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3237 Register receiver = ToRegister(instr->receiver());
3238 Register function = ToRegister(instr->function());
3239 Register length = ToRegister(instr->length());
3240 Register elements = ToRegister(instr->elements());
3241 DCHECK(receiver.is(eax)); // Used for parameter count.
3242 DCHECK(function.is(edi)); // Required by InvokeFunction.
3243 DCHECK(ToRegister(instr->result()).is(eax));
3244
3245 // Copy the arguments to this function possibly from the
3246 // adaptor frame below it.
3247 const uint32_t kArgumentsLimit = 1 * KB;
3248 __ cmp(length, kArgumentsLimit);
3249 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3250
3251 __ push(receiver);
3252 __ mov(receiver, length);
3253
3254 // Loop through the arguments pushing them onto the execution
3255 // stack.
3256 Label invoke, loop;
3257 // length is a small non-negative integer, due to the test above.
3258 __ test(length, Operand(length));
3259 __ j(zero, &invoke, Label::kNear);
3260 __ bind(&loop);
3261 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3262 __ dec(length);
3263 __ j(not_zero, &loop);
3264
3265 // Invoke the function.
3266 __ bind(&invoke);
3267 DCHECK(instr->HasPointerMap());
3268 LPointerMap* pointers = instr->pointer_map();
3269 SafepointGenerator safepoint_generator(
3270 this, pointers, Safepoint::kLazyDeopt);
3271 ParameterCount actual(eax);
3272 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3273 }
3274
3275
3276 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3277 __ int3();
3278 }
3279
3280
3281 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3282 LOperand* argument = instr->value();
3283 EmitPushTaggedOperand(argument);
3284 }
3285
3286
3287 void LCodeGen::DoDrop(LDrop* instr) {
3288 __ Drop(instr->count());
3289 }
3290
3291
3292 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3293 Register result = ToRegister(instr->result());
3294 __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3295 }
3296
3297
3298 void LCodeGen::DoContext(LContext* instr) {
3299 Register result = ToRegister(instr->result());
3300 if (info()->IsOptimizing()) {
3301 __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3302 } else {
3303 // If there is no frame, the context must be in esi.
3304 DCHECK(result.is(esi));
3305 }
3306 }
3307
3308
3309 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3310 DCHECK(ToRegister(instr->context()).is(esi));
3311 __ push(Immediate(instr->hydrogen()->pairs()));
3312 __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3313 CallRuntime(Runtime::kDeclareGlobals, 2, instr);
3314 }
3315
3316
3317 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3318 int formal_parameter_count, int arity,
3319 LInstruction* instr) {
3320 bool dont_adapt_arguments =
3321 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3322 bool can_invoke_directly =
3323 dont_adapt_arguments || formal_parameter_count == arity;
3324
3325 Register function_reg = edi;
3326
3327 if (can_invoke_directly) {
3328 // Change context.
3329 __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
3330
3331 // Always initialize eax to the number of actual arguments.
3332 __ mov(eax, arity);
3333
3334 // Invoke function directly.
3335 if (function.is_identical_to(info()->closure())) {
3336 __ CallSelf();
3337 } else {
3338 __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
3339 }
3340 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3341 } else {
3342 // We need to adapt arguments.
3343 LPointerMap* pointers = instr->pointer_map();
3344 SafepointGenerator generator(
3345 this, pointers, Safepoint::kLazyDeopt);
3346 ParameterCount count(arity);
3347 ParameterCount expected(formal_parameter_count);
3348 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3349 }
3350 }
3351
3352
3353 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3354 DCHECK(ToRegister(instr->result()).is(eax));
3355
3356 if (instr->hydrogen()->IsTailCall()) {
3357 if (NeedsEagerFrame()) __ leave();
3358
3359 if (instr->target()->IsConstantOperand()) {
3360 LConstantOperand* target = LConstantOperand::cast(instr->target());
3361 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3362 __ jmp(code, RelocInfo::CODE_TARGET);
3363 } else {
3364 DCHECK(instr->target()->IsRegister());
3365 Register target = ToRegister(instr->target());
3366 __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3367 __ jmp(target);
3368 }
3369 } else {
3370 LPointerMap* pointers = instr->pointer_map();
3371 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3372
3373 if (instr->target()->IsConstantOperand()) {
3374 LConstantOperand* target = LConstantOperand::cast(instr->target());
3375 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3376 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3377 __ call(code, RelocInfo::CODE_TARGET);
3378 } else {
3379 DCHECK(instr->target()->IsRegister());
3380 Register target = ToRegister(instr->target());
3381 generator.BeforeCall(__ CallSize(Operand(target)));
3382 __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3383 __ call(target);
3384 }
3385 generator.AfterCall();
3386 }
3387 }
3388
3389
3390 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3391 DCHECK(ToRegister(instr->function()).is(edi));
3392 DCHECK(ToRegister(instr->result()).is(eax));
3393
3394 __ mov(eax, instr->arity());
3395
3396 // Change context.
3397 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3398
3399 bool is_self_call = false;
3400 if (instr->hydrogen()->function()->IsConstant()) {
3401 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3402 Handle<JSFunction> jsfun =
3403 Handle<JSFunction>::cast(fun_const->handle(isolate()));
3404 is_self_call = jsfun.is_identical_to(info()->closure());
3405 }
3406
3407 if (is_self_call) {
3408 __ CallSelf();
3409 } else {
3410 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3411 }
3412
3413 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3414 }
3415
3416
3417 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3418 Register input_reg = ToRegister(instr->value());
3419 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3420 factory()->heap_number_map());
3421 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3422
3423 Label slow, allocated, done;
3424 Register tmp = input_reg.is(eax) ? ecx : eax;
3425 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3426
3427 // Preserve the value of all registers.
3428 PushSafepointRegistersScope scope(this);
3429
3430 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3431 // Check the sign of the argument. If the argument is positive, just
3432 // return it. We do not need to patch the stack since |input| and
3433 // |result| are the same register and |input| will be restored
3434 // unchanged by popping safepoint registers.
3435 __ test(tmp, Immediate(HeapNumber::kSignMask));
3436 __ j(zero, &done, Label::kNear);
3437
3438 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3439 __ jmp(&allocated, Label::kNear);
3440
3441 // Slow case: Call the runtime system to do the number allocation.
3442 __ bind(&slow);
3443 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
3444 instr, instr->context());
3445 // Set the pointer to the new heap number in tmp.
3446 if (!tmp.is(eax)) __ mov(tmp, eax);
3447 // Restore input_reg after call to runtime.
3448 __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3449
3450 __ bind(&allocated);
3451 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3452 __ and_(tmp2, ~HeapNumber::kSignMask);
3453 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3454 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3455 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3456 __ StoreToSafepointRegisterSlot(input_reg, tmp);
3457
3458 __ bind(&done);
3459 }
3460
3461
3462 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3463 Register input_reg = ToRegister(instr->value());
3464 __ test(input_reg, Operand(input_reg));
3465 Label is_positive;
3466 __ j(not_sign, &is_positive, Label::kNear);
3467 __ neg(input_reg); // Sets flags.
3468 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
3469 __ bind(&is_positive);
3470 }
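// A minimal sketch (plain C++, not V8 code) of the integer path above: the
// absolute value of the most negative int32 is not representable, which is
// why a still-negative result after neg triggers the overflow deopt.
#include <cstdint>
#include <limits>
int32_t IntegerAbs(int32_t x, bool* deopt) {
  if (x >= 0) return x;
  *deopt = (x == std::numeric_limits<int32_t>::min());  // -x would overflow
  return *deopt ? x : -x;
}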
3471
3472
3473 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3474 // Class for deferred case.
3475 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3476 public:
3477 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3478 LMathAbs* instr)
3479 : LDeferredCode(codegen), instr_(instr) { }
3480 void Generate() override {
3481 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3482 }
3483 LInstruction* instr() override { return instr_; }
3484
3485 private:
3486 LMathAbs* instr_;
3487 };
3488
3489 DCHECK(instr->value()->Equals(instr->result()));
3490 Representation r = instr->hydrogen()->value()->representation();
3491
3492 if (r.IsDouble()) {
3493 XMMRegister scratch = double_scratch0();
3494 XMMRegister input_reg = ToDoubleRegister(instr->value());
3495 __ xorps(scratch, scratch);
3496 __ subsd(scratch, input_reg);
3497 __ andps(input_reg, scratch);
3498 } else if (r.IsSmiOrInteger32()) {
3499 EmitIntegerMathAbs(instr);
3500 } else { // Tagged case.
3501 DeferredMathAbsTaggedHeapNumber* deferred =
3502 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3503 Register input_reg = ToRegister(instr->value());
3504 // Smi check.
3505 __ JumpIfNotSmi(input_reg, deferred->entry());
3506 EmitIntegerMathAbs(instr);
3507 __ bind(deferred->exit());
3508 }
3509 }
3510
3511
3512 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3513 XMMRegister xmm_scratch = double_scratch0();
3514 Register output_reg = ToRegister(instr->result());
3515 XMMRegister input_reg = ToDoubleRegister(instr->value());
3516
3517 if (CpuFeatures::IsSupported(SSE4_1)) {
3518 CpuFeatureScope scope(masm(), SSE4_1);
3519 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3520 // Deoptimize on negative zero.
3521 Label non_zero;
3522 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3523 __ ucomisd(input_reg, xmm_scratch);
3524 __ j(not_equal, &non_zero, Label::kNear);
3525 __ movmskpd(output_reg, input_reg);
3526 __ test(output_reg, Immediate(1));
3527 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3528 __ bind(&non_zero);
3529 }
3530 __ roundsd(xmm_scratch, input_reg, kRoundDown);
3531 __ cvttsd2si(output_reg, Operand(xmm_scratch));
3532 // Overflow is signalled with minint.
3533 __ cmp(output_reg, 0x1);
3534 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3535 } else {
3536 Label negative_sign, done;
3537 // Deoptimize on unordered.
3538 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3539 __ ucomisd(input_reg, xmm_scratch);
3540 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
3541 __ j(below, &negative_sign, Label::kNear);
3542
3543 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3544 // Check for negative zero.
3545 Label positive_sign;
3546 __ j(above, &positive_sign, Label::kNear);
3547 __ movmskpd(output_reg, input_reg);
3548 __ test(output_reg, Immediate(1));
3549 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3550 __ Move(output_reg, Immediate(0));
3551 __ jmp(&done, Label::kNear);
3552 __ bind(&positive_sign);
3553 }
3554
3555 // Use truncating instruction (OK because input is positive).
3556 __ cvttsd2si(output_reg, Operand(input_reg));
3557 // Overflow is signalled with minint.
3558 __ cmp(output_reg, 0x1);
3559 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3560 __ jmp(&done, Label::kNear);
3561
3562 // Non-zero negative values reach here.
3563 __ bind(&negative_sign);
3564 // Truncate, then compare and compensate.
3565 __ cvttsd2si(output_reg, Operand(input_reg));
3566 __ Cvtsi2sd(xmm_scratch, output_reg);
3567 __ ucomisd(input_reg, xmm_scratch);
3568 __ j(equal, &done, Label::kNear);
3569 __ sub(output_reg, Immediate(1));
3570 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3571
3572 __ bind(&done);
3573 }
3574 }
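// Sketch of the non-SSE4.1 path above (deopts elided): truncate towards
// zero, then subtract one when the input was negative and not already an
// integer. The "cmp output, 1" / overflow pattern works because cvttsd2si
// returns kMinInt (0x80000000) on overflow or NaN, and kMinInt - 1 is the
// only subtraction there that sets the overflow flag.
#include <cstdint>
int32_t FloorToInt32(double x) {
  int32_t truncated = static_cast<int32_t>(x);                   // cvttsd2si
  if (x < 0 && static_cast<double>(truncated) != x) --truncated;
  return truncated;
}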
3575
3576
3577 void LCodeGen::DoMathRound(LMathRound* instr) {
3578 Register output_reg = ToRegister(instr->result());
3579 XMMRegister input_reg = ToDoubleRegister(instr->value());
3580 XMMRegister xmm_scratch = double_scratch0();
3581 XMMRegister input_temp = ToDoubleRegister(instr->temp());
3582 ExternalReference one_half = ExternalReference::address_of_one_half();
3583 ExternalReference minus_one_half =
3584 ExternalReference::address_of_minus_one_half();
3585
3586 Label done, round_to_zero, below_one_half, do_not_compensate;
3587 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3588
3589 __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
3590 __ ucomisd(xmm_scratch, input_reg);
3591 __ j(above, &below_one_half, Label::kNear);
3592
3593 // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
3594 __ addsd(xmm_scratch, input_reg);
3595 __ cvttsd2si(output_reg, Operand(xmm_scratch));
3596 // Overflow is signalled with minint.
3597 __ cmp(output_reg, 0x1);
3598 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3599 __ jmp(&done, dist);
3600
3601 __ bind(&below_one_half);
3602 __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
3603 __ ucomisd(xmm_scratch, input_reg);
3604 __ j(below_equal, &round_to_zero, Label::kNear);
3605
3606 // CVTTSD2SI rounds towards zero; we use ceil(x - (-0.5)) and then
3607 // compare and compensate.
3608 __ movaps(input_temp, input_reg); // Do not alter input_reg.
3609 __ subsd(input_temp, xmm_scratch);
3610 __ cvttsd2si(output_reg, Operand(input_temp));
3611 // Catch minint due to overflow, and to prevent overflow when compensating.
3612 __ cmp(output_reg, 0x1);
3613 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
3614
3615 __ Cvtsi2sd(xmm_scratch, output_reg);
3616 __ ucomisd(xmm_scratch, input_temp);
3617 __ j(equal, &done, dist);
3618 __ sub(output_reg, Immediate(1));
3619 // No overflow because we already ruled out minint.
3620 __ jmp(&done, dist);
3621
3622 __ bind(&round_to_zero);
3623 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3624 // we can ignore the difference between a result of -0 and +0.
3625 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3626 // If the sign is positive, we return +0.
3627 __ movmskpd(output_reg, input_reg);
3628 __ test(output_reg, Immediate(1));
3629 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
3630 }
3631 __ Move(output_reg, Immediate(0));
3632 __ bind(&done);
3633 }
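// Sketch of the rounding scheme above (deopts and the -0 check elided):
// round(x) is floor(x + 0.5), split into ranges so that cvttsd2si's
// truncate-towards-zero either is already correct or can be compensated.
#include <cstdint>
int32_t RoundToInt32(double x) {
  if (x >= 0.5) return static_cast<int32_t>(x + 0.5);    // truncation == floor
  if (x >= -0.5) return 0;                               // [-0.5, 0.5) rounds to 0
  int32_t result = static_cast<int32_t>(x + 0.5);        // truncates towards zero
  if (static_cast<double>(result) != x + 0.5) --result;  // compensate down to floor
  return result;
}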
3634
3635
3636 void LCodeGen::DoMathFround(LMathFround* instr) {
3637 XMMRegister input_reg = ToDoubleRegister(instr->value());
3638 XMMRegister output_reg = ToDoubleRegister(instr->result());
3639 __ cvtsd2ss(output_reg, input_reg);
3640 __ cvtss2sd(output_reg, output_reg);
3641 }
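// The two conversions above are just a double -> float -> double round trip,
// i.e. (as a sketch):
double Fround(double x) {
  return static_cast<double>(static_cast<float>(x));
}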
3642
3643
3644 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3645 Operand input = ToOperand(instr->value());
3646 XMMRegister output = ToDoubleRegister(instr->result());
3647 __ sqrtsd(output, input);
3648 }
3649
3650
3651 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3652 XMMRegister xmm_scratch = double_scratch0();
3653 XMMRegister input_reg = ToDoubleRegister(instr->value());
3654 Register scratch = ToRegister(instr->temp());
3655 DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3656
3657 // Note that according to ECMA-262 15.8.2.13:
3658 // Math.pow(-Infinity, 0.5) == Infinity
3659 // Math.sqrt(-Infinity) == NaN
3660 Label done, sqrt;
3661 // Check base for -Infinity. According to IEEE-754, single-precision
3662 // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
3663 __ mov(scratch, 0xFF800000);
3664 __ movd(xmm_scratch, scratch);
3665 __ cvtss2sd(xmm_scratch, xmm_scratch);
3666 __ ucomisd(input_reg, xmm_scratch);
3667 // Comparing -Infinity with NaN results in "unordered", which sets the
3668 // zero flag as if both were equal. However, it also sets the carry flag.
3669 __ j(not_equal, &sqrt, Label::kNear);
3670 __ j(carry, &sqrt, Label::kNear);
3671 // If input is -Infinity, return Infinity.
3672 __ xorps(input_reg, input_reg);
3673 __ subsd(input_reg, xmm_scratch);
3674 __ jmp(&done, Label::kNear);
3675
3676 // Square root.
3677 __ bind(&sqrt);
3678 __ xorps(xmm_scratch, xmm_scratch);
3679 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3680 __ sqrtsd(input_reg, input_reg);
3681 __ bind(&done);
3682 }
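// Sketch of the semantics enforced above: Math.pow(x, 0.5) is sqrt(x),
// except that -Infinity maps to +Infinity (plain sqrt would give NaN) and
// -0 is normalized to +0 by adding +0 before taking the square root.
#include <cmath>
#include <limits>
double PowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();
  }
  return std::sqrt(x + 0.0);  // x + 0.0 turns -0 into +0
}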
3683
3684
3685 void LCodeGen::DoPower(LPower* instr) {
3686 Representation exponent_type = instr->hydrogen()->right()->representation();
3687 // Having marked this as a call, we can use any registers.
3688 // Just make sure that the input/output registers are the expected ones.
3689 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3690 DCHECK(!instr->right()->IsDoubleRegister() ||
3691 ToDoubleRegister(instr->right()).is(xmm1));
3692 DCHECK(!instr->right()->IsRegister() ||
3693 ToRegister(instr->right()).is(tagged_exponent));
3694 DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3695 DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
3696
3697 if (exponent_type.IsSmi()) {
3698 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3699 __ CallStub(&stub);
3700 } else if (exponent_type.IsTagged()) {
3701 Label no_deopt;
3702 __ JumpIfSmi(tagged_exponent, &no_deopt);
3703 DCHECK(!ecx.is(tagged_exponent));
3704 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
3705 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
3706 __ bind(&no_deopt);
3707 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3708 __ CallStub(&stub);
3709 } else if (exponent_type.IsInteger32()) {
3710 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3711 __ CallStub(&stub);
3712 } else {
3713 DCHECK(exponent_type.IsDouble());
3714 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3715 __ CallStub(&stub);
3716 }
3717 }
3718
3719
3720 void LCodeGen::DoMathLog(LMathLog* instr) {
3721 DCHECK(instr->value()->Equals(instr->result()));
3722 XMMRegister input_reg = ToDoubleRegister(instr->value());
3723 XMMRegister xmm_scratch = double_scratch0();
3724 Label positive, done, zero;
3725 __ xorps(xmm_scratch, xmm_scratch);
3726 __ ucomisd(input_reg, xmm_scratch);
3727 __ j(above, &positive, Label::kNear);
3728 __ j(not_carry, &zero, Label::kNear);
3729 __ pcmpeqd(input_reg, input_reg);
3730 __ jmp(&done, Label::kNear);
3731 __ bind(&zero);
3732 ExternalReference ninf =
3733 ExternalReference::address_of_negative_infinity();
3734 __ movsd(input_reg, Operand::StaticVariable(ninf));
3735 __ jmp(&done, Label::kNear);
3736 __ bind(&positive);
3737 __ fldln2();
3738 __ sub(Operand(esp), Immediate(kDoubleSize));
3739 __ movsd(Operand(esp, 0), input_reg);
3740 __ fld_d(Operand(esp, 0));
3741 __ fyl2x();
3742 __ fstp_d(Operand(esp, 0));
3743 __ movsd(input_reg, Operand(esp, 0));
3744 __ add(Operand(esp), Immediate(kDoubleSize));
3745 __ bind(&done);
3746 }
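// Sketch of the three cases above: positive inputs go through the x87
// sequence fldln2/fyl2x, which computes ln(2) * log2(x) == ln(x); zero
// yields -Infinity; anything else (negative or NaN) yields the NaN that
// pcmpeqd builds as an all-ones bit pattern.
#include <cmath>
#include <limits>
double MathLog(double x) {
  if (x > 0) return std::log(x);                                // fyl2x path
  if (x == 0) return -std::numeric_limits<double>::infinity();
  return std::numeric_limits<double>::quiet_NaN();
}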
3747
3748
3749 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3750 Register input = ToRegister(instr->value());
3751 Register result = ToRegister(instr->result());
3752
3753 __ Lzcnt(result, input);
3754 }
3755
3756
3757 void LCodeGen::DoMathExp(LMathExp* instr) {
3758 XMMRegister input = ToDoubleRegister(instr->value());
3759 XMMRegister result = ToDoubleRegister(instr->result());
3760 XMMRegister temp0 = double_scratch0();
3761 Register temp1 = ToRegister(instr->temp1());
3762 Register temp2 = ToRegister(instr->temp2());
3763
3764 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3765 }
3766
3767
3768 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3769 DCHECK(ToRegister(instr->context()).is(esi));
3770 DCHECK(ToRegister(instr->function()).is(edi));
3771 DCHECK(instr->HasPointerMap());
3772
3773 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3774 if (known_function.is_null()) {
3775 LPointerMap* pointers = instr->pointer_map();
3776 SafepointGenerator generator(
3777 this, pointers, Safepoint::kLazyDeopt);
3778 ParameterCount count(instr->arity());
3779 __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
3780 } else {
3781 CallKnownFunction(known_function,
3782 instr->hydrogen()->formal_parameter_count(),
3783 instr->arity(), instr);
3784 }
3785 }
3786
3787
3788 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3789 DCHECK(ToRegister(instr->context()).is(esi));
3790 DCHECK(ToRegister(instr->function()).is(edi));
3791 DCHECK(ToRegister(instr->result()).is(eax));
3792
3793 int arity = instr->arity();
3794 CallFunctionFlags flags = instr->hydrogen()->function_flags();
3795 if (instr->hydrogen()->HasVectorAndSlot()) {
3796 Register slot_register = ToRegister(instr->temp_slot());
3797 Register vector_register = ToRegister(instr->temp_vector());
3798 DCHECK(slot_register.is(edx));
3799 DCHECK(vector_register.is(ebx));
3800
3801 AllowDeferredHandleDereference vector_structure_check;
3802 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3803 int index = vector->GetIndex(instr->hydrogen()->slot());
3804
3805 __ mov(vector_register, vector);
3806 __ mov(slot_register, Immediate(Smi::FromInt(index)));
3807
3808 CallICState::CallType call_type =
3809 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
3810
3811 Handle<Code> ic =
3812 CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
3813 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3814 } else {
3815 CallFunctionStub stub(isolate(), arity, flags);
3816 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3817 }
3818 }
3819
3820
3821 void LCodeGen::DoCallNew(LCallNew* instr) {
3822 DCHECK(ToRegister(instr->context()).is(esi));
3823 DCHECK(ToRegister(instr->constructor()).is(edi));
3824 DCHECK(ToRegister(instr->result()).is(eax));
3825
3826 // No cell in ebx for construct type feedback in optimized code.
3827 __ mov(ebx, isolate()->factory()->undefined_value());
3828 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
3829 __ Move(eax, Immediate(instr->arity()));
3830 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3831 }
3832
3833
3834 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3835 DCHECK(ToRegister(instr->context()).is(esi));
3836 DCHECK(ToRegister(instr->constructor()).is(edi));
3837 DCHECK(ToRegister(instr->result()).is(eax));
3838
3839 __ Move(eax, Immediate(instr->arity()));
3840 if (instr->arity() == 1) {
3841 // We only need the allocation site for the case where we have a length argument.
3842 // That case may bail out to the runtime, which will determine the correct
3843 // elements kind with the site.
3844 __ mov(ebx, instr->hydrogen()->site());
3845 } else {
3846 __ mov(ebx, isolate()->factory()->undefined_value());
3847 }
3848
3849 ElementsKind kind = instr->hydrogen()->elements_kind();
3850 AllocationSiteOverrideMode override_mode =
3851 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3852 ? DISABLE_ALLOCATION_SITES
3853 : DONT_OVERRIDE;
3854
3855 if (instr->arity() == 0) {
3856 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3857 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3858 } else if (instr->arity() == 1) {
3859 Label done;
3860 if (IsFastPackedElementsKind(kind)) {
3861 Label packed_case;
3862 // We might need to use the holey elements kind here;
3863 // look at the first argument.
3864 __ mov(ecx, Operand(esp, 0));
3865 __ test(ecx, ecx);
3866 __ j(zero, &packed_case, Label::kNear);
3867
3868 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3869 ArraySingleArgumentConstructorStub stub(isolate(),
3870 holey_kind,
3871 override_mode);
3872 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3873 __ jmp(&done, Label::kNear);
3874 __ bind(&packed_case);
3875 }
3876
3877 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3878 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3879 __ bind(&done);
3880 } else {
3881 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3882 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3883 }
3884 }
3885
3886
3887 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3888 DCHECK(ToRegister(instr->context()).is(esi));
3889 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
3890 }
3891
3892
3893 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3894 Register function = ToRegister(instr->function());
3895 Register code_object = ToRegister(instr->code_object());
3896 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
3897 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
3898 }
3899
3900
3901 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3902 Register result = ToRegister(instr->result());
3903 Register base = ToRegister(instr->base_object());
3904 if (instr->offset()->IsConstantOperand()) {
3905 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3906 __ lea(result, Operand(base, ToInteger32(offset)));
3907 } else {
3908 Register offset = ToRegister(instr->offset());
3909 __ lea(result, Operand(base, offset, times_1, 0));
3910 }
3911 }
3912
3913
3914 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3915 Representation representation = instr->hydrogen()->field_representation();
3916
3917 HObjectAccess access = instr->hydrogen()->access();
3918 int offset = access.offset();
3919
3920 if (access.IsExternalMemory()) {
3921 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3922 MemOperand operand = instr->object()->IsConstantOperand()
3923 ? MemOperand::StaticVariable(
3924 ToExternalReference(LConstantOperand::cast(instr->object())))
3925 : MemOperand(ToRegister(instr->object()), offset);
3926 if (instr->value()->IsConstantOperand()) {
3927 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
3928 __ mov(operand, Immediate(ToInteger32(operand_value)));
3929 } else {
3930 Register value = ToRegister(instr->value());
3931 __ Store(value, operand, representation);
3932 }
3933 return;
3934 }
3935
3936 Register object = ToRegister(instr->object());
3937 __ AssertNotSmi(object);
3938
3939 DCHECK(!representation.IsSmi() ||
3940 !instr->value()->IsConstantOperand() ||
3941 IsSmi(LConstantOperand::cast(instr->value())));
3942 if (representation.IsDouble()) {
3943 DCHECK(access.IsInobject());
3944 DCHECK(!instr->hydrogen()->has_transition());
3945 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3946 XMMRegister value = ToDoubleRegister(instr->value());
3947 __ movsd(FieldOperand(object, offset), value);
3948 return;
3949 }
3950
3951 if (instr->hydrogen()->has_transition()) {
3952 Handle<Map> transition = instr->hydrogen()->transition_map();
3953 AddDeprecationDependency(transition);
3954 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
3955 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3956 Register temp = ToRegister(instr->temp());
3957 Register temp_map = ToRegister(instr->temp_map());
3958 // Update the write barrier for the map field.
3959 __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
3960 }
3961 }
3962
3963 // Do the store.
3964 Register write_register = object;
3965 if (!access.IsInobject()) {
3966 write_register = ToRegister(instr->temp());
3967 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
3968 }
3969
3970 MemOperand operand = FieldOperand(write_register, offset);
3971 if (instr->value()->IsConstantOperand()) {
3972 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
3973 if (operand_value->IsRegister()) {
3974 Register value = ToRegister(operand_value);
3975 __ Store(value, operand, representation);
3976 } else if (representation.IsInteger32() || representation.IsExternal()) {
3977 Immediate immediate = ToImmediate(operand_value, representation);
3978 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3979 __ mov(operand, immediate);
3980 } else {
3981 Handle<Object> handle_value = ToHandle(operand_value);
3982 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3983 __ mov(operand, handle_value);
3984 }
3985 } else {
3986 Register value = ToRegister(instr->value());
3987 __ Store(value, operand, representation);
3988 }
3989
3990 if (instr->hydrogen()->NeedsWriteBarrier()) {
3991 Register value = ToRegister(instr->value());
3992 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
3993 // Update the write barrier for the object for in-object properties.
3994 __ RecordWriteField(write_register,
3995 offset,
3996 value,
3997 temp,
3998 kSaveFPRegs,
3999 EMIT_REMEMBERED_SET,
4000 instr->hydrogen()->SmiCheckForWriteBarrier(),
4001 instr->hydrogen()->PointersToHereCheckForValue());
4002 }
4003 }
4004
4005
4006 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4007 DCHECK(ToRegister(instr->context()).is(esi));
4008 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4009 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4010
4011 if (instr->hydrogen()->HasVectorAndSlot()) {
4012 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4013 }
4014
4015 __ mov(StoreDescriptor::NameRegister(), instr->name());
4016 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4017 isolate(), instr->language_mode(),
4018 instr->hydrogen()->initialization_state()).code();
4019 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4020 }
4021
4022
4023 void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
4024 DCHECK(ToRegister(instr->context()).is(esi));
4025 DCHECK(ToRegister(instr->value())
4026 .is(StoreGlobalViaContextDescriptor::ValueRegister()));
4027
4028 int const slot = instr->slot_index();
4029 int const depth = instr->depth();
4030 if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
4031 __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
4032 Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
4033 isolate(), depth, instr->language_mode())
4034 .code();
4035 CallCode(stub, RelocInfo::CODE_TARGET, instr);
4036 } else {
4037 __ Push(Smi::FromInt(slot));
4038 __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
4039 __ CallRuntime(is_strict(instr->language_mode())
4040 ? Runtime::kStoreGlobalViaContext_Strict
4041 : Runtime::kStoreGlobalViaContext_Sloppy,
4042 2);
4043 }
4044 }
4045
4046
4047 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4048 Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
4049 if (instr->index()->IsConstantOperand()) {
4050 __ cmp(ToOperand(instr->length()),
4051 ToImmediate(LConstantOperand::cast(instr->index()),
4052 instr->hydrogen()->length()->representation()));
4053 cc = CommuteCondition(cc);
4054 } else if (instr->length()->IsConstantOperand()) {
4055 __ cmp(ToOperand(instr->index()),
4056 ToImmediate(LConstantOperand::cast(instr->length()),
4057 instr->hydrogen()->index()->representation()));
4058 } else {
4059 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4060 }
4061 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4062 Label done;
4063 __ j(NegateCondition(cc), &done, Label::kNear);
4064 __ int3();
4065 __ bind(&done);
4066 } else {
4067 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
4068 }
4069 }
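// Sketch of the deopt condition above: the check fails when the index is not
// below the length (or not at-or-below it when equality is allowed);
// CommuteCondition flips the comparison when the operands are swapped
// because the index is an immediate.
#include <cstdint>
bool BoundsCheckFails(uint32_t index, uint32_t length, bool allow_equality) {
  return allow_equality ? index > length : index >= length;
}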
4070
4071
4072 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4073 ElementsKind elements_kind = instr->elements_kind();
4074 LOperand* key = instr->key();
4075 if (!key->IsConstantOperand() &&
4076 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4077 elements_kind)) {
4078 __ SmiUntag(ToRegister(key));
4079 }
4080 Operand operand(BuildFastArrayOperand(
4081 instr->elements(),
4082 key,
4083 instr->hydrogen()->key()->representation(),
4084 elements_kind,
4085 instr->base_offset()));
4086 if (elements_kind == FLOAT32_ELEMENTS) {
4087 XMMRegister xmm_scratch = double_scratch0();
4088 __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
4089 __ movss(operand, xmm_scratch);
4090 } else if (elements_kind == FLOAT64_ELEMENTS) {
4091 __ movsd(operand, ToDoubleRegister(instr->value()));
4092 } else {
4093 Register value = ToRegister(instr->value());
4094 switch (elements_kind) {
4095 case UINT8_ELEMENTS:
4096 case INT8_ELEMENTS:
4097 case UINT8_CLAMPED_ELEMENTS:
4098 __ mov_b(operand, value);
4099 break;
4100 case UINT16_ELEMENTS:
4101 case INT16_ELEMENTS:
4102 __ mov_w(operand, value);
4103 break;
4104 case UINT32_ELEMENTS:
4105 case INT32_ELEMENTS:
4106 __ mov(operand, value);
4107 break;
4108 case FLOAT32_ELEMENTS:
4109 case FLOAT64_ELEMENTS:
4110 case FAST_SMI_ELEMENTS:
4111 case FAST_ELEMENTS:
4112 case FAST_DOUBLE_ELEMENTS:
4113 case FAST_HOLEY_SMI_ELEMENTS:
4114 case FAST_HOLEY_ELEMENTS:
4115 case FAST_HOLEY_DOUBLE_ELEMENTS:
4116 case DICTIONARY_ELEMENTS:
4117 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4118 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4119 UNREACHABLE();
4120 break;
4121 }
4122 }
4123 }
4124
4125
4126 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4127 Operand double_store_operand = BuildFastArrayOperand(
4128 instr->elements(),
4129 instr->key(),
4130 instr->hydrogen()->key()->representation(),
4131 FAST_DOUBLE_ELEMENTS,
4132 instr->base_offset());
4133
4134 XMMRegister value = ToDoubleRegister(instr->value());
4135
4136 if (instr->NeedsCanonicalization()) {
4137 XMMRegister xmm_scratch = double_scratch0();
4138 // Turn potential sNaN value into qNaN.
4139 __ xorps(xmm_scratch, xmm_scratch);
4140 __ subsd(value, xmm_scratch);
4141 }
4142
4143 __ movsd(double_store_operand, value);
4144 }
4145
4146
4147 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4148 Register elements = ToRegister(instr->elements());
4149 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4150
4151 Operand operand = BuildFastArrayOperand(
4152 instr->elements(),
4153 instr->key(),
4154 instr->hydrogen()->key()->representation(),
4155 FAST_ELEMENTS,
4156 instr->base_offset());
4157 if (instr->value()->IsRegister()) {
4158 __ mov(operand, ToRegister(instr->value()));
4159 } else {
4160 LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4161 if (IsSmi(operand_value)) {
4162 Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4163 __ mov(operand, immediate);
4164 } else {
4165 DCHECK(!IsInteger32(operand_value));
4166 Handle<Object> handle_value = ToHandle(operand_value);
4167 __ mov(operand, handle_value);
4168 }
4169 }
4170
4171 if (instr->hydrogen()->NeedsWriteBarrier()) {
4172 DCHECK(instr->value()->IsRegister());
4173 Register value = ToRegister(instr->value());
4174 DCHECK(!instr->key()->IsConstantOperand());
4175 SmiCheck check_needed =
4176 instr->hydrogen()->value()->type().IsHeapObject()
4177 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4178 // Compute address of modified element and store it into key register.
4179 __ lea(key, operand);
4180 __ RecordWrite(elements,
4181 key,
4182 value,
4183 kSaveFPRegs,
4184 EMIT_REMEMBERED_SET,
4185 check_needed,
4186 instr->hydrogen()->PointersToHereCheckForValue());
4187 }
4188 }
4189
4190
4191 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4192 // Dispatch by elements kind: external (typed array), fast double, or fast.
4193 if (instr->is_fixed_typed_array()) {
4194 DoStoreKeyedExternalArray(instr);
4195 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4196 DoStoreKeyedFixedDoubleArray(instr);
4197 } else {
4198 DoStoreKeyedFixedArray(instr);
4199 }
4200 }
4201
4202
4203 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4204 DCHECK(ToRegister(instr->context()).is(esi));
4205 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4206 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4207 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4208
4209 if (instr->hydrogen()->HasVectorAndSlot()) {
4210 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4211 }
4212
4213 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4214 isolate(), instr->language_mode(),
4215 instr->hydrogen()->initialization_state()).code();
4216 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4217 }
4218
4219
4220 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4221 Register object = ToRegister(instr->object());
4222 Register temp = ToRegister(instr->temp());
4223 Label no_memento_found;
4224 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4225 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
4226 __ bind(&no_memento_found);
4227 }
4228
4229
4230 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4231 class DeferredMaybeGrowElements final : public LDeferredCode {
4232 public:
4233 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4234 : LDeferredCode(codegen), instr_(instr) {}
4235 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4236 LInstruction* instr() override { return instr_; }
4237
4238 private:
4239 LMaybeGrowElements* instr_;
4240 };
4241
4242 Register result = eax;
4243 DeferredMaybeGrowElements* deferred =
4244 new (zone()) DeferredMaybeGrowElements(this, instr);
4245 LOperand* key = instr->key();
4246 LOperand* current_capacity = instr->current_capacity();
4247
4248 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4249 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4250 DCHECK(key->IsConstantOperand() || key->IsRegister());
4251 DCHECK(current_capacity->IsConstantOperand() ||
4252 current_capacity->IsRegister());
4253
4254 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4255 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4256 int32_t constant_capacity =
4257 ToInteger32(LConstantOperand::cast(current_capacity));
4258 if (constant_key >= constant_capacity) {
4259 // Deferred case.
4260 __ jmp(deferred->entry());
4261 }
4262 } else if (key->IsConstantOperand()) {
4263 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4264 __ cmp(ToOperand(current_capacity), Immediate(constant_key));
4265 __ j(less_equal, deferred->entry());
4266 } else if (current_capacity->IsConstantOperand()) {
4267 int32_t constant_capacity =
4268 ToInteger32(LConstantOperand::cast(current_capacity));
4269 __ cmp(ToRegister(key), Immediate(constant_capacity));
4270 __ j(greater_equal, deferred->entry());
4271 } else {
4272 __ cmp(ToRegister(key), ToRegister(current_capacity));
4273 __ j(greater_equal, deferred->entry());
4274 }
4275
4276 __ mov(result, ToOperand(instr->elements()));
4277 __ bind(deferred->exit());
4278 }
4279
4280
4281 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4282 // TODO(3095996): Get rid of this. For now, we need to make the
4283 // result register contain a valid pointer because it is already
4284 // contained in the register pointer map.
4285 Register result = eax;
4286 __ Move(result, Immediate(0));
4287
4288 // We have to call a stub.
4289 {
4290 PushSafepointRegistersScope scope(this);
4291 if (instr->object()->IsRegister()) {
4292 __ Move(result, ToRegister(instr->object()));
4293 } else {
4294 __ mov(result, ToOperand(instr->object()));
4295 }
4296
4297 LOperand* key = instr->key();
4298 if (key->IsConstantOperand()) {
4299 __ mov(ebx, ToImmediate(key, Representation::Smi()));
4300 } else {
4301 __ Move(ebx, ToRegister(key));
4302 __ SmiTag(ebx);
4303 }
4304
4305 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4306 instr->hydrogen()->kind());
4307 __ CallStub(&stub);
4308 RecordSafepointWithLazyDeopt(
4309 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4310 __ StoreToSafepointRegisterSlot(result, result);
4311 }
4312
4313 // Deopt on smi, which means the elements array changed to dictionary mode.
4314 __ test(result, Immediate(kSmiTagMask));
4315 DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
4316 }
4317
4318
4319 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4320 Register object_reg = ToRegister(instr->object());
4321
4322 Handle<Map> from_map = instr->original_map();
4323 Handle<Map> to_map = instr->transitioned_map();
4324 ElementsKind from_kind = instr->from_kind();
4325 ElementsKind to_kind = instr->to_kind();
4326
4327 Label not_applicable;
4328 bool is_simple_map_transition =
4329 IsSimpleMapChangeTransition(from_kind, to_kind);
4330 Label::Distance branch_distance =
4331 is_simple_map_transition ? Label::kNear : Label::kFar;
4332 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4333 __ j(not_equal, &not_applicable, branch_distance);
4334 if (is_simple_map_transition) {
4335 Register new_map_reg = ToRegister(instr->new_map_temp());
4336 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4337 Immediate(to_map));
4338 // Write barrier.
4339 DCHECK_NOT_NULL(instr->temp());
4340 __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4341 ToRegister(instr->temp()),
4342 kDontSaveFPRegs);
4343 } else {
4344 DCHECK(ToRegister(instr->context()).is(esi));
4345 DCHECK(object_reg.is(eax));
4346 PushSafepointRegistersScope scope(this);
4347 __ mov(ebx, to_map);
4348 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4349 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4350 __ CallStub(&stub);
4351 RecordSafepointWithLazyDeopt(instr,
4352 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4353 }
4354 __ bind(&not_applicable);
4355 }
4356
4357
4358 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4359 class DeferredStringCharCodeAt final : public LDeferredCode {
4360 public:
4361 DeferredStringCharCodeAt(LCodeGen* codegen,
4362 LStringCharCodeAt* instr)
4363 : LDeferredCode(codegen), instr_(instr) { }
4364 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4365 LInstruction* instr() override { return instr_; }
4366
4367 private:
4368 LStringCharCodeAt* instr_;
4369 };
4370
4371 DeferredStringCharCodeAt* deferred =
4372 new(zone()) DeferredStringCharCodeAt(this, instr);
4373
4374 StringCharLoadGenerator::Generate(masm(),
4375 factory(),
4376 ToRegister(instr->string()),
4377 ToRegister(instr->index()),
4378 ToRegister(instr->result()),
4379 deferred->entry());
4380 __ bind(deferred->exit());
4381 }
4382
4383
4384 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4385 Register string = ToRegister(instr->string());
4386 Register result = ToRegister(instr->result());
4387
4388 // TODO(3095996): Get rid of this. For now, we need to make the
4389 // result register contain a valid pointer because it is already
4390 // contained in the register pointer map.
4391 __ Move(result, Immediate(0));
4392
4393 PushSafepointRegistersScope scope(this);
4394 __ push(string);
4395 // Push the index as a smi. This is safe because of the checks in
4396 // DoStringCharCodeAt above.
4397 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4398 if (instr->index()->IsConstantOperand()) {
4399 Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4400 Representation::Smi());
4401 __ push(immediate);
4402 } else {
4403 Register index = ToRegister(instr->index());
4404 __ SmiTag(index);
4405 __ push(index);
4406 }
4407 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
4408 instr, instr->context());
4409 __ AssertSmi(eax);
4410 __ SmiUntag(eax);
4411 __ StoreToSafepointRegisterSlot(result, eax);
4412 }
4413
4414
4415 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4416 class DeferredStringCharFromCode final : public LDeferredCode {
4417 public:
4418 DeferredStringCharFromCode(LCodeGen* codegen,
4419 LStringCharFromCode* instr)
4420 : LDeferredCode(codegen), instr_(instr) { }
4421 void Generate() override {
4422 codegen()->DoDeferredStringCharFromCode(instr_);
4423 }
4424 LInstruction* instr() override { return instr_; }
4425
4426 private:
4427 LStringCharFromCode* instr_;
4428 };
4429
4430 DeferredStringCharFromCode* deferred =
4431 new(zone()) DeferredStringCharFromCode(this, instr);
4432
4433 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4434 Register char_code = ToRegister(instr->char_code());
4435 Register result = ToRegister(instr->result());
4436 DCHECK(!char_code.is(result));
4437
4438 __ cmp(char_code, String::kMaxOneByteCharCode);
4439 __ j(above, deferred->entry());
4440 __ Move(result, Immediate(factory()->single_character_string_cache()));
4441 __ mov(result, FieldOperand(result,
4442 char_code, times_pointer_size,
4443 FixedArray::kHeaderSize));
4444 __ cmp(result, factory()->undefined_value());
4445 __ j(equal, deferred->entry());
4446 __ bind(deferred->exit());
4447 }
4448
4449
4450 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4451 Register char_code = ToRegister(instr->char_code());
4452 Register result = ToRegister(instr->result());
4453
4454 // TODO(3095996): Get rid of this. For now, we need to make the
4455 // result register contain a valid pointer because it is already
4456 // contained in the register pointer map.
4457 __ Move(result, Immediate(0));
4458
4459 PushSafepointRegistersScope scope(this);
4460 __ SmiTag(char_code);
4461 __ push(char_code);
4462 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4463 __ StoreToSafepointRegisterSlot(result, eax);
4464 }
4465
4466
4467 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4468 DCHECK(ToRegister(instr->context()).is(esi));
4469 DCHECK(ToRegister(instr->left()).is(edx));
4470 DCHECK(ToRegister(instr->right()).is(eax));
4471 StringAddStub stub(isolate(),
4472 instr->hydrogen()->flags(),
4473 instr->hydrogen()->pretenure_flag());
4474 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4475 }
4476
4477
4478 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4479 LOperand* input = instr->value();
4480 LOperand* output = instr->result();
4481 DCHECK(input->IsRegister() || input->IsStackSlot());
4482 DCHECK(output->IsDoubleRegister());
4483 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4484 }
4485
4486
4487 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4488 LOperand* input = instr->value();
4489 LOperand* output = instr->result();
4490 __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4491 }
4492
4493
4494 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4495 class DeferredNumberTagI final : public LDeferredCode {
4496 public:
4497 DeferredNumberTagI(LCodeGen* codegen,
4498 LNumberTagI* instr)
4499 : LDeferredCode(codegen), instr_(instr) { }
4500 void Generate() override {
4501 codegen()->DoDeferredNumberTagIU(
4502 instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
4503 }
4504 LInstruction* instr() override { return instr_; }
4505
4506 private:
4507 LNumberTagI* instr_;
4508 };
4509
4510 LOperand* input = instr->value();
4511 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4512 Register reg = ToRegister(input);
4513
4514 DeferredNumberTagI* deferred =
4515 new(zone()) DeferredNumberTagI(this, instr);
4516 __ SmiTag(reg);
4517 __ j(overflow, deferred->entry());
4518 __ bind(deferred->exit());
4519 }
4520
4521
4522 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4523 class DeferredNumberTagU final : public LDeferredCode {
4524 public:
4525 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4526 : LDeferredCode(codegen), instr_(instr) { }
4527 void Generate() override {
4528 codegen()->DoDeferredNumberTagIU(
4529 instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
4530 }
4531 LInstruction* instr() override { return instr_; }
4532
4533 private:
4534 LNumberTagU* instr_;
4535 };
4536
4537 LOperand* input = instr->value();
4538 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4539 Register reg = ToRegister(input);
4540
4541 DeferredNumberTagU* deferred =
4542 new(zone()) DeferredNumberTagU(this, instr);
4543 __ cmp(reg, Immediate(Smi::kMaxValue));
4544 __ j(above, deferred->entry());
4545 __ SmiTag(reg);
4546 __ bind(deferred->exit());
4547 }
4548
4549
4550 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4551 LOperand* value,
4552 LOperand* temp,
4553 IntegerSignedness signedness) {
4554 Label done, slow;
4555 Register reg = ToRegister(value);
4556 Register tmp = ToRegister(temp);
4557 XMMRegister xmm_scratch = double_scratch0();
4558
4559 if (signedness == SIGNED_INT32) {
4560 // There was overflow, so bits 30 and 31 of the original integer
4561 // disagree. Try to allocate a heap number in new space and store
4562 // the value in there. If that fails, call the runtime system.
4563 __ SmiUntag(reg);
4564 __ xor_(reg, 0x80000000);
4565 __ Cvtsi2sd(xmm_scratch, Operand(reg));
4566 } else {
4567 __ LoadUint32(xmm_scratch, reg);
4568 }
4569
4570 if (FLAG_inline_new) {
4571 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4572 __ jmp(&done, Label::kNear);
4573 }
4574
4575 // Slow case: Call the runtime system to do the number allocation.
4576 __ bind(&slow);
4577 {
4578 // TODO(3095996): Put a valid pointer value in the stack slot where the
4579 // result register is stored, as this register is in the pointer map, but
4580 // contains an integer value.
4581 __ Move(reg, Immediate(0));
4582
4583 // Preserve the value of all registers.
4584 PushSafepointRegistersScope scope(this);
4585
4586 // NumberTagI and NumberTagD use the context from the frame, rather than
4587 // the environment's HContext or HInlinedContext value.
4588 // They only call Runtime::kAllocateHeapNumber.
4589 // The corresponding HChange instructions are added in a phase that does
4590 // not have easy access to the local context.
4591 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4592 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4593 RecordSafepointWithRegisters(
4594 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4595 __ StoreToSafepointRegisterSlot(reg, eax);
4596 }
4597
4598 // Done. Store the value from xmm_scratch into the value field of the
4599 // allocated heap number.
4600 __ bind(&done);
4601 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
4602 }
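// Sketch of the signed recovery above: SmiTag is a left shift by one, so when
// it overflows, shifting back arithmetically and flipping the sign bit (the
// xor with 0x80000000) reconstructs the original int32 before it is boxed as
// a heap number. Two's complement int32_t conversion is assumed.
#include <cstdint>
int32_t RecoverAfterFailedSmiTag(uint32_t tagged) {
  uint32_t untagged = (tagged >> 1) | (tagged & 0x80000000u);  // like sar by 1
  return static_cast<int32_t>(untagged ^ 0x80000000u);
}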
4603
4604
4605 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4606 class DeferredNumberTagD final : public LDeferredCode {
4607 public:
4608 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4609 : LDeferredCode(codegen), instr_(instr) { }
4610 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4611 LInstruction* instr() override { return instr_; }
4612
4613 private:
4614 LNumberTagD* instr_;
4615 };
4616
4617 Register reg = ToRegister(instr->result());
4618
4619 DeferredNumberTagD* deferred =
4620 new(zone()) DeferredNumberTagD(this, instr);
4621 if (FLAG_inline_new) {
4622 Register tmp = ToRegister(instr->temp());
4623 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4624 } else {
4625 __ jmp(deferred->entry());
4626 }
4627 __ bind(deferred->exit());
4628 XMMRegister input_reg = ToDoubleRegister(instr->value());
4629 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4630 }
4631
4632
4633 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4634 // TODO(3095996): Get rid of this. For now, we need to make the
4635 // result register contain a valid pointer because it is already
4636 // contained in the register pointer map.
4637 Register reg = ToRegister(instr->result());
4638 __ Move(reg, Immediate(0));
4639
4640 PushSafepointRegistersScope scope(this);
4641 // NumberTagI and NumberTagD use the context from the frame, rather than
4642 // the environment's HContext or HInlinedContext value.
4643 // They only call Runtime::kAllocateHeapNumber.
4644 // The corresponding HChange instructions are added in a phase that does
4645 // not have easy access to the local context.
4646 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4647 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4648 RecordSafepointWithRegisters(
4649 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4650 __ StoreToSafepointRegisterSlot(reg, eax);
4651 }
4652
4653
4654 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4655 HChange* hchange = instr->hydrogen();
4656 Register input = ToRegister(instr->value());
4657 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4658 hchange->value()->CheckFlag(HValue::kUint32)) {
4659 __ test(input, Immediate(0xc0000000));
4660 DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
4661 }
4662 __ SmiTag(input);
4663 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4664 !hchange->value()->CheckFlag(HValue::kUint32)) {
4665 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
4666 }
4667 }
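// Sketch of the uint32 check above: a smi on ia32 carries a 31-bit signed
// payload, so any unsigned value with bit 30 or bit 31 set cannot be tagged
// and forces the overflow deopt.
#include <cstdint>
bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;  // i.e. value <= 2^30 - 1
}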
4668
4669
4670 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4671 LOperand* input = instr->value();
4672 Register result = ToRegister(input);
4673 DCHECK(input->IsRegister() && input->Equals(instr->result()));
4674 if (instr->needs_check()) {
4675 __ test(result, Immediate(kSmiTagMask));
4676 DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
4677 } else {
4678 __ AssertSmi(result);
4679 }
4680 __ SmiUntag(result);
4681 }
4682
4683
4684 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4685 Register temp_reg, XMMRegister result_reg,
4686 NumberUntagDMode mode) {
4687 bool can_convert_undefined_to_nan =
4688 instr->hydrogen()->can_convert_undefined_to_nan();
4689 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4690
4691 Label convert, load_smi, done;
4692
4693 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4694 // Smi check.
4695 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4696
4697 // Heap number map check.
4698 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4699 factory()->heap_number_map());
4700 if (can_convert_undefined_to_nan) {
4701 __ j(not_equal, &convert, Label::kNear);
4702 } else {
4703 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
4704 }
4705
4706 // Heap number to XMM conversion.
4707 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4708
4709 if (deoptimize_on_minus_zero) {
4710 XMMRegister xmm_scratch = double_scratch0();
4711 __ xorps(xmm_scratch, xmm_scratch);
4712 __ ucomisd(result_reg, xmm_scratch);
4713 __ j(not_zero, &done, Label::kNear);
4714 __ movmskpd(temp_reg, result_reg);
4715 __ test_b(temp_reg, 1);
4716 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
4717 }
4718 __ jmp(&done, Label::kNear);
4719
4720 if (can_convert_undefined_to_nan) {
4721 __ bind(&convert);
4722
4723 // Convert undefined to NaN.
4724 __ cmp(input_reg, factory()->undefined_value());
4725 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
4726
4727 __ pcmpeqd(result_reg, result_reg);
4728 __ jmp(&done, Label::kNear);
4729 }
4730 } else {
4731 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4732 }
4733
4734 __ bind(&load_smi);
4735 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
4736 // input register since we avoid dependencies.
4737 __ mov(temp_reg, input_reg);
4738 __ SmiUntag(temp_reg); // Untag smi before converting to float.
4739 __ Cvtsi2sd(result_reg, Operand(temp_reg));
4740 __ bind(&done);
4741 }
4742
4743
4744 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4745 Register input_reg = ToRegister(instr->value());
4746
4747 // The input was optimistically untagged; revert it.
4748 STATIC_ASSERT(kSmiTagSize == 1);
4749 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
4750
4751 if (instr->truncating()) {
4752 Label no_heap_number, check_bools, check_false;
4753
4754 // Heap number map check.
4755 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4756 factory()->heap_number_map());
4757 __ j(not_equal, &no_heap_number, Label::kNear);
4758 __ TruncateHeapNumberToI(input_reg, input_reg);
4759 __ jmp(done);
4760
4761 __ bind(&no_heap_number);
4762 // Check for Oddballs. Undefined/False is converted to zero and True to one
4763 // for truncating conversions.
4764 __ cmp(input_reg, factory()->undefined_value());
4765 __ j(not_equal, &check_bools, Label::kNear);
4766 __ Move(input_reg, Immediate(0));
4767 __ jmp(done);
4768
4769 __ bind(&check_bools);
4770 __ cmp(input_reg, factory()->true_value());
4771 __ j(not_equal, &check_false, Label::kNear);
4772 __ Move(input_reg, Immediate(1));
4773 __ jmp(done);
4774
4775 __ bind(&check_false);
4776 __ cmp(input_reg, factory()->false_value());
4777 DeoptimizeIf(not_equal, instr,
4778 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
4779 __ Move(input_reg, Immediate(0));
4780 } else {
4781 XMMRegister scratch = ToDoubleRegister(instr->temp());
4782 DCHECK(!scratch.is(xmm0));
4783 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4784 isolate()->factory()->heap_number_map());
4785 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
4786 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4787 __ cvttsd2si(input_reg, Operand(xmm0));
4788 __ Cvtsi2sd(scratch, Operand(input_reg));
4789 __ ucomisd(xmm0, scratch);
4790 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
4791 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
4792 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
4793 __ test(input_reg, Operand(input_reg));
4794 __ j(not_zero, done);
4795 __ movmskpd(input_reg, xmm0);
4796 __ and_(input_reg, 1);
4797 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
4798 }
4799 }
4800 }
4801
4802
4803 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4804 class DeferredTaggedToI final : public LDeferredCode {
4805 public:
4806 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4807 : LDeferredCode(codegen), instr_(instr) { }
4808 void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
4809 LInstruction* instr() override { return instr_; }
4810
4811 private:
4812 LTaggedToI* instr_;
4813 };
4814
4815 LOperand* input = instr->value();
4816 DCHECK(input->IsRegister());
4817 Register input_reg = ToRegister(input);
4818 DCHECK(input_reg.is(ToRegister(instr->result())));
4819
4820 if (instr->hydrogen()->value()->representation().IsSmi()) {
4821 __ SmiUntag(input_reg);
4822 } else {
4823 DeferredTaggedToI* deferred =
4824 new(zone()) DeferredTaggedToI(this, instr);
4825 // Optimistically untag the input.
4826 // If the input is a HeapObject, SmiUntag will set the carry flag.
4827 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
4828 __ SmiUntag(input_reg);
4829 // Branch to deferred code if the input was tagged.
4830 // The deferred code will take care of restoring the tag.
4831 __ j(carry, deferred->entry());
4832 __ bind(deferred->exit());
4833 }
4834 }
4835
4836
4837 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4838 LOperand* input = instr->value();
4839 DCHECK(input->IsRegister());
4840 LOperand* temp = instr->temp();
4841 DCHECK(temp->IsRegister());
4842 LOperand* result = instr->result();
4843 DCHECK(result->IsDoubleRegister());
4844
4845 Register input_reg = ToRegister(input);
4846 Register temp_reg = ToRegister(temp);
4847
4848 HValue* value = instr->hydrogen()->value();
4849 NumberUntagDMode mode = value->representation().IsSmi()
4850 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4851
4852 XMMRegister result_reg = ToDoubleRegister(result);
4853 EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
4854 }
4855
4856
4857 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4858 LOperand* input = instr->value();
4859 DCHECK(input->IsDoubleRegister());
4860 LOperand* result = instr->result();
4861 DCHECK(result->IsRegister());
4862 Register result_reg = ToRegister(result);
4863
4864 if (instr->truncating()) {
4865 XMMRegister input_reg = ToDoubleRegister(input);
4866 __ TruncateDoubleToI(result_reg, input_reg);
4867 } else {
4868 Label lost_precision, is_nan, minus_zero, done;
4869 XMMRegister input_reg = ToDoubleRegister(input);
4870 XMMRegister xmm_scratch = double_scratch0();
4871 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4872 __ DoubleToI(result_reg, input_reg, xmm_scratch,
4873 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
4874 &is_nan, &minus_zero, dist);
4875 __ jmp(&done, dist);
4876 __ bind(&lost_precision);
4877 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
4878 __ bind(&is_nan);
4879 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
4880 __ bind(&minus_zero);
4881 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
4882 __ bind(&done);
4883 }
4884 }
4885
4886
4887 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4888 LOperand* input = instr->value();
4889 DCHECK(input->IsDoubleRegister());
4890 LOperand* result = instr->result();
4891 DCHECK(result->IsRegister());
4892 Register result_reg = ToRegister(result);
4893
4894 Label lost_precision, is_nan, minus_zero, done;
4895 XMMRegister input_reg = ToDoubleRegister(input);
4896 XMMRegister xmm_scratch = double_scratch0();
4897 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4898 __ DoubleToI(result_reg, input_reg, xmm_scratch,
4899 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
4900 &minus_zero, dist);
4901 __ jmp(&done, dist);
4902 __ bind(&lost_precision);
4903 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
4904 __ bind(&is_nan);
4905 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
4906 __ bind(&minus_zero);
4907 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
4908 __ bind(&done);
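// Tagging shifts the value up by one bit; deoptimize if the result does
// not fit in a smi.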
4909 __ SmiTag(result_reg);
4910 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
4911 }
4912
4913
4914 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4915 LOperand* input = instr->value();
4916 __ test(ToOperand(input), Immediate(kSmiTagMask));
4917 DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
4918 }
4919
4920
4921 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4922 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4923 LOperand* input = instr->value();
4924 __ test(ToOperand(input), Immediate(kSmiTagMask));
4925 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
4926 }
4927 }
4928
4929
4930 void LCodeGen::DoCheckArrayBufferNotNeutered(
4931 LCheckArrayBufferNotNeutered* instr) {
4932 Register view = ToRegister(instr->view());
4933 Register scratch = ToRegister(instr->scratch());
4934
4935 __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
4936 __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
4937 1 << JSArrayBuffer::WasNeutered::kShift);
4938 DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
4939 }
4940
4941
4942 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4943 Register input = ToRegister(instr->value());
4944 Register temp = ToRegister(instr->temp());
4945
4946 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
4947
4948 if (instr->hydrogen()->is_interval_check()) {
4949 InstanceType first;
4950 InstanceType last;
4951 instr->hydrogen()->GetCheckInterval(&first, &last);
4952
4953 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4954 static_cast<int8_t>(first));
4955
4956 // If there is only one type in the interval, check for equality.
4957 if (first == last) {
4958 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
4959 } else {
4960 DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
4961 // Omit check for the last type.
4962 if (last != LAST_TYPE) {
4963 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4964 static_cast<int8_t>(last));
4965 DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
4966 }
4967 }
4968 } else {
4969 uint8_t mask;
4970 uint8_t tag;
4971 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4972
4973 if (base::bits::IsPowerOfTwo32(mask)) {
4974 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
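// A single-bit mask can be checked with test_b alone: the bit must be
// clear when the tag is zero and set when the tag equals the mask.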
4975 __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
4976 DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
4977 Deoptimizer::kWrongInstanceType);
4978 } else {
4979 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
4980 __ and_(temp, mask);
4981 __ cmp(temp, tag);
4982 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
4983 }
4984 }
4985 }
4986
4987
4988 void LCodeGen::DoCheckValue(LCheckValue* instr) {
4989 Handle<HeapObject> object = instr->hydrogen()->object().handle();
4990 if (instr->hydrogen()->object_in_new_space()) {
4991 Register reg = ToRegister(instr->value());
4992 Handle<Cell> cell = isolate()->factory()->NewCell(object);
4993 __ cmp(reg, Operand::ForCell(cell));
4994 } else {
4995 Operand operand = ToOperand(instr->value());
4996 __ cmp(operand, object);
4997 }
4998 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
4999 }
5000
5001
5002 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5003 {
5004 PushSafepointRegistersScope scope(this);
5005 __ push(object);
5006 __ xor_(esi, esi);
5007 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5008 RecordSafepointWithRegisters(
5009 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5010
5011 __ test(eax, Immediate(kSmiTagMask));
5012 }
5013 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
5014 }
5015
5016
5017 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5018 class DeferredCheckMaps final : public LDeferredCode {
5019 public:
5020 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5021 : LDeferredCode(codegen), instr_(instr), object_(object) {
5022 SetExit(check_maps());
5023 }
5024 void Generate() override {
5025 codegen()->DoDeferredInstanceMigration(instr_, object_);
5026 }
5027 Label* check_maps() { return &check_maps_; }
5028 LInstruction* instr() override { return instr_; }
5029
5030 private:
5031 LCheckMaps* instr_;
5032 Label check_maps_;
5033 Register object_;
5034 };
5035
5036 if (instr->hydrogen()->IsStabilityCheck()) {
5037 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5038 for (int i = 0; i < maps->size(); ++i) {
5039 AddStabilityDependency(maps->at(i).handle());
5040 }
5041 return;
5042 }
5043
5044 LOperand* input = instr->value();
5045 DCHECK(input->IsRegister());
5046 Register reg = ToRegister(input);
5047
5048 DeferredCheckMaps* deferred = NULL;
5049 if (instr->hydrogen()->HasMigrationTarget()) {
5050 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5051 __ bind(deferred->check_maps());
5052 }
5053
5054 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5055 Label success;
5056 for (int i = 0; i < maps->size() - 1; i++) {
5057 Handle<Map> map = maps->at(i).handle();
5058 __ CompareMap(reg, map);
5059 __ j(equal, &success, Label::kNear);
5060 }
5061
5062 Handle<Map> map = maps->at(maps->size() - 1).handle();
5063 __ CompareMap(reg, map);
5064 if (instr->hydrogen()->HasMigrationTarget()) {
5065 __ j(not_equal, deferred->entry());
5066 } else {
5067 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5068 }
5069
5070 __ bind(&success);
5071 }
5072
5073
5074 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5075 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5076 XMMRegister xmm_scratch = double_scratch0();
5077 Register result_reg = ToRegister(instr->result());
5078 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5079 }
5080
5081
5082 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5083 DCHECK(instr->unclamped()->Equals(instr->result()));
5084 Register value_reg = ToRegister(instr->result());
5085 __ ClampUint8(value_reg);
5086 }
5087
5088
5089 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5090 DCHECK(instr->unclamped()->Equals(instr->result()));
5091 Register input_reg = ToRegister(instr->unclamped());
5092 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5093 XMMRegister xmm_scratch = double_scratch0();
5094 Label is_smi, done, heap_number;
5095
5096 __ JumpIfSmi(input_reg, &is_smi);
5097
5098 // Check for heap number
5099 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5100 factory()->heap_number_map());
5101 __ j(equal, &heap_number, Label::kNear);
5102
5103 // Check for undefined. Undefined is converted to zero for clamping
5104 // conversions.
5105 __ cmp(input_reg, factory()->undefined_value());
5106 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
5107 __ mov(input_reg, 0);
5108 __ jmp(&done, Label::kNear);
5109
5110 // Heap number
5111 __ bind(&heap_number);
5112 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5113 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5114 __ jmp(&done, Label::kNear);
5115
5116 // smi
5117 __ bind(&is_smi);
5118 __ SmiUntag(input_reg);
5119 __ ClampUint8(input_reg);
5120 __ bind(&done);
5121 }
5122
5123
5124 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5125 XMMRegister value_reg = ToDoubleRegister(instr->value());
5126 Register result_reg = ToRegister(instr->result());
5127 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5128 if (CpuFeatures::IsSupported(SSE4_1)) {
5129 CpuFeatureScope scope2(masm(), SSE4_1);
5130 __ pextrd(result_reg, value_reg, 1);
5131 } else {
5132 XMMRegister xmm_scratch = double_scratch0();
5133 __ pshufd(xmm_scratch, value_reg, 1);
5134 __ movd(result_reg, xmm_scratch);
5135 }
5136 } else {
5137 __ movd(result_reg, value_reg);
5138 }
5139 }
5140
5141
5142 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5143 Register hi_reg = ToRegister(instr->hi());
5144 Register lo_reg = ToRegister(instr->lo());
5145 XMMRegister result_reg = ToDoubleRegister(instr->result());
5146
5147 if (CpuFeatures::IsSupported(SSE4_1)) {
5148 CpuFeatureScope scope2(masm(), SSE4_1);
5149 __ movd(result_reg, lo_reg);
5150 __ pinsrd(result_reg, hi_reg, 1);
5151 } else {
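// Without SSE4.1, assemble the double from its two halves: place the
// high word, shift it into the upper 32 bits, then OR in the low word.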
5152 XMMRegister xmm_scratch = double_scratch0();
5153 __ movd(result_reg, hi_reg);
5154 __ psllq(result_reg, 32);
5155 __ movd(xmm_scratch, lo_reg);
5156 __ orps(result_reg, xmm_scratch);
5157 }
5158 }
5159
5160
5161 void LCodeGen::DoAllocate(LAllocate* instr) {
5162 class DeferredAllocate final : public LDeferredCode {
5163 public:
5164 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5165 : LDeferredCode(codegen), instr_(instr) { }
5166 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5167 LInstruction* instr() override { return instr_; }
5168
5169 private:
5170 LAllocate* instr_;
5171 };
5172
5173 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
5174
5175 Register result = ToRegister(instr->result());
5176 Register temp = ToRegister(instr->temp());
5177
5178 // Allocate memory for the object.
5179 AllocationFlags flags = TAG_OBJECT;
5180 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5181 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5182 }
5183 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5184 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5185 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5186 }
5187
5188 if (instr->size()->IsConstantOperand()) {
5189 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5190 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5191 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5192 } else {
5193 Register size = ToRegister(instr->size());
5194 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5195 }
5196
5197 __ bind(deferred->exit());
5198
5199 if (instr->hydrogen()->MustPrefillWithFiller()) {
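// Pre-fill the allocated space with one-pointer filler maps, storing
// from the last word downwards.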
5200 if (instr->size()->IsConstantOperand()) {
5201 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5202 __ mov(temp, (size / kPointerSize) - 1);
5203 } else {
5204 temp = ToRegister(instr->size());
5205 __ shr(temp, kPointerSizeLog2);
5206 __ dec(temp);
5207 }
5208 Label loop;
5209 __ bind(&loop);
5210 __ mov(FieldOperand(result, temp, times_pointer_size, 0),
5211 isolate()->factory()->one_pointer_filler_map());
5212 __ dec(temp);
5213 __ j(not_zero, &loop);
5214 }
5215 }
5216
5217
5218 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5219 Register result = ToRegister(instr->result());
5220
5221 // TODO(3095996): Get rid of this. For now, we need to make the
5222 // result register contain a valid pointer because it is already
5223 // contained in the register pointer map.
5224 __ Move(result, Immediate(Smi::FromInt(0)));
5225
5226 PushSafepointRegistersScope scope(this);
5227 if (instr->size()->IsRegister()) {
5228 Register size = ToRegister(instr->size());
5229 DCHECK(!size.is(result));
5230 __ SmiTag(ToRegister(instr->size()));
5231 __ push(size);
5232 } else {
5233 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5234 if (size >= 0 && size <= Smi::kMaxValue) {
5235 __ push(Immediate(Smi::FromInt(size)));
5236 } else {
5237 // We should never get here at runtime => abort
5238 __ int3();
5239 return;
5240 }
5241 }
5242
5243 int flags = AllocateDoubleAlignFlag::encode(
5244 instr->hydrogen()->MustAllocateDoubleAligned());
5245 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5246 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5247 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5248 } else {
5249 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5250 }
5251 __ push(Immediate(Smi::FromInt(flags)));
5252
5253 CallRuntimeFromDeferred(
5254 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5255 __ StoreToSafepointRegisterSlot(result, eax);
5256 }
5257
5258
5259 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5260 DCHECK(ToRegister(instr->value()).is(eax));
5261 __ push(eax);
5262 CallRuntime(Runtime::kToFastProperties, 1, instr);
5263 }
5264
5265
5266 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5267 DCHECK(ToRegister(instr->context()).is(esi));
5268 Label materialized;
5269 // Registers will be used as follows:
5270 // ecx = literals array.
5271 // ebx = regexp literal.
5272 // eax = regexp literal clone.
5273 // esi = context.
5274 int literal_offset =
5275 LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
5276 __ LoadHeapObject(ecx, instr->hydrogen()->literals());
5277 __ mov(ebx, FieldOperand(ecx, literal_offset));
5278 __ cmp(ebx, factory()->undefined_value());
5279 __ j(not_equal, &materialized, Label::kNear);
5280
5281 // Create regexp literal using runtime function
5282 // Result will be in eax.
5283 __ push(ecx);
5284 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
5285 __ push(Immediate(instr->hydrogen()->pattern()));
5286 __ push(Immediate(instr->hydrogen()->flags()));
5287 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5288 __ mov(ebx, eax);
5289
5290 __ bind(&materialized);
5291 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5292 Label allocated, runtime_allocate;
5293 __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
5294 __ jmp(&allocated, Label::kNear);
5295
5296 __ bind(&runtime_allocate);
5297 __ push(ebx);
5298 __ push(Immediate(Smi::FromInt(size)));
5299 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5300 __ pop(ebx);
5301
5302 __ bind(&allocated);
5303 // Copy the content into the newly allocated memory.
5304 // (Unroll copy loop once for better throughput).
5305 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5306 __ mov(edx, FieldOperand(ebx, i));
5307 __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
5308 __ mov(FieldOperand(eax, i), edx);
5309 __ mov(FieldOperand(eax, i + kPointerSize), ecx);
5310 }
5311 if ((size % (2 * kPointerSize)) != 0) {
5312 __ mov(edx, FieldOperand(ebx, size - kPointerSize));
5313 __ mov(FieldOperand(eax, size - kPointerSize), edx);
5314 }
5315 }
5316
5317
5318 void LCodeGen::DoTypeof(LTypeof* instr) {
5319 DCHECK(ToRegister(instr->context()).is(esi));
5320 DCHECK(ToRegister(instr->value()).is(ebx));
5321 Label end, do_call;
5322 Register value_register = ToRegister(instr->value());
5323 __ JumpIfNotSmi(value_register, &do_call);
5324 __ mov(eax, Immediate(isolate()->factory()->number_string()));
5325 __ jmp(&end);
5326 __ bind(&do_call);
5327 TypeofStub stub(isolate());
5328 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5329 __ bind(&end);
5330 }
5331
5332
5333 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5334 Register input = ToRegister(instr->value());
5335 Condition final_branch_condition = EmitTypeofIs(instr, input);
5336 if (final_branch_condition != no_condition) {
5337 EmitBranch(instr, final_branch_condition);
5338 }
5339 }
5340
5341
5342 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5343 Label* true_label = instr->TrueLabel(chunk_);
5344 Label* false_label = instr->FalseLabel(chunk_);
5345 Handle<String> type_name = instr->type_literal();
5346 int left_block = instr->TrueDestination(chunk_);
5347 int right_block = instr->FalseDestination(chunk_);
5348 int next_block = GetNextEmittedBlock();
5349
5350 Label::Distance true_distance = left_block == next_block ? Label::kNear
5351 : Label::kFar;
5352 Label::Distance false_distance = right_block == next_block ? Label::kNear
5353 : Label::kFar;
5354 Condition final_branch_condition = no_condition;
5355 if (String::Equals(type_name, factory()->number_string())) {
5356 __ JumpIfSmi(input, true_label, true_distance);
5357 __ cmp(FieldOperand(input, HeapObject::kMapOffset),
5358 factory()->heap_number_map());
5359 final_branch_condition = equal;
5360
5361 } else if (String::Equals(type_name, factory()->string_string())) {
5362 __ JumpIfSmi(input, false_label, false_distance);
5363 __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5364 final_branch_condition = below;
5365
5366 } else if (String::Equals(type_name, factory()->symbol_string())) {
5367 __ JumpIfSmi(input, false_label, false_distance);
5368 __ CmpObjectType(input, SYMBOL_TYPE, input);
5369 final_branch_condition = equal;
5370
5371 } else if (String::Equals(type_name, factory()->boolean_string())) {
5372 __ cmp(input, factory()->true_value());
5373 __ j(equal, true_label, true_distance);
5374 __ cmp(input, factory()->false_value());
5375 final_branch_condition = equal;
5376
5377 } else if (String::Equals(type_name, factory()->undefined_string())) {
5378 __ cmp(input, factory()->undefined_value());
5379 __ j(equal, true_label, true_distance);
5380 __ JumpIfSmi(input, false_label, false_distance);
5381 // Check for undetectable objects => true.
5382 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5383 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5384 1 << Map::kIsUndetectable);
5385 final_branch_condition = not_zero;
5386
5387 } else if (String::Equals(type_name, factory()->function_string())) {
5388 __ JumpIfSmi(input, false_label, false_distance);
5389 // Check for callable and not undetectable objects => true.
5390 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5391 __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
5392 __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
5393 __ cmp(input, 1 << Map::kIsCallable);
5394 final_branch_condition = equal;
5395
5396 } else if (String::Equals(type_name, factory()->object_string())) {
5397 __ JumpIfSmi(input, false_label, false_distance);
5398 __ cmp(input, factory()->null_value());
5399 __ j(equal, true_label, true_distance);
5400 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
5401 __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
5402 __ j(below, false_label, false_distance);
5403 // Check for callable or undetectable objects => false.
5404 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5405 (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
5406 final_branch_condition = zero;
5407
5408 // clang-format off
5409 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5410 } else if (String::Equals(type_name, factory()->type##_string())) { \
5411 __ JumpIfSmi(input, false_label, false_distance); \
5412 __ cmp(FieldOperand(input, HeapObject::kMapOffset), \
5413 factory()->type##_map()); \
5414 final_branch_condition = equal;
5415 SIMD128_TYPES(SIMD128_TYPE)
5416 #undef SIMD128_TYPE
5417 // clang-format on
5418
5419 } else {
5420 __ jmp(false_label, false_distance);
5421 }
5422 return final_branch_condition;
5423 }
5424
5425
5426 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5427 Register temp = ToRegister(instr->temp());
5428
5429 EmitIsConstructCall(temp);
5430 EmitBranch(instr, equal);
5431 }
5432
5433
5434 void LCodeGen::EmitIsConstructCall(Register temp) {
5435 // Get the frame pointer for the calling frame.
5436 __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
5437
5438 // Skip the arguments adaptor frame if it exists.
5439 Label check_frame_marker;
5440 __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5441 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5442 __ j(not_equal, &check_frame_marker, Label::kNear);
5443 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5444
5445 // Check the marker in the calling frame.
5446 __ bind(&check_frame_marker);
5447 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5448 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5449 }
5450
5451
5452 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5453 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5454 // Ensure that we have enough space after the previous lazy-bailout
5455 // instruction for patching the code here.
5456 int current_pc = masm()->pc_offset();
5457 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5458 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5459 __ Nop(padding_size);
5460 }
5461 }
5462 last_lazy_deopt_pc_ = masm()->pc_offset();
5463 }
5464
5465
5466 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5467 last_lazy_deopt_pc_ = masm()->pc_offset();
5468 DCHECK(instr->HasEnvironment());
5469 LEnvironment* env = instr->environment();
5470 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5471 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5472 }
5473
5474
5475 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5476 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5477 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5478 // needed return address), even though the implementation of LAZY and EAGER is
5479 // now identical. When LAZY is eventually completely folded into EAGER, remove
5480 // the special case below.
5481 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5482 type = Deoptimizer::LAZY;
5483 }
5484 DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
5485 }
5486
5487
5488 void LCodeGen::DoDummy(LDummy* instr) {
5489 // Nothing to see here, move on!
5490 }
5491
5492
5493 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5494 // Nothing to see here, move on!
5495 }
5496
5497
5498 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5499 PushSafepointRegistersScope scope(this);
5500 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5501 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5502 RecordSafepointWithLazyDeopt(
5503 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5504 DCHECK(instr->HasEnvironment());
5505 LEnvironment* env = instr->environment();
5506 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5507 }
5508
5509
5510 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5511 class DeferredStackCheck final : public LDeferredCode {
5512 public:
5513 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5514 : LDeferredCode(codegen), instr_(instr) { }
5515 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5516 LInstruction* instr() override { return instr_; }
5517
5518 private:
5519 LStackCheck* instr_;
5520 };
5521
5522 DCHECK(instr->HasEnvironment());
5523 LEnvironment* env = instr->environment();
5524 // There is no LLazyBailout instruction for stack-checks. We have to
5525 // prepare for lazy deoptimization explicitly here.
5526 if (instr->hydrogen()->is_function_entry()) {
5527 // Perform stack overflow check.
5528 Label done;
5529 ExternalReference stack_limit =
5530 ExternalReference::address_of_stack_limit(isolate());
5531 __ cmp(esp, Operand::StaticVariable(stack_limit));
5532 __ j(above_equal, &done, Label::kNear);
5533
5534 DCHECK(instr->context()->IsRegister());
5535 DCHECK(ToRegister(instr->context()).is(esi));
5536 CallCode(isolate()->builtins()->StackCheck(),
5537 RelocInfo::CODE_TARGET,
5538 instr);
5539 __ bind(&done);
5540 } else {
5541 DCHECK(instr->hydrogen()->is_backwards_branch());
5542 // Perform stack overflow check if this goto needs it before jumping.
5543 DeferredStackCheck* deferred_stack_check =
5544 new(zone()) DeferredStackCheck(this, instr);
5545 ExternalReference stack_limit =
5546 ExternalReference::address_of_stack_limit(isolate());
5547 __ cmp(esp, Operand::StaticVariable(stack_limit));
5548 __ j(below, deferred_stack_check->entry());
5549 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5550 __ bind(instr->done_label());
5551 deferred_stack_check->SetExit(instr->done_label());
5552 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5553 // Don't record a deoptimization index for the safepoint here.
5554 // This will be done explicitly when emitting call and the safepoint in
5555 // the deferred code.
5556 }
5557 }
5558
5559
5560 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5561 // This is a pseudo-instruction that ensures that the environment here is
5562 // properly registered for deoptimization and records the assembler's PC
5563 // offset.
5564 LEnvironment* environment = instr->environment();
5565
5566 // If the environment were already registered, we would have no way of
5567 // backpatching it with the spill slot operands.
5568 DCHECK(!environment->HasBeenRegistered());
5569 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5570
5571 GenerateOsrPrologue();
5572 }
5573
5574
5575 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5576 DCHECK(ToRegister(instr->context()).is(esi));
5577 __ test(eax, Immediate(kSmiTagMask));
5578 DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
5579
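// Only spec objects past the proxy types can use the enum cache;
// anything else deoptimizes.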
5580 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5581 __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
5582 DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
5583
5584 Label use_cache, call_runtime;
5585 __ CheckEnumCache(&call_runtime);
5586
5587 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
5588 __ jmp(&use_cache, Label::kNear);
5589
5590 // Get the set of properties to enumerate.
5591 __ bind(&call_runtime);
5592 __ push(eax);
5593 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5594
5595 __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
5596 isolate()->factory()->meta_map());
5597 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5598 __ bind(&use_cache);
5599 }
5600
5601
5602 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5603 Register map = ToRegister(instr->map());
5604 Register result = ToRegister(instr->result());
5605 Label load_cache, done;
5606 __ EnumLength(result, map);
5607 __ cmp(result, Immediate(Smi::FromInt(0)));
5608 __ j(not_equal, &load_cache, Label::kNear);
5609 __ mov(result, isolate()->factory()->empty_fixed_array());
5610 __ jmp(&done, Label::kNear);
5611
5612 __ bind(&load_cache);
5613 __ LoadInstanceDescriptors(map, result);
5614 __ mov(result,
5615 FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5616 __ mov(result,
5617 FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5618 __ bind(&done);
5619 __ test(result, result);
5620 DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
5621 }
5622
5623
5624 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5625 Register object = ToRegister(instr->value());
5626 __ cmp(ToRegister(instr->map()),
5627 FieldOperand(object, HeapObject::kMapOffset));
5628 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
5629 }
5630
5631
5632 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5633 Register object,
5634 Register index) {
5635 PushSafepointRegistersScope scope(this);
5636 __ push(object);
5637 __ push(index);
5638 __ xor_(esi, esi);
5639 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5640 RecordSafepointWithRegisters(
5641 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5642 __ StoreToSafepointRegisterSlot(object, eax);
5643 }
5644
5645
5646 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5647 class DeferredLoadMutableDouble final : public LDeferredCode {
5648 public:
5649 DeferredLoadMutableDouble(LCodeGen* codegen,
5650 LLoadFieldByIndex* instr,
5651 Register object,
5652 Register index)
5653 : LDeferredCode(codegen),
5654 instr_(instr),
5655 object_(object),
5656 index_(index) {
5657 }
5658 void Generate() override {
5659 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
5660 }
5661 LInstruction* instr() override { return instr_; }
5662
5663 private:
5664 LLoadFieldByIndex* instr_;
5665 Register object_;
5666 Register index_;
5667 };
5668
5669 Register object = ToRegister(instr->object());
5670 Register index = ToRegister(instr->index());
5671
5672 DeferredLoadMutableDouble* deferred;
5673 deferred = new(zone()) DeferredLoadMutableDouble(
5674 this, instr, object, index);
5675
5676 Label out_of_object, done;
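// Bit 0 of the index marks a mutable double field that must be loaded
// through the runtime; after shifting it out, a negative index selects
// an out-of-object property.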
5677 __ test(index, Immediate(Smi::FromInt(1)));
5678 __ j(not_zero, deferred->entry());
5679
5680 __ sar(index, 1);
5681
5682 __ cmp(index, Immediate(0));
5683 __ j(less, &out_of_object, Label::kNear);
5684 __ mov(object, FieldOperand(object,
5685 index,
5686 times_half_pointer_size,
5687 JSObject::kHeaderSize));
5688 __ jmp(&done, Label::kNear);
5689
5690 __ bind(&out_of_object);
5691 __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
5692 __ neg(index);
5693 // Index is now equal to the out-of-object property index plus 1.
5694 __ mov(object, FieldOperand(object,
5695 index,
5696 times_half_pointer_size,
5697 FixedArray::kHeaderSize - kPointerSize));
5698 __ bind(deferred->exit());
5699 __ bind(&done);
5700 }
5701
5702
5703 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5704 Register context = ToRegister(instr->context());
5705 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
5706 }
5707
5708
5709 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5710 Handle<ScopeInfo> scope_info = instr->scope_info();
5711 __ Push(scope_info);
5712 __ push(ToRegister(instr->function()));
5713 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5714 RecordSafepoint(Safepoint::kNoLazyDeopt);
5715 }
5716
5717
5718 #undef __
5719
5720 } // namespace internal
5721 } // namespace v8
5722
5723 #endif // V8_TARGET_ARCH_IA32