Chromium Code Reviews

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 6529055: [Isolates] Merge crankshaft (r5922 from bleeding_edge). (Closed)
Patch Set: Win32 port Created 9 years, 10 months ago
1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "ia32/lithium-codegen-ia32.h"
29 #include "code-stubs.h"
30 #include "stub-cache.h"
31
32 namespace v8 {
33 namespace internal {
34
35
36 class SafepointGenerator : public PostCallGenerator {
37 public:
38 SafepointGenerator(LCodeGen* codegen,
39 LPointerMap* pointers,
40 int deoptimization_index)
41 : codegen_(codegen),
42 pointers_(pointers),
43 deoptimization_index_(deoptimization_index) { }
44 virtual ~SafepointGenerator() { }
45
46 virtual void Generate() {
47 codegen_->RecordSafepoint(pointers_, deoptimization_index_);
48 }
49
50 private:
51 LCodeGen* codegen_;
52 LPointerMap* pointers_;
53 int deoptimization_index_;
54 };
55
56
57 #define __ masm()->
58
59 bool LCodeGen::GenerateCode() {
60 HPhase phase("Code generation", chunk());
61 ASSERT(is_unused());
62 status_ = GENERATING;
63 CpuFeatures::Scope scope(SSE2);
64 return GeneratePrologue() &&
65 GenerateBody() &&
66 GenerateDeferredCode() &&
67 GenerateSafepointTable();
68 }
69
70
71 void LCodeGen::FinishCode(Handle<Code> code) {
72 ASSERT(is_done());
73 code->set_stack_slots(StackSlotCount());
74 code->set_safepoint_table_start(safepoints_.GetCodeOffset());
75 PopulateDeoptimizationData(code);
76 }
77
78
79 void LCodeGen::Abort(const char* format, ...) {
80 if (FLAG_trace_bailout) {
81 SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
82 PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
83 va_list arguments;
84 va_start(arguments, format);
85 OS::VPrint(format, arguments);
86 va_end(arguments);
87 PrintF("\n");
88 }
89 status_ = ABORTED;
90 }
91
92
93 void LCodeGen::Comment(const char* format, ...) {
94 if (!FLAG_code_comments) return;
95 char buffer[4 * KB];
96 StringBuilder builder(buffer, ARRAY_SIZE(buffer));
97 va_list arguments;
98 va_start(arguments, format);
99 builder.AddFormattedList(format, arguments);
100 va_end(arguments);
101
102 // Copy the string before recording it in the assembler to avoid
103 // issues when the stack-allocated buffer goes out of scope.
104 size_t length = builder.position();
105 Vector<char> copy = Vector<char>::New(length + 1);
106 memcpy(copy.start(), builder.Finalize(), copy.length());
107 masm()->RecordComment(copy.start());
108 }
109
110
111 bool LCodeGen::GeneratePrologue() {
112 ASSERT(is_generating());
113
114 #ifdef DEBUG
115 if (strlen(FLAG_stop_at) > 0 &&
116 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
117 __ int3();
118 }
119 #endif
120
121 __ push(ebp); // Caller's frame pointer.
122 __ mov(ebp, esp);
123 __ push(esi); // Callee's context.
124 __ push(edi); // Callee's JS function.
125
126 // Reserve space for the stack slots needed by the code.
127 int slots = StackSlotCount();
128 if (slots > 0) {
129 if (FLAG_debug_code) {
130 __ mov(Operand(eax), Immediate(slots));
131 Label loop;
132 __ bind(&loop);
133 __ push(Immediate(kSlotsZapValue));
134 __ dec(eax);
135 __ j(not_zero, &loop);
136 } else {
137 __ sub(Operand(esp), Immediate(slots * kPointerSize));
138 }
139 }
140
141 // Trace the call.
142 if (FLAG_trace) {
143 __ CallRuntime(Runtime::kTraceEnter, 0);
144 }
145 return !is_aborted();
146 }
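// Reviewer note (not part of the patch): with --debug-code every reserved
// spill slot is pushed as kSlotsZapValue so stale slot reads stand out in a
// debugger; the release path reserves the same space with a single
// sub(esp, slots * kPointerSize).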
147
148
149 bool LCodeGen::GenerateBody() {
150 ASSERT(is_generating());
151 bool emit_instructions = true;
152 for (current_instruction_ = 0;
153 !is_aborted() && current_instruction_ < instructions_->length();
154 current_instruction_++) {
155 LInstruction* instr = instructions_->at(current_instruction_);
156 if (instr->IsLabel()) {
157 LLabel* label = LLabel::cast(instr);
158 emit_instructions = !label->HasReplacement();
159 }
160
161 if (emit_instructions) {
162 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
163 instr->CompileToNative(this);
164 }
165 }
166 return !is_aborted();
167 }
168
169
170 LInstruction* LCodeGen::GetNextInstruction() {
171 if (current_instruction_ < instructions_->length() - 1) {
172 return instructions_->at(current_instruction_ + 1);
173 } else {
174 return NULL;
175 }
176 }
177
178
179 bool LCodeGen::GenerateDeferredCode() {
180 ASSERT(is_generating());
181 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
182 LDeferredCode* code = deferred_[i];
183 __ bind(code->entry());
184 code->Generate();
185 __ jmp(code->exit());
186 }
187
188 // Deferred code is the last part of the instruction sequence. Mark
189 // the generated code as done unless we bailed out.
190 if (!is_aborted()) status_ = DONE;
191 return !is_aborted();
192 }
193
194
195 bool LCodeGen::GenerateSafepointTable() {
196 ASSERT(is_done());
197 safepoints_.Emit(masm(), StackSlotCount());
198 return !is_aborted();
199 }
200
201
202 Register LCodeGen::ToRegister(int index) const {
203 return Register::FromAllocationIndex(index);
204 }
205
206
207 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
208 return XMMRegister::FromAllocationIndex(index);
209 }
210
211
212 Register LCodeGen::ToRegister(LOperand* op) const {
213 ASSERT(op->IsRegister());
214 return ToRegister(op->index());
215 }
216
217
218 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
219 ASSERT(op->IsDoubleRegister());
220 return ToDoubleRegister(op->index());
221 }
222
223
224 int LCodeGen::ToInteger32(LConstantOperand* op) const {
225 Handle<Object> value = chunk_->LookupLiteral(op);
226 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
227 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
228 value->Number());
229 return static_cast<int32_t>(value->Number());
230 }
231
232
233 Immediate LCodeGen::ToImmediate(LOperand* op) {
234 LConstantOperand* const_op = LConstantOperand::cast(op);
235 Handle<Object> literal = chunk_->LookupLiteral(const_op);
236 Representation r = chunk_->LookupLiteralRepresentation(const_op);
237 if (r.IsInteger32()) {
238 ASSERT(literal->IsNumber());
239 return Immediate(static_cast<int32_t>(literal->Number()));
240 } else if (r.IsDouble()) {
241 Abort("unsupported double immediate");
242 }
243 ASSERT(r.IsTagged());
244 return Immediate(literal);
245 }
246
247
248 Operand LCodeGen::ToOperand(LOperand* op) const {
249 if (op->IsRegister()) return Operand(ToRegister(op));
250 if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
251 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
252 int index = op->index();
253 if (index >= 0) {
254 // Local or spill slot. Skip the frame pointer, function, and
255 // context in the fixed part of the frame.
256 return Operand(ebp, -(index + 3) * kPointerSize);
257 } else {
258 // Incoming parameter. Skip the return address.
259 return Operand(ebp, -(index - 1) * kPointerSize);
260 }
261 }
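// Worked example (reviewer note, not in the patch): with kPointerSize == 4,
// spill slot index 0 yields Operand(ebp, -12): the context (ebp - 4) and the
// JS function (ebp - 8) pushed in the prologue occupy the first two words,
// so the first spill slot lives at ebp - 12. Incoming parameter index -1
// yields Operand(ebp, +8), skipping the saved ebp at +0 and the return
// address at +4.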
262
263
264 void LCodeGen::AddToTranslation(Translation* translation,
265 LOperand* op,
266 bool is_tagged) {
267 if (op == NULL) {
268 // TODO(twuerthinger): Introduce marker operands to indicate that this value
269 // is not present and must be reconstructed from the deoptimizer. Currently
270 // this is only used for the arguments object.
271 translation->StoreArgumentsObject();
272 } else if (op->IsStackSlot()) {
273 if (is_tagged) {
274 translation->StoreStackSlot(op->index());
275 } else {
276 translation->StoreInt32StackSlot(op->index());
277 }
278 } else if (op->IsDoubleStackSlot()) {
279 translation->StoreDoubleStackSlot(op->index());
280 } else if (op->IsArgument()) {
281 ASSERT(is_tagged);
282 int src_index = StackSlotCount() + op->index();
283 translation->StoreStackSlot(src_index);
284 } else if (op->IsRegister()) {
285 Register reg = ToRegister(op);
286 if (is_tagged) {
287 translation->StoreRegister(reg);
288 } else {
289 translation->StoreInt32Register(reg);
290 }
291 } else if (op->IsDoubleRegister()) {
292 XMMRegister reg = ToDoubleRegister(op);
293 translation->StoreDoubleRegister(reg);
294 } else if (op->IsConstantOperand()) {
295 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
296 int src_index = DefineDeoptimizationLiteral(literal);
297 translation->StoreLiteral(src_index);
298 } else {
299 UNREACHABLE();
300 }
301 }
302
303
304 void LCodeGen::CallCode(Handle<Code> code,
305 RelocInfo::Mode mode,
306 LInstruction* instr) {
307 if (instr != NULL) {
308 LPointerMap* pointers = instr->pointer_map();
309 RecordPosition(pointers->position());
310 __ call(code, mode);
311 RegisterLazyDeoptimization(instr);
312 } else {
313 LPointerMap no_pointers(0);
314 RecordPosition(no_pointers.position());
315 __ call(code, mode);
316 RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
317 }
318 }
319
320
321 void LCodeGen::CallRuntime(const Runtime::Function* function,
322 int num_arguments,
323 LInstruction* instr) {
324 ASSERT(instr != NULL);
325 LPointerMap* pointers = instr->pointer_map();
326 ASSERT(pointers != NULL);
327 RecordPosition(pointers->position());
328
329 __ CallRuntime(function, num_arguments);
330 // Runtime calls to Throw are not supposed to ever return at the
331 // call site, so don't register lazy deoptimization for these. We do
332 // however have to record a safepoint since throwing exceptions can
333 // cause garbage collections.
334 // BUG(3243555): register a lazy deoptimization point at throw. We need
335 // it to be able to inline functions containing a throw statement.
336 if (!instr->IsThrow()) {
337 RegisterLazyDeoptimization(instr);
338 } else {
339 RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
340 }
341 }
342
343
344 void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
345 // Create the environment to bail out to. If the call has side effects,
346 // execution has to continue after the call; otherwise execution can
347 // continue from a previous bailout point, repeating the call.
348 LEnvironment* deoptimization_environment;
349 if (instr->HasDeoptimizationEnvironment()) {
350 deoptimization_environment = instr->deoptimization_environment();
351 } else {
352 deoptimization_environment = instr->environment();
353 }
354
355 RegisterEnvironmentForDeoptimization(deoptimization_environment);
356 RecordSafepoint(instr->pointer_map(),
357 deoptimization_environment->deoptimization_index());
358 }
359
360
361 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
362 if (!environment->HasBeenRegistered()) {
363 // Physical stack frame layout:
364 // -x ............. -4 0 ..................................... y
365 // [incoming arguments] [spill slots] [pushed outgoing arguments]
366
367 // Layout of the environment:
368 // 0 ..................................................... size-1
369 // [parameters] [locals] [expression stack including arguments]
370
371 // Layout of the translation:
372 // 0 ........................................................ size - 1 + 4
373 // [expression stack including arguments] [locals] [4 words] [parameters]
374 // |>------------ translation_size ------------<|
375
376 int frame_count = 0;
377 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
378 ++frame_count;
379 }
380 Translation translation(&translations_, frame_count);
381 environment->WriteTranslation(this, &translation);
382 int deoptimization_index = deoptimizations_.length();
383 environment->Register(deoptimization_index, translation.index());
384 deoptimizations_.Add(environment);
385 }
386 }
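// Reviewer note: the frame_count loop above walks environment->outer(), so
// the Translation is sized for this frame plus one frame per level of
// inlining, letting the deoptimizer rebuild the whole inlined stack.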
387
388
389 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
390 RegisterEnvironmentForDeoptimization(environment);
391 ASSERT(environment->HasBeenRegistered());
392 int id = environment->deoptimization_index();
393 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
394 ASSERT(entry != NULL);
395 if (entry == NULL) {
396 Abort("bailout was not prepared");
397 return;
398 }
399
400 if (FLAG_deopt_every_n_times != 0) {
401 Handle<SharedFunctionInfo> shared(info_->shared_info());
402 Label no_deopt;
403 __ pushfd();
404 __ push(eax);
405 __ push(ebx);
406 __ mov(ebx, shared);
407 __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
408 __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
409 __ j(not_zero, &no_deopt);
410 if (FLAG_trap_on_deopt) __ int3();
411 __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
412 __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
413 __ pop(ebx);
414 __ pop(eax);
415 __ popfd();
416 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
417
418 __ bind(&no_deopt);
419 __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
420 __ pop(ebx);
421 __ pop(eax);
422 __ popfd();
423 }
424
425 if (cc == no_condition) {
426 if (FLAG_trap_on_deopt) __ int3();
427 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
428 } else {
429 if (FLAG_trap_on_deopt) {
430 NearLabel done;
431 __ j(NegateCondition(cc), &done);
432 __ int3();
433 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
434 __ bind(&done);
435 } else {
436 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
437 }
438 }
439 }
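// Reviewer note: the FLAG_deopt_every_n_times block keeps a smi counter at
// SharedFunctionInfo::kDeoptCounterOffset, decrements it at every potential
// deopt site, and forces an unconditional deopt (resetting the counter) each
// time it reaches zero; this is a stress-testing aid, not production
// behavior.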
440
441
442 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
443 int length = deoptimizations_.length();
444 if (length == 0) return;
445 ASSERT(FLAG_deopt);
446 Handle<DeoptimizationInputData> data =
447 FACTORY->NewDeoptimizationInputData(length, TENURED);
448
449 data->SetTranslationByteArray(*translations_.CreateByteArray());
450 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
451
452 Handle<FixedArray> literals =
453 FACTORY->NewFixedArray(deoptimization_literals_.length(), TENURED);
454 for (int i = 0; i < deoptimization_literals_.length(); i++) {
455 literals->set(i, *deoptimization_literals_[i]);
456 }
457 data->SetLiteralArray(*literals);
458
459 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
460 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
461
462 // Populate the deoptimization entries.
463 for (int i = 0; i < length; i++) {
464 LEnvironment* env = deoptimizations_[i];
465 data->SetAstId(i, Smi::FromInt(env->ast_id()));
466 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
467 data->SetArgumentsStackHeight(i,
468 Smi::FromInt(env->arguments_stack_height()));
469 }
470 code->set_deoptimization_data(*data);
471 }
472
473
474 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
475 int result = deoptimization_literals_.length();
476 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
477 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
478 }
479 deoptimization_literals_.Add(literal);
480 return result;
481 }
482
483
484 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
485 ASSERT(deoptimization_literals_.length() == 0);
486
487 const ZoneList<Handle<JSFunction> >* inlined_closures =
488 chunk()->inlined_closures();
489
490 for (int i = 0, length = inlined_closures->length();
491 i < length;
492 i++) {
493 DefineDeoptimizationLiteral(inlined_closures->at(i));
494 }
495
496 inlined_function_count_ = deoptimization_literals_.length();
497 }
498
499
500 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
501 int deoptimization_index) {
502 const ZoneList<LOperand*>* operands = pointers->operands();
503 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
504 deoptimization_index);
505 for (int i = 0; i < operands->length(); i++) {
506 LOperand* pointer = operands->at(i);
507 if (pointer->IsStackSlot()) {
508 safepoint.DefinePointerSlot(pointer->index());
509 }
510 }
511 }
512
513
514 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
515 int arguments,
516 int deoptimization_index) {
517 const ZoneList<LOperand*>* operands = pointers->operands();
518 Safepoint safepoint =
519 safepoints_.DefineSafepointWithRegisters(
520 masm(), arguments, deoptimization_index);
521 for (int i = 0; i < operands->length(); i++) {
522 LOperand* pointer = operands->at(i);
523 if (pointer->IsStackSlot()) {
524 safepoint.DefinePointerSlot(pointer->index());
525 } else if (pointer->IsRegister()) {
526 safepoint.DefinePointerRegister(ToRegister(pointer));
527 }
528 }
529 // Register esi always contains a pointer to the context.
530 safepoint.DefinePointerRegister(esi);
531 }
532
533
534 void LCodeGen::RecordPosition(int position) {
535 if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
536 masm()->positions_recorder()->RecordPosition(position);
537 }
538
539
540 void LCodeGen::DoLabel(LLabel* label) {
541 if (label->is_loop_header()) {
542 Comment(";;; B%d - LOOP entry", label->block_id());
543 } else {
544 Comment(";;; B%d", label->block_id());
545 }
546 __ bind(label->label());
547 current_block_ = label->block_id();
548 LCodeGen::DoGap(label);
549 }
550
551
552 void LCodeGen::DoParallelMove(LParallelMove* move) {
553 // xmm0 must always be a scratch register.
554 XMMRegister xmm_scratch = xmm0;
555 LUnallocated marker_operand(LUnallocated::NONE);
556
557 Register cpu_scratch = esi;
558 bool destroys_cpu_scratch = false;
559
560 LGapResolver resolver(move->move_operands(), &marker_operand);
561 const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
562 for (int i = moves->length() - 1; i >= 0; --i) {
563 LMoveOperands move = moves->at(i);
564 LOperand* from = move.from();
565 LOperand* to = move.to();
566 ASSERT(!from->IsDoubleRegister() ||
567 !ToDoubleRegister(from).is(xmm_scratch));
568 ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
569 ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
570 ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
571 if (from->IsConstantOperand()) {
572 __ mov(ToOperand(to), ToImmediate(from));
573 } else if (from == &marker_operand) {
574 if (to->IsRegister() || to->IsStackSlot()) {
575 __ mov(ToOperand(to), cpu_scratch);
576 ASSERT(destroys_cpu_scratch);
577 } else {
578 ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
579 __ movdbl(ToOperand(to), xmm_scratch);
580 }
581 } else if (to == &marker_operand) {
582 if (from->IsRegister() || from->IsStackSlot()) {
583 __ mov(cpu_scratch, ToOperand(from));
584 destroys_cpu_scratch = true;
585 } else {
586 ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
587 __ movdbl(xmm_scratch, ToOperand(from));
588 }
589 } else if (from->IsRegister()) {
590 __ mov(ToOperand(to), ToRegister(from));
591 } else if (to->IsRegister()) {
592 __ mov(ToRegister(to), ToOperand(from));
593 } else if (from->IsStackSlot()) {
594 ASSERT(to->IsStackSlot());
595 __ push(eax);
596 __ mov(eax, ToOperand(from));
597 __ mov(ToOperand(to), eax);
598 __ pop(eax);
599 } else if (from->IsDoubleRegister()) {
600 __ movdbl(ToOperand(to), ToDoubleRegister(from));
601 } else if (to->IsDoubleRegister()) {
602 __ movdbl(ToDoubleRegister(to), ToOperand(from));
603 } else {
604 ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
605 __ movdbl(xmm_scratch, ToOperand(from));
606 __ movdbl(ToOperand(to), xmm_scratch);
607 }
608 }
609
610 if (destroys_cpu_scratch) {
611 __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
612 }
613 }
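// Reviewer note: the gap resolver may route one value through the marker
// operand, which lands in cpu_scratch (esi) for GP values or xmm_scratch
// (xmm0) for doubles. Since esi normally holds the context, it is restored
// afterwards from the context slot the prologue stored at ebp - 4.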
614
615
616 void LCodeGen::DoGap(LGap* gap) {
617 for (int i = LGap::FIRST_INNER_POSITION;
618 i <= LGap::LAST_INNER_POSITION;
619 i++) {
620 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
621 LParallelMove* move = gap->GetParallelMove(inner_pos);
622 if (move != NULL) DoParallelMove(move);
623 }
624
625 LInstruction* next = GetNextInstruction();
626 if (next != NULL && next->IsLazyBailout()) {
627 int pc = masm()->pc_offset();
628 safepoints_.SetPcAfterGap(pc);
629 }
630 }
631
632
633 void LCodeGen::DoParameter(LParameter* instr) {
634 // Nothing to do.
635 }
636
637
638 void LCodeGen::DoCallStub(LCallStub* instr) {
639 ASSERT(ToRegister(instr->result()).is(eax));
640 switch (instr->hydrogen()->major_key()) {
641 case CodeStub::RegExpConstructResult: {
642 RegExpConstructResultStub stub;
643 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
644 break;
645 }
646 case CodeStub::RegExpExec: {
647 RegExpExecStub stub;
648 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
649 break;
650 }
651 case CodeStub::SubString: {
652 SubStringStub stub;
653 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
654 break;
655 }
656 case CodeStub::StringCharAt: {
657 StringCharAtStub stub;
658 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
659 break;
660 }
661 case CodeStub::MathPow: {
662 MathPowStub stub;
663 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
664 break;
665 }
666 case CodeStub::NumberToString: {
667 NumberToStringStub stub;
668 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
669 break;
670 }
671 case CodeStub::StringAdd: {
672 StringAddStub stub(NO_STRING_ADD_FLAGS);
673 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
674 break;
675 }
676 case CodeStub::StringCompare: {
677 StringCompareStub stub;
678 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
679 break;
680 }
681 case CodeStub::TranscendentalCache: {
682 TranscendentalCacheStub stub(instr->transcendental_type());
683 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
684 break;
685 }
686 default:
687 UNREACHABLE();
688 }
689 }
690
691
692 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
693 // Nothing to do.
694 }
695
696
697 void LCodeGen::DoModI(LModI* instr) {
698 LOperand* right = instr->right();
699 ASSERT(ToRegister(instr->result()).is(edx));
700 ASSERT(ToRegister(instr->left()).is(eax));
701 ASSERT(!ToRegister(instr->right()).is(eax));
702 ASSERT(!ToRegister(instr->right()).is(edx));
703
704 Register right_reg = ToRegister(right);
705
706 // Check for x % 0.
707 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
708 __ test(right_reg, ToOperand(right));
709 DeoptimizeIf(zero, instr->environment());
710 }
711
712 // Sign extend to edx.
713 __ cdq();
714
715 // Check for a negative dividend with a zero remainder (-6 % 3 = -0).
716 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
717 NearLabel positive_left;
718 NearLabel done;
719 __ test(eax, Operand(eax));
720 __ j(not_sign, &positive_left);
721 __ idiv(right_reg);
722
723 // Test the remainder for 0, because then the result would be -0.
724 __ test(edx, Operand(edx));
725 __ j(not_zero, &done);
726
727 DeoptimizeIf(no_condition, instr->environment());
728 __ bind(&positive_left);
729 __ idiv(right_reg);
730 __ bind(&done);
731 } else {
732 __ idiv(right_reg);
733 }
734 }
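// Reviewer sketch (not part of the patch): int32 cannot encode -0, which JS
// produces whenever a negative dividend divides evenly:
//
//   var x = -6 % 3;   // -0 as a double (the sign follows the dividend)
//   1 / x;            // -Infinity, so the sign is observable
//
// Hence the sign test on eax and the deopt when edx (the remainder) is 0.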
735
736
737 void LCodeGen::DoDivI(LDivI* instr) {
738 LOperand* right = instr->right();
739 ASSERT(ToRegister(instr->result()).is(eax));
740 ASSERT(ToRegister(instr->left()).is(eax));
741 ASSERT(!ToRegister(instr->right()).is(eax));
742 ASSERT(!ToRegister(instr->right()).is(edx));
743
744 Register left_reg = eax;
745
746 // Check for x / 0.
747 Register right_reg = ToRegister(right);
748 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
749 __ test(right_reg, ToOperand(right));
750 DeoptimizeIf(zero, instr->environment());
751 }
752
753 // Check for (0 / -x) that will produce negative zero.
754 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
755 NearLabel left_not_zero;
756 __ test(left_reg, Operand(left_reg));
757 __ j(not_zero, &left_not_zero);
758 __ test(right_reg, ToOperand(right));
759 DeoptimizeIf(sign, instr->environment());
760 __ bind(&left_not_zero);
761 }
762
763 // Check for (kMinInt / -1).
764 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
765 NearLabel left_not_min_int;
766 __ cmp(left_reg, kMinInt);
767 __ j(not_zero, &left_not_min_int);
768 __ cmp(right_reg, -1);
769 DeoptimizeIf(zero, instr->environment());
770 __ bind(&left_not_min_int);
771 }
772
773 // Sign extend to edx.
774 __ cdq();
775 __ idiv(right_reg);
776
777 // Deoptimize if remainder is not 0.
778 __ test(edx, Operand(edx));
779 DeoptimizeIf(not_zero, instr->environment());
780 }
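// Reviewer sketch (not part of the patch): two int32-unrepresentable results
// are guarded here. 0 / -3 is -0 in JS, and kMinInt / -1 is 2147483648, one
// past kMaxInt (idiv would raise #DE for it). The final remainder test
// deopts on any inexact quotient such as 7 / 2, because this LDivI must
// produce the exact JS result, not a truncated one.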
781
782
783 void LCodeGen::DoMulI(LMulI* instr) {
784 Register left = ToRegister(instr->left());
785 LOperand* right = instr->right();
786
787 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
788 __ mov(ToRegister(instr->temp()), left);
789 }
790
791 if (right->IsConstantOperand()) {
792 __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
793 } else {
794 __ imul(left, ToOperand(right));
795 }
796
797 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
798 DeoptimizeIf(overflow, instr->environment());
799 }
800
801 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
802 // Bail out if the result is supposed to be negative zero.
803 NearLabel done;
804 __ test(left, Operand(left));
805 __ j(not_zero, &done);
806 if (right->IsConstantOperand()) {
807 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
808 DeoptimizeIf(no_condition, instr->environment());
809 }
810 } else {
811 // Test the non-zero operand for negative sign.
812 __ or_(ToRegister(instr->temp()), ToOperand(right));
813 DeoptimizeIf(sign, instr->environment());
814 }
815 __ bind(&done);
816 }
817 }
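// Reviewer sketch (not part of the patch): when the product is 0, the JS
// result is -0 if exactly one factor was negative, e.g. 0 * -5. The copy of
// the original left operand saved in temp is or'ed with right, so the sign
// flag is set iff either operand was negative, and the code deopts to a
// double representation in that case.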
818
819
820 void LCodeGen::DoBitI(LBitI* instr) {
821 LOperand* left = instr->left();
822 LOperand* right = instr->right();
823 ASSERT(left->Equals(instr->result()));
824 ASSERT(left->IsRegister());
825
826 if (right->IsConstantOperand()) {
827 int right_operand = ToInteger32(LConstantOperand::cast(right));
828 switch (instr->op()) {
829 case Token::BIT_AND:
830 __ and_(ToRegister(left), right_operand);
831 break;
832 case Token::BIT_OR:
833 __ or_(ToRegister(left), right_operand);
834 break;
835 case Token::BIT_XOR:
836 __ xor_(ToRegister(left), right_operand);
837 break;
838 default:
839 UNREACHABLE();
840 break;
841 }
842 } else {
843 switch (instr->op()) {
844 case Token::BIT_AND:
845 __ and_(ToRegister(left), ToOperand(right));
846 break;
847 case Token::BIT_OR:
848 __ or_(ToRegister(left), ToOperand(right));
849 break;
850 case Token::BIT_XOR:
851 __ xor_(ToRegister(left), ToOperand(right));
852 break;
853 default:
854 UNREACHABLE();
855 break;
856 }
857 }
858 }
859
860
861 void LCodeGen::DoShiftI(LShiftI* instr) {
862 LOperand* left = instr->left();
863 LOperand* right = instr->right();
864 ASSERT(left->Equals(instr->result()));
865 ASSERT(left->IsRegister());
866 if (right->IsRegister()) {
867 ASSERT(ToRegister(right).is(ecx));
868
869 switch (instr->op()) {
870 case Token::SAR:
871 __ sar_cl(ToRegister(left));
872 break;
873 case Token::SHR:
874 __ shr_cl(ToRegister(left));
875 if (instr->can_deopt()) {
876 __ test(ToRegister(left), Immediate(0x80000000));
877 DeoptimizeIf(not_zero, instr->environment());
878 }
879 break;
880 case Token::SHL:
881 __ shl_cl(ToRegister(left));
882 break;
883 default:
884 UNREACHABLE();
885 break;
886 }
887 } else {
888 int value = ToInteger32(LConstantOperand::cast(right));
889 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
890 switch (instr->op()) {
891 case Token::SAR:
892 if (shift_count != 0) {
893 __ sar(ToRegister(left), shift_count);
894 }
895 break;
896 case Token::SHR:
897 if (shift_count == 0 && instr->can_deopt()) {
898 __ test(ToRegister(left), Immediate(0x80000000));
899 DeoptimizeIf(not_zero, instr->environment());
900 } else {
901 __ shr(ToRegister(left), shift_count);
902 }
903 break;
904 case Token::SHL:
905 if (shift_count != 0) {
906 __ shl(ToRegister(left), shift_count);
907 }
908 break;
909 default:
910 UNREACHABLE();
911 break;
912 }
913 }
914 }
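// Reviewer sketch (not part of the patch): JS >>> reinterprets the result as
// uint32, so (-1) >>> 0 is 4294967295, which no int32 can hold. Any shift
// amount >= 1 clears bit 31, so only the zero-shift case can overflow;
// testing Immediate(0x80000000) after the variable shift (or instead of a
// constant shift by 0) deopts exactly when that happens.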
915
916
917 void LCodeGen::DoSubI(LSubI* instr) {
918 LOperand* left = instr->left();
919 LOperand* right = instr->right();
920 ASSERT(left->Equals(instr->result()));
921
922 if (right->IsConstantOperand()) {
923 __ sub(ToOperand(left), ToImmediate(right));
924 } else {
925 __ sub(ToRegister(left), ToOperand(right));
926 }
927 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
928 DeoptimizeIf(overflow, instr->environment());
929 }
930 }
931
932
933 void LCodeGen::DoConstantI(LConstantI* instr) {
934 ASSERT(instr->result()->IsRegister());
935 __ mov(ToRegister(instr->result()), instr->value());
936 }
937
938
939 void LCodeGen::DoConstantD(LConstantD* instr) {
940 ASSERT(instr->result()->IsDoubleRegister());
941 XMMRegister res = ToDoubleRegister(instr->result());
942 double v = instr->value();
943 // Use xor to produce +0.0 in a fast and compact way, but avoid doing
944 // so if the constant is -0.0.
945 if (BitCast<uint64_t, double>(v) == 0) {
946 __ xorpd(res, res);
947 } else {
948 int32_t v_int32 = static_cast<int32_t>(v);
949 if (static_cast<double>(v_int32) == v) {
950 __ push_imm32(v_int32);
951 __ cvtsi2sd(res, Operand(esp, 0));
952 __ add(Operand(esp), Immediate(kPointerSize));
953 } else {
954 uint64_t int_val = BitCast<uint64_t, double>(v);
955 int32_t lower = static_cast<int32_t>(int_val);
956 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
957 __ push_imm32(upper);
958 __ push_imm32(lower);
959 __ movdbl(res, Operand(esp, 0));
960 __ add(Operand(esp), Immediate(2 * kPointerSize));
961 }
962 }
963 }
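// Worked example (reviewer note): for v = 1.5 the IEEE-754 bit pattern is
// 0x3FF8000000000000, so upper = 0x3FF80000 and lower = 0x00000000 are
// pushed and reloaded via movdbl. The xorpd fast path keys off the bit
// pattern rather than v == 0.0 because -0.0 compares equal to 0.0 but has
// bits 0x8000000000000000.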
964
965
966 void LCodeGen::DoConstantT(LConstantT* instr) {
967 ASSERT(instr->result()->IsRegister());
968 __ mov(ToRegister(instr->result()), Immediate(instr->value()));
969 }
970
971
972 void LCodeGen::DoArrayLength(LArrayLength* instr) {
973 Register result = ToRegister(instr->result());
974
975 if (instr->hydrogen()->value()->IsLoadElements()) {
976 // We load the length directly from the elements array.
977 Register elements = ToRegister(instr->input());
978 __ mov(result, FieldOperand(elements, FixedArray::kLengthOffset));
979 } else {
980 // Check that the receiver really is an array.
981 Register array = ToRegister(instr->input());
982 Register temporary = ToRegister(instr->temporary());
983 __ CmpObjectType(array, JS_ARRAY_TYPE, temporary);
984 DeoptimizeIf(not_equal, instr->environment());
985
986 // Load length directly from the array.
987 __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
988 }
989 }
990
991
992 void LCodeGen::DoValueOf(LValueOf* instr) {
993 Register input = ToRegister(instr->input());
994 Register result = ToRegister(instr->result());
995 Register map = ToRegister(instr->temporary());
996 ASSERT(input.is(result));
997 NearLabel done;
998 // If the object is a smi return the object.
999 __ test(input, Immediate(kSmiTagMask));
1000 __ j(zero, &done);
1001
1002 // If the object is not a value type, return the object.
1003 __ CmpObjectType(input, JS_VALUE_TYPE, map);
1004 __ j(not_equal, &done);
1005 __ mov(result, FieldOperand(input, JSValue::kValueOffset));
1006
1007 __ bind(&done);
1008 }
1009
1010
1011 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1012 LOperand* input = instr->input();
1013 ASSERT(input->Equals(instr->result()));
1014 __ not_(ToRegister(input));
1015 }
1016
1017
1018 void LCodeGen::DoThrow(LThrow* instr) {
1019 __ push(ToOperand(instr->input()));
1020 CallRuntime(Runtime::kThrow, 1, instr);
1021
1022 if (FLAG_debug_code) {
1023 Comment("Unreachable code.");
1024 __ int3();
1025 }
1026 }
1027
1028
1029 void LCodeGen::DoAddI(LAddI* instr) {
1030 LOperand* left = instr->left();
1031 LOperand* right = instr->right();
1032 ASSERT(left->Equals(instr->result()));
1033
1034 if (right->IsConstantOperand()) {
1035 __ add(ToOperand(left), ToImmediate(right));
1036 } else {
1037 __ add(ToRegister(left), ToOperand(right));
1038 }
1039
1040 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1041 DeoptimizeIf(overflow, instr->environment());
1042 }
1043 }
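// Reviewer note: add and sub set OF when the signed 32-bit result wraps,
// e.g. 0x7FFFFFFF + 1, and kCanOverflow turns that into a deopt; JS addition
// is defined on doubles, so wrapping silently would be wrong.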
1044
1045
1046 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1047 LOperand* left = instr->left();
1048 LOperand* right = instr->right();
1049 // Modulo uses a fixed result register.
1050 ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
1051 switch (instr->op()) {
1052 case Token::ADD:
1053 __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
1054 break;
1055 case Token::SUB:
1056 __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
1057 break;
1058 case Token::MUL:
1059 __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
1060 break;
1061 case Token::DIV:
1062 __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
1063 break;
1064 case Token::MOD: {
1065 // Pass two doubles as arguments on the stack.
1066 __ PrepareCallCFunction(4, eax);
1067 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
1068 __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
1069 __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
1070
1071 // Return value is in st(0) on ia32.
1072 // Store it into the (fixed) result register.
1073 __ sub(Operand(esp), Immediate(kDoubleSize));
1074 __ fstp_d(Operand(esp, 0));
1075 __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
1076 __ add(Operand(esp), Immediate(kDoubleSize));
1077 break;
1078 }
1079 default:
1080 UNREACHABLE();
1081 break;
1082 }
1083 }
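// Reviewer note: SSE2 has no double modulo instruction, so MOD goes out
// through a C call (fmod-style semantics) whose result comes back on the x87
// stack; the fstp_d/movdbl sequence moves st(0) into the fixed XMM result
// register via the stack.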
1084
1085
1086 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1087 ASSERT(ToRegister(instr->left()).is(edx));
1088 ASSERT(ToRegister(instr->right()).is(eax));
1089 ASSERT(ToRegister(instr->result()).is(eax));
1090
1091 TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
1092 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1093 }
1094
1095
1096 int LCodeGen::GetNextEmittedBlock(int block) {
1097 for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1098 LLabel* label = chunk_->GetLabel(i);
1099 if (!label->HasReplacement()) return i;
1100 }
1101 return -1;
1102 }
1103
1104
1105 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1106 int next_block = GetNextEmittedBlock(current_block_);
1107 right_block = chunk_->LookupDestination(right_block);
1108 left_block = chunk_->LookupDestination(left_block);
1109
1110 if (right_block == left_block) {
1111 EmitGoto(left_block);
1112 } else if (left_block == next_block) {
1113 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1114 } else if (right_block == next_block) {
1115 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1116 } else {
1117 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1118 __ jmp(chunk_->GetAssemblyLabel(right_block));
1119 }
1120 }
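// Reviewer note: EmitBranch exploits block layout. If the false target is
// the next emitted block, a single j(cc, true_label) suffices; if the true
// target is next, the condition is negated and a single jump to the false
// target is emitted; only when neither falls through are both a jcc and an
// unconditional jmp needed.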
1121
1122
1123 void LCodeGen::DoBranch(LBranch* instr) {
1124 int true_block = chunk_->LookupDestination(instr->true_block_id());
1125 int false_block = chunk_->LookupDestination(instr->false_block_id());
1126
1127 Representation r = instr->hydrogen()->representation();
1128 if (r.IsInteger32()) {
1129 Register reg = ToRegister(instr->input());
1130 __ test(reg, Operand(reg));
1131 EmitBranch(true_block, false_block, not_zero);
1132 } else if (r.IsDouble()) {
1133 XMMRegister reg = ToDoubleRegister(instr->input());
1134 __ xorpd(xmm0, xmm0);
1135 __ ucomisd(reg, xmm0);
1136 EmitBranch(true_block, false_block, not_equal);
1137 } else {
1138 ASSERT(r.IsTagged());
1139 Register reg = ToRegister(instr->input());
1140 if (instr->hydrogen()->type().IsBoolean()) {
1141 __ cmp(reg, FACTORY->true_value());
1142 EmitBranch(true_block, false_block, equal);
1143 } else {
1144 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1145 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1146
1147 __ cmp(reg, FACTORY->undefined_value());
1148 __ j(equal, false_label);
1149 __ cmp(reg, FACTORY->true_value());
1150 __ j(equal, true_label);
1151 __ cmp(reg, FACTORY->false_value());
1152 __ j(equal, false_label);
1153 __ test(reg, Operand(reg));
1154 __ j(equal, false_label);
1155 __ test(reg, Immediate(kSmiTagMask));
1156 __ j(zero, true_label);
1157
1158 // Test for double values. Zero is false.
1159 NearLabel call_stub;
1160 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1161 FACTORY->heap_number_map());
1162 __ j(not_equal, &call_stub);
1163 __ fldz();
1164 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
1165 __ FCmp();
1166 __ j(zero, false_label);
1167 __ jmp(true_label);
1168
1169 // The conversion stub doesn't cause garbage collections so it's
1170 // safe to not record a safepoint after the call.
1171 __ bind(&call_stub);
1172 ToBooleanStub stub;
1173 __ pushad();
1174 __ push(reg);
1175 __ CallStub(&stub);
1176 __ test(eax, Operand(eax));
1177 __ popad();
1178 EmitBranch(true_block, false_block, not_zero);
1179 }
1180 }
1181 }
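// Reviewer note: the tagged path above inlines the common ToBoolean cases
// (undefined, false and the smi 0 are false; true and any other smi are
// true; heap numbers compare against 0.0 via FCmp) and only calls
// ToBooleanStub for the remaining kinds of objects, e.g. strings.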
1182
1183
1184 void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
1185 block = chunk_->LookupDestination(block);
1186 int next_block = GetNextEmittedBlock(current_block_);
1187 if (block != next_block) {
1188 // Perform stack overflow check if this goto needs it before jumping.
1189 if (deferred_stack_check != NULL) {
1190 ExternalReference stack_limit =
1191 ExternalReference::address_of_stack_limit();
1192 __ cmp(esp, Operand::StaticVariable(stack_limit));
1193 __ j(above_equal, chunk_->GetAssemblyLabel(block));
1194 __ jmp(deferred_stack_check->entry());
1195 deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
1196 } else {
1197 __ jmp(chunk_->GetAssemblyLabel(block));
1198 }
1199 }
1200 }
1201
1202
1203 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
1204 __ pushad();
1205 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
1206 RecordSafepointWithRegisters(
1207 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
1208 __ popad();
1209 }
1210
1211 void LCodeGen::DoGoto(LGoto* instr) {
1212 class DeferredStackCheck: public LDeferredCode {
1213 public:
1214 DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
1215 : LDeferredCode(codegen), instr_(instr) { }
1216 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
1217 private:
1218 LGoto* instr_;
1219 };
1220
1221 DeferredStackCheck* deferred = NULL;
1222 if (instr->include_stack_check()) {
1223 deferred = new DeferredStackCheck(this, instr);
1224 }
1225 EmitGoto(instr->block_id(), deferred);
1226 }
1227
1228
1229 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1230 Condition cond = no_condition;
1231 switch (op) {
1232 case Token::EQ:
1233 case Token::EQ_STRICT:
1234 cond = equal;
1235 break;
1236 case Token::LT:
1237 cond = is_unsigned ? below : less;
1238 break;
1239 case Token::GT:
1240 cond = is_unsigned ? above : greater;
1241 break;
1242 case Token::LTE:
1243 cond = is_unsigned ? below_equal : less_equal;
1244 break;
1245 case Token::GTE:
1246 cond = is_unsigned ? above_equal : greater_equal;
1247 break;
1248 case Token::IN:
1249 case Token::INSTANCEOF:
1250 default:
1251 UNREACHABLE();
1252 }
1253 return cond;
1254 }
1255
1256
1257 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
1258 if (right->IsConstantOperand()) {
1259 __ cmp(ToOperand(left), ToImmediate(right));
1260 } else {
1261 __ cmp(ToRegister(left), ToOperand(right));
1262 }
1263 }
1264
1265
1266 void LCodeGen::DoCmpID(LCmpID* instr) {
1267 LOperand* left = instr->left();
1268 LOperand* right = instr->right();
1269 LOperand* result = instr->result();
1270
1271 NearLabel unordered;
1272 if (instr->is_double()) {
1273 // Don't base result on EFLAGS when a NaN is involved. Instead
1274 // jump to the unordered case, which produces a false value.
1275 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1276 __ j(parity_even, &unordered, not_taken);
1277 } else {
1278 EmitCmpI(left, right);
1279 }
1280
1281 NearLabel done;
1282 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1283 __ mov(ToRegister(result), Handle<Object>(HEAP->true_value()));
1284 __ j(cc, &done);
1285
1286 __ bind(&unordered);
1287 __ mov(ToRegister(result), Handle<Object>(HEAP->false_value()));
1288 __ bind(&done);
1289 }
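// Reviewer note: ucomisd sets the parity flag when either operand is NaN, so
// j(parity_even, &unordered) makes every NaN comparison yield false,
// matching JS, where NaN < x, NaN > x and NaN == x are all false.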
1290
1291
1292 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1293 LOperand* left = instr->left();
1294 LOperand* right = instr->right();
1295 int false_block = chunk_->LookupDestination(instr->false_block_id());
1296 int true_block = chunk_->LookupDestination(instr->true_block_id());
1297
1298 if (instr->is_double()) {
1299 // Don't base result on EFLAGS when a NaN is involved. Instead
1300 // jump to the false block.
1301 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1302 __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1303 } else {
1304 EmitCmpI(left, right);
1305 }
1306
1307 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1308 EmitBranch(true_block, false_block, cc);
1309 }
1310
1311
1312 void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
1313 Register left = ToRegister(instr->left());
1314 Register right = ToRegister(instr->right());
1315 Register result = ToRegister(instr->result());
1316
1317 __ cmp(left, Operand(right));
1318 __ mov(result, Handle<Object>(HEAP->true_value()));
1319 NearLabel done;
1320 __ j(equal, &done);
1321 __ mov(result, Handle<Object>(HEAP->false_value()));
1322 __ bind(&done);
1323 }
1324
1325
1326 void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
1327 Register left = ToRegister(instr->left());
1328 Register right = ToRegister(instr->right());
1329 int false_block = chunk_->LookupDestination(instr->false_block_id());
1330 int true_block = chunk_->LookupDestination(instr->true_block_id());
1331
1332 __ cmp(left, Operand(right));
1333 EmitBranch(true_block, false_block, equal);
1334 }
1335
1336
1337 void LCodeGen::DoIsNull(LIsNull* instr) {
1338 Register reg = ToRegister(instr->input());
1339 Register result = ToRegister(instr->result());
1340
1341 // TODO(fsc): If the expression is known to be a smi, then it's
1342 // definitely not null. Materialize false.
1343
1344 __ cmp(reg, FACTORY->null_value());
1345 if (instr->is_strict()) {
1346 __ mov(result, Handle<Object>(HEAP->true_value()));
1347 NearLabel done;
1348 __ j(equal, &done);
1349 __ mov(result, Handle<Object>(HEAP->false_value()));
1350 __ bind(&done);
1351 } else {
1352 NearLabel true_value, false_value, done;
1353 __ j(equal, &true_value);
1354 __ cmp(reg, FACTORY->undefined_value());
1355 __ j(equal, &true_value);
1356 __ test(reg, Immediate(kSmiTagMask));
1357 __ j(zero, &false_value);
1358 // Check for undetectable objects by looking in the bit field in
1359 // the map. The object has already been smi checked.
1360 Register scratch = result;
1361 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1362 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1363 __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1364 __ j(not_zero, &true_value);
1365 __ bind(&false_value);
1366 __ mov(result, Handle<Object>(HEAP->false_value()));
1367 __ jmp(&done);
1368 __ bind(&true_value);
1369 __ mov(result, Handle<Object>(HEAP->true_value()));
1370 __ bind(&done);
1371 }
1372 }
1373
1374
1375 void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
1376 Register reg = ToRegister(instr->input());
1377
1378 // TODO(fsc): If the expression is known to be a smi, then it's
1379 // definitely not null. Jump to the false block.
1380
1381 int true_block = chunk_->LookupDestination(instr->true_block_id());
1382 int false_block = chunk_->LookupDestination(instr->false_block_id());
1383
1384 __ cmp(reg, FACTORY->null_value());
1385 if (instr->is_strict()) {
1386 EmitBranch(true_block, false_block, equal);
1387 } else {
1388 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1389 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1390 __ j(equal, true_label);
1391 __ cmp(reg, FACTORY->undefined_value());
1392 __ j(equal, true_label);
1393 __ test(reg, Immediate(kSmiTagMask));
1394 __ j(zero, false_label);
1395 // Check for undetectable objects by looking in the bit field in
1396 // the map. The object has already been smi checked.
1397 Register scratch = ToRegister(instr->temp());
1398 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1399 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1400 __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1401 EmitBranch(true_block, false_block, not_zero);
1402 }
1403 }
1404
1405
1406 void LCodeGen::DoIsSmi(LIsSmi* instr) {
1407 Operand input = ToOperand(instr->input());
1408 Register result = ToRegister(instr->result());
1409
1410 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1411 __ test(input, Immediate(kSmiTagMask));
1412 __ mov(result, Handle<Object>(HEAP->true_value()));
1413 NearLabel done;
1414 __ j(zero, &done);
1415 __ mov(result, Handle<Object>(HEAP->false_value()));
1416 __ bind(&done);
1417 }
1418
1419
1420 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1421 Operand input = ToOperand(instr->input());
1422
1423 int true_block = chunk_->LookupDestination(instr->true_block_id());
1424 int false_block = chunk_->LookupDestination(instr->false_block_id());
1425
1426 __ test(input, Immediate(kSmiTagMask));
1427 EmitBranch(true_block, false_block, zero);
1428 }
1429
1430
1431 InstanceType LHasInstanceType::TestType() {
1432 InstanceType from = hydrogen()->from();
1433 InstanceType to = hydrogen()->to();
1434 if (from == FIRST_TYPE) return to;
1435 ASSERT(from == to || to == LAST_TYPE);
1436 return from;
1437 }
1438
1439
1440
1441 Condition LHasInstanceType::BranchCondition() {
1442 InstanceType from = hydrogen()->from();
1443 InstanceType to = hydrogen()->to();
1444 if (from == to) return equal;
1445 if (to == LAST_TYPE) return above_equal;
1446 if (from == FIRST_TYPE) return below_equal;
1447 UNREACHABLE();
1448 return equal;
1449 }
1450
1451
1452 void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
1453 Register input = ToRegister(instr->input());
1454 Register result = ToRegister(instr->result());
1455
1456 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1457 __ test(input, Immediate(kSmiTagMask));
1458 NearLabel done, is_false;
1459 __ j(zero, &is_false);
1460 __ CmpObjectType(input, instr->TestType(), result);
1461 __ j(NegateCondition(instr->BranchCondition()), &is_false);
1462 __ mov(result, Handle<Object>(HEAP->true_value()));
1463 __ jmp(&done);
1464 __ bind(&is_false);
1465 __ mov(result, Handle<Object>(HEAP->false_value()));
1466 __ bind(&done);
1467 }
1468
1469
1470 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1471 Register input = ToRegister(instr->input());
1472 Register temp = ToRegister(instr->temp());
1473
1474 int true_block = chunk_->LookupDestination(instr->true_block_id());
1475 int false_block = chunk_->LookupDestination(instr->false_block_id());
1476
1477 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1478
1479 __ test(input, Immediate(kSmiTagMask));
1480 __ j(zero, false_label);
1481
1482 __ CmpObjectType(input, instr->TestType(), temp);
1483 EmitBranch(true_block, false_block, instr->BranchCondition());
1484 }
1485
1486
1487 void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
1488 Register input = ToRegister(instr->input());
1489 Register result = ToRegister(instr->result());
1490
1491 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1492 __ mov(result, Handle<Object>(HEAP->true_value()));
1493 __ test(FieldOperand(input, String::kHashFieldOffset),
1494 Immediate(String::kContainsCachedArrayIndexMask));
1495 NearLabel done;
1496 __ j(zero, &done); // Hash bits are zero iff the array index is cached.
1497 __ mov(result, Handle<Object>(HEAP->false_value()));
1498 __ bind(&done);
1499 }
1500
1501
1502 void LCodeGen::DoHasCachedArrayIndexAndBranch(
1503 LHasCachedArrayIndexAndBranch* instr) {
1504 Register input = ToRegister(instr->input());
1505
1506 int true_block = chunk_->LookupDestination(instr->true_block_id());
1507 int false_block = chunk_->LookupDestination(instr->false_block_id());
1508
1509 __ test(FieldOperand(input, String::kHashFieldOffset),
1510 Immediate(String::kContainsCachedArrayIndexMask));
1511 EmitBranch(true_block, false_block, equal);
1512 }
1513
1514
1515 // Branches to a label or falls through with the answer in the z flag. Trashes
1516 // the temp registers, but not the input. Only input and temp2 may alias.
1517 void LCodeGen::EmitClassOfTest(Label* is_true,
1518 Label* is_false,
1519 Handle<String> class_name,
1520 Register input,
1521 Register temp,
1522 Register temp2) {
1523 ASSERT(!input.is(temp));
1524 ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
1525 __ test(input, Immediate(kSmiTagMask));
1526 __ j(zero, is_false);
1527 __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
1528 __ j(below, is_false);
1529
1530 // Map is now in temp.
1531 // Functions have class 'Function'.
1532 __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
1533 if (class_name->IsEqualTo(CStrVector("Function"))) {
1534 __ j(equal, is_true);
1535 } else {
1536 __ j(equal, is_false);
1537 }
1538
1539 // Check if the constructor in the map is a function.
1540 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
1541
1542 // As long as JS_FUNCTION_TYPE is the last instance type and it is
1543 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
1544 // LAST_JS_OBJECT_TYPE.
1545 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1546 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1547
1548 // Objects with a non-function constructor have class 'Object'.
1549 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
1550 if (class_name->IsEqualTo(CStrVector("Object"))) {
1551 __ j(not_equal, is_true);
1552 } else {
1553 __ j(not_equal, is_false);
1554 }
1555
1556 // temp now contains the constructor function. Grab the
1557 // instance class name from there.
1558 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1559 __ mov(temp, FieldOperand(temp,
1560 SharedFunctionInfo::kInstanceClassNameOffset));
1561 // The class name we are testing against is a symbol because it's a literal.
1562 // The name in the constructor is a symbol because of the way the context is
1563 // booted. This routine isn't expected to work for random API-created
1564 // classes and it doesn't have to because you can't access it with natives
1565 // syntax. Since both sides are symbols it is sufficient to use an identity
1566 // comparison.
1567 __ cmp(temp, class_name);
1568 // End with the answer in the z flag.
1569 }
1570
1571
1572 void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
1573 Register input = ToRegister(instr->input());
1574 Register result = ToRegister(instr->result());
1575 ASSERT(input.is(result));
1576 Register temp = ToRegister(instr->temporary());
1577 Handle<String> class_name = instr->hydrogen()->class_name();
1578 NearLabel done;
1579 Label is_true, is_false;
1580
1581 EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
1582
1583 __ j(not_equal, &is_false);
1584
1585 __ bind(&is_true);
1586 __ mov(result, Handle<Object>(HEAP->true_value()));
1587 __ jmp(&done);
1588
1589 __ bind(&is_false);
1590 __ mov(result, Handle<Object>(HEAP->false_value()));
1591 __ bind(&done);
1592 }
1593
1594
1595 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1596 Register input = ToRegister(instr->input());
1597 Register temp = ToRegister(instr->temporary());
1598 Register temp2 = ToRegister(instr->temporary2());
1599 if (input.is(temp)) {
1600 // Swap.
1601 Register swapper = temp;
1602 temp = temp2;
1603 temp2 = swapper;
1604 }
1605 Handle<String> class_name = instr->hydrogen()->class_name();
1606
1607 int true_block = chunk_->LookupDestination(instr->true_block_id());
1608 int false_block = chunk_->LookupDestination(instr->false_block_id());
1609
1610 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1611 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1612
1613 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1614
1615 EmitBranch(true_block, false_block, equal);
1616 }
1617
1618
1619 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
1620 Register reg = ToRegister(instr->input());
1621 int true_block = instr->true_block_id();
1622 int false_block = instr->false_block_id();
1623
1624 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
1625 EmitBranch(true_block, false_block, equal);
1626 }
1627
1628
1629 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
1630 InstanceofStub stub;
1631 __ push(ToOperand(instr->left()));
1632 __ push(ToOperand(instr->right()));
1633 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1634
1635 NearLabel true_value, done;
1636 __ test(eax, Operand(eax));
1637 __ j(zero, &true_value);
1638 __ mov(ToRegister(instr->result()), FACTORY->false_value());
1639 __ jmp(&done);
1640 __ bind(&true_value);
1641 __ mov(ToRegister(instr->result()), FACTORY->true_value());
1642 __ bind(&done);
1643 }
1644
1645
1646 void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
1647 int true_block = chunk_->LookupDestination(instr->true_block_id());
1648 int false_block = chunk_->LookupDestination(instr->false_block_id());
1649
1650 InstanceofStub stub;
1651 __ push(ToOperand(instr->left()));
1652 __ push(ToOperand(instr->right()));
1653 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1654 __ test(eax, Operand(eax));
1655 EmitBranch(true_block, false_block, zero);
1656 }
1657
1658
1659 static Condition ComputeCompareCondition(Token::Value op) {
1660 switch (op) {
1661 case Token::EQ_STRICT:
1662 case Token::EQ:
1663 return equal;
1664 case Token::LT:
1665 return less;
1666 case Token::GT:
1667 return greater;
1668 case Token::LTE:
1669 return less_equal;
1670 case Token::GTE:
1671 return greater_equal;
1672 default:
1673 UNREACHABLE();
1674 return no_condition;
1675 }
1676 }
1677
1678
1679 void LCodeGen::DoCmpT(LCmpT* instr) {
1680 Token::Value op = instr->op();
1681
1682 Handle<Code> ic = CompareIC::GetUninitialized(op);
1683 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1684
1685 Condition condition = ComputeCompareCondition(op);
1686 if (op == Token::GT || op == Token::LTE) {
1687 condition = ReverseCondition(condition);
1688 }
1689 NearLabel true_value, done;
1690 __ test(eax, Operand(eax));
1691 __ j(condition, &true_value);
1692 __ mov(ToRegister(instr->result()), FACTORY->false_value());
1693 __ jmp(&done);
1694 __ bind(&true_value);
1695 __ mov(ToRegister(instr->result()), FACTORY->true_value());
1696 __ bind(&done);
1697 }
1698
1699
1700 void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
1701 Token::Value op = instr->op();
1702 int true_block = chunk_->LookupDestination(instr->true_block_id());
1703 int false_block = chunk_->LookupDestination(instr->false_block_id());
1704
1705 Handle<Code> ic = CompareIC::GetUninitialized(op);
1706 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1707
1708 // The compare stub expects the condition and the input operands to be
1709 // reversed for GT and LTE.
1710 Condition condition = ComputeCompareCondition(op);
1711 if (op == Token::GT || op == Token::LTE) {
1712 condition = ReverseCondition(condition);
1713 }
1714 __ test(eax, Operand(eax));
1715 EmitBranch(true_block, false_block, condition);
1716 }
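// Reviewer note: per the comment above, the CompareIC evaluates GT and LTE
// with the operands swapped (a > b as b < a, a <= b as b >= a), so
// ReverseCondition undoes the swap when interpreting the value tested by
// test(eax, eax).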
1717
1718
1719 void LCodeGen::DoReturn(LReturn* instr) {
1720 if (FLAG_trace) {
1721 // Preserve the return value on the stack and rely on the runtime
1722 // call to return the value in the same register.
1723 __ push(eax);
1724 __ CallRuntime(Runtime::kTraceExit, 1);
1725 }
1726 __ mov(esp, ebp);
1727 __ pop(ebp);
1728 __ ret((ParameterCount() + 1) * kPointerSize);
1729 }
1730
1731
1732 void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
1733 Register result = ToRegister(instr->result());
1734 __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
1735 if (instr->hydrogen()->check_hole_value()) {
1736 __ cmp(result, FACTORY->the_hole_value());
1737 DeoptimizeIf(equal, instr->environment());
1738 }
1739 }
1740
1741
1742 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
1743 Register value = ToRegister(instr->input());
1744 __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
1745 }
1746
1747
1748 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
1749 Register object = ToRegister(instr->input());
1750 Register result = ToRegister(instr->result());
1751 if (instr->hydrogen()->is_in_object()) {
1752 __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
1753 } else {
1754 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
1755 __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
1756 }
1757 }
1758
1759
1760 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
1761 ASSERT(ToRegister(instr->object()).is(eax));
1762 ASSERT(ToRegister(instr->result()).is(eax));
1763
1764 __ mov(ecx, instr->name());
1765 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
1766 Builtins::LoadIC_Initialize));
1767 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1768 }
1769
1770
1771 void LCodeGen::DoLoadElements(LLoadElements* instr) {
1772 ASSERT(instr->result()->Equals(instr->input()));
1773 Register reg = ToRegister(instr->input());
1774 __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
1775 if (FLAG_debug_code) {
1776 NearLabel done;
1777 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1778 Immediate(FACTORY->fixed_array_map()));
1779 __ j(equal, &done);
1780 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1781 Immediate(FACTORY->fixed_cow_array_map()));
1782 __ Check(equal, "Check for fast elements failed.");
1783 __ bind(&done);
1784 }
1785 }
1786
1787
1788 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
1789 Register arguments = ToRegister(instr->arguments());
1790 Register length = ToRegister(instr->length());
1791 Operand index = ToOperand(instr->index());
1792 Register result = ToRegister(instr->result());
1793
1794 __ sub(length, index);
1795 DeoptimizeIf(below_equal, instr->environment());
1796
1797 __ mov(result, Operand(arguments, length, times_4, kPointerSize));
1798 }
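
A note on the addressing trick above: after sub(length, index) the length register holds the distance from the end, which serves both as the bounds deopt check and as the scaled index into the arguments area. A standalone sketch of the effective address (the helper name and the non-negativity assumption are mine, not V8's):

    #include <cstdint>
    #include <cassert>

    // Sketch of DoAccessArgumentsAt's addressing: the scaled operand encodes
    // base + (length - index) * 4 + 4, so argument 0 sits highest in memory,
    // matching a stack that grows downward.
    const int kPointerSize = 4;

    intptr_t ElementAddress(intptr_t base, int32_t length, int32_t index) {
      int32_t distance = length - index;   // the emitted sub(length, index)
      assert(distance > 0);                // DeoptimizeIf(below_equal, ...)
      return base + distance * kPointerSize + kPointerSize;
    }

    int main() {
      // With length == 3, indexes 0..2 map to base+16, base+12, base+8.
      assert(ElementAddress(0, 3, 0) == 16);
      assert(ElementAddress(0, 3, 2) == 8);
    }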
1799
1800
1801 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
1802 Register elements = ToRegister(instr->elements());
1803 Register key = ToRegister(instr->key());
1804 Register result;
1805 if (instr->load_result() != NULL) {
1806 result = ToRegister(instr->load_result());
1807 } else {
1808 result = ToRegister(instr->result());
1809 ASSERT(result.is(elements));
1810 }
1811
1812 // Load the result.
1813 __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
1814
1815 Representation r = instr->hydrogen()->representation();
1816 if (r.IsInteger32()) {
1817 // Untag and check for smi.
1818 __ SmiUntag(result);
1819 DeoptimizeIf(carry, instr->environment());
1820 } else if (r.IsDouble()) {
1821 EmitNumberUntagD(result,
1822 ToDoubleRegister(instr->result()),
1823 instr->environment());
1824 } else {
1825 // Check for the hole value.
1826 ASSERT(r.IsTagged());
1827 __ cmp(result, FACTORY->the_hole_value());
1828 DeoptimizeIf(equal, instr->environment());
1829 }
1830 }
1831
1832
1833 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
1834 ASSERT(ToRegister(instr->object()).is(edx));
1835 ASSERT(ToRegister(instr->key()).is(eax));
1836
1837 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
1838 Builtins::KeyedLoadIC_Initialize));
1839 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1840 }
1841
1842
1843 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
1844 Register result = ToRegister(instr->result());
1845
1846 // Check for arguments adapter frame.
1847 Label done, adapted;
1848 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1849 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
1850 __ cmp(Operand(result),
1851 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1852 __ j(equal, &adapted);
1853
1854 // No arguments adaptor frame.
1855 __ mov(result, Operand(ebp));
1856 __ jmp(&done);
1857
1858 // Arguments adaptor frame present.
1859 __ bind(&adapted);
1860 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1861
1862 // Done. Pointer to topmost argument is in result.
1863 __ bind(&done);
1864 }
1865
1866
1867 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
1868 Operand elem = ToOperand(instr->input());
1869 Register result = ToRegister(instr->result());
1870
1871 Label done;
1872
1873 // No arguments adaptor frame. Number of arguments is fixed.
1874 __ cmp(ebp, elem);
1875 __ mov(result, Immediate(scope()->num_parameters()));
1876 __ j(equal, &done);
1877
1878 // Arguments adaptor frame present. Get argument length from there.
1879 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1880 __ mov(result, Operand(result,
1881 ArgumentsAdaptorFrameConstants::kLengthOffset));
1882 __ SmiUntag(result);
1883
1884 // Done. Argument length is in result register.
1885 __ bind(&done);
1886 }
1887
1888
1889 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
1890 Register receiver = ToRegister(instr->receiver());
1891 ASSERT(ToRegister(instr->function()).is(edi));
1892 ASSERT(ToRegister(instr->result()).is(eax));
1893
1894 // If the receiver is null or undefined, we have to pass the
1895 // global object as a receiver.
1896 NearLabel global_receiver, receiver_ok;
1897 __ cmp(receiver, FACTORY->null_value());
1898 __ j(equal, &global_receiver);
1899 __ cmp(receiver, FACTORY->undefined_value());
1900 __ j(not_equal, &receiver_ok);
1901 __ bind(&global_receiver);
1902 __ mov(receiver, GlobalObjectOperand());
1903 __ bind(&receiver_ok);
1904
1905 Register length = ToRegister(instr->length());
1906 Register elements = ToRegister(instr->elements());
1907
1908 Label invoke;
1909
1910 // Copy the arguments to this function possibly from the
1911 // adaptor frame below it.
1912 const uint32_t kArgumentsLimit = 1 * KB;
1913 __ cmp(length, kArgumentsLimit);
1914 DeoptimizeIf(above, instr->environment());
1915
1916 __ push(receiver);
1917 __ mov(receiver, length);
1918
1919 // Loop through the arguments pushing them onto the execution
1920 // stack.
1921 Label loop;
1922 // length is a small non-negative integer, due to the test above.
1923 __ test(length, Operand(length));
1924 __ j(zero, &invoke);
1925 __ bind(&loop);
1926 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
1927 __ dec(length);
1928 __ j(not_zero, &loop);
1929
1930 // Invoke the function.
1931 __ bind(&invoke);
1932 ASSERT(receiver.is(eax));
1933 v8::internal::ParameterCount actual(eax);
1934 SafepointGenerator safepoint_generator(this,
1935 instr->pointer_map(),
1936 Safepoint::kNoDeoptimizationIndex);
1937 __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
1938 }
1939
1940
1941 void LCodeGen::DoPushArgument(LPushArgument* instr) {
1942 LOperand* argument = instr->input();
1943 if (argument->IsConstantOperand()) {
1944 __ push(ToImmediate(argument));
1945 } else {
1946 __ push(ToOperand(argument));
1947 }
1948 }
1949
1950
1951 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
1952 Register result = ToRegister(instr->result());
1953 __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
1954 }
1955
1956
1957 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
1958 Register result = ToRegister(instr->result());
1959 __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
1960 __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
1961 }
1962
1963
1964 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
1965 int arity,
1966 LInstruction* instr) {
1967 // Change context if needed.
1968 bool change_context =
1969 (graph()->info()->closure()->context() != function->context()) ||
1970 scope()->contains_with() ||
1971 (scope()->num_heap_slots() > 0);
1972 if (change_context) {
1973 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
1974 }
1975
1976 // Set eax to arguments count if adaptation is not needed. Assumes that eax
1977 // is available to write to at this point.
1978 if (!function->NeedsArgumentsAdaption()) {
1979 __ mov(eax, arity);
1980 }
1981
1982 LPointerMap* pointers = instr->pointer_map();
1983 RecordPosition(pointers->position());
1984
1985 // Invoke function.
1986 if (*function == *graph()->info()->closure()) {
1987 __ CallSelf();
1988 } else {
1989 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
1990 }
1991
1992 // Set up deoptimization.
1993 RegisterLazyDeoptimization(instr);
1994
1995 // Restore context.
1996 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
1997 }
1998
1999
2000 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2001 ASSERT(ToRegister(instr->result()).is(eax));
2002 __ mov(edi, instr->function());
2003 CallKnownFunction(instr->function(), instr->arity(), instr);
2004 }
2005
2006
2007 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2008 Register input_reg = ToRegister(instr->input());
2009 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2010 FACTORY->heap_number_map());
2011 DeoptimizeIf(not_equal, instr->environment());
2012
2013 Label done;
2014 Register tmp = input_reg.is(eax) ? ecx : eax;
2015 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
2016
2017 // Preserve the value of all registers.
2018 __ PushSafepointRegisters();
2019
2020 Label negative;
2021 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2022 // Check the sign of the argument. If the argument is positive,
2023 // just return it.
2024 __ test(tmp, Immediate(HeapNumber::kSignMask));
2025 __ j(not_zero, &negative);
2026 __ mov(tmp, input_reg);
2027 __ jmp(&done);
2028
2029 __ bind(&negative);
2030
2031 Label allocated, slow;
2032 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
2033 __ jmp(&allocated);
2034
2035 // Slow case: Call the runtime system to do the number allocation.
2036 __ bind(&slow);
2037
2038 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2039 RecordSafepointWithRegisters(
2040 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2041 // Set the pointer to the new heap number in tmp.
2042 if (!tmp.is(eax)) __ mov(tmp, eax);
2043
2044 // Restore input_reg after call to runtime.
2045 __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
2046
2047 __ bind(&allocated);
2048 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2049 __ and_(tmp2, ~HeapNumber::kSignMask);
2050 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
2051 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
2052 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
2053
2054 __ bind(&done);
2055 __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
2056
2057 __ PopSafepointRegisters();
2058 }
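
The deferred path boils down to one bit operation: the number is copied into a fresh heap number with the sign bit of the exponent (high) word cleared, while the mantissa word is copied verbatim. The same operation on an unboxed double, assuming IEEE 754 layout and that HeapNumber::kSignMask is bit 31 of the high word:

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Sketch of the and_(tmp2, ~HeapNumber::kSignMask) step: clearing bit 31
    // of the high word of an IEEE 754 double yields its absolute value.
    const uint32_t kSignMask = 0x80000000u;  // assumed value of kSignMask

    double ClearSignBit(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits &= ~(static_cast<uint64_t>(kSignMask) << 32);
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }

    int main() {
      assert(ClearSignBit(-2.5) == 2.5);
      assert(ClearSignBit(2.5) == 2.5);
    }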
2059
2060
2061 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2062 // Class for deferred case.
2063 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2064 public:
2065 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2066 LUnaryMathOperation* instr)
2067 : LDeferredCode(codegen), instr_(instr) { }
2068 virtual void Generate() {
2069 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2070 }
2071 private:
2072 LUnaryMathOperation* instr_;
2073 };
2074
2075 ASSERT(instr->input()->Equals(instr->result()));
2076 Representation r = instr->hydrogen()->value()->representation();
2077
2078 if (r.IsDouble()) {
2079 XMMRegister scratch = xmm0;
2080 XMMRegister input_reg = ToDoubleRegister(instr->input());
2081 __ pxor(scratch, scratch);
2082 __ subsd(scratch, input_reg);
2083 __ pand(input_reg, scratch);
2084 } else if (r.IsInteger32()) {
2085 Register input_reg = ToRegister(instr->input());
2086 __ test(input_reg, Operand(input_reg));
2087 Label is_positive;
2088 __ j(not_sign, &is_positive);
2089 __ neg(input_reg);
2090 __ test(input_reg, Operand(input_reg));
2091 DeoptimizeIf(negative, instr->environment());
2092 __ bind(&is_positive);
2093 } else { // Tagged case.
2094 DeferredMathAbsTaggedHeapNumber* deferred =
2095 new DeferredMathAbsTaggedHeapNumber(this, instr);
2096 Label not_smi;
2097 Register input_reg = ToRegister(instr->input());
2098 // Smi check.
2099 __ test(input_reg, Immediate(kSmiTagMask));
2100 __ j(not_zero, deferred->entry());
2101 __ test(input_reg, Operand(input_reg));
2102 Label is_positive;
2103 __ j(not_sign, &is_positive);
2104 __ neg(input_reg);
2105
2106 __ test(input_reg, Operand(input_reg));
2107 DeoptimizeIf(negative, instr->environment());
2108
2109 __ bind(&is_positive);
2110 __ bind(deferred->exit());
2111 }
2112 }
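
The double case is branch-free by construction: for any non-NaN x, 0.0 - x differs from x only in the sign bit, so ANDing the two bit patterns (the pxor/subsd/pand sequence) clears the sign and keeps the magnitude. A sketch of the identity (BranchlessAbs is an illustrative name):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Sketch of the pxor/subsd/pand idea: abs(x) == bits(x) & bits(0.0 - x)
    // for non-NaN x, because negation flips exactly the sign bit.
    double BranchlessAbs(double x) {
      uint64_t a, b;
      double neg = 0.0 - x;
      std::memcpy(&a, &x, sizeof(a));
      std::memcpy(&b, &neg, sizeof(b));
      uint64_t anded = a & b;
      double result;
      std::memcpy(&result, &anded, sizeof(result));
      return result;
    }

    int main() {
      assert(BranchlessAbs(-3.0) == 3.0);
      assert(BranchlessAbs(3.0) == 3.0);
      assert(BranchlessAbs(-0.0) == 0.0);
    }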
2113
2114
2115 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2116 XMMRegister xmm_scratch = xmm0;
2117 Register output_reg = ToRegister(instr->result());
2118 XMMRegister input_reg = ToDoubleRegister(instr->input());
2119 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
2120 __ ucomisd(input_reg, xmm_scratch);
2121
2122 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2123 DeoptimizeIf(below_equal, instr->environment());
2124 } else {
2125 DeoptimizeIf(below, instr->environment());
2126 }
2127
2128 // Use truncating instruction (OK because input is positive).
2129 __ cvttsd2si(output_reg, Operand(input_reg));
2130
2131 // Overflow is signalled with minint.
2132 __ cmp(output_reg, 0x80000000u);
2133 DeoptimizeIf(equal, instr->environment());
2134 }
2135
2136
2137 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2138 XMMRegister xmm_scratch = xmm0;
2139 Register output_reg = ToRegister(instr->result());
2140 XMMRegister input_reg = ToDoubleRegister(instr->input());
2141
2142 // xmm_scratch = 0.5
2143 ExternalReference one_half = ExternalReference::address_of_one_half();
2144 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
2145
2146 // input = input + 0.5
2147 __ addsd(input_reg, xmm_scratch);
2148
2149 // We need to return -0 for the input range [-0.5, 0), otherwise
2150 // compute Math.floor(value + 0.5).
2151 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2152 __ ucomisd(input_reg, xmm_scratch);
2153 DeoptimizeIf(below_equal, instr->environment());
2154 } else {
2155 // If we don't need to bail out on -0, we only need to bail out
2156 // on negative inputs.
2157 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
2158 __ ucomisd(input_reg, xmm_scratch);
2159 DeoptimizeIf(below, instr->environment());
2160 }
2161
2162 // Compute Math.floor(value + 0.5).
2163 // Use truncating instruction (OK because input is positive).
2164 __ cvttsd2si(output_reg, Operand(input_reg));
2165
2166 // Overflow is signalled with minint.
2167 __ cmp(output_reg, 0x80000000u);
2168 DeoptimizeIf(equal, instr->environment());
2169 }
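
Worth spelling out why truncation is enough here: once the guards have established that input + 0.5 is non-negative, truncation toward zero coincides with floor, and cvttsd2si signals overflow by producing kMinInt. A sketch under those assumptions (the helper name and the explicit overflow branch are mine):

    #include <cstdint>
    #include <cassert>

    // Sketch of DoMathRound's reduction: for a non-negative adjusted input,
    // truncation toward zero equals floor, and 0x80000000 marks overflow
    // just as cvttsd2si reports it.
    const int32_t kMinInt = static_cast<int32_t>(0x80000000u);

    int32_t RoundNonNegative(double x) {
      double adjusted = x + 0.5;
      assert(adjusted >= 0.0);                       // the deopt guard above
      if (adjusted >= 2147483648.0) return kMinInt;  // overflow marker
      return static_cast<int32_t>(adjusted);         // truncate == floor
    }

    int main() {
      assert(RoundNonNegative(2.4) == 2);
      assert(RoundNonNegative(2.5) == 3);
      assert(RoundNonNegative(0.49) == 0);
    }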
2170
2171
2172 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
2173 XMMRegister input_reg = ToDoubleRegister(instr->input());
2174 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2175 __ sqrtsd(input_reg, input_reg);
2176 }
2177
2178
2179 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
2180 switch (instr->op()) {
2181 case kMathAbs:
2182 DoMathAbs(instr);
2183 break;
2184 case kMathFloor:
2185 DoMathFloor(instr);
2186 break;
2187 case kMathRound:
2188 DoMathRound(instr);
2189 break;
2190 case kMathSqrt:
2191 DoMathSqrt(instr);
2192 break;
2193 default:
2194 UNREACHABLE();
2195 }
2196 }
2197
2198
2199 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
2200 ASSERT(ToRegister(instr->result()).is(eax));
2201
2202 int arity = instr->arity();
2203 Handle<Code> ic = Isolate::Current()->stub_cache()->
2204 ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
2205 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2206 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2207 }
2208
2209
2210 void LCodeGen::DoCallNamed(LCallNamed* instr) {
2211 ASSERT(ToRegister(instr->result()).is(eax));
2212
2213 int arity = instr->arity();
2214 Handle<Code> ic = Isolate::Current()->stub_cache()->
2215 ComputeCallInitialize(arity, NOT_IN_LOOP);
2216 __ mov(ecx, instr->name());
2217 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2218 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2219 }
2220
2221
2222 void LCodeGen::DoCallFunction(LCallFunction* instr) {
2223 ASSERT(ToRegister(instr->result()).is(eax));
2224
2225 int arity = instr->arity();
2226 CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
2227 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2228 __ Drop(1);
2229 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2230 }
2231
2232
2233 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
2234 ASSERT(ToRegister(instr->result()).is(eax));
2235
2236 int arity = instr->arity();
2237 Handle<Code> ic = Isolate::Current()->stub_cache()->
2238 ComputeCallInitialize(arity, NOT_IN_LOOP);
2239 __ mov(ecx, instr->name());
2240 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2241 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2242 }
2243
2244
2245 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
2246 ASSERT(ToRegister(instr->result()).is(eax));
2247 __ mov(edi, instr->target());
2248 CallKnownFunction(instr->target(), instr->arity(), instr);
2249 }
2250
2251
2252 void LCodeGen::DoCallNew(LCallNew* instr) {
2253 ASSERT(ToRegister(instr->input()).is(edi));
2254 ASSERT(ToRegister(instr->result()).is(eax));
2255
2256 Handle<Code> builtin(Isolate::Current()->builtins()->builtin(
2257 Builtins::JSConstructCall));
2258 __ Set(eax, Immediate(instr->arity()));
2259 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
2260 }
2261
2262
2263 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2264 CallRuntime(instr->function(), instr->arity(), instr);
2265 }
2266
2267
2268 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
2269 Register object = ToRegister(instr->object());
2270 Register value = ToRegister(instr->value());
2271 int offset = instr->offset();
2272
2273 if (!instr->transition().is_null()) {
2274 __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
2275 }
2276
2277 // Do the store.
2278 if (instr->is_in_object()) {
2279 __ mov(FieldOperand(object, offset), value);
2280 if (instr->needs_write_barrier()) {
2281 Register temp = ToRegister(instr->temp());
2282 // Update the write barrier for the object for in-object properties.
2283 __ RecordWrite(object, offset, value, temp);
2284 }
2285 } else {
2286 Register temp = ToRegister(instr->temp());
2287 __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
2288 __ mov(FieldOperand(temp, offset), value);
2289 if (instr->needs_write_barrier()) {
2290 // Update the write barrier for the properties array.
2291 // object is used as a scratch register.
2292 __ RecordWrite(temp, offset, value, object);
2293 }
2294 }
2295 }
2296
2297
2298 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
2299 ASSERT(ToRegister(instr->object()).is(edx));
2300 ASSERT(ToRegister(instr->value()).is(eax));
2301
2302 __ mov(ecx, instr->name());
2303 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
2304 Builtins::StoreIC_Initialize));
2305 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2306 }
2307
2308
2309 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
2310 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
2311 DeoptimizeIf(above_equal, instr->environment());
2312 }
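
The single above_equal test covers negative indexes for free: reinterpreted as unsigned, a negative index exceeds any valid length. Sketch (the helper name is illustrative):

    #include <cstdint>
    #include <cassert>

    // Sketch: the single above_equal test in DoBoundsCheck, written out.
    bool BoundsCheckFails(int32_t index, int32_t length) {
      // Unsigned compare: a negative index wraps to a huge value and fails.
      return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);
    }

    int main() {
      assert(!BoundsCheckFails(0, 10));
      assert(BoundsCheckFails(10, 10));   // index == length
      assert(BoundsCheckFails(-1, 10));   // negative index caught for free
    }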
2313
2314
2315 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
2316 Register value = ToRegister(instr->value());
2317 Register elements = ToRegister(instr->object());
2318 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
2319
2320 // Do the store.
2321 if (instr->key()->IsConstantOperand()) {
2322 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
2323 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2324 int offset =
2325 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
2326 __ mov(FieldOperand(elements, offset), value);
2327 } else {
2328 __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
2329 value);
2330 }
2331
2332 // Update the write barrier unless we're certain that we're storing a smi.
2333 if (instr->hydrogen()->NeedsWriteBarrier()) {
2334 // Compute address of modified element and store it into key register.
2335 __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
2336 __ RecordWrite(elements, key, value);
2337 }
2338 }
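
For the constant-key case above, the displacement folds three constants together; FieldOperand additionally subtracts the heap-object tag. A sketch of the arithmetic, where the header size and tag values are assumptions matching ia32 V8 of this era:

    #include <cassert>

    // Sketch of the keyed-store displacement. The kFixedArrayHeaderSize and
    // kHeapObjectTag values are assumptions (map + length words, tag 1).
    const int kPointerSize = 4;
    const int kFixedArrayHeaderSize = 8;
    const int kHeapObjectTag = 1;

    int ElementDisplacement(int index) {
      // What FieldOperand(elements, key, times_4, FixedArray::kHeaderSize)
      // encodes relative to the tagged elements pointer.
      return kFixedArrayHeaderSize + index * kPointerSize - kHeapObjectTag;
    }

    int main() {
      assert(ElementDisplacement(0) == 7);
      assert(ElementDisplacement(2) == 15);
    }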
2339
2340
2341 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
2342 ASSERT(ToRegister(instr->object()).is(edx));
2343 ASSERT(ToRegister(instr->key()).is(ecx));
2344 ASSERT(ToRegister(instr->value()).is(eax));
2345
2346 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
2347 Builtins::KeyedStoreIC_Initialize));
2348 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2349 }
2350
2351
2352 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
2353 LOperand* input = instr->input();
2354 ASSERT(input->IsRegister() || input->IsStackSlot());
2355 LOperand* output = instr->result();
2356 ASSERT(output->IsDoubleRegister());
2357 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
2358 }
2359
2360
2361 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
2362 class DeferredNumberTagI: public LDeferredCode {
2363 public:
2364 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
2365 : LDeferredCode(codegen), instr_(instr) { }
2366 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
2367 private:
2368 LNumberTagI* instr_;
2369 };
2370
2371 LOperand* input = instr->input();
2372 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2373 Register reg = ToRegister(input);
2374
2375 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
2376 __ SmiTag(reg);
2377 __ j(overflow, deferred->entry());
2378 __ bind(deferred->exit());
2379 }
2380
2381
2382 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
2383 Label slow;
2384 Register reg = ToRegister(instr->input());
2385 Register tmp = reg.is(eax) ? ecx : eax;
2386
2387 // Preserve the value of all registers.
2388 __ PushSafepointRegisters();
2389
2390 // There was overflow, so bits 30 and 31 of the original integer
2391 // disagree. Try to allocate a heap number in new space and store
2392 // the value in there. If that fails, call the runtime system.
2393 NearLabel done;
2394 __ SmiUntag(reg);
2395 __ xor_(reg, 0x80000000);
2396 __ cvtsi2sd(xmm0, Operand(reg));
2397 if (FLAG_inline_new) {
2398 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
2399 __ jmp(&done);
2400 }
2401
2402 // Slow case: Call the runtime system to do the number allocation.
2403 __ bind(&slow);
2404
2405 // TODO(3095996): Put a valid pointer value in the stack slot where the result
2406 // register is stored, as this register is in the pointer map, but contains an
2407 // integer value.
2408 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
2409
2410 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2411 RecordSafepointWithRegisters(
2412 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2413 if (!reg.is(eax)) __ mov(reg, eax);
2414
2415 // Done. Store the value in xmm0 into the value field of the allocated
2416 // heap number.
2417 __ bind(&done);
2418 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
2419 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
2420 __ PopSafepointRegisters();
2421 }
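
The SmiUntag/xor pair above deserves a note: the tag shift (shl 1) overflowed, which means bits 30 and 31 of the original value disagreed; the arithmetic shift back restores every bit except the sign, which now holds bit 30, and the xor flips it back. A standalone check of that identity (the helper is illustrative and assumes arithmetic right shift of negative values, as on all mainstream compilers):

    #include <cstdint>
    #include <cassert>

    // Sketch: recovering the original int32 after a smi tag (shl 1)
    // overflowed, mirroring the SmiUntag + xor_(reg, 0x80000000) pair.
    int32_t RecoverAfterTagOverflow(int32_t value) {
      int32_t tagged = static_cast<int32_t>(
          static_cast<uint32_t>(value) << 1);       // SmiTag, overflowing
      int32_t untagged = tagged >> 1;               // SmiUntag (arithmetic)
      return untagged ^ static_cast<int32_t>(0x80000000u);  // fix sign bit
    }

    int main() {
      // Values outside the 31-bit smi range make shl 1 overflow.
      assert(RecoverAfterTagOverflow(0x40000000) == 0x40000000);
      assert(RecoverAfterTagOverflow(-0x40000001) == -0x40000001);
    }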
2422
2423
2424 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
2425 class DeferredNumberTagD: public LDeferredCode {
2426 public:
2427 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
2428 : LDeferredCode(codegen), instr_(instr) { }
2429 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
2430 private:
2431 LNumberTagD* instr_;
2432 };
2433
2434 XMMRegister input_reg = ToDoubleRegister(instr->input());
2435 Register reg = ToRegister(instr->result());
2436 Register tmp = ToRegister(instr->temp());
2437
2438 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
2439 if (FLAG_inline_new) {
2440 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
2441 } else {
2442 __ jmp(deferred->entry());
2443 }
2444 __ bind(deferred->exit());
2445 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
2446 }
2447
2448
2449 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
2450 // TODO(3095996): Get rid of this. For now, we need to make the
2451 // result register contain a valid pointer because it is already
2452 // contained in the register pointer map.
2453 Register reg = ToRegister(instr->result());
2454 __ Set(reg, Immediate(0));
2455
2456 __ PushSafepointRegisters();
2457 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2458 RecordSafepointWithRegisters(
2459 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2460 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
2461 __ PopSafepointRegisters();
2462 }
2463
2464
2465 void LCodeGen::DoSmiTag(LSmiTag* instr) {
2466 LOperand* input = instr->input();
2467 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2468 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
2469 __ SmiTag(ToRegister(input));
2470 }
2471
2472
2473 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
2474 LOperand* input = instr->input();
2475 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2476 if (instr->needs_check()) {
2477 __ test(ToRegister(input), Immediate(kSmiTagMask));
2478 DeoptimizeIf(not_zero, instr->environment());
2479 }
2480 __ SmiUntag(ToRegister(input));
2481 }
2482
2483
2484 void LCodeGen::EmitNumberUntagD(Register input_reg,
2485 XMMRegister result_reg,
2486 LEnvironment* env) {
2487 NearLabel load_smi, heap_number, done;
2488
2489 // Smi check.
2490 __ test(input_reg, Immediate(kSmiTagMask));
2491 __ j(zero, &load_smi, not_taken);
2492
2493 // Heap number map check.
2494 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2495 FACTORY->heap_number_map());
2496 __ j(equal, &heap_number);
2497
2498 __ cmp(input_reg, FACTORY->undefined_value());
2499 DeoptimizeIf(not_equal, env);
2500
2501 // Convert undefined to NaN.
2502 __ push(input_reg);
2503 __ mov(input_reg, FACTORY->nan_value());
2504 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
2505 __ pop(input_reg);
2506 __ jmp(&done);
2507
2508 // Heap number to XMM conversion.
2509 __ bind(&heap_number);
2510 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
2511 __ jmp(&done);
2512
2513 // Smi to XMM conversion
2514 __ bind(&load_smi);
2515 __ SmiUntag(input_reg); // Untag smi before converting to float.
2516 __ cvtsi2sd(result_reg, Operand(input_reg));
2517 __ SmiTag(input_reg); // Retag smi.
2518 __ bind(&done);
2519 }
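
For readers new to the tagging scheme, the helper above dispatches three ways: smis are untagged and converted, heap numbers have their double payload loaded, and undefined becomes NaN; everything else deoptimizes. A condensed model (TaggedValue is a hypothetical stand-in, not V8's representation):

    #include <cstdint>
    #include <cmath>
    #include <cassert>

    // Hypothetical model of the untag-to-double dispatch in EmitNumberUntagD.
    struct TaggedValue {
      enum Kind { kSmi, kHeapNumber, kUndefined, kOther };
      Kind kind;
      int32_t smi;     // valid when kind == kSmi (already untagged here)
      double number;   // valid when kind == kHeapNumber
    };

    bool UntagToDouble(const TaggedValue& v, double* out) {
      switch (v.kind) {
        case TaggedValue::kSmi:        *out = v.smi;        return true;
        case TaggedValue::kHeapNumber: *out = v.number;     return true;
        case TaggedValue::kUndefined:  *out = std::nan(""); return true;
        default:                       return false;  // DeoptimizeIf(...)
      }
    }

    int main() {
      double d;
      assert(UntagToDouble({TaggedValue::kSmi, 7, 0.0}, &d) && d == 7.0);
      assert(UntagToDouble({TaggedValue::kUndefined, 0, 0.0}, &d) &&
             std::isnan(d));
    }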
2520
2521
2522 class DeferredTaggedToI: public LDeferredCode {
2523 public:
2524 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
2525 : LDeferredCode(codegen), instr_(instr) { }
2526 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
2527 private:
2528 LTaggedToI* instr_;
2529 };
2530
2531
2532 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
2533 NearLabel done, heap_number;
2534 Register input_reg = ToRegister(instr->input());
2535
2536 // Heap number map check.
2537 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2538 FACTORY->heap_number_map());
2539
2540 if (instr->truncating()) {
2541 __ j(equal, &heap_number);
2542 // Check for undefined. Undefined is converted to zero for truncating
2543 // conversions.
2544 __ cmp(input_reg, FACTORY->undefined_value());
2545 DeoptimizeIf(not_equal, instr->environment());
2546 __ mov(input_reg, 0);
2547 __ jmp(&done);
2548
2549 __ bind(&heap_number);
2550 if (Isolate::Current()->cpu_features()->IsSupported(SSE3)) {
2551 CpuFeatures::Scope scope(SSE3);
2552 NearLabel convert;
2553 // Use more powerful conversion when sse3 is available.
2554 // Load x87 register with heap number.
2555 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
2556 // Get exponent alone and check for too-big exponent.
2557 __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2558 __ and_(input_reg, HeapNumber::kExponentMask);
2559 const uint32_t kTooBigExponent =
2560 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2561 __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
2562 __ j(less, &convert);
2563 // Pop FPU stack before deoptimizing.
2564 __ ffree(0);
2565 __ fincstp();
2566 DeoptimizeIf(no_condition, instr->environment());
2567
2568 // Reserve space for 64 bit answer.
2569 __ bind(&convert);
2570 __ sub(Operand(esp), Immediate(kDoubleSize));
2571 // Do conversion, which cannot fail because we checked the exponent.
2572 __ fisttp_d(Operand(esp, 0));
2573 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
2574 __ add(Operand(esp), Immediate(kDoubleSize));
2575 } else {
2576 NearLabel deopt;
2577 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
2578 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
2579 __ cvttsd2si(input_reg, Operand(xmm0));
2580 __ cmp(input_reg, 0x80000000u);
2581 __ j(not_equal, &done);
2582 // Check if the input was 0x80000000 (kMinInt).
2583 // If not, the conversion overflowed and we deoptimize.
2584 ExternalReference min_int = ExternalReference::address_of_min_int();
2585 __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
2586 __ ucomisd(xmm_temp, xmm0);
2587 DeoptimizeIf(not_equal, instr->environment());
2588 DeoptimizeIf(parity_even, instr->environment()); // NaN.
2589 }
2590 } else {
2591 // Deoptimize if we don't have a heap number.
2592 DeoptimizeIf(not_equal, instr->environment());
2593
2594 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
2595 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
2596 __ cvttsd2si(input_reg, Operand(xmm0));
2597 __ cvtsi2sd(xmm_temp, Operand(input_reg));
2598 __ ucomisd(xmm0, xmm_temp);
2599 DeoptimizeIf(not_equal, instr->environment());
2600 DeoptimizeIf(parity_even, instr->environment()); // NaN.
2601 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2602 __ test(input_reg, Operand(input_reg));
2603 __ j(not_zero, &done);
2604 __ movmskpd(input_reg, xmm0);
2605 __ and_(input_reg, 1);
2606 DeoptimizeIf(not_zero, instr->environment());
2607 }
2608 }
2609 __ bind(&done);
2610 }
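
The kTooBigExponent guard is pure bit inspection: an unbiased exponent of 63 or more means the magnitude is at least 2^63, which fisttp_d's 64-bit result cannot represent. A sketch of the same test, with constants assumed from the standard IEEE 754 double layout (bias 1023, exponent field in bits 20..30 of the high word):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Sketch of the kTooBigExponent test: reject doubles whose magnitude is
    // >= 2^63 by inspecting the biased exponent in the high word, as the
    // and_/cmp pair above does.
    const uint32_t kExponentMask = 0x7FF00000u;  // bits 20..30 of high word
    const int kExponentShift = 20;
    const int kExponentBias = 1023;

    bool TooBigForInt64(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return (hi & kExponentMask) >= static_cast<uint32_t>(
          (kExponentBias + 63) << kExponentShift);
    }

    int main() {
      assert(!TooBigForInt64(123456.0));
      assert(TooBigForInt64(9.3e18));   // just above 2^63
      assert(TooBigForInt64(1e300));
    }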
2611
2612
2613 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
2614 LOperand* input = instr->input();
2615 ASSERT(input->IsRegister());
2616 ASSERT(input->Equals(instr->result()));
2617
2618 Register input_reg = ToRegister(input);
2619
2620 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
2621
2622 // Smi check.
2623 __ test(input_reg, Immediate(kSmiTagMask));
2624 __ j(not_zero, deferred->entry());
2625
2626 // Smi to int32 conversion
2627 __ SmiUntag(input_reg); // Untag smi.
2628
2629 __ bind(deferred->exit());
2630 }
2631
2632
2633 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
2634 LOperand* input = instr->input();
2635 ASSERT(input->IsRegister());
2636 LOperand* result = instr->result();
2637 ASSERT(result->IsDoubleRegister());
2638
2639 Register input_reg = ToRegister(input);
2640 XMMRegister result_reg = ToDoubleRegister(result);
2641
2642 EmitNumberUntagD(input_reg, result_reg, instr->environment());
2643 }
2644
2645
2646 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
2647 LOperand* input = instr->input();
2648 ASSERT(input->IsDoubleRegister());
2649 LOperand* result = instr->result();
2650 ASSERT(result->IsRegister());
2651
2652 XMMRegister input_reg = ToDoubleRegister(input);
2653 Register result_reg = ToRegister(result);
2654
2655 if (instr->truncating()) {
2656 // Performs a truncating conversion of a floating point number as used by
2657 // the JS bitwise operations.
2658 __ cvttsd2si(result_reg, Operand(input_reg));
2659 __ cmp(result_reg, 0x80000000u);
2660 if (Isolate::Current()->cpu_features()->IsSupported(SSE3)) {
2661 // This will deoptimize if the exponent of the input is out of range.
2662 CpuFeatures::Scope scope(SSE3);
2663 NearLabel convert, done;
2664 __ j(not_equal, &done);
2665 __ sub(Operand(esp), Immediate(kDoubleSize));
2666 __ movdbl(Operand(esp, 0), input_reg);
2667 // Get exponent alone and check for too-big exponent.
2668 __ mov(result_reg, Operand(esp, sizeof(int32_t)));
2669 __ and_(result_reg, HeapNumber::kExponentMask);
2670 const uint32_t kTooBigExponent =
2671 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2672 __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
2673 __ j(less, &convert);
2674 __ add(Operand(esp), Immediate(kDoubleSize));
2675 DeoptimizeIf(no_condition, instr->environment());
2676 __ bind(&convert);
2677 // Do conversion, which cannot fail because we checked the exponent.
2678 __ fld_d(Operand(esp, 0));
2679 __ fisttp_d(Operand(esp, 0));
2680 __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
2681 __ add(Operand(esp), Immediate(kDoubleSize));
2682 __ bind(&done);
2683 } else {
2684 // This will bail out if the input was not in the int32 range (or,
2685 // unfortunately, if the input was 0x80000000).
2686 DeoptimizeIf(equal, instr->environment());
2687 }
2688 } else {
2689 NearLabel done;
2690 __ cvttsd2si(result_reg, Operand(input_reg));
2691 __ cvtsi2sd(xmm0, Operand(result_reg));
2692 __ ucomisd(xmm0, input_reg);
2693 DeoptimizeIf(not_equal, instr->environment());
2694 DeoptimizeIf(parity_even, instr->environment()); // NaN.
2695 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2696 // The integer converted back is equal to the original. We
2697 // only have to test if we got -0 as an input.
2698 __ test(result_reg, Operand(result_reg));
2699 __ j(not_zero, &done);
2700 __ movmskpd(result_reg, input_reg);
2701 // Bit 0 contains the sign of the double in input_reg.
2702 // If input was positive, we are ok and return 0, otherwise
2703 // deoptimize.
2704 __ and_(result_reg, 1);
2705 DeoptimizeIf(not_zero, instr->environment());
2706 }
2707 __ bind(&done);
2708 }
2709 }
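
The non-truncating path proves exactness by the round trip: truncate, convert back, and compare, with a separate sign-bit probe (movmskpd) to catch -0. A sketch, assuming in-range inputs (the generated code instead relies on cvttsd2si's kMinInt marker for the out-of-range case):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Sketch of the exact double->int32 conversion above: truncate, convert
    // back, compare, and treat -0 separately via the sign bit.
    bool ExactDoubleToInt32(double x, int32_t* out) {
      int32_t truncated = static_cast<int32_t>(x);  // cvttsd2si (in-range)
      if (static_cast<double>(truncated) != x) return false;  // inexact
      if (truncated == 0) {
        uint64_t bits;
        std::memcpy(&bits, &x, sizeof(bits));
        if (bits >> 63) return false;               // -0: deoptimize
      }
      *out = truncated;
      return true;
    }

    int main() {
      int32_t v;
      assert(ExactDoubleToInt32(42.0, &v) && v == 42);
      assert(!ExactDoubleToInt32(42.5, &v));
      assert(!ExactDoubleToInt32(-0.0, &v));
    }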
2710
2711
2712 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2713 LOperand* input = instr->input();
2714 ASSERT(input->IsRegister());
2715 __ test(ToRegister(input), Immediate(kSmiTagMask));
2716 DeoptimizeIf(instr->condition(), instr->environment());
2717 }
2718
2719
2720 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2721 Register input = ToRegister(instr->input());
2722 Register temp = ToRegister(instr->temp());
2723 InstanceType first = instr->hydrogen()->first();
2724 InstanceType last = instr->hydrogen()->last();
2725
2726 __ test(input, Immediate(kSmiTagMask));
2727 DeoptimizeIf(zero, instr->environment());
2728
2729 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2730 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
2731 static_cast<int8_t>(first));
2732
2733 // If there is only one type in the interval, check for equality.
2734 if (first == last) {
2735 DeoptimizeIf(not_equal, instr->environment());
2736 } else {
2737 DeoptimizeIf(below, instr->environment());
2738 // Omit check for the last type.
2739 if (last != LAST_TYPE) {
2740 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
2741 static_cast<int8_t>(last));
2742 DeoptimizeIf(above, instr->environment());
2743 }
2744 }
2745 }
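
The interval check costs at most two byte compares, and both endpoints can be elided: only one compare when first == last, and no upper compare when the interval runs to LAST_TYPE. A condensed model (the enum values are hypothetical; only the ordering matters):

    #include <cassert>

    // Hypothetical model of DoCheckInstanceType's interval test.
    enum InstanceType { TYPE_A = 10, TYPE_B = 11, TYPE_C = 12,
                        LAST_TYPE = 50 };

    bool InstanceTypeInRange(int type, int first, int last) {
      if (type < first) return false;            // DeoptimizeIf(below, ...)
      if (first == last) return type == first;   // single-type interval
      if (last == LAST_TYPE) return true;        // upper check omitted
      return type <= last;                       // DeoptimizeIf(above, ...)
    }

    int main() {
      assert(InstanceTypeInRange(TYPE_B, TYPE_A, TYPE_C));
      assert(!InstanceTypeInRange(TYPE_C, TYPE_A, TYPE_B));
      assert(InstanceTypeInRange(49, TYPE_A, LAST_TYPE));
    }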
2746
2747
2748 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
2749 ASSERT(instr->input()->IsRegister());
2750 Register reg = ToRegister(instr->input());
2751 __ cmp(reg, instr->hydrogen()->target());
2752 DeoptimizeIf(not_equal, instr->environment());
2753 }
2754
2755
2756 void LCodeGen::DoCheckMap(LCheckMap* instr) {
2757 LOperand* input = instr->input();
2758 ASSERT(input->IsRegister());
2759 Register reg = ToRegister(input);
2760 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2761 instr->hydrogen()->map());
2762 DeoptimizeIf(not_equal, instr->environment());
2763 }
2764
2765
2766 void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
2767 if (HEAP->InNewSpace(*prototype)) {
2768 Handle<JSGlobalPropertyCell> cell =
2769 FACTORY->NewJSGlobalPropertyCell(prototype);
2770 __ mov(result, Operand::Cell(cell));
2771 } else {
2772 __ mov(result, prototype);
2773 }
2774 }
2775
2776
2777 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
2778 Register reg = ToRegister(instr->temp());
2779
2780 Handle<JSObject> holder = instr->holder();
2781 Handle<Map> receiver_map = instr->receiver_map();
2782 Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
2783
2784 // Load prototype object.
2785 LoadPrototype(reg, current_prototype);
2786
2787 // Check prototype maps up to the holder.
2788 while (!current_prototype.is_identical_to(holder)) {
2789 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2790 Handle<Map>(current_prototype->map()));
2791 DeoptimizeIf(not_equal, instr->environment());
2792 current_prototype =
2793 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
2794 // Load next prototype object.
2795 LoadPrototype(reg, current_prototype);
2796 }
2797
2798 // Check the holder map.
2799 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2800 Handle<Map>(current_prototype->map()));
2801 DeoptimizeIf(not_equal, instr->environment());
2802 }
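
In other words, the emitted code pins every map on the chain from the receiver's prototype up to and including the holder; if any object on that path has since been given a new map, the optimized code deoptimizes. A hypothetical model of the walk (types and names are mine):

    #include <cassert>
    #include <vector>

    // Hypothetical model of DoCheckPrototypeMaps: each object on the chain
    // must still carry the map recorded at compile time.
    struct Obj { int map_id; const Obj* prototype; };

    bool ChainUnchanged(const Obj* proto, const Obj* holder,
                        const std::vector<int>& expected_maps) {
      size_t i = 0;
      const Obj* current = proto;
      while (true) {
        if (i >= expected_maps.size() ||
            current->map_id != expected_maps[i]) return false;  // deopt
        if (current == holder) return true;  // holder map checked last
        current = current->prototype;
        ++i;
      }
    }

    int main() {
      Obj holder = {2, nullptr};
      Obj proto  = {1, &holder};
      assert(ChainUnchanged(&proto, &holder, {1, 2}));
      assert(!ChainUnchanged(&proto, &holder, {1, 3}));
    }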
2803
2804
2805 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
2806 // Set up the parameters to the stub/runtime call.
2807 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
2808 __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
2809 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
2810 __ push(Immediate(instr->hydrogen()->constant_elements()));
2811
2812 // Pick the right runtime function or stub to call.
2813 int length = instr->hydrogen()->length();
2814 if (instr->hydrogen()->IsCopyOnWrite()) {
2815 ASSERT(instr->hydrogen()->depth() == 1);
2816 FastCloneShallowArrayStub::Mode mode =
2817 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
2818 FastCloneShallowArrayStub stub(mode, length);
2819 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2820 } else if (instr->hydrogen()->depth() > 1) {
2821 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
2822 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
2823 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
2824 } else {
2825 FastCloneShallowArrayStub::Mode mode =
2826 FastCloneShallowArrayStub::CLONE_ELEMENTS;
2827 FastCloneShallowArrayStub stub(mode, length);
2828 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2829 }
2830 }
2831
2832
2833 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
2834 // Set up the parameters to the stub/runtime call.
2835 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
2836 __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
2837 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
2838 __ push(Immediate(instr->hydrogen()->constant_properties()));
2839 __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
2840
2841 // Pick the right runtime function or stub to call.
2842 if (instr->hydrogen()->depth() > 1) {
2843 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
2844 } else {
2845 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
2846 }
2847 }
2848
2849
2850 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
2851 NearLabel materialized;
2852 // Registers will be used as follows:
2853 // edi = JS function.
2854 // ecx = literals array.
2855 // ebx = regexp literal.
2856 // eax = regexp literal clone.
2857 __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
2858 __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
2859 int literal_offset = FixedArray::kHeaderSize +
2860 instr->hydrogen()->literal_index() * kPointerSize;
2861 __ mov(ebx, FieldOperand(ecx, literal_offset));
2862 __ cmp(ebx, FACTORY->undefined_value());
2863 __ j(not_equal, &materialized);
2864
2865 // Create the regexp literal using a runtime function.
2866 // Result will be in eax.
2867 __ push(ecx);
2868 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
2869 __ push(Immediate(instr->hydrogen()->pattern()));
2870 __ push(Immediate(instr->hydrogen()->flags()));
2871 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
2872 __ mov(ebx, eax);
2873
2874 __ bind(&materialized);
2875 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
2876 Label allocated, runtime_allocate;
2877 __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
2878 __ jmp(&allocated);
2879
2880 __ bind(&runtime_allocate);
2881 __ push(ebx);
2882 __ push(Immediate(Smi::FromInt(size)));
2883 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
2884 __ pop(ebx);
2885
2886 __ bind(&allocated);
2887 // Copy the content into the newly allocated memory.
2888 // (Unroll copy loop once for better throughput).
2889 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
2890 __ mov(edx, FieldOperand(ebx, i));
2891 __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
2892 __ mov(FieldOperand(eax, i), edx);
2893 __ mov(FieldOperand(eax, i + kPointerSize), ecx);
2894 }
2895 if ((size % (2 * kPointerSize)) != 0) {
2896 __ mov(edx, FieldOperand(ebx, size - kPointerSize));
2897 __ mov(FieldOperand(eax, size - kPointerSize), edx);
2898 }
2899 }
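
The copy loop is unrolled by two pointer-sized words per iteration, with a single trailing word when the object size is odd in words. The same shape in plain C++ (a memcpy would do; this mirrors the emitted pattern, and the helper name is mine):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Sketch of the 2x-unrolled field copy used for the regexp literal clone.
    void CopyUnrolledBy2(uint32_t* dst, const uint32_t* src,
                         int size_in_bytes) {
      const int kPointerSize = 4;  // ia32
      int i = 0;
      for (; i < size_in_bytes - kPointerSize; i += 2 * kPointerSize) {
        dst[i / kPointerSize] = src[i / kPointerSize];
        dst[i / kPointerSize + 1] = src[i / kPointerSize + 1];
      }
      if ((size_in_bytes % (2 * kPointerSize)) != 0) {
        int last = (size_in_bytes - kPointerSize) / kPointerSize;
        dst[last] = src[last];  // the trailing word
      }
    }

    int main() {
      uint32_t src[5] = {1, 2, 3, 4, 5}, dst[5] = {0};
      CopyUnrolledBy2(dst, src, sizeof(src));
      assert(std::memcmp(dst, src, sizeof(src)) == 0);
    }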
2900
2901
2902 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2903 // Use the fast-case closure allocation code that allocates in new
2904 // space for nested functions that don't need their literals cloned.
2905 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
2906 bool pretenure = instr->hydrogen()->pretenure();
2907 if (shared_info->num_literals() == 0 && !pretenure) {
2908 FastNewClosureStub stub;
2909 __ push(Immediate(shared_info));
2910 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2911 } else {
2912 __ push(esi);
2913 __ push(Immediate(shared_info));
2914 __ push(Immediate(pretenure
2915 ? FACTORY->true_value()
2916 : FACTORY->false_value()));
2917 CallRuntime(Runtime::kNewClosure, 3, instr);
2918 }
2919 }
2920
2921
2922 void LCodeGen::DoTypeof(LTypeof* instr) {
2923 LOperand* input = instr->input();
2924 if (input->IsConstantOperand()) {
2925 __ push(ToImmediate(input));
2926 } else {
2927 __ push(ToOperand(input));
2928 }
2929 CallRuntime(Runtime::kTypeof, 1, instr);
2930 }
2931
2932
2933 void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
2934 Register input = ToRegister(instr->input());
2935 Register result = ToRegister(instr->result());
2936 Label true_label;
2937 Label false_label;
2938 NearLabel done;
2939
2940 Condition final_branch_condition = EmitTypeofIs(&true_label,
2941 &false_label,
2942 input,
2943 instr->type_literal());
2944 __ j(final_branch_condition, &true_label);
2945 __ bind(&false_label);
2946 __ mov(result, Handle<Object>(HEAP->false_value()));
2947 __ jmp(&done);
2948
2949 __ bind(&true_label);
2950 __ mov(result, Handle<Object>(HEAP->true_value()));
2951
2952 __ bind(&done);
2953 }
2954
2955
2956 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
2957 Register input = ToRegister(instr->input());
2958 int true_block = chunk_->LookupDestination(instr->true_block_id());
2959 int false_block = chunk_->LookupDestination(instr->false_block_id());
2960 Label* true_label = chunk_->GetAssemblyLabel(true_block);
2961 Label* false_label = chunk_->GetAssemblyLabel(false_block);
2962
2963 Condition final_branch_condition = EmitTypeofIs(true_label,
2964 false_label,
2965 input,
2966 instr->type_literal());
2967
2968 EmitBranch(true_block, false_block, final_branch_condition);
2969 }
2970
2971
2972 Condition LCodeGen::EmitTypeofIs(Label* true_label,
2973 Label* false_label,
2974 Register input,
2975 Handle<String> type_name) {
2976 Condition final_branch_condition = no_condition;
2977 if (type_name->Equals(HEAP->number_symbol())) {
2978 __ test(input, Immediate(kSmiTagMask));
2979 __ j(zero, true_label);
2980 __ cmp(FieldOperand(input, HeapObject::kMapOffset),
2981 FACTORY->heap_number_map());
2982 final_branch_condition = equal;
2983
2984 } else if (type_name->Equals(HEAP->string_symbol())) {
2985 __ test(input, Immediate(kSmiTagMask));
2986 __ j(zero, false_label);
2987 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
2988 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
2989 1 << Map::kIsUndetectable);
2990 __ j(not_zero, false_label);
2991 __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
2992 final_branch_condition = below;
2993
2994 } else if (type_name->Equals(HEAP->boolean_symbol())) {
2995 __ cmp(input, Handle<Object>(HEAP->true_value()));
2996 __ j(equal, true_label);
2997 __ cmp(input, Handle<Object>(HEAP->false_value()));
2998 final_branch_condition = equal;
2999
3000 } else if (type_name->Equals(HEAP->undefined_symbol())) {
3001 __ cmp(input, FACTORY->undefined_value());
3002 __ j(equal, true_label);
3003 __ test(input, Immediate(kSmiTagMask));
3004 __ j(zero, false_label);
3005 // Check for undetectable objects => true.
3006 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
3007 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
3008 1 << Map::kIsUndetectable);
3009 final_branch_condition = not_zero;
3010
3011 } else if (type_name->Equals(HEAP->function_symbol())) {
3012 __ test(input, Immediate(kSmiTagMask));
3013 __ j(zero, false_label);
3014 __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
3015 __ j(equal, true_label);
3016 // Regular expressions => 'function' (they are callable).
3017 __ CmpInstanceType(input, JS_REGEXP_TYPE);
3018 final_branch_condition = equal;
3019
3020 } else if (type_name->Equals(HEAP->object_symbol())) {
3021 __ test(input, Immediate(kSmiTagMask));
3022 __ j(zero, false_label);
3023 __ cmp(input, FACTORY->null_value());
3024 __ j(equal, true_label);
3025 // Regular expressions => 'function', not 'object'.
3026 __ CmpObjectType(input, JS_REGEXP_TYPE, input);
3027 __ j(equal, false_label);
3028 // Check for undetectable objects => false.
3029 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
3030 1 << Map::kIsUndetectable);
3031 __ j(not_zero, false_label);
3032 // Check for JS objects => true.
3033 __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
3034 __ j(below, false_label);
3035 __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
3036 final_branch_condition = below_equal;
3037
3038 } else {
3039 final_branch_condition = not_equal;
3040 __ jmp(false_label);
3041 // A dead branch instruction will be generated after this point.
3042 }
3043
3044 return final_branch_condition;
3045 }
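
The emitter above is JavaScript's typeof table with two deliberate quirks, both noted in the comments: regexps answer 'function' (they are callable in this vintage of V8) and undetectable objects answer 'undefined'. A condensed model of the classification (Kind is a hypothetical stand-in for the map and instance-type probes):

    #include <cassert>
    #include <cstring>

    // Condensed model of EmitTypeofIs's classification.
    enum Kind { kSmi, kHeapNumber, kString, kTrue, kFalse, kUndefined,
                kNull, kFunction, kRegExp, kUndetectable, kJSObject };

    const char* TypeofResult(Kind kind) {
      switch (kind) {
        case kSmi: case kHeapNumber:   return "number";
        case kString:                  return "string";
        case kTrue: case kFalse:       return "boolean";
        case kUndefined:
        case kUndetectable:            return "undefined";  // quirk 1
        case kFunction: case kRegExp:  return "function";   // quirk 2
        case kNull: case kJSObject:    return "object";
      }
      return "";
    }

    int main() {
      assert(std::strcmp(TypeofResult(kRegExp), "function") == 0);
      assert(std::strcmp(TypeofResult(kUndetectable), "undefined") == 0);
    }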
3046
3047
3048 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
3049 // No code is emitted for the lazy bailout instruction. It only captures the
3050 // environment after a call, to populate the safepoint data with deoptimization data.
3051 }
3052
3053
3054 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
3055 DeoptimizeIf(no_condition, instr->environment());
3056 }
3057
3058
3059 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
3060 LOperand* obj = instr->object();
3061 LOperand* key = instr->key();
3062 __ push(ToOperand(obj));
3063 if (key->IsConstantOperand()) {
3064 __ push(ToImmediate(key));
3065 } else {
3066 __ push(ToOperand(key));
3067 }
3068 RecordPosition(instr->pointer_map()->position());
3069 SafepointGenerator safepoint_generator(this,
3070 instr->pointer_map(),
3071 Safepoint::kNoDeoptimizationIndex);
3072 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
3073 }
3074
3075
3076 void LCodeGen::DoStackCheck(LStackCheck* instr) {
3077 // Perform stack overflow check.
3078 NearLabel done;
3079 ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
3080 __ cmp(esp, Operand::StaticVariable(stack_limit));
3081 __ j(above_equal, &done);
3082
3083 StackCheckStub stub;
3084 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3085 __ bind(&done);
3086 }
3087
3088
3089 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
3090 // This is a pseudo-instruction that ensures that the environment here is
3091 // properly registered for deoptimization and records the assembler's PC
3092 // offset.
3093 LEnvironment* environment = instr->environment();
3094 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
3095 instr->SpilledDoubleRegisterArray());
3096
3097 // If the environment were already registered, we would have no way of
3098 // backpatching it with the spill slot operands.
3099 ASSERT(!environment->HasBeenRegistered());
3100 RegisterEnvironmentForDeoptimization(environment);
3101 ASSERT(osr_pc_offset_ == -1);
3102 osr_pc_offset_ = masm()->pc_offset();
3103 }
3104
3105
3106 #undef __
3107
3108 } } // namespace v8::internal