Chromium Code Reviews

Side by Side Diff: src/ia32/codegen-ia32.cc

Issue 6811012: Remove some dead code. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 9 years, 8 months ago
OLD | NEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 10 matching lines...)
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #if defined(V8_TARGET_ARCH_IA32) 30 #if defined(V8_TARGET_ARCH_IA32)
31 31
32 #include "codegen-inl.h" 32 #include "codegen.h"
33 #include "bootstrapper.h"
34 #include "code-stubs.h"
35 #include "compiler.h"
36 #include "debug.h"
37 #include "ic-inl.h"
38 #include "parser.h"
39 #include "regexp-macro-assembler.h"
40 #include "register-allocator-inl.h"
41 #include "scopes.h"
42 #include "virtual-frame-inl.h"
43 33
44 namespace v8 { 34 namespace v8 {
45 namespace internal { 35 namespace internal {
46 36
47 #define __ ACCESS_MASM(masm)
48
49 // -------------------------------------------------------------------------
50 // Platform-specific FrameRegisterState functions.
51
52 void FrameRegisterState::Save(MacroAssembler* masm) const {
53 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
54 int action = registers_[i];
55 if (action == kPush) {
56 __ push(RegisterAllocator::ToRegister(i));
57 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
58 __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
59 }
60 }
61 }
62
63
64 void FrameRegisterState::Restore(MacroAssembler* masm) const {
65 // Restore registers in reverse order due to the stack.
66 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
67 int action = registers_[i];
68 if (action == kPush) {
69 __ pop(RegisterAllocator::ToRegister(i));
70 } else if (action != kIgnore) {
71 action &= ~kSyncedFlag;
72 __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
73 }
74 }
75 }
76
77
78 #undef __
79 #define __ ACCESS_MASM(masm_)
80
81 // -------------------------------------------------------------------------
82 // Platform-specific DeferredCode functions.
83
84 void DeferredCode::SaveRegisters() {
85 frame_state_.Save(masm_);
86 }
87
88
89 void DeferredCode::RestoreRegisters() {
90 frame_state_.Restore(masm_);
91 }
92
93 37
94 // ------------------------------------------------------------------------- 38 // -------------------------------------------------------------------------
95 // Platform-specific RuntimeCallHelper functions. 39 // Platform-specific RuntimeCallHelper functions.
96 40
97 void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
98 frame_state_->Save(masm);
99 }
100
101
102 void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
103 frame_state_->Restore(masm);
104 }
105
106
107 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { 41 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
108 masm->EnterInternalFrame(); 42 masm->EnterInternalFrame();
109 } 43 }
110 44
111 45
112 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { 46 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
113 masm->LeaveInternalFrame(); 47 masm->LeaveInternalFrame();
114 } 48 }
115 49
116 50
117 // -------------------------------------------------------------------------
118 // CodeGenState implementation.
119
120 CodeGenState::CodeGenState(CodeGenerator* owner)
121 : owner_(owner),
122 destination_(NULL),
123 previous_(NULL) {
124 owner_->set_state(this);
125 }
126
127
128 CodeGenState::CodeGenState(CodeGenerator* owner,
129 ControlDestination* destination)
130 : owner_(owner),
131 destination_(destination),
132 previous_(owner->state()) {
133 owner_->set_state(this);
134 }
135
136
137 CodeGenState::~CodeGenState() {
138 ASSERT(owner_->state() == this);
139 owner_->set_state(previous_);
140 }
141
142 // -------------------------------------------------------------------------
143 // CodeGenerator implementation.
144
145 CodeGenerator::CodeGenerator(MacroAssembler* masm)
146 : deferred_(8),
147 masm_(masm),
148 info_(NULL),
149 frame_(NULL),
150 allocator_(NULL),
151 state_(NULL),
152 loop_nesting_(0),
153 in_safe_int32_mode_(false),
154 safe_int32_mode_enabled_(true),
155 function_return_is_shadowed_(false),
156 in_spilled_code_(false),
157 jit_cookie_((FLAG_mask_constants_with_cookie) ?
158 V8::RandomPrivate(Isolate::Current()) : 0) {
159 }
160
161
162 // Calling conventions:
163 // ebp: caller's frame pointer
164 // esp: stack pointer
165 // edi: called JS function
166 // esi: callee's context
167
168 void CodeGenerator::Generate(CompilationInfo* info) {
169 // Record the position for debugging purposes.
170 CodeForFunctionPosition(info->function());
171 Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
172
173 // Initialize state.
174 info_ = info;
175 ASSERT(allocator_ == NULL);
176 RegisterAllocator register_allocator(this);
177 allocator_ = &register_allocator;
178 ASSERT(frame_ == NULL);
179 frame_ = new VirtualFrame();
180 set_in_spilled_code(false);
181
182 // Adjust for function-level loop nesting.
183 ASSERT_EQ(0, loop_nesting_);
184 loop_nesting_ = info->is_in_loop() ? 1 : 0;
185
186 masm()->isolate()->set_jump_target_compiling_deferred_code(false);
187
188 {
189 CodeGenState state(this);
190
191 // Entry:
192 // Stack: receiver, arguments, return address.
193 // ebp: caller's frame pointer
194 // esp: stack pointer
195 // edi: called JS function
196 // esi: callee's context
197 allocator_->Initialize();
198
199 #ifdef DEBUG
200 if (strlen(FLAG_stop_at) > 0 &&
201 info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
202 frame_->SpillAll();
203 __ int3();
204 }
205 #endif
206
207 frame_->Enter();
208
209 // Allocate space for locals and initialize them.
210 frame_->AllocateStackSlots();
211
212 // Allocate the local context if needed.
213 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
214 if (heap_slots > 0) {
215 Comment cmnt(masm_, "[ allocate local context");
216 // Allocate local context.
217 // Get outer context and create a new context based on it.
218 frame_->PushFunction();
219 Result context;
220 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
221 FastNewContextStub stub(heap_slots);
222 context = frame_->CallStub(&stub, 1);
223 } else {
224 context = frame_->CallRuntime(Runtime::kNewContext, 1);
225 }
226
227 // Update context local.
228 frame_->SaveContextRegister();
229
230 // Verify that the runtime call result and esi agree.
231 if (FLAG_debug_code) {
232 __ cmp(context.reg(), Operand(esi));
233 __ Assert(equal, "Runtime::NewContext should end up in esi");
234 }
235 }
236
237 // TODO(1241774): Improve this code:
238 // 1) only needed if we have a context
239 // 2) no need to recompute context ptr every single time
240 // 3) don't copy parameter operand code from SlotOperand!
241 {
242 Comment cmnt2(masm_, "[ copy context parameters into .context");
243 // Note that iteration order is relevant here! If we have the same
244 // parameter twice (e.g., function (x, y, x)), and that parameter
245 // needs to be copied into the context, it is the last argument
246 // passed for that parameter that must be copied. This is a rare
247 // case, so we don't check for it; instead we rely on the copying
248 // order: such a parameter is copied repeatedly into the same
249 // context location and thus the last value is what is seen inside
250 // the function.
251 for (int i = 0; i < scope()->num_parameters(); i++) {
252 Variable* par = scope()->parameter(i);
253 Slot* slot = par->AsSlot();
254 if (slot != NULL && slot->type() == Slot::CONTEXT) {
255 // The use of SlotOperand below is safe in unspilled code
256 // because the slot is guaranteed to be a context slot.
257 //
258 // There are no parameters in the global scope.
259 ASSERT(!scope()->is_global_scope());
260 frame_->PushParameterAt(i);
261 Result value = frame_->Pop();
262 value.ToRegister();
263
264 // SlotOperand loads context.reg() with the context object
265 // stored to, used below in RecordWrite.
266 Result context = allocator_->Allocate();
267 ASSERT(context.is_valid());
268 __ mov(SlotOperand(slot, context.reg()), value.reg());
269 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
270 Result scratch = allocator_->Allocate();
271 ASSERT(scratch.is_valid());
272 frame_->Spill(context.reg());
273 frame_->Spill(value.reg());
274 __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
275 }
276 }
277 }
278
279 // Store the arguments object. This must happen after context
280 // initialization because the arguments object may be stored in
281 // the context.
282 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
283 StoreArgumentsObject(true);
284 }
285
286 // Initialize ThisFunction reference if present.
287 if (scope()->is_function_scope() && scope()->function() != NULL) {
288 frame_->Push(FACTORY->the_hole_value());
289 StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
290 }
291
292
293 // Initialize the function return target after the locals are set
294 // up, because it needs the expected frame height from the frame.
295 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
296 function_return_is_shadowed_ = false;
297
298 // Generate code to 'execute' declarations and initialize functions
299 // (source elements). In case of an illegal redeclaration we need to
300 // handle that instead of processing the declarations.
301 if (scope()->HasIllegalRedeclaration()) {
302 Comment cmnt(masm_, "[ illegal redeclarations");
303 scope()->VisitIllegalRedeclaration(this);
304 } else {
305 Comment cmnt(masm_, "[ declarations");
306 ProcessDeclarations(scope()->declarations());
307 // Bail out if a stack-overflow exception occurred when processing
308 // declarations.
309 if (HasStackOverflow()) return;
310 }
311
312 if (FLAG_trace) {
313 frame_->CallRuntime(Runtime::kTraceEnter, 0);
314 // Ignore the return value.
315 }
316 CheckStack();
317
318 // Compile the body of the function in a vanilla state. Don't
319 // bother compiling all the code if the scope has an illegal
320 // redeclaration.
321 if (!scope()->HasIllegalRedeclaration()) {
322 Comment cmnt(masm_, "[ function body");
323 #ifdef DEBUG
324 bool is_builtin = info->isolate()->bootstrapper()->IsActive();
325 bool should_trace =
326 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
327 if (should_trace) {
328 frame_->CallRuntime(Runtime::kDebugTrace, 0);
329 // Ignore the return value.
330 }
331 #endif
332 VisitStatements(info->function()->body());
333
334 // Handle the return from the function.
335 if (has_valid_frame()) {
336 // If there is a valid frame, control flow can fall off the end of
337 // the body. In that case there is an implicit return statement.
338 ASSERT(!function_return_is_shadowed_);
339 CodeForReturnPosition(info->function());
340 frame_->PrepareForReturn();
341 Result undefined(FACTORY->undefined_value());
342 if (function_return_.is_bound()) {
343 function_return_.Jump(&undefined);
344 } else {
345 function_return_.Bind(&undefined);
346 GenerateReturnSequence(&undefined);
347 }
348 } else if (function_return_.is_linked()) {
349 // If the return target has dangling jumps to it, then we have not
350 // yet generated the return sequence. This can happen when (a)
351 // control does not flow off the end of the body so we did not
352 // compile an artificial return statement just above, and (b) there
353 // are return statements in the body but (c) they are all shadowed.
354 Result return_value;
355 function_return_.Bind(&return_value);
356 GenerateReturnSequence(&return_value);
357 }
358 }
359 }
360
361 // Adjust for function-level loop nesting.
362 ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
363 loop_nesting_ = 0;
364
365 // Code generation state must be reset.
366 ASSERT(state_ == NULL);
367 ASSERT(!function_return_is_shadowed_);
368 function_return_.Unuse();
369 DeleteFrame();
370
371 // Process any deferred code using the register allocator.
372 if (!HasStackOverflow()) {
373 info->isolate()->set_jump_target_compiling_deferred_code(true);
374 ProcessDeferred();
375 info->isolate()->set_jump_target_compiling_deferred_code(false);
376 }
377
378 // There is no need to delete the register allocator, it is a
379 // stack-allocated local.
380 allocator_ = NULL;
381 }
382
383
384 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
385 // Currently, this assertion will fail if we try to assign to
386 // a constant variable that is constant because it is read-only
387 // (such as the variable referring to a named function expression).
388 // We need to implement assignments to read-only variables.
389 // Ideally, we should do this during AST generation (by converting
390 // such assignments into expression statements); however, in general
391 // we may not be able to make the decision until past AST generation,
392 // that is when the entire program is known.
393 ASSERT(slot != NULL);
394 int index = slot->index();
395 switch (slot->type()) {
396 case Slot::PARAMETER:
397 return frame_->ParameterAt(index);
398
399 case Slot::LOCAL:
400 return frame_->LocalAt(index);
401
402 case Slot::CONTEXT: {
403 // Follow the context chain if necessary.
404 ASSERT(!tmp.is(esi)); // do not overwrite context register
405 Register context = esi;
406 int chain_length = scope()->ContextChainLength(slot->var()->scope());
407 for (int i = 0; i < chain_length; i++) {
408 // Load the closure.
409 // (All contexts, even 'with' contexts, have a closure,
410 // and it is the same for all contexts inside a function.
411 // There is no need to go to the function context first.)
412 __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
413 // Load the function context (which is the incoming, outer context).
414 __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
415 context = tmp;
416 }
417 // We may have a 'with' context now. Get the function context.
418 // (In fact this mov may never be needed, since the scope analysis
419 // may not permit a direct context access in this case and thus we are
420 // always at a function context. However it is safe to dereference be-
421 // cause the function context of a function context is itself. Before
422 // deleting this mov we should try to create a counter-example first,
423 // though...)
424 __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
425 return ContextOperand(tmp, index);
426 }
427
428 default:
429 UNREACHABLE();
430 return Operand(eax);
431 }
432 }
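In plain C++, the chain walk in SlotOperand looks roughly like the sketch below. The struct is a stand-in for V8's Context and JSFunction, not their real layout: CLOSURE_INDEX leads to the closure whose context field is the next-outer function context, and FCONTEXT_INDEX normalizes a possible 'with' context to its function context.

    struct Ctx {
      Ctx* closure_context;   // via CLOSURE_INDEX -> JSFunction::context
      Ctx* function_context;  // FCONTEXT_INDEX (a function context's is itself)
    };

    Ctx* WalkOut(Ctx* ctx, int chain_length) {
      for (int i = 0; i < chain_length; i++) {
        ctx = ctx->closure_context;  // hop out one scope level
      }
      return ctx->function_context;  // safe even if already a function context
    }

    int main() {
      Ctx outer{nullptr, nullptr};
      outer.function_context = &outer;
      Ctx inner{&outer, &inner};
      return WalkOut(&inner, 1) == &outer ? 0 : 1;
    }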
433
434
435 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
436 Result tmp,
437 JumpTarget* slow) {
438 ASSERT(slot->type() == Slot::CONTEXT);
439 ASSERT(tmp.is_register());
440 Register context = esi;
441
442 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
443 if (s->num_heap_slots() > 0) {
444 if (s->calls_eval()) {
445 // Check that extension is NULL.
446 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
447 Immediate(0));
448 slow->Branch(not_equal, not_taken);
449 }
450 __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
451 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
452 context = tmp.reg();
453 }
454 }
455 // Check that last extension is NULL.
456 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
457 slow->Branch(not_equal, not_taken);
458 __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
459 return ContextOperand(tmp.reg(), slot->index());
460 }
461
462
463 // Emit code to load the value of an expression to the top of the
464 // frame. If the expression is boolean-valued it may be compiled (or
465 // partially compiled) into control flow to the control destination.
466 // If force_control is true, control flow is forced.
467 void CodeGenerator::LoadCondition(Expression* expr,
468 ControlDestination* dest,
469 bool force_control) {
470 ASSERT(!in_spilled_code());
471 int original_height = frame_->height();
472
473 { CodeGenState new_state(this, dest);
474 Visit(expr);
475
476 // If we hit a stack overflow, we may not have actually visited
477 // the expression. In that case, we ensure that we have a
478 // valid-looking frame state because we will continue to generate
479 // code as we unwind the C++ stack.
480 //
481 // It's possible to have both a stack overflow and a valid frame
482 // state (e.g., a subexpression overflowed, visiting it returned
483 // with a dummied frame state, and visiting this expression
484 // returned with a normal-looking state).
485 if (HasStackOverflow() &&
486 !dest->is_used() &&
487 frame_->height() == original_height) {
488 dest->Goto(true);
489 }
490 }
491
492 if (force_control && !dest->is_used()) {
493 // Convert the TOS value into flow to the control destination.
494 ToBoolean(dest);
495 }
496
497 ASSERT(!(force_control && !dest->is_used()));
498 ASSERT(dest->is_used() || frame_->height() == original_height + 1);
499 }
500
501
502 void CodeGenerator::LoadAndSpill(Expression* expression) {
503 ASSERT(in_spilled_code());
504 set_in_spilled_code(false);
505 Load(expression);
506 frame_->SpillAll();
507 set_in_spilled_code(true);
508 }
509
510
511 void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
512 BreakTarget* unsafe_bailout) {
513 set_unsafe_bailout(unsafe_bailout);
514 set_in_safe_int32_mode(true);
515 Load(expr);
516 Result value = frame_->Pop();
517 ASSERT(frame_->HasNoUntaggedInt32Elements());
518 if (expr->GuaranteedSmiResult()) {
519 ConvertInt32ResultToSmi(&value);
520 } else {
521 ConvertInt32ResultToNumber(&value);
522 }
523 set_in_safe_int32_mode(false);
524 set_unsafe_bailout(NULL);
525 frame_->Push(&value);
526 }
527
528
529 void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
530 set_safe_int32_mode_enabled(false);
531 Load(expr);
532 set_safe_int32_mode_enabled(true);
533 }
534
535
536 void CodeGenerator::ConvertInt32ResultToSmi(Result* value) {
537 ASSERT(value->is_untagged_int32());
538 if (value->is_register()) {
539 __ add(value->reg(), Operand(value->reg()));
540 } else {
541 ASSERT(value->is_constant());
542 ASSERT(value->handle()->IsSmi());
543 }
544 value->set_untagged_int32(false);
545 value->set_type_info(TypeInfo::Smi());
546 }
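The single add above is the entire smi-tagging step: on ia32 a smi is its 31-bit payload shifted left by one with a zero tag bit, so doubling a value known to fit in 31 bits (GuaranteedSmiResult) tags it. A small sketch:

    #include <cassert>
    #include <cstdint>

    // Precondition: value fits in 31 signed bits, so doubling cannot overflow.
    int32_t SmiTag(int32_t untagged) { return untagged * 2; }  // add val, val
    int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }   // sar val, 1

    int main() {
      assert(SmiUntag(SmiTag(12345)) == 12345);
      assert((SmiTag(-7) & 1) == 0);  // kSmiTag == 0 in the low bit
      return 0;
    }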
547
548
549 void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
550 ASSERT(value->is_untagged_int32());
551 if (value->is_register()) {
552 Register val = value->reg();
553 JumpTarget done;
554 __ add(val, Operand(val));
555 done.Branch(no_overflow, value);
556 __ sar(val, 1);
557 // If there was an overflow, bits 30 and 31 of the original number disagree.
558 __ xor_(val, 0x80000000u);
559 if (CpuFeatures::IsSupported(SSE2)) {
560 CpuFeatures::Scope fscope(SSE2);
561 __ cvtsi2sd(xmm0, Operand(val));
562 } else {
563 // Move val to ST[0] in the FPU
564 // Push and pop are safe with respect to the virtual frame because
565 // all synced elements are below the actual stack pointer.
566 __ push(val);
567 __ fild_s(Operand(esp, 0));
568 __ pop(val);
569 }
570 Result scratch = allocator_->Allocate();
571 ASSERT(scratch.is_register());
572 Label allocation_failed;
573 __ AllocateHeapNumber(val, scratch.reg(),
574 no_reg, &allocation_failed);
575 VirtualFrame* clone = new VirtualFrame(frame_);
576 scratch.Unuse();
577 if (CpuFeatures::IsSupported(SSE2)) {
578 CpuFeatures::Scope fscope(SSE2);
579 __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
580 } else {
581 __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
582 }
583 done.Jump(value);
584
585 // Establish the virtual frame, cloned from where AllocateHeapNumber
586 // jumped to allocation_failed.
587 RegisterFile empty_regs;
588 SetFrame(clone, &empty_regs);
589 __ bind(&allocation_failed);
590 if (!CpuFeatures::IsSupported(SSE2)) {
591 // Pop the value from the floating point stack.
592 __ fstp(0);
593 }
594 unsafe_bailout_->Jump();
595
596 done.Bind(value);
597 } else {
598 ASSERT(value->is_constant());
599 }
600 value->set_untagged_int32(false);
601 value->set_type_info(TypeInfo::Integer32());
602 }
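The sar/xor pair above undoes the failed doubling: `add val, val` loses information exactly when bits 30 and 31 of the original disagree, and after the arithmetic shift back the only wrong bit is bit 31, so flipping it restores the value. A self-contained check, with the wrap-around add modeled in unsigned arithmetic (assumes arithmetic right shift of negative ints, as on ia32):

    #include <cassert>
    #include <cstdint>

    int32_t RecoverAfterOverflow(int32_t doubled) {
      int32_t v = doubled >> 1;                      // sar val, 1
      return v ^ static_cast<int32_t>(0x80000000u);  // xor val, 0x80000000
    }

    int main() {
      int32_t original = 0x40000000;  // bits 30 and 31 disagree: add overflows
      int32_t doubled =
          static_cast<int32_t>(static_cast<uint32_t>(original) * 2u);
      assert(RecoverAfterOverflow(doubled) == original);

      original = -0x40000001;         // also overflows on doubling
      doubled = static_cast<int32_t>(static_cast<uint32_t>(original) * 2u);
      assert(RecoverAfterOverflow(doubled) == original);
      return 0;
    }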
603
604
605 void CodeGenerator::Load(Expression* expr) {
606 #ifdef DEBUG
607 int original_height = frame_->height();
608 #endif
609 ASSERT(!in_spilled_code());
610
611 // If the expression should be a side-effect-free 32-bit int computation,
612 // compile that SafeInt32 path, and a bailout path.
613 if (!in_safe_int32_mode() &&
614 safe_int32_mode_enabled() &&
615 expr->side_effect_free() &&
616 expr->num_bit_ops() > 2 &&
617 CpuFeatures::IsSupported(SSE2)) {
618 BreakTarget unsafe_bailout;
619 JumpTarget done;
620 unsafe_bailout.set_expected_height(frame_->height());
621 LoadInSafeInt32Mode(expr, &unsafe_bailout);
622 done.Jump();
623
624 if (unsafe_bailout.is_linked()) {
625 unsafe_bailout.Bind();
626 LoadWithSafeInt32ModeDisabled(expr);
627 }
628 done.Bind();
629 } else {
630 JumpTarget true_target;
631 JumpTarget false_target;
632 ControlDestination dest(&true_target, &false_target, true);
633 LoadCondition(expr, &dest, false);
634
635 if (dest.false_was_fall_through()) {
636 // The false target was just bound.
637 JumpTarget loaded;
638 frame_->Push(FACTORY->false_value());
639 // There may be dangling jumps to the true target.
640 if (true_target.is_linked()) {
641 loaded.Jump();
642 true_target.Bind();
643 frame_->Push(FACTORY->true_value());
644 loaded.Bind();
645 }
646
647 } else if (dest.is_used()) {
648 // There is true, and possibly false, control flow (with true as
649 // the fall through).
650 JumpTarget loaded;
651 frame_->Push(FACTORY->true_value());
652 if (false_target.is_linked()) {
653 loaded.Jump();
654 false_target.Bind();
655 frame_->Push(FACTORY->false_value());
656 loaded.Bind();
657 }
658
659 } else {
660 // We have a valid value on top of the frame, but we still may
661 // have dangling jumps to the true and false targets from nested
662 // subexpressions (e.g., the left subexpressions of the
663 // short-circuited boolean operators).
664 ASSERT(has_valid_frame());
665 if (true_target.is_linked() || false_target.is_linked()) {
666 JumpTarget loaded;
667 loaded.Jump(); // Don't lose the current TOS.
668 if (true_target.is_linked()) {
669 true_target.Bind();
670 frame_->Push(FACTORY->true_value());
671 if (false_target.is_linked()) {
672 loaded.Jump();
673 }
674 }
675 if (false_target.is_linked()) {
676 false_target.Bind();
677 frame_->Push(FACTORY->false_value());
678 }
679 loaded.Bind();
680 }
681 }
682 }
683 ASSERT(has_valid_frame());
684 ASSERT(frame_->height() == original_height + 1);
685 }
686
687
688 void CodeGenerator::LoadGlobal() {
689 if (in_spilled_code()) {
690 frame_->EmitPush(GlobalObjectOperand());
691 } else {
692 Result temp = allocator_->Allocate();
693 __ mov(temp.reg(), GlobalObjectOperand());
694 frame_->Push(&temp);
695 }
696 }
697
698
699 void CodeGenerator::LoadGlobalReceiver() {
700 Result temp = allocator_->Allocate();
701 Register reg = temp.reg();
702 __ mov(reg, GlobalObjectOperand());
703 __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
704 frame_->Push(&temp);
705 }
706
707
708 void CodeGenerator::LoadTypeofExpression(Expression* expr) {
709 // Special handling of identifiers as subexpressions of typeof.
710 Variable* variable = expr->AsVariableProxy()->AsVariable();
711 if (variable != NULL && !variable->is_this() && variable->is_global()) {
712 // For a global variable we build the property reference
713 // <global>.<variable> and perform a (regular non-contextual) property
714 // load to make sure we do not get reference errors.
715 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
716 Literal key(variable->name());
717 Property property(&global, &key, RelocInfo::kNoPosition);
718 Reference ref(this, &property);
719 ref.GetValue();
720 } else if (variable != NULL && variable->AsSlot() != NULL) {
721 // For a variable that rewrites to a slot, we signal it is the immediate
722 // subexpression of a typeof.
723 LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
724 } else {
725 // Anything else can be handled normally.
726 Load(expr);
727 }
728 }
729
730
731 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
732 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
733
734 // In strict mode there is no need for shadow arguments.
735 ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
736
737 // We don't want to do lazy arguments allocation for functions that
738 // have heap-allocated contexts, because it interferes with the
739 // uninitialized const tracking in the context objects.
740 return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
741 ? EAGER_ARGUMENTS_ALLOCATION
742 : LAZY_ARGUMENTS_ALLOCATION;
743 }
744
745
746 Result CodeGenerator::StoreArgumentsObject(bool initial) {
747 ArgumentsAllocationMode mode = ArgumentsMode();
748 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
749
750 Comment cmnt(masm_, "[ store arguments object");
751 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
752 // When using lazy arguments allocation, we store the arguments marker value
753 // as a sentinel indicating that the arguments object hasn't been
754 // allocated yet.
755 frame_->Push(FACTORY->arguments_marker());
756 } else {
757 ArgumentsAccessStub stub(is_strict_mode()
758 ? ArgumentsAccessStub::NEW_STRICT
759 : ArgumentsAccessStub::NEW_NON_STRICT);
760 frame_->PushFunction();
761 frame_->PushReceiverSlotAddress();
762 frame_->Push(Smi::FromInt(scope()->num_parameters()));
763 Result result = frame_->CallStub(&stub, 3);
764 frame_->Push(&result);
765 }
766
767 Variable* arguments = scope()->arguments();
768 Variable* shadow = scope()->arguments_shadow();
769
770 ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
771 ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
772 scope()->is_strict_mode());
773
774 JumpTarget done;
775 bool skip_arguments = false;
776 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
777 // We have to skip storing into the arguments slot if it has
778 // already been written to. This can happen if a function
779 // has a local variable named 'arguments'.
780 LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
781 Result probe = frame_->Pop();
782 if (probe.is_constant()) {
783 // We have to skip updating the arguments object if it has
784 // been assigned a proper value.
785 skip_arguments = !probe.handle()->IsArgumentsMarker();
786 } else {
787 __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
788 probe.Unuse();
789 done.Branch(not_equal);
790 }
791 }
792 if (!skip_arguments) {
793 StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
794 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
795 }
796 if (shadow != NULL) {
797 StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
798 }
799 return frame_->Pop();
800 }
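The lazy path above is a classic sentinel protocol: a dedicated marker object stands in for the arguments object until it is genuinely needed, and a later probe only allocates if the slot still holds the marker. A toy sketch; the real sentinel is FACTORY->arguments_marker(), and the names here are illustrative:

    #include <cassert>

    struct Obj {};
    Obj kArgumentsMarker;      // unique sentinel identity
    Obj kRealArgumentsObject;  // stands in for the allocated object

    Obj* EnsureArguments(Obj** slot) {
      if (*slot == &kArgumentsMarker) {
        *slot = &kRealArgumentsObject;  // allocate on first real use
      }
      return *slot;  // a user write to 'arguments' simply bypasses this
    }

    int main() {
      Obj* slot = &kArgumentsMarker;  // StoreArgumentsObject(initial == true)
      assert(EnsureArguments(&slot) == &kRealArgumentsObject);
      return 0;
    }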
801
802 // -------------------------------------------------------------------------
803 // CodeGenerator implementation of variables, lookups, and stores.
804
805 Reference::Reference(CodeGenerator* cgen,
806 Expression* expression,
807 bool persist_after_get)
808 : cgen_(cgen),
809 expression_(expression),
810 type_(ILLEGAL),
811 persist_after_get_(persist_after_get) {
812 cgen->LoadReference(this);
813 }
814
815
816 Reference::~Reference() {
817 ASSERT(is_unloaded() || is_illegal());
818 }
819
820
821 void CodeGenerator::LoadReference(Reference* ref) {
822 // References are loaded from both spilled and unspilled code. Set the
823 // state to unspilled to allow that (and explicitly spill after
824 // construction at the construction sites).
825 bool was_in_spilled_code = in_spilled_code_;
826 in_spilled_code_ = false;
827
828 Comment cmnt(masm_, "[ LoadReference");
829 Expression* e = ref->expression();
830 Property* property = e->AsProperty();
831 Variable* var = e->AsVariableProxy()->AsVariable();
832
833 if (property != NULL) {
834 // The expression is either a property or a variable proxy that rewrites
835 // to a property.
836 Load(property->obj());
837 if (property->key()->IsPropertyName()) {
838 ref->set_type(Reference::NAMED);
839 } else {
840 Load(property->key());
841 ref->set_type(Reference::KEYED);
842 }
843 } else if (var != NULL) {
844 // The expression is a variable proxy that does not rewrite to a
845 // property. Global variables are treated as named property references.
846 if (var->is_global()) {
847 // If eax is free, the register allocator prefers it. Thus the code
848 // generator will load the global object into eax, which is where
849 // LoadIC wants it. Most uses of Reference call LoadIC directly
850 // after the reference is created.
851 frame_->Spill(eax);
852 LoadGlobal();
853 ref->set_type(Reference::NAMED);
854 } else {
855 ASSERT(var->AsSlot() != NULL);
856 ref->set_type(Reference::SLOT);
857 }
858 } else {
859 // Anything else is a runtime error.
860 Load(e);
861 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
862 }
863
864 in_spilled_code_ = was_in_spilled_code;
865 }
866
867
868 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
869 // convert it to a boolean in the condition code register or jump to
870 // 'false_target'/'true_target' as appropriate.
871 void CodeGenerator::ToBoolean(ControlDestination* dest) {
872 Comment cmnt(masm_, "[ ToBoolean");
873
874 // The value to convert should be popped from the frame.
875 Result value = frame_->Pop();
876 value.ToRegister();
877
878 if (value.is_integer32()) { // Also takes Smi case.
879 Comment cmnt(masm_, "ONLY_INTEGER_32");
880 if (FLAG_debug_code) {
881 Label ok;
882 __ AbortIfNotNumber(value.reg());
883 __ test(value.reg(), Immediate(kSmiTagMask));
884 __ j(zero, &ok);
885 __ fldz();
886 __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
887 __ FCmp();
888 __ j(not_zero, &ok);
889 __ Abort("Smi was wrapped in HeapNumber in output from bitop");
890 __ bind(&ok);
891 }
892 // In the integer32 case there are no Smis hidden in heap numbers, so we
893 // need only test for Smi zero.
894 __ test(value.reg(), Operand(value.reg()));
895 dest->false_target()->Branch(zero);
896 value.Unuse();
897 dest->Split(not_zero);
898 } else if (value.is_number()) {
899 Comment cmnt(masm_, "ONLY_NUMBER");
900 // Fast case if TypeInfo indicates only numbers.
901 if (FLAG_debug_code) {
902 __ AbortIfNotNumber(value.reg());
903 }
904 // Smi => false iff zero.
905 STATIC_ASSERT(kSmiTag == 0);
906 __ test(value.reg(), Operand(value.reg()));
907 dest->false_target()->Branch(zero);
908 __ test(value.reg(), Immediate(kSmiTagMask));
909 dest->true_target()->Branch(zero);
910 __ fldz();
911 __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
912 __ FCmp();
913 value.Unuse();
914 dest->Split(not_zero);
915 } else {
916 // Fast case checks.
917 // 'false' => false.
918 __ cmp(value.reg(), FACTORY->false_value());
919 dest->false_target()->Branch(equal);
920
921 // 'true' => true.
922 __ cmp(value.reg(), FACTORY->true_value());
923 dest->true_target()->Branch(equal);
924
925 // 'undefined' => false.
926 __ cmp(value.reg(), FACTORY->undefined_value());
927 dest->false_target()->Branch(equal);
928
929 // Smi => false iff zero.
930 STATIC_ASSERT(kSmiTag == 0);
931 __ test(value.reg(), Operand(value.reg()));
932 dest->false_target()->Branch(zero);
933 __ test(value.reg(), Immediate(kSmiTagMask));
934 dest->true_target()->Branch(zero);
935
936 // Call the stub for all other cases.
937 frame_->Push(&value); // Undo the Pop() from above.
938 ToBooleanStub stub;
939 Result temp = frame_->CallStub(&stub, 1);
940 // Convert the result to a condition code.
941 __ test(temp.reg(), Operand(temp.reg()));
942 temp.Unuse();
943 dest->Split(not_equal);
944 }
945 }
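The branch order above mirrors ECMA-262 ToBoolean, with fast paths ordered by expected frequency and everything else deferred to ToBooleanStub. A value-level sketch with toy types (heap layout elided); note that the FCmp against zero also sends NaN to false, matching the spec:

    #include <cmath>
    #include <optional>

    enum class Kind { False, True, Undefined, Smi, HeapNumber, Other };
    struct Value { Kind kind; double num = 0; };

    // Fast paths only; strings, objects, etc. fall through to the stub
    // (modeled as nullopt).
    std::optional<bool> ToBooleanFastPath(const Value& v) {
      switch (v.kind) {
        case Kind::False:
        case Kind::Undefined:  return false;
        case Kind::True:       return true;
        case Kind::Smi:        return v.num != 0;  // smi => false iff zero
        case Kind::HeapNumber: return v.num != 0 && !std::isnan(v.num);
        default:               return std::nullopt;  // ToBooleanStub case
      }
    }

    int main() {
      return ToBooleanFastPath({Kind::Smi, 0}).value() ? 1 : 0;
    }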
946
947
948 // Perform or call the specialized stub for a binary operation. Requires the
949 // three registers left, right and dst to be distinct and spilled. This
950 // deferred operation has up to three entry points: The main one calls the
951 // runtime system. The second is for when the result is a non-Smi. The
952 // third is for when at least one of the inputs is non-Smi and we have SSE2.
953 class DeferredInlineBinaryOperation: public DeferredCode {
954 public:
955 DeferredInlineBinaryOperation(Token::Value op,
956 Register dst,
957 Register left,
958 Register right,
959 TypeInfo left_info,
960 TypeInfo right_info,
961 OverwriteMode mode)
962 : op_(op), dst_(dst), left_(left), right_(right),
963 left_info_(left_info), right_info_(right_info), mode_(mode) {
964 set_comment("[ DeferredInlineBinaryOperation");
965 ASSERT(!left.is(right));
966 }
967
968 virtual void Generate();
969
970 // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
971 // Exit().
972 virtual bool AutoSaveAndRestore() { return false; }
973
974 void JumpToAnswerOutOfRange(Condition cond);
975 void JumpToConstantRhs(Condition cond, Smi* smi_value);
976 Label* NonSmiInputLabel();
977
978 private:
979 void GenerateAnswerOutOfRange();
980 void GenerateNonSmiInput();
981
982 Token::Value op_;
983 Register dst_;
984 Register left_;
985 Register right_;
986 TypeInfo left_info_;
987 TypeInfo right_info_;
988 OverwriteMode mode_;
989 Label answer_out_of_range_;
990 Label non_smi_input_;
991 Label constant_rhs_;
992 Smi* smi_value_;
993 };
994
995
996 Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
997 if (Token::IsBitOp(op_) &&
998 CpuFeatures::IsSupported(SSE2)) {
999 return &non_smi_input_;
1000 } else {
1001 return entry_label();
1002 }
1003 }
1004
1005
1006 void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
1007 __ j(cond, &answer_out_of_range_);
1008 }
1009
1010
1011 void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
1012 Smi* smi_value) {
1013 smi_value_ = smi_value;
1014 __ j(cond, &constant_rhs_);
1015 }
1016
1017
1018 void DeferredInlineBinaryOperation::Generate() {
1019 // Registers are not saved implicitly for this stub, so we should not
1020 // tread on the registers that were not passed to us.
1021 if (CpuFeatures::IsSupported(SSE2) &&
1022 ((op_ == Token::ADD) ||
1023 (op_ == Token::SUB) ||
1024 (op_ == Token::MUL) ||
1025 (op_ == Token::DIV))) {
1026 CpuFeatures::Scope use_sse2(SSE2);
1027 Label call_runtime, after_alloc_failure;
1028 Label left_smi, right_smi, load_right, do_op;
1029 if (!left_info_.IsSmi()) {
1030 __ test(left_, Immediate(kSmiTagMask));
1031 __ j(zero, &left_smi);
1032 if (!left_info_.IsNumber()) {
1033 __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
1034 FACTORY->heap_number_map());
1035 __ j(not_equal, &call_runtime);
1036 }
1037 __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
1038 if (mode_ == OVERWRITE_LEFT) {
1039 __ mov(dst_, left_);
1040 }
1041 __ jmp(&load_right);
1042
1043 __ bind(&left_smi);
1044 } else {
1045 if (FLAG_debug_code) __ AbortIfNotSmi(left_);
1046 }
1047 __ SmiUntag(left_);
1048 __ cvtsi2sd(xmm0, Operand(left_));
1049 __ SmiTag(left_);
1050 if (mode_ == OVERWRITE_LEFT) {
1051 Label alloc_failure;
1052 __ push(left_);
1053 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
1054 __ pop(left_);
1055 }
1056
1057 __ bind(&load_right);
1058 if (!right_info_.IsSmi()) {
1059 __ test(right_, Immediate(kSmiTagMask));
1060 __ j(zero, &right_smi);
1061 if (!right_info_.IsNumber()) {
1062 __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
1063 FACTORY->heap_number_map());
1064 __ j(not_equal, &call_runtime);
1065 }
1066 __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
1067 if (mode_ == OVERWRITE_RIGHT) {
1068 __ mov(dst_, right_);
1069 } else if (mode_ == NO_OVERWRITE) {
1070 Label alloc_failure;
1071 __ push(left_);
1072 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
1073 __ pop(left_);
1074 }
1075 __ jmp(&do_op);
1076
1077 __ bind(&right_smi);
1078 } else {
1079 if (FLAG_debug_code) __ AbortIfNotSmi(right_);
1080 }
1081 __ SmiUntag(right_);
1082 __ cvtsi2sd(xmm1, Operand(right_));
1083 __ SmiTag(right_);
1084 if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
1085 __ push(left_);
1086 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
1087 __ pop(left_);
1088 }
1089
1090 __ bind(&do_op);
1091 switch (op_) {
1092 case Token::ADD: __ addsd(xmm0, xmm1); break;
1093 case Token::SUB: __ subsd(xmm0, xmm1); break;
1094 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1095 case Token::DIV: __ divsd(xmm0, xmm1); break;
1096 default: UNREACHABLE();
1097 }
1098 __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
1099 Exit();
1100
1101
1102 __ bind(&after_alloc_failure);
1103 __ pop(left_);
1104 __ bind(&call_runtime);
1105 }
1106 // Register spilling is not done implicitly for this stub.
1107 // We can't postpone it any more now though.
1108 SaveRegisters();
1109
1110 GenericBinaryOpStub stub(op_,
1111 mode_,
1112 NO_SMI_CODE_IN_STUB,
1113 TypeInfo::Combine(left_info_, right_info_));
1114 stub.GenerateCall(masm_, left_, right_);
1115 if (!dst_.is(eax)) __ mov(dst_, eax);
1116 RestoreRegisters();
1117 Exit();
1118
1119 if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
1120 GenerateNonSmiInput();
1121 }
1122 if (answer_out_of_range_.is_linked()) {
1123 GenerateAnswerOutOfRange();
1124 }
1125 }
1126
1127
1128 void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
1129 // We know at least one of the inputs was not a Smi.
1130 // This is a third entry point into the deferred code.
1131 // We may not overwrite left_ because we want to be able
1132 // to call the handling code for a non-smi answer, and it
1133 // might want to overwrite the heap number in left_.
1134 ASSERT(!right_.is(dst_));
1135 ASSERT(!left_.is(dst_));
1136 ASSERT(!left_.is(right_));
1137 // This entry point is used for bit ops where the right hand side
1138 // is a constant Smi and the left hand side is a heap object. It
1139 // is also used for bit ops where both sides are unknown, but where
1140 // at least one of them is a heap object.
1141 bool rhs_is_constant = constant_rhs_.is_linked();
1142 // We can't generate code for both cases.
1143 ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
1144
1145 if (FLAG_debug_code) {
1146 __ int3(); // We don't fall through into this code.
1147 }
1148
1149 __ bind(&non_smi_input_);
1150
1151 if (rhs_is_constant) {
1152 __ bind(&constant_rhs_);
1153 // In this case the input is a heap object and it is in the dst_ register.
1154 // The left_ and right_ registers have not been initialized yet.
1155 __ mov(right_, Immediate(smi_value_));
1156 __ mov(left_, Operand(dst_));
1157 if (!CpuFeatures::IsSupported(SSE2)) {
1158 __ jmp(entry_label());
1159 return;
1160 } else {
1161 CpuFeatures::Scope use_sse2(SSE2);
1162 __ JumpIfNotNumber(dst_, left_info_, entry_label());
1163 __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
1164 __ SmiUntag(right_);
1165 }
1166 } else {
1167 // We know we have SSE2 here because otherwise the label is not linked (see
1168 // NonSmiInputLabel).
1169 CpuFeatures::Scope use_sse2(SSE2);
1170 // Handle the non-constant right hand side situation:
1171 if (left_info_.IsSmi()) {
1172 // Right is a heap object.
1173 __ JumpIfNotNumber(right_, right_info_, entry_label());
1174 __ ConvertToInt32(right_, right_, dst_, right_info_, entry_label());
1175 __ mov(dst_, Operand(left_));
1176 __ SmiUntag(dst_);
1177 } else if (right_info_.IsSmi()) {
1178 // Left is a heap object.
1179 __ JumpIfNotNumber(left_, left_info_, entry_label());
1180 __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
1181 __ SmiUntag(right_);
1182 } else {
1183 // Here we don't know if it's one or both that is a heap object.
1184 Label only_right_is_heap_object, got_both;
1185 __ mov(dst_, Operand(left_));
1186 __ SmiUntag(dst_, &only_right_is_heap_object);
1187 // Left was a heap object.
1188 __ JumpIfNotNumber(left_, left_info_, entry_label());
1189 __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
1190 __ SmiUntag(right_, &got_both);
1191 // Both were heap objects.
1192 __ rcl(right_, 1); // Put tag back.
1193 __ JumpIfNotNumber(right_, right_info_, entry_label());
1194 __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
1195 __ jmp(&got_both);
1196 __ bind(&only_right_is_heap_object);
1197 __ JumpIfNotNumber(right_, right_info_, entry_label());
1198 __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
1199 __ bind(&got_both);
1200 }
1201 }
1202 ASSERT(op_ == Token::BIT_AND ||
1203 op_ == Token::BIT_OR ||
1204 op_ == Token::BIT_XOR ||
1205 right_.is(ecx));
1206 switch (op_) {
1207 case Token::BIT_AND: __ and_(dst_, Operand(right_)); break;
1208 case Token::BIT_OR: __ or_(dst_, Operand(right_)); break;
1209 case Token::BIT_XOR: __ xor_(dst_, Operand(right_)); break;
1210 case Token::SHR: __ shr_cl(dst_); break;
1211 case Token::SAR: __ sar_cl(dst_); break;
1212 case Token::SHL: __ shl_cl(dst_); break;
1213 default: UNREACHABLE();
1214 }
1215 if (op_ == Token::SHR) {
1216 // Check that the *unsigned* result fits in a smi. Neither of
1217 // the two high-order bits can be set:
1218 // * 0x80000000: high bit would be lost when smi tagging.
1219 // * 0x40000000: this bit would make the number negative when smi
1220 // tagged.
1221 __ test(dst_, Immediate(0xc0000000));
1222 __ j(not_zero, &answer_out_of_range_);
1223 } else {
1224 // Check that the *signed* result fits in a smi.
1225 __ cmp(dst_, 0xc0000000);
1226 __ j(negative, &answer_out_of_range_);
1227 }
1228 __ SmiTag(dst_);
1229 Exit();
1230 }
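Both range checks above exploit the fact that a smi holds 31 signed bits. In C++ terms (a sketch, with the cmp/j(negative) pair rewritten as explicit arithmetic):

    #include <cassert>
    #include <cstdint>

    bool UnsignedFitsInSmi(uint32_t x) {  // after SHR
      return (x & 0xc0000000u) == 0;      // test dst_, 0xc0000000
    }

    bool SignedFitsInSmi(int32_t x) {     // other bit ops
      // cmp dst_, 0xc0000000 / j(negative): the sign bit of
      // x - 0xc0000000 (== x + 0x40000000 mod 2^32) is set exactly
      // when x lies outside [-2^30, 2^30 - 1].
      return ((static_cast<uint32_t>(x) + 0x40000000u) & 0x80000000u) == 0;
    }

    int main() {
      assert(SignedFitsInSmi(0x3fffffff) && !SignedFitsInSmi(0x40000000));
      assert(SignedFitsInSmi(-0x40000000) && !SignedFitsInSmi(-0x40000001));
      assert(UnsignedFitsInSmi(0x3fffffffu) && !UnsignedFitsInSmi(0x40000000u));
      return 0;
    }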
1231
1232
1233 void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
1234 Label after_alloc_failure2;
1235 Label allocation_ok;
1236 __ bind(&after_alloc_failure2);
1237 // We have to allocate a number, causing a GC, while keeping hold of
1238 // the answer in dst_. The answer is not a Smi. We can't just call the
1239 // runtime shift function here because we already threw away the inputs.
1240 __ xor_(left_, Operand(left_));
1241 __ shl(dst_, 1); // Put top bit in carry flag and Smi tag the low bits.
1242 __ rcr(left_, 1); // Rotate with carry.
1243 __ push(dst_); // Smi tagged low 31 bits.
1244 __ push(left_); // 0 or 0x80000000, which is Smi tagged in both cases.
1245 __ CallRuntime(Runtime::kNumberAlloc, 0);
1246 if (!left_.is(eax)) {
1247 __ mov(left_, eax);
1248 }
1249 __ pop(right_); // High bit.
1250 __ pop(dst_); // Low 31 bits.
1251 __ shr(dst_, 1); // Put 0 in top bit.
1252 __ or_(dst_, Operand(right_));
1253 __ jmp(&allocation_ok);
1254
1255 // This is the second entry point to the deferred code. It is used only by
1256 // the bit operations.
1257 // The dst_ register has the answer. It is not Smi tagged. If mode_ is
1258 // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
1259 // or a Smi.
1260 // Put a heap number pointer in left_.
1261 __ bind(&answer_out_of_range_);
1262 SaveRegisters();
1263 if (mode_ == OVERWRITE_LEFT) {
1264 __ test(left_, Immediate(kSmiTagMask));
1265 __ j(not_zero, &allocation_ok);
1266 }
1267 // This trashes right_.
1268 __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
1269 __ bind(&allocation_ok);
1270 if (CpuFeatures::IsSupported(SSE2) &&
1271 op_ != Token::SHR) {
1272 CpuFeatures::Scope use_sse2(SSE2);
1273 ASSERT(Token::IsBitOp(op_));
1274 // Signed conversion.
1275 __ cvtsi2sd(xmm0, Operand(dst_));
1276 __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
1277 } else {
1278 if (op_ == Token::SHR) {
1279 __ push(Immediate(0)); // High word of unsigned value.
1280 __ push(dst_);
1281 __ fild_d(Operand(esp, 0));
1282 __ Drop(2);
1283 } else {
1284 ASSERT(Token::IsBitOp(op_));
1285 __ push(dst_);
1286 __ fild_s(Operand(esp, 0)); // Signed conversion.
1287 __ pop(dst_);
1288 }
1289 __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
1290 }
1291 __ mov(dst_, left_);
1292 RestoreRegisters();
1293 Exit();
1294 }
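The shl/rcr push dance above keeps a raw 32-bit answer alive across a GC by splitting it into two values that both look like smis (low bit clear): the smi-tagged low 31 bits, and the top bit isolated as 0 or 0x80000000. Reconstructing it afterwards is exact, as this sketch shows:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t answer = 0xdeadbeefu;
      uint32_t low  = answer << 1;           // shl dst, 1 (smi-tagged low 31)
      uint32_t high = answer & 0x80000000u;  // captured via rcr left, 1
      // ...GC-safe across the Runtime::kNumberAlloc call...
      uint32_t rebuilt = (low >> 1) | high;  // shr dst, 1 / or dst, right
      assert(rebuilt == answer);
      return 0;
    }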
1295
1296
1297 static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
1298 Token::Value op,
1299 const Result& right,
1300 const Result& left) {
1301 // Set TypeInfo of result according to the operation performed.
1302 // Rely on the fact that smis have a 31 bit payload on ia32.
1303 STATIC_ASSERT(kSmiValueSize == 31);
1304 switch (op) {
1305 case Token::COMMA:
1306 return right.type_info();
1307 case Token::OR:
1308 case Token::AND:
1309 // Result type can be either of the two input types.
1310 return operands_type;
1311 case Token::BIT_AND: {
1312 // Anding with positive Smis will give you a Smi.
1313 if (right.is_constant() && right.handle()->IsSmi() &&
1314 Smi::cast(*right.handle())->value() >= 0) {
1315 return TypeInfo::Smi();
1316 } else if (left.is_constant() && left.handle()->IsSmi() &&
1317 Smi::cast(*left.handle())->value() >= 0) {
1318 return TypeInfo::Smi();
1319 }
1320 return (operands_type.IsSmi())
1321 ? TypeInfo::Smi()
1322 : TypeInfo::Integer32();
1323 }
1324 case Token::BIT_OR: {
1325 // Oring with negative Smis will give you a Smi.
1326 if (right.is_constant() && right.handle()->IsSmi() &&
1327 Smi::cast(*right.handle())->value() < 0) {
1328 return TypeInfo::Smi();
1329 } else if (left.is_constant() && left.handle()->IsSmi() &&
1330 Smi::cast(*left.handle())->value() < 0) {
1331 return TypeInfo::Smi();
1332 }
1333 return (operands_type.IsSmi())
1334 ? TypeInfo::Smi()
1335 : TypeInfo::Integer32();
1336 }
1337 case Token::BIT_XOR:
1338 // Result is always a 32-bit integer. The smi property of the inputs is preserved.
1339 return (operands_type.IsSmi())
1340 ? TypeInfo::Smi()
1341 : TypeInfo::Integer32();
1342 case Token::SAR:
1343 if (left.is_smi()) return TypeInfo::Smi();
1344 // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
1345 // Shift amount is masked with 0x1F (ECMA standard 11.7.2).
1346 return (right.is_constant() && right.handle()->IsSmi()
1347 && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
1348 ? TypeInfo::Smi()
1349 : TypeInfo::Integer32();
1350 case Token::SHR:
1351 // Result is a smi if we shift by a constant >= 2, an integer32 if
1352 // we shift by 1, and an unsigned 32-bit integer if we shift by 0.
1353 if (right.is_constant() && right.handle()->IsSmi()) {
1354 int shift_amount = Smi::cast(*right.handle())->value() & 0x1F;
1355 if (shift_amount > 1) {
1356 return TypeInfo::Smi();
1357 } else if (shift_amount > 0) {
1358 return TypeInfo::Integer32();
1359 }
1360 }
1361 return TypeInfo::Number();
1362 case Token::ADD:
1363 if (operands_type.IsSmi()) {
1364 // The Integer32 range is big enough to take the sum of any two Smis.
1365 return TypeInfo::Integer32();
1366 } else if (operands_type.IsNumber()) {
1367 return TypeInfo::Number();
1368 } else if (left.type_info().IsString() || right.type_info().IsString()) {
1369 return TypeInfo::String();
1370 } else {
1371 return TypeInfo::Unknown();
1372 }
1373 case Token::SHL:
1374 return TypeInfo::Integer32();
1375 case Token::SUB:
1376 // The Integer32 range is big enough to take the difference of any two
1377 // Smis.
1378 return (operands_type.IsSmi()) ?
1379 TypeInfo::Integer32() :
1380 TypeInfo::Number();
1381 case Token::MUL:
1382 case Token::DIV:
1383 case Token::MOD:
1384 // Result is always a number.
1385 return TypeInfo::Number();
1386 default:
1387 UNREACHABLE();
1388 }
1389 UNREACHABLE();
1390 return TypeInfo::Unknown();
1391 }
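The SHR typing above follows from bit counts alone: an unsigned 32-bit value shifted right by k has at most 32 - k significant bits. A quick illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0xffffffffu;          // worst case
      assert((x >> 2) <= 0x3fffffffu);   // shift >= 2: always fits a smi
      assert((x >> 1) <= 0x7fffffffu);   // shift 1: fits int32, maybe not smi
      // shift 0: the value may need the full uint32 range, so only
      // TypeInfo::Number() is safe.
      return 0;
    }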
1392
1393
1394 void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
1395 OverwriteMode overwrite_mode) {
1396 Comment cmnt(masm_, "[ BinaryOperation");
1397 Token::Value op = expr->op();
1398 Comment cmnt_token(masm_, Token::String(op));
1399
1400 if (op == Token::COMMA) {
1401 // Simply discard left value.
1402 frame_->Nip(1);
1403 return;
1404 }
1405
1406 Result right = frame_->Pop();
1407 Result left = frame_->Pop();
1408
1409 if (op == Token::ADD) {
1410 const bool left_is_string = left.type_info().IsString();
1411 const bool right_is_string = right.type_info().IsString();
1412 // Make sure constant strings have string type info.
1413 ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
1414 left_is_string);
1415 ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
1416 right_is_string);
1417 if (left_is_string || right_is_string) {
1418 frame_->Push(&left);
1419 frame_->Push(&right);
1420 Result answer;
1421 if (left_is_string) {
1422 if (right_is_string) {
1423 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
1424 answer = frame_->CallStub(&stub, 2);
1425 } else {
1426 StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB);
1427 answer = frame_->CallStub(&stub, 2);
1428 }
1429 } else if (right_is_string) {
1430 StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB);
1431 answer = frame_->CallStub(&stub, 2);
1432 }
1433 answer.set_type_info(TypeInfo::String());
1434 frame_->Push(&answer);
1435 return;
1436 }
1437 // Neither operand is known to be a string.
1438 }
1439
1440 bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
1441 bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
1442 bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
1443 bool right_is_non_smi_constant =
1444 right.is_constant() && !right.handle()->IsSmi();
1445
1446 if (left_is_smi_constant && right_is_smi_constant) {
1447 // Compute the constant result at compile time, and leave it on the frame.
1448 int left_int = Smi::cast(*left.handle())->value();
1449 int right_int = Smi::cast(*right.handle())->value();
1450 if (FoldConstantSmis(op, left_int, right_int)) return;
1451 }
1452
1453 // Get number type of left and right sub-expressions.
1454 TypeInfo operands_type =
1455 TypeInfo::Combine(left.type_info(), right.type_info());
1456
1457 TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
1458
1459 Result answer;
1460 if (left_is_non_smi_constant || right_is_non_smi_constant) {
1461 // Go straight to the slow case, with no smi code.
1462 GenericBinaryOpStub stub(op,
1463 overwrite_mode,
1464 NO_SMI_CODE_IN_STUB,
1465 operands_type);
1466 answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
1467 } else if (right_is_smi_constant) {
1468 answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
1469 false, overwrite_mode);
1470 } else if (left_is_smi_constant) {
1471 answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
1472 true, overwrite_mode);
1473 } else {
1474 // Set the flags based on the operation, type and loop nesting level.
1475 // Bit operations always assume they likely operate on Smis. Still only
1476 // generate the inline Smi check code if this operation is part of a loop.
1477 // For all other operations only inline the Smi check code for likely smis
1478 // if the operation is part of a loop.
1479 if (loop_nesting() > 0 &&
1480 (Token::IsBitOp(op) ||
1481 operands_type.IsInteger32() ||
1482 expr->type()->IsLikelySmi())) {
1483 answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
1484 } else {
1485 GenericBinaryOpStub stub(op,
1486 overwrite_mode,
1487 NO_GENERIC_BINARY_FLAGS,
1488 operands_type);
1489 answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
1490 }
1491 }
1492
1493 answer.set_type_info(result_type);
1494 frame_->Push(&answer);
1495 }
1496
1497
1498 Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
1499 Result* left,
1500 Result* right) {
1501 if (stub->ArgsInRegistersSupported()) {
1502 stub->SetArgsInRegisters();
1503 return frame_->CallStub(stub, left, right);
1504 } else {
1505 frame_->Push(left);
1506 frame_->Push(right);
1507 return frame_->CallStub(stub, 2);
1508 }
1509 }
1510
1511
1512 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
1513 Object* answer_object = HEAP->undefined_value();
1514 switch (op) {
1515 case Token::ADD:
1516 if (Smi::IsValid(left + right)) {
1517 answer_object = Smi::FromInt(left + right);
1518 }
1519 break;
1520 case Token::SUB:
1521 if (Smi::IsValid(left - right)) {
1522 answer_object = Smi::FromInt(left - right);
1523 }
1524 break;
1525 case Token::MUL: {
1526 double answer = static_cast<double>(left) * right;
1527 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
1528 // If the product is zero and the non-zero factor is negative,
1529 // the spec requires us to return floating point negative zero.
1530 if (answer != 0 || (left >= 0 && right >= 0)) {
1531 answer_object = Smi::FromInt(static_cast<int>(answer));
1532 }
1533 }
1534 }
1535 break;
1536 case Token::DIV:
1537 case Token::MOD:
1538 break;
1539 case Token::BIT_OR:
1540 answer_object = Smi::FromInt(left | right);
1541 break;
1542 case Token::BIT_AND:
1543 answer_object = Smi::FromInt(left & right);
1544 break;
1545 case Token::BIT_XOR:
1546 answer_object = Smi::FromInt(left ^ right);
1547 break;
1548
1549 case Token::SHL: {
1550 int shift_amount = right & 0x1F;
1551 if (Smi::IsValid(left << shift_amount)) {
1552 answer_object = Smi::FromInt(left << shift_amount);
1553 }
1554 break;
1555 }
1556 case Token::SHR: {
1557 int shift_amount = right & 0x1F;
1558 unsigned int unsigned_left = left;
1559 unsigned_left >>= shift_amount;
1560 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
1561 answer_object = Smi::FromInt(unsigned_left);
1562 }
1563 break;
1564 }
1565 case Token::SAR: {
1566 int shift_amount = right & 0x1F;
1567 unsigned int unsigned_left = left;
1568 if (left < 0) {
1569 // Perform arithmetic shift of a negative number by
1570 // complementing the number, logical shifting, and complementing again.
1571 unsigned_left = ~unsigned_left;
1572 unsigned_left >>= shift_amount;
1573 unsigned_left = ~unsigned_left;
1574 } else {
1575 unsigned_left >>= shift_amount;
1576 }
1577 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
1578 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
1579 break;
1580 }
1581 default:
1582 UNREACHABLE();
1583 break;
1584 }
1585 if (answer_object->IsUndefined()) {
1586 return false;
1587 }
1588 frame_->Push(Handle<Object>(answer_object));
1589 return true;
1590 }
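The SAR folding above sidesteps right-shifting a negative signed int (implementation-defined in the C++ of this era) by complementing first: for negative x, ~(~x >> s) with logical shifts equals the arithmetic shift of x by s. A sketch:

    #include <cassert>
    #include <cstdint>

    uint32_t ArithShiftRight(int32_t x, int s) {
      uint32_t u = static_cast<uint32_t>(x);
      return x < 0 ? ~(~u >> s)  // complement, logical shift, complement
                   : u >> s;
    }

    int main() {
      assert(static_cast<int32_t>(ArithShiftRight(-20, 2)) == -5);
      assert(ArithShiftRight(20, 2) == 5);
      return 0;
    }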
1591
1592
1593 void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
1594 Result* right,
1595 JumpTarget* both_smi) {
1596 TypeInfo left_info = left->type_info();
1597 TypeInfo right_info = right->type_info();
1598 if (left_info.IsDouble() || left_info.IsString() ||
1599 right_info.IsDouble() || right_info.IsString()) {
1600 // We know that left and right are not both smi. Don't do any tests.
1601 return;
1602 }
1603
1604 if (left->reg().is(right->reg())) {
1605 if (!left_info.IsSmi()) {
1606 __ test(left->reg(), Immediate(kSmiTagMask));
1607 both_smi->Branch(zero);
1608 } else {
1609 if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
1610 left->Unuse();
1611 right->Unuse();
1612 both_smi->Jump();
1613 }
1614 } else if (!left_info.IsSmi()) {
1615 if (!right_info.IsSmi()) {
1616 Result temp = allocator_->Allocate();
1617 ASSERT(temp.is_valid());
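      // With kSmiTag == 0, the low bit of (left | right) is clear only when
      // both tag bits are clear, i.e. only when both values are smis.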
1618 __ mov(temp.reg(), left->reg());
1619 __ or_(temp.reg(), Operand(right->reg()));
1620 __ test(temp.reg(), Immediate(kSmiTagMask));
1621 temp.Unuse();
1622 both_smi->Branch(zero);
1623 } else {
1624 __ test(left->reg(), Immediate(kSmiTagMask));
1625 both_smi->Branch(zero);
1626 }
1627 } else {
1628 if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
1629 if (!right_info.IsSmi()) {
1630 __ test(right->reg(), Immediate(kSmiTagMask));
1631 both_smi->Branch(zero);
1632 } else {
1633 if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
1634 left->Unuse();
1635 right->Unuse();
1636 both_smi->Jump();
1637 }
1638 }
1639 }
1640
1641
1642 void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
1643 Register right,
1644 Register scratch,
1645 TypeInfo left_info,
1646 TypeInfo right_info,
1647 DeferredCode* deferred) {
1648 JumpIfNotBothSmiUsingTypeInfo(left,
1649 right,
1650 scratch,
1651 left_info,
1652 right_info,
1653 deferred->entry_label());
1654 }
1655
1656
1657 void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
1658 Register right,
1659 Register scratch,
1660 TypeInfo left_info,
1661 TypeInfo right_info,
1662 Label* on_not_smi) {
1663 if (left.is(right)) {
1664 if (!left_info.IsSmi()) {
1665 __ test(left, Immediate(kSmiTagMask));
1666 __ j(not_zero, on_not_smi);
1667 } else {
1668 if (FLAG_debug_code) __ AbortIfNotSmi(left);
1669 }
1670 } else if (!left_info.IsSmi()) {
1671 if (!right_info.IsSmi()) {
1672 __ mov(scratch, left);
1673 __ or_(scratch, Operand(right));
1674 __ test(scratch, Immediate(kSmiTagMask));
1675 __ j(not_zero, on_not_smi);
1676 } else {
1677 __ test(left, Immediate(kSmiTagMask));
1678 __ j(not_zero, on_not_smi);
1679 if (FLAG_debug_code) __ AbortIfNotSmi(right);
1680 }
1681 } else {
1682 if (FLAG_debug_code) __ AbortIfNotSmi(left);
1683 if (!right_info.IsSmi()) {
1684 __ test(right, Immediate(kSmiTagMask));
1685 __ j(not_zero, on_not_smi);
1686 } else {
1687 if (FLAG_debug_code) __ AbortIfNotSmi(right);
1688 }
1689 }
1690 }
1691
1692
1693 // Implements a binary operation using a deferred code object and some
1694 // inline code to operate on smis quickly.
1695 Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
1696 Result* left,
1697 Result* right,
1698 OverwriteMode overwrite_mode) {
1699 // Copy the type info because left and right may be overwritten.
1700 TypeInfo left_type_info = left->type_info();
1701 TypeInfo right_type_info = right->type_info();
1702 Token::Value op = expr->op();
1703 Result answer;
1704 // Special handling of div and mod because they use fixed registers.
1705 if (op == Token::DIV || op == Token::MOD) {
1706 // We need eax as the quotient register, edx as the remainder
1707 // register, neither left nor right in eax or edx, and left copied
1708 // to eax.
1709 Result quotient;
1710 Result remainder;
1711 bool left_is_in_eax = false;
1712 // Step 1: get eax for quotient.
1713 if ((left->is_register() && left->reg().is(eax)) ||
1714 (right->is_register() && right->reg().is(eax))) {
1715       // One or both of the operands are in eax. Use a fresh non-edx
1716       // register for them.
1717 Result fresh = allocator_->Allocate();
1718 ASSERT(fresh.is_valid());
1719 if (fresh.reg().is(edx)) {
1720 remainder = fresh;
1721 fresh = allocator_->Allocate();
1722 ASSERT(fresh.is_valid());
1723 }
1724 if (left->is_register() && left->reg().is(eax)) {
1725 quotient = *left;
1726 *left = fresh;
1727 left_is_in_eax = true;
1728 }
1729 if (right->is_register() && right->reg().is(eax)) {
1730 quotient = *right;
1731 *right = fresh;
1732 }
1733 __ mov(fresh.reg(), eax);
1734 } else {
1735 // Neither left nor right is in eax.
1736 quotient = allocator_->Allocate(eax);
1737 }
1738 ASSERT(quotient.is_register() && quotient.reg().is(eax));
1739 ASSERT(!(left->is_register() && left->reg().is(eax)));
1740 ASSERT(!(right->is_register() && right->reg().is(eax)));
1741
1742 // Step 2: get edx for remainder if necessary.
1743 if (!remainder.is_valid()) {
1744 if ((left->is_register() && left->reg().is(edx)) ||
1745 (right->is_register() && right->reg().is(edx))) {
1746 Result fresh = allocator_->Allocate();
1747 ASSERT(fresh.is_valid());
1748 if (left->is_register() && left->reg().is(edx)) {
1749 remainder = *left;
1750 *left = fresh;
1751 }
1752 if (right->is_register() && right->reg().is(edx)) {
1753 remainder = *right;
1754 *right = fresh;
1755 }
1756 __ mov(fresh.reg(), edx);
1757 } else {
1758 // Neither left nor right is in edx.
1759 remainder = allocator_->Allocate(edx);
1760 }
1761 }
1762 ASSERT(remainder.is_register() && remainder.reg().is(edx));
1763 ASSERT(!(left->is_register() && left->reg().is(edx)));
1764 ASSERT(!(right->is_register() && right->reg().is(edx)));
1765
1766 left->ToRegister();
1767 right->ToRegister();
1768 frame_->Spill(eax);
1769 frame_->Spill(edx);
1770 // DeferredInlineBinaryOperation requires all the registers that it is
1771 // told about to be spilled and distinct.
1772 Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
1773
1774 // Check that left and right are smi tagged.
1775 DeferredInlineBinaryOperation* deferred =
1776 new DeferredInlineBinaryOperation(op,
1777 (op == Token::DIV) ? eax : edx,
1778 left->reg(),
1779 distinct_right.reg(),
1780 left_type_info,
1781 right_type_info,
1782 overwrite_mode);
1783 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
1784 left_type_info, right_type_info, deferred);
1785 if (!left_is_in_eax) {
1786 __ mov(eax, left->reg());
1787 }
1788 // Sign extend eax into edx:eax.
1789 __ cdq();
1790 // Check for 0 divisor.
1791 __ test(right->reg(), Operand(right->reg()));
1792 deferred->Branch(zero);
1793 // Divide edx:eax by the right operand.
1794 __ idiv(right->reg());
1795
1796 // Complete the operation.
1797 if (op == Token::DIV) {
1798 // Check for negative zero result. If result is zero, and divisor
1799 // is negative, return a floating point negative zero. The
1800 // virtual frame is unchanged in this block, so local control flow
1801 // can use a Label rather than a JumpTarget. If the context of this
1802 // expression will treat -0 like 0, do not do this test.
1803 if (!expr->no_negative_zero()) {
1804 Label non_zero_result;
1805 __ test(left->reg(), Operand(left->reg()));
1806 __ j(not_zero, &non_zero_result);
1807 __ test(right->reg(), Operand(right->reg()));
1808 deferred->Branch(negative);
1809 __ bind(&non_zero_result);
1810 }
1811 // Check for the corner case of dividing the most negative smi by
1812       // -1. We cannot use the overflow flag, since it is not set by
1813       // the idiv instruction.
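      // e.g. Smi::kMinValue / -1 == 0x40000000, one more than Smi::kMaxValue
      // (0x3fffffff); smi-tagging it would shift into the sign bit and
      // produce a negative value.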
1814 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1815 __ cmp(eax, 0x40000000);
1816 deferred->Branch(equal);
1817 // Check that the remainder is zero.
1818 __ test(edx, Operand(edx));
1819 deferred->Branch(not_zero);
1820 // Tag the result and store it in the quotient register.
1821 __ SmiTag(eax);
1822 deferred->BindExit();
1823 left->Unuse();
1824 right->Unuse();
1825 answer = quotient;
1826 } else {
1827 ASSERT(op == Token::MOD);
1828 // Check for a negative zero result. If the result is zero, and
1829 // the dividend is negative, return a floating point negative
1830 // zero. The frame is unchanged in this block, so local control
1831 // flow can use a Label rather than a JumpTarget.
1832 if (!expr->no_negative_zero()) {
1833 Label non_zero_result;
1834 __ test(edx, Operand(edx));
1835 __ j(not_zero, &non_zero_result, taken);
1836 __ test(left->reg(), Operand(left->reg()));
1837 deferred->Branch(negative);
1838 __ bind(&non_zero_result);
1839 }
1840 deferred->BindExit();
1841 left->Unuse();
1842 right->Unuse();
1843 answer = remainder;
1844 }
1845 ASSERT(answer.is_valid());
1846 return answer;
1847 }
1848
1849 // Special handling of shift operations because they use fixed
1850 // registers.
1851 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
1852 // Move left out of ecx if necessary.
1853 if (left->is_register() && left->reg().is(ecx)) {
1854 *left = allocator_->Allocate();
1855 ASSERT(left->is_valid());
1856 __ mov(left->reg(), ecx);
1857 }
1858 right->ToRegister(ecx);
1859 left->ToRegister();
1860 ASSERT(left->is_register() && !left->reg().is(ecx));
1861 ASSERT(right->is_register() && right->reg().is(ecx));
1862 if (left_type_info.IsSmi()) {
1863 if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
1864 }
1865 if (right_type_info.IsSmi()) {
1866 if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
1867 }
1868
1869     // We will modify right, so it must be spilled.
1870 frame_->Spill(ecx);
1871 // DeferredInlineBinaryOperation requires all the registers that it is told
1872 // about to be spilled and distinct. We know that right is ecx and left is
1873 // not ecx.
1874 frame_->Spill(left->reg());
1875
1876 // Use a fresh answer register to avoid spilling the left operand.
1877 answer = allocator_->Allocate();
1878 ASSERT(answer.is_valid());
1879
1880 DeferredInlineBinaryOperation* deferred =
1881 new DeferredInlineBinaryOperation(op,
1882 answer.reg(),
1883 left->reg(),
1884 ecx,
1885 left_type_info,
1886 right_type_info,
1887 overwrite_mode);
1888 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
1889 left_type_info, right_type_info,
1890 deferred->NonSmiInputLabel());
1891
1892 // Untag both operands.
1893 __ mov(answer.reg(), left->reg());
1894 __ SmiUntag(answer.reg());
1895 __ SmiUntag(right->reg()); // Right is ecx.
1896
1897 // Perform the operation.
1898 ASSERT(right->reg().is(ecx));
1899 switch (op) {
1900 case Token::SAR: {
1901 __ sar_cl(answer.reg());
1902 if (!left_type_info.IsSmi()) {
1903 // Check that the *signed* result fits in a smi.
1904 __ cmp(answer.reg(), 0xc0000000);
1905 deferred->JumpToAnswerOutOfRange(negative);
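        // (Comparing with 0xc0000000 == -(1 << 30) sets the sign flag
        // exactly when the untagged result lies outside the smi range
        // [-(1 << 30), (1 << 30) - 1].)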
1906 }
1907 break;
1908 }
1909 case Token::SHR: {
1910 __ shr_cl(answer.reg());
1911 // Check that the *unsigned* result fits in a smi. Neither of
1912 // the two high-order bits can be set:
1913 // * 0x80000000: high bit would be lost when smi tagging.
1914 // * 0x40000000: this number would convert to negative when smi
1915 // tagging.
1916 // These two cases can only happen with shifts by 0 or 1 when
1917 // handed a valid smi. If the answer cannot be represented by a
1918 // smi, restore the left and right arguments, and jump to slow
1919 // case. The low bit of the left argument may be lost, but only
1920 // in a case where it is dropped anyway.
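      // e.g. the untagged bits of smi -1 are 0xffffffff: shifted right by 0
      // or 1 a bit remains in 0xc0000000, while a shift by 2 already leaves
      // 0x3fffffff, the largest valid smi.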
1921 __ test(answer.reg(), Immediate(0xc0000000));
1922 deferred->JumpToAnswerOutOfRange(not_zero);
1923 break;
1924 }
1925 case Token::SHL: {
1926 __ shl_cl(answer.reg());
1927 // Check that the *signed* result fits in a smi.
1928 __ cmp(answer.reg(), 0xc0000000);
1929 deferred->JumpToAnswerOutOfRange(negative);
1930 break;
1931 }
1932 default:
1933 UNREACHABLE();
1934 }
1935 // Smi-tag the result in answer.
1936 __ SmiTag(answer.reg());
1937 deferred->BindExit();
1938 left->Unuse();
1939 right->Unuse();
1940 ASSERT(answer.is_valid());
1941 return answer;
1942 }
1943
1944 // Handle the other binary operations.
1945 left->ToRegister();
1946 right->ToRegister();
1947 // DeferredInlineBinaryOperation requires all the registers that it is told
1948 // about to be spilled.
1949 Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
1950 // A newly allocated register answer is used to hold the answer. The
1951 // registers containing left and right are not modified so they don't
1952 // need to be spilled in the fast case.
1953 answer = allocator_->Allocate();
1954 ASSERT(answer.is_valid());
1955
1956 // Perform the smi tag check.
1957 DeferredInlineBinaryOperation* deferred =
1958 new DeferredInlineBinaryOperation(op,
1959 answer.reg(),
1960 left->reg(),
1961 distinct_right.reg(),
1962 left_type_info,
1963 right_type_info,
1964 overwrite_mode);
1965 Label non_smi_bit_op;
1966 if (op != Token::BIT_OR) {
1967 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
1968 left_type_info, right_type_info,
1969 deferred->NonSmiInputLabel());
1970 }
1971
1972 __ mov(answer.reg(), left->reg());
1973 switch (op) {
1974 case Token::ADD:
1975 __ add(answer.reg(), Operand(right->reg()));
1976 deferred->Branch(overflow);
1977 break;
1978
1979 case Token::SUB:
1980 __ sub(answer.reg(), Operand(right->reg()));
1981 deferred->Branch(overflow);
1982 break;
1983
1984 case Token::MUL: {
1985 // If the smi tag is 0 we can just leave the tag on one operand.
1986 STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
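      // (A tagged smi is 2*n, so untagging just one factor gives
      // n1 * (2 * n2) == 2 * (n1 * n2), which is already correctly tagged.)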
1987 // Remove smi tag from the left operand (but keep sign).
1988 // Left-hand operand has been copied into answer.
1989 __ SmiUntag(answer.reg());
1990 // Do multiplication of smis, leaving result in answer.
1991 __ imul(answer.reg(), Operand(right->reg()));
1992 // Go slow on overflows.
1993 deferred->Branch(overflow);
1994 // Check for negative zero result. If product is zero, and one
1995 // argument is negative, go to slow case. The frame is unchanged
1996 // in this block, so local control flow can use a Label rather
1997 // than a JumpTarget.
1998 if (!expr->no_negative_zero()) {
1999 Label non_zero_result;
2000 __ test(answer.reg(), Operand(answer.reg()));
2001 __ j(not_zero, &non_zero_result, taken);
2002 __ mov(answer.reg(), left->reg());
2003 __ or_(answer.reg(), Operand(right->reg()));
2004 deferred->Branch(negative);
2005 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
2006 __ bind(&non_zero_result);
2007 }
2008 break;
2009 }
2010
2011 case Token::BIT_OR:
2012 __ or_(answer.reg(), Operand(right->reg()));
2013 __ test(answer.reg(), Immediate(kSmiTagMask));
2014 __ j(not_zero, deferred->NonSmiInputLabel());
2015 break;
2016
2017 case Token::BIT_AND:
2018 __ and_(answer.reg(), Operand(right->reg()));
2019 break;
2020
2021 case Token::BIT_XOR:
2022 __ xor_(answer.reg(), Operand(right->reg()));
2023 break;
2024
2025 default:
2026 UNREACHABLE();
2027 break;
2028 }
2029
2030 deferred->BindExit();
2031 left->Unuse();
2032 right->Unuse();
2033 ASSERT(answer.is_valid());
2034 return answer;
2035 }
2036
2037
2038 // Call the appropriate binary operation stub to compute src op value
2039 // and leave the result in dst.
2040 class DeferredInlineSmiOperation: public DeferredCode {
2041 public:
2042 DeferredInlineSmiOperation(Token::Value op,
2043 Register dst,
2044 Register src,
2045 TypeInfo type_info,
2046 Smi* value,
2047 OverwriteMode overwrite_mode)
2048 : op_(op),
2049 dst_(dst),
2050 src_(src),
2051 type_info_(type_info),
2052 value_(value),
2053 overwrite_mode_(overwrite_mode) {
2054 if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
2055 set_comment("[ DeferredInlineSmiOperation");
2056 }
2057
2058 virtual void Generate();
2059
2060 private:
2061 Token::Value op_;
2062 Register dst_;
2063 Register src_;
2064 TypeInfo type_info_;
2065 Smi* value_;
2066 OverwriteMode overwrite_mode_;
2067 };
2068
2069
2070 void DeferredInlineSmiOperation::Generate() {
2071 // For mod we don't generate all the Smi code inline.
2072 GenericBinaryOpStub stub(
2073 op_,
2074 overwrite_mode_,
2075 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
2076 TypeInfo::Combine(TypeInfo::Smi(), type_info_));
2077 stub.GenerateCall(masm_, src_, value_);
2078 if (!dst_.is(eax)) __ mov(dst_, eax);
2079 }
2080
2081
2082 // Call the appropriate binary operation stub to compute value op src
2083 // and leave the result in dst.
2084 class DeferredInlineSmiOperationReversed: public DeferredCode {
2085 public:
2086 DeferredInlineSmiOperationReversed(Token::Value op,
2087 Register dst,
2088 Smi* value,
2089 Register src,
2090 TypeInfo type_info,
2091 OverwriteMode overwrite_mode)
2092 : op_(op),
2093 dst_(dst),
2094 type_info_(type_info),
2095 value_(value),
2096 src_(src),
2097 overwrite_mode_(overwrite_mode) {
2098 set_comment("[ DeferredInlineSmiOperationReversed");
2099 }
2100
2101 virtual void Generate();
2102
2103 private:
2104 Token::Value op_;
2105 Register dst_;
2106 TypeInfo type_info_;
2107 Smi* value_;
2108 Register src_;
2109 OverwriteMode overwrite_mode_;
2110 };
2111
2112
2113 void DeferredInlineSmiOperationReversed::Generate() {
2114 GenericBinaryOpStub stub(
2115 op_,
2116 overwrite_mode_,
2117 NO_SMI_CODE_IN_STUB,
2118 TypeInfo::Combine(TypeInfo::Smi(), type_info_));
2119 stub.GenerateCall(masm_, value_, src_);
2120 if (!dst_.is(eax)) __ mov(dst_, eax);
2121 }
2122
2123
2124 // The result of src + value is in dst. It either overflowed or was not
2125 // smi tagged. Undo the speculative addition and call the appropriate
2126 // specialized stub for add. The result is left in dst.
2127 class DeferredInlineSmiAdd: public DeferredCode {
2128 public:
2129 DeferredInlineSmiAdd(Register dst,
2130 TypeInfo type_info,
2131 Smi* value,
2132 OverwriteMode overwrite_mode)
2133 : dst_(dst),
2134 type_info_(type_info),
2135 value_(value),
2136 overwrite_mode_(overwrite_mode) {
2137 if (type_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
2138 set_comment("[ DeferredInlineSmiAdd");
2139 }
2140
2141 virtual void Generate();
2142
2143 private:
2144 Register dst_;
2145 TypeInfo type_info_;
2146 Smi* value_;
2147 OverwriteMode overwrite_mode_;
2148 };
2149
2150
2151 void DeferredInlineSmiAdd::Generate() {
2152 // Undo the optimistic add operation and call the shared stub.
2153 __ sub(Operand(dst_), Immediate(value_));
2154 GenericBinaryOpStub igostub(
2155 Token::ADD,
2156 overwrite_mode_,
2157 NO_SMI_CODE_IN_STUB,
2158 TypeInfo::Combine(TypeInfo::Smi(), type_info_));
2159 igostub.GenerateCall(masm_, dst_, value_);
2160 if (!dst_.is(eax)) __ mov(dst_, eax);
2161 }
2162
2163
2164 // The result of value + src is in dst. It either overflowed or was not
2165 // smi tagged. Undo the speculative addition and call the appropriate
2166 // specialized stub for add. The result is left in dst.
2167 class DeferredInlineSmiAddReversed: public DeferredCode {
2168 public:
2169 DeferredInlineSmiAddReversed(Register dst,
2170 TypeInfo type_info,
2171 Smi* value,
2172 OverwriteMode overwrite_mode)
2173 : dst_(dst),
2174 type_info_(type_info),
2175 value_(value),
2176 overwrite_mode_(overwrite_mode) {
2177 set_comment("[ DeferredInlineSmiAddReversed");
2178 }
2179
2180 virtual void Generate();
2181
2182 private:
2183 Register dst_;
2184 TypeInfo type_info_;
2185 Smi* value_;
2186 OverwriteMode overwrite_mode_;
2187 };
2188
2189
2190 void DeferredInlineSmiAddReversed::Generate() {
2191 // Undo the optimistic add operation and call the shared stub.
2192 __ sub(Operand(dst_), Immediate(value_));
2193 GenericBinaryOpStub igostub(
2194 Token::ADD,
2195 overwrite_mode_,
2196 NO_SMI_CODE_IN_STUB,
2197 TypeInfo::Combine(TypeInfo::Smi(), type_info_));
2198 igostub.GenerateCall(masm_, value_, dst_);
2199 if (!dst_.is(eax)) __ mov(dst_, eax);
2200 }
2201
2202
2203 // The result of src - value is in dst. It either overflowed or was not
2204 // smi tagged. Undo the speculative subtraction and call the
2205 // appropriate specialized stub for subtract. The result is left in
2206 // dst.
2207 class DeferredInlineSmiSub: public DeferredCode {
2208 public:
2209 DeferredInlineSmiSub(Register dst,
2210 TypeInfo type_info,
2211 Smi* value,
2212 OverwriteMode overwrite_mode)
2213 : dst_(dst),
2214 type_info_(type_info),
2215 value_(value),
2216 overwrite_mode_(overwrite_mode) {
2217 if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
2218 set_comment("[ DeferredInlineSmiSub");
2219 }
2220
2221 virtual void Generate();
2222
2223 private:
2224 Register dst_;
2225 TypeInfo type_info_;
2226 Smi* value_;
2227 OverwriteMode overwrite_mode_;
2228 };
2229
2230
2231 void DeferredInlineSmiSub::Generate() {
2232 // Undo the optimistic sub operation and call the shared stub.
2233 __ add(Operand(dst_), Immediate(value_));
2234 GenericBinaryOpStub igostub(
2235 Token::SUB,
2236 overwrite_mode_,
2237 NO_SMI_CODE_IN_STUB,
2238 TypeInfo::Combine(TypeInfo::Smi(), type_info_));
2239 igostub.GenerateCall(masm_, dst_, value_);
2240 if (!dst_.is(eax)) __ mov(dst_, eax);
2241 }
2242
2243
2244 Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
2245 Result* operand,
2246 Handle<Object> value,
2247 bool reversed,
2248 OverwriteMode overwrite_mode) {
2249 // Generate inline code for a binary operation when one of the
2250 // operands is a constant smi. Consumes the argument "operand".
2251 if (IsUnsafeSmi(value)) {
2252 Result unsafe_operand(value);
2253 if (reversed) {
2254 return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
2255 overwrite_mode);
2256 } else {
2257 return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
2258 overwrite_mode);
2259 }
2260 }
2261
2262 // Get the literal value.
2263 Smi* smi_value = Smi::cast(*value);
2264 int int_value = smi_value->value();
2265
2266 Token::Value op = expr->op();
2267 Result answer;
2268 switch (op) {
2269 case Token::ADD: {
2270 operand->ToRegister();
2271 frame_->Spill(operand->reg());
2272
2273 // Optimistically add. Call the specialized add stub if the
2274 // result is not a smi or overflows.
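      // (Tagged smis are even and the constant is a tagged smi, so the sum
      // has its low bit set only if the operand was not a smi; overflow is
      // detected separately via the overflow flag.)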
2275 DeferredCode* deferred = NULL;
2276 if (reversed) {
2277 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
2278 operand->type_info(),
2279 smi_value,
2280 overwrite_mode);
2281 } else {
2282 deferred = new DeferredInlineSmiAdd(operand->reg(),
2283 operand->type_info(),
2284 smi_value,
2285 overwrite_mode);
2286 }
2287 __ add(Operand(operand->reg()), Immediate(value));
2288 deferred->Branch(overflow);
2289 if (!operand->type_info().IsSmi()) {
2290 __ test(operand->reg(), Immediate(kSmiTagMask));
2291 deferred->Branch(not_zero);
2292 } else if (FLAG_debug_code) {
2293 __ AbortIfNotSmi(operand->reg());
2294 }
2295 deferred->BindExit();
2296 answer = *operand;
2297 break;
2298 }
2299
2300 case Token::SUB: {
2301 DeferredCode* deferred = NULL;
2302 if (reversed) {
2303 // The reversed case is only hit when the right operand is not a
2304 // constant.
2305 ASSERT(operand->is_register());
2306 answer = allocator()->Allocate();
2307 ASSERT(answer.is_valid());
2308 __ Set(answer.reg(), Immediate(value));
2309 deferred =
2310 new DeferredInlineSmiOperationReversed(op,
2311 answer.reg(),
2312 smi_value,
2313 operand->reg(),
2314 operand->type_info(),
2315 overwrite_mode);
2316 __ sub(answer.reg(), Operand(operand->reg()));
2317 } else {
2318 operand->ToRegister();
2319 frame_->Spill(operand->reg());
2320 answer = *operand;
2321 deferred = new DeferredInlineSmiSub(operand->reg(),
2322 operand->type_info(),
2323 smi_value,
2324 overwrite_mode);
2325 __ sub(Operand(operand->reg()), Immediate(value));
2326 }
2327 deferred->Branch(overflow);
2328 if (!operand->type_info().IsSmi()) {
2329 __ test(answer.reg(), Immediate(kSmiTagMask));
2330 deferred->Branch(not_zero);
2331 } else if (FLAG_debug_code) {
2332 __ AbortIfNotSmi(operand->reg());
2333 }
2334 deferred->BindExit();
2335 operand->Unuse();
2336 break;
2337 }
2338
2339 case Token::SAR:
2340 if (reversed) {
2341 Result constant_operand(value);
2342 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
2343 overwrite_mode);
2344 } else {
2345 // Only the least significant 5 bits of the shift value are used.
2346 // In the slow case, this masking is done inside the runtime call.
2347 int shift_value = int_value & 0x1f;
2348 operand->ToRegister();
2349 frame_->Spill(operand->reg());
2350 if (!operand->type_info().IsSmi()) {
2351 DeferredInlineSmiOperation* deferred =
2352 new DeferredInlineSmiOperation(op,
2353 operand->reg(),
2354 operand->reg(),
2355 operand->type_info(),
2356 smi_value,
2357 overwrite_mode);
2358 __ test(operand->reg(), Immediate(kSmiTagMask));
2359 deferred->Branch(not_zero);
2360 if (shift_value > 0) {
2361 __ sar(operand->reg(), shift_value);
2362 __ and_(operand->reg(), ~kSmiTagMask);
2363 }
2364 deferred->BindExit();
2365 } else {
2366 if (FLAG_debug_code) {
2367 __ AbortIfNotSmi(operand->reg());
2368 }
2369 if (shift_value > 0) {
2370 __ sar(operand->reg(), shift_value);
2371 __ and_(operand->reg(), ~kSmiTagMask);
2372 }
2373 }
2374 answer = *operand;
2375 }
2376 break;
2377
2378 case Token::SHR:
2379 if (reversed) {
2380 Result constant_operand(value);
2381 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
2382 overwrite_mode);
2383 } else {
2384 // Only the least significant 5 bits of the shift value are used.
2385 // In the slow case, this masking is done inside the runtime call.
2386 int shift_value = int_value & 0x1f;
2387 operand->ToRegister();
2388 answer = allocator()->Allocate();
2389 ASSERT(answer.is_valid());
2390 DeferredInlineSmiOperation* deferred =
2391 new DeferredInlineSmiOperation(op,
2392 answer.reg(),
2393 operand->reg(),
2394 operand->type_info(),
2395 smi_value,
2396 overwrite_mode);
2397 if (!operand->type_info().IsSmi()) {
2398 __ test(operand->reg(), Immediate(kSmiTagMask));
2399 deferred->Branch(not_zero);
2400 } else if (FLAG_debug_code) {
2401 __ AbortIfNotSmi(operand->reg());
2402 }
2403 __ mov(answer.reg(), operand->reg());
2404 __ SmiUntag(answer.reg());
2405 __ shr(answer.reg(), shift_value);
2406         // A negative Smi shifted right by two or more bits is in the positive Smi range.
2407 if (shift_value < 2) {
2408 __ test(answer.reg(), Immediate(0xc0000000));
2409 deferred->Branch(not_zero);
2410 }
2411 operand->Unuse();
2412 __ SmiTag(answer.reg());
2413 deferred->BindExit();
2414 }
2415 break;
2416
2417 case Token::SHL:
2418 if (reversed) {
2419 // Move operand into ecx and also into a second register.
2420 // If operand is already in a register, take advantage of that.
2421 // This lets us modify ecx, but still bail out to deferred code.
2422 Result right;
2423 Result right_copy_in_ecx;
2424 TypeInfo right_type_info = operand->type_info();
2425 operand->ToRegister();
2426 if (operand->reg().is(ecx)) {
2427 right = allocator()->Allocate();
2428 __ mov(right.reg(), ecx);
2429 frame_->Spill(ecx);
2430 right_copy_in_ecx = *operand;
2431 } else {
2432 right_copy_in_ecx = allocator()->Allocate(ecx);
2433 __ mov(ecx, operand->reg());
2434 right = *operand;
2435 }
2436 operand->Unuse();
2437
2438 answer = allocator()->Allocate();
2439 DeferredInlineSmiOperationReversed* deferred =
2440 new DeferredInlineSmiOperationReversed(op,
2441 answer.reg(),
2442 smi_value,
2443 right.reg(),
2444 right_type_info,
2445 overwrite_mode);
2446 __ mov(answer.reg(), Immediate(int_value));
2447 __ sar(ecx, kSmiTagSize);
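        // (sar moves the tag bit into the carry flag and leaves the untagged
        // shift count in ecx; a set carry means the value was not a smi.)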
2448 if (!right_type_info.IsSmi()) {
2449 deferred->Branch(carry);
2450 } else if (FLAG_debug_code) {
2451 __ AbortIfNotSmi(right.reg());
2452 }
2453 __ shl_cl(answer.reg());
2454 __ cmp(answer.reg(), 0xc0000000);
2455 deferred->Branch(sign);
2456 __ SmiTag(answer.reg());
2457
2458 deferred->BindExit();
2459 } else {
2460 // Only the least significant 5 bits of the shift value are used.
2461 // In the slow case, this masking is done inside the runtime call.
2462 int shift_value = int_value & 0x1f;
2463 operand->ToRegister();
2464 if (shift_value == 0) {
2465 // Spill operand so it can be overwritten in the slow case.
2466 frame_->Spill(operand->reg());
2467 DeferredInlineSmiOperation* deferred =
2468 new DeferredInlineSmiOperation(op,
2469 operand->reg(),
2470 operand->reg(),
2471 operand->type_info(),
2472 smi_value,
2473 overwrite_mode);
2474 __ test(operand->reg(), Immediate(kSmiTagMask));
2475 deferred->Branch(not_zero);
2476 deferred->BindExit();
2477 answer = *operand;
2478 } else {
2479 // Use a fresh temporary for nonzero shift values.
2480 answer = allocator()->Allocate();
2481 ASSERT(answer.is_valid());
2482 DeferredInlineSmiOperation* deferred =
2483 new DeferredInlineSmiOperation(op,
2484 answer.reg(),
2485 operand->reg(),
2486 operand->type_info(),
2487 smi_value,
2488 overwrite_mode);
2489 if (!operand->type_info().IsSmi()) {
2490 __ test(operand->reg(), Immediate(kSmiTagMask));
2491 deferred->Branch(not_zero);
2492 } else if (FLAG_debug_code) {
2493 __ AbortIfNotSmi(operand->reg());
2494 }
2495 __ mov(answer.reg(), operand->reg());
2496 STATIC_ASSERT(kSmiTag == 0); // adjust code if not the case
2497 // We do no shifts, only the Smi conversion, if shift_value is 1.
2498 if (shift_value > 1) {
2499 __ shl(answer.reg(), shift_value - 1);
2500 }
2501 // Convert int result to Smi, checking that it is in int range.
2502 STATIC_ASSERT(kSmiTagSize == 1); // adjust code if not the case
2503 __ add(answer.reg(), Operand(answer.reg()));
2504 deferred->Branch(overflow);
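          // (shl by shift_value - 1 computes the result modulo 2^32; the
          // final self-add supplies the last doubling into tagged form and
          // raises the overflow flag exactly when the untagged result falls
          // outside the smi range.)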
2505 deferred->BindExit();
2506 operand->Unuse();
2507 }
2508 }
2509 break;
2510
2511 case Token::BIT_OR:
2512 case Token::BIT_XOR:
2513 case Token::BIT_AND: {
2514 operand->ToRegister();
2515 // DeferredInlineBinaryOperation requires all the registers that it is
2516 // told about to be spilled.
2517 frame_->Spill(operand->reg());
2518 DeferredInlineBinaryOperation* deferred = NULL;
2519 if (!operand->type_info().IsSmi()) {
2520 Result left = allocator()->Allocate();
2521 ASSERT(left.is_valid());
2522 Result right = allocator()->Allocate();
2523 ASSERT(right.is_valid());
2524 deferred = new DeferredInlineBinaryOperation(
2525 op,
2526 operand->reg(),
2527 left.reg(),
2528 right.reg(),
2529 operand->type_info(),
2530 TypeInfo::Smi(),
2531 overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
2532 __ test(operand->reg(), Immediate(kSmiTagMask));
2533 deferred->JumpToConstantRhs(not_zero, smi_value);
2534 } else if (FLAG_debug_code) {
2535 __ AbortIfNotSmi(operand->reg());
2536 }
2537 if (op == Token::BIT_AND) {
2538 __ and_(Operand(operand->reg()), Immediate(value));
2539 } else if (op == Token::BIT_XOR) {
2540 if (int_value != 0) {
2541 __ xor_(Operand(operand->reg()), Immediate(value));
2542 }
2543 } else {
2544 ASSERT(op == Token::BIT_OR);
2545 if (int_value != 0) {
2546 __ or_(Operand(operand->reg()), Immediate(value));
2547 }
2548 }
2549 if (deferred != NULL) deferred->BindExit();
2550 answer = *operand;
2551 break;
2552 }
2553
2554 case Token::DIV:
2555 if (!reversed && int_value == 2) {
2556 operand->ToRegister();
2557 frame_->Spill(operand->reg());
2558
2559 DeferredInlineSmiOperation* deferred =
2560 new DeferredInlineSmiOperation(op,
2561 operand->reg(),
2562 operand->reg(),
2563 operand->type_info(),
2564 smi_value,
2565 overwrite_mode);
2566         // Check that the lowest log2(value) bits of the operand are zero,
2567         // and test the smi tag at the same time.
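        // (A tagged smi is 2*x, so testing against 3 rejects non-smis and
        // odd smis at once; for even x, one arithmetic shift turns 2*x into
        // x == 2*(x/2), the tagged quotient.)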
2568 STATIC_ASSERT(kSmiTag == 0);
2569 STATIC_ASSERT(kSmiTagSize == 1);
2570 __ test(operand->reg(), Immediate(3));
2571 deferred->Branch(not_zero); // Branch if non-smi or odd smi.
2572 __ sar(operand->reg(), 1);
2573 deferred->BindExit();
2574 answer = *operand;
2575 } else {
2576         // Cannot fall through from DIV past MOD to the default case, so we
2577         // duplicate the default case here.
2578 Result constant_operand(value);
2579 if (reversed) {
2580 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
2581 overwrite_mode);
2582 } else {
2583 answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
2584 overwrite_mode);
2585 }
2586 }
2587 break;
2588
2589 // Generate inline code for mod of powers of 2 and negative powers of 2.
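    // (The remainder takes the sign of the dividend, so for the non-negative
    // smis accepted below x % -m == x % m; masking the tagged value 2*x with
    // (m << kSmiTagSize) - 1 leaves 2*(x % m), the tagged remainder.)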
2590 case Token::MOD:
2591 if (!reversed &&
2592 int_value != 0 &&
2593 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
2594 operand->ToRegister();
2595 frame_->Spill(operand->reg());
2596 DeferredCode* deferred =
2597 new DeferredInlineSmiOperation(op,
2598 operand->reg(),
2599 operand->reg(),
2600 operand->type_info(),
2601 smi_value,
2602 overwrite_mode);
2603 // Check for negative or non-Smi left hand side.
2604 __ test(operand->reg(), Immediate(kSmiTagMask | kSmiSignMask));
2605 deferred->Branch(not_zero);
2606 if (int_value < 0) int_value = -int_value;
2607 if (int_value == 1) {
2608 __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
2609 } else {
2610 __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
2611 }
2612 deferred->BindExit();
2613 answer = *operand;
2614 break;
2615 }
2616 // Fall through if we did not find a power of 2 on the right hand side!
2617 // The next case must be the default.
2618
2619 default: {
2620 Result constant_operand(value);
2621 if (reversed) {
2622 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
2623 overwrite_mode);
2624 } else {
2625 answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
2626 overwrite_mode);
2627 }
2628 break;
2629 }
2630 }
2631 ASSERT(answer.is_valid());
2632 return answer;
2633 }
2634
2635
2636 static bool CouldBeNaN(const Result& result) {
2637 if (result.type_info().IsSmi()) return false;
2638 if (result.type_info().IsInteger32()) return false;
2639 if (!result.is_constant()) return true;
2640 if (!result.handle()->IsHeapNumber()) return false;
2641 return isnan(HeapNumber::cast(*result.handle())->value());
2642 }
2643
2644
2645 // Convert from signed to unsigned comparison to match the way EFLAGS are set
2646 // by FPU and XMM compare instructions.
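// (ucomisd and the FPU compare instructions set only ZF, PF and CF, never SF
// or OF; 'below' tests CF whereas 'less' tests SF != OF, so the unsigned
// conditions must be used after such compares.)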
2647 static Condition DoubleCondition(Condition cc) {
2648 switch (cc) {
2649 case less: return below;
2650 case equal: return equal;
2651 case less_equal: return below_equal;
2652 case greater: return above;
2653 case greater_equal: return above_equal;
2654 default: UNREACHABLE();
2655 }
2656 UNREACHABLE();
2657 return equal;
2658 }
2659
2660
2661 static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
2662 bool inline_number_compare) {
2663 CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
2664 if (nan_info == kCantBothBeNaN) {
2665 flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
2666 }
2667 if (inline_number_compare) {
2668 flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
2669 }
2670 return flags;
2671 }
2672
2673
2674 void CodeGenerator::Comparison(AstNode* node,
2675 Condition cc,
2676 bool strict,
2677 ControlDestination* dest) {
2678 // Strict only makes sense for equality comparisons.
2679 ASSERT(!strict || cc == equal);
2680
2681 Result left_side;
2682 Result right_side;
2683 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
2684 if (cc == greater || cc == less_equal) {
2685 cc = ReverseCondition(cc);
2686 left_side = frame_->Pop();
2687 right_side = frame_->Pop();
2688 } else {
2689 right_side = frame_->Pop();
2690 left_side = frame_->Pop();
2691 }
2692 ASSERT(cc == less || cc == equal || cc == greater_equal);
2693
2694 // If either side is a constant smi, optimize the comparison.
2695 bool left_side_constant_smi = false;
2696 bool left_side_constant_null = false;
2697 bool left_side_constant_1_char_string = false;
2698 if (left_side.is_constant()) {
2699 left_side_constant_smi = left_side.handle()->IsSmi();
2700 left_side_constant_null = left_side.handle()->IsNull();
2701 left_side_constant_1_char_string =
2702 (left_side.handle()->IsString() &&
2703 String::cast(*left_side.handle())->length() == 1 &&
2704 String::cast(*left_side.handle())->IsAsciiRepresentation());
2705 }
2706 bool right_side_constant_smi = false;
2707 bool right_side_constant_null = false;
2708 bool right_side_constant_1_char_string = false;
2709 if (right_side.is_constant()) {
2710 right_side_constant_smi = right_side.handle()->IsSmi();
2711 right_side_constant_null = right_side.handle()->IsNull();
2712 right_side_constant_1_char_string =
2713 (right_side.handle()->IsString() &&
2714 String::cast(*right_side.handle())->length() == 1 &&
2715 String::cast(*right_side.handle())->IsAsciiRepresentation());
2716 }
2717
2718 if (left_side_constant_smi || right_side_constant_smi) {
2719 bool is_loop_condition = (node->AsExpression() != NULL) &&
2720 node->AsExpression()->is_loop_condition();
2721 ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
2722 left_side_constant_smi, right_side_constant_smi,
2723 is_loop_condition);
2724 } else if (left_side_constant_1_char_string ||
2725 right_side_constant_1_char_string) {
2726 if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
2727 // Trivial case, comparing two constants.
2728 int left_value = String::cast(*left_side.handle())->Get(0);
2729 int right_value = String::cast(*right_side.handle())->Get(0);
2730 switch (cc) {
2731 case less:
2732 dest->Goto(left_value < right_value);
2733 break;
2734 case equal:
2735 dest->Goto(left_value == right_value);
2736 break;
2737 case greater_equal:
2738 dest->Goto(left_value >= right_value);
2739 break;
2740 default:
2741 UNREACHABLE();
2742 }
2743 } else {
2744       // Only one side is a constant 1-character string.
2745 // If left side is a constant 1-character string, reverse the operands.
2746 // Since one side is a constant string, conversion order does not matter.
2747 if (left_side_constant_1_char_string) {
2748 Result temp = left_side;
2749 left_side = right_side;
2750 right_side = temp;
2751 cc = ReverseCondition(cc);
2752 // This may reintroduce greater or less_equal as the value of cc.
2753 // CompareStub and the inline code both support all values of cc.
2754 }
2755 // Implement comparison against a constant string, inlining the case
2756 // where both sides are strings.
2757 left_side.ToRegister();
2758
2759 // Here we split control flow to the stub call and inlined cases
2760 // before finally splitting it to the control destination. We use
2761 // a jump target and branching to duplicate the virtual frame at
2762 // the first split. We manually handle the off-frame references
2763 // by reconstituting them on the non-fall-through path.
2764 JumpTarget is_not_string, is_string;
2765 Register left_reg = left_side.reg();
2766 Handle<Object> right_val = right_side.handle();
2767 ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
2768 __ test(left_side.reg(), Immediate(kSmiTagMask));
2769 is_not_string.Branch(zero, &left_side);
2770 Result temp = allocator_->Allocate();
2771 ASSERT(temp.is_valid());
2772 __ mov(temp.reg(),
2773 FieldOperand(left_side.reg(), HeapObject::kMapOffset));
2774 __ movzx_b(temp.reg(),
2775 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
2776 // If we are testing for equality then make use of the symbol shortcut.
2777       // Check if the left hand side has the same type as the right hand
2778       // side (which is always a symbol).
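      // (Symbols are canonicalized, so two symbols with equal content are the
      // same object and equality reduces to a pointer comparison.)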
2779 if (cc == equal) {
2780 Label not_a_symbol;
2781 STATIC_ASSERT(kSymbolTag != 0);
2782 // Ensure that no non-strings have the symbol bit set.
2783 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
2784 __ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
2785 __ j(zero, &not_a_symbol);
2786 // They are symbols, so do identity compare.
2787 __ cmp(left_side.reg(), right_side.handle());
2788 dest->true_target()->Branch(equal);
2789 dest->false_target()->Branch(not_equal);
2790 __ bind(&not_a_symbol);
2791 }
2792       // Call the compare stub if the left side is not a flat ASCII string.
2793 __ and_(temp.reg(),
2794 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
2795 __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
2796 temp.Unuse();
2797 is_string.Branch(equal, &left_side);
2798
2799       // Set up and call the compare stub.
2800 is_not_string.Bind(&left_side);
2801 CompareFlags flags =
2802 static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
2803 CompareStub stub(cc, strict, flags);
2804 Result result = frame_->CallStub(&stub, &left_side, &right_side);
2805 result.ToRegister();
2806 __ cmp(result.reg(), 0);
2807 result.Unuse();
2808 dest->true_target()->Branch(cc);
2809 dest->false_target()->Jump();
2810
2811 is_string.Bind(&left_side);
2812 // left_side is a sequential ASCII string.
2813 left_side = Result(left_reg);
2814 right_side = Result(right_val);
2815 // Test string equality and comparison.
2816 Label comparison_done;
2817 if (cc == equal) {
2818 __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
2819 Immediate(Smi::FromInt(1)));
2820 __ j(not_equal, &comparison_done);
2821 uint8_t char_value =
2822 static_cast<uint8_t>(String::cast(*right_val)->Get(0));
2823 __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
2824 char_value);
2825 } else {
2826 __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
2827 Immediate(Smi::FromInt(1)));
2828 // If the length is 0 then the jump is taken and the flags
2829 // correctly represent being less than the one-character string.
2830 __ j(below, &comparison_done);
2831 // Compare the first character of the string with the
2832 // constant 1-character string.
2833 uint8_t char_value =
2834 static_cast<uint8_t>(String::cast(*right_val)->Get(0));
2835 __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
2836 char_value);
2837 __ j(not_equal, &comparison_done);
2838 // If the first character is the same then the long string sorts after
2839 // the short one.
2840 __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
2841 Immediate(Smi::FromInt(1)));
2842 }
2843 __ bind(&comparison_done);
2844 left_side.Unuse();
2845 right_side.Unuse();
2846 dest->Split(cc);
2847 }
2848 } else {
2849 // Neither side is a constant Smi, constant 1-char string or constant null.
2850 // If either side is a non-smi constant, or known to be a heap number,
2851 // skip the smi check.
2852 bool known_non_smi =
2853 (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
2854 (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
2855 left_side.type_info().IsDouble() ||
2856 right_side.type_info().IsDouble();
2857
2858 NaNInformation nan_info =
2859 (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
2860 kBothCouldBeNaN :
2861 kCantBothBeNaN;
2862
2863     // Inline a number comparison handling any combination of smis and
2864     // heap numbers if:
2865     //   - the code is in a loop,
2866     //   - the compare operation is different from equal, and
2867     //   - the compare is not a for-loop comparison.
2868     // The reason for excluding equal is that it will most likely be done
2869     // with smis (not heap numbers), and the code for comparing smis is
2870     // inlined separately. The same reason applies to for-loop comparisons,
2871     // which will also most likely be smi comparisons.
2872 bool is_loop_condition = (node->AsExpression() != NULL)
2873 && node->AsExpression()->is_loop_condition();
2874 bool inline_number_compare =
2875 loop_nesting() > 0 && cc != equal && !is_loop_condition;
2876
2877 // Left and right needed in registers for the following code.
2878 left_side.ToRegister();
2879 right_side.ToRegister();
2880
2881 if (known_non_smi) {
2882 // Inlined equality check:
2883 // If at least one of the objects is not NaN, then if the objects
2884 // are identical, they are equal.
2885 if (nan_info == kCantBothBeNaN && cc == equal) {
2886 __ cmp(left_side.reg(), Operand(right_side.reg()));
2887 dest->true_target()->Branch(equal);
2888 }
2889
2890 // Inlined number comparison:
2891 if (inline_number_compare) {
2892 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
2893 }
2894
2895 // End of in-line compare, call out to the compare stub. Don't include
2896 // number comparison in the stub if it was inlined.
2897 CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
2898 CompareStub stub(cc, strict, flags);
2899 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
2900 __ test(answer.reg(), Operand(answer.reg()));
2901 answer.Unuse();
2902 dest->Split(cc);
2903 } else {
2904 // Here we split control flow to the stub call and inlined cases
2905 // before finally splitting it to the control destination. We use
2906 // a jump target and branching to duplicate the virtual frame at
2907 // the first split. We manually handle the off-frame references
2908 // by reconstituting them on the non-fall-through path.
2909 JumpTarget is_smi;
2910 Register left_reg = left_side.reg();
2911 Register right_reg = right_side.reg();
2912
2913 // In-line check for comparing two smis.
2914 JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
2915
2916 if (has_valid_frame()) {
2917 // Inline the equality check if both operands can't be a NaN. If both
2918 // objects are the same they are equal.
2919 if (nan_info == kCantBothBeNaN && cc == equal) {
2920 __ cmp(left_side.reg(), Operand(right_side.reg()));
2921 dest->true_target()->Branch(equal);
2922 }
2923
2924 // Inlined number comparison:
2925 if (inline_number_compare) {
2926 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
2927 }
2928
2929 // End of in-line compare, call out to the compare stub. Don't include
2930 // number comparison in the stub if it was inlined.
2931 CompareFlags flags =
2932 ComputeCompareFlags(nan_info, inline_number_compare);
2933 CompareStub stub(cc, strict, flags);
2934 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
2935 __ test(answer.reg(), Operand(answer.reg()));
2936 answer.Unuse();
2937 if (is_smi.is_linked()) {
2938 dest->true_target()->Branch(cc);
2939 dest->false_target()->Jump();
2940 } else {
2941 dest->Split(cc);
2942 }
2943 }
2944
2945 if (is_smi.is_linked()) {
2946 is_smi.Bind();
2947 left_side = Result(left_reg);
2948 right_side = Result(right_reg);
2949 __ cmp(left_side.reg(), Operand(right_side.reg()));
2950 right_side.Unuse();
2951 left_side.Unuse();
2952 dest->Split(cc);
2953 }
2954 }
2955 }
2956 }
2957
2958
2959 void CodeGenerator::ConstantSmiComparison(Condition cc,
2960 bool strict,
2961 ControlDestination* dest,
2962 Result* left_side,
2963 Result* right_side,
2964 bool left_side_constant_smi,
2965 bool right_side_constant_smi,
2966 bool is_loop_condition) {
2967 if (left_side_constant_smi && right_side_constant_smi) {
2968 // Trivial case, comparing two constants.
2969 int left_value = Smi::cast(*left_side->handle())->value();
2970 int right_value = Smi::cast(*right_side->handle())->value();
2971 switch (cc) {
2972 case less:
2973 dest->Goto(left_value < right_value);
2974 break;
2975 case equal:
2976 dest->Goto(left_value == right_value);
2977 break;
2978 case greater_equal:
2979 dest->Goto(left_value >= right_value);
2980 break;
2981 default:
2982 UNREACHABLE();
2983 }
2984 } else {
2985 // Only one side is a constant Smi.
2986 // If left side is a constant Smi, reverse the operands.
2987 // Since one side is a constant Smi, conversion order does not matter.
2988 if (left_side_constant_smi) {
2989 Result* temp = left_side;
2990 left_side = right_side;
2991 right_side = temp;
2992 cc = ReverseCondition(cc);
2993 // This may re-introduce greater or less_equal as the value of cc.
2994 // CompareStub and the inline code both support all values of cc.
2995 }
2996 // Implement comparison against a constant Smi, inlining the case
2997 // where both sides are Smis.
2998 left_side->ToRegister();
2999 Register left_reg = left_side->reg();
3000 Handle<Object> right_val = right_side->handle();
3001
3002 if (left_side->is_smi()) {
3003 if (FLAG_debug_code) {
3004 __ AbortIfNotSmi(left_reg);
3005 }
3006 // Test smi equality and comparison by signed int comparison.
3007 if (IsUnsafeSmi(right_side->handle())) {
3008 right_side->ToRegister();
3009 __ cmp(left_reg, Operand(right_side->reg()));
3010 } else {
3011 __ cmp(Operand(left_reg), Immediate(right_side->handle()));
3012 }
3013 left_side->Unuse();
3014 right_side->Unuse();
3015 dest->Split(cc);
3016 } else {
3017       // Only the case where the left side might not be a smi remains.
3018 JumpTarget is_smi;
3019 if (cc == equal) {
3020 // We can do the equality comparison before the smi check.
3021 __ cmp(Operand(left_reg), Immediate(right_side->handle()));
3022 dest->true_target()->Branch(equal);
3023 __ test(left_reg, Immediate(kSmiTagMask));
3024 dest->false_target()->Branch(zero);
3025 } else {
3026 // Do the smi check, then the comparison.
3027 __ test(left_reg, Immediate(kSmiTagMask));
3028 is_smi.Branch(zero, left_side, right_side);
3029 }
3030
3031 // Jump or fall through to here if we are comparing a non-smi to a
3032 // constant smi. If the non-smi is a heap number and this is not
3033 // a loop condition, inline the floating point code.
3034 if (!is_loop_condition &&
3035 CpuFeatures::IsSupported(SSE2)) {
3036 // Right side is a constant smi and left side has been checked
3037 // not to be a smi.
3038 CpuFeatures::Scope use_sse2(SSE2);
3039 JumpTarget not_number;
3040 __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
3041 Immediate(FACTORY->heap_number_map()));
3042 not_number.Branch(not_equal, left_side);
3043 __ movdbl(xmm1,
3044 FieldOperand(left_reg, HeapNumber::kValueOffset));
3045 int value = Smi::cast(*right_val)->value();
3046 if (value == 0) {
3047 __ xorpd(xmm0, xmm0);
3048 } else {
3049 Result temp = allocator()->Allocate();
3050 __ mov(temp.reg(), Immediate(value));
3051 __ cvtsi2sd(xmm0, Operand(temp.reg()));
3052 temp.Unuse();
3053 }
3054 __ ucomisd(xmm1, xmm0);
3055 // Jump to builtin for NaN.
3056 not_number.Branch(parity_even, left_side);
3057 left_side->Unuse();
3058 dest->true_target()->Branch(DoubleCondition(cc));
3059 dest->false_target()->Jump();
3060 not_number.Bind(left_side);
3061 }
3062
3063       // Set up and call the compare stub.
3064 CompareFlags flags =
3065 static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
3066 CompareStub stub(cc, strict, flags);
3067 Result result = frame_->CallStub(&stub, left_side, right_side);
3068 result.ToRegister();
3069 __ test(result.reg(), Operand(result.reg()));
3070 result.Unuse();
3071 if (cc == equal) {
3072 dest->Split(cc);
3073 } else {
3074 dest->true_target()->Branch(cc);
3075 dest->false_target()->Jump();
3076
3077         // It is important for performance that this case comes at the end.
3078 is_smi.Bind(left_side, right_side);
3079 if (IsUnsafeSmi(right_side->handle())) {
3080 right_side->ToRegister();
3081 __ cmp(left_reg, Operand(right_side->reg()));
3082 } else {
3083 __ cmp(Operand(left_reg), Immediate(right_side->handle()));
3084 }
3085 left_side->Unuse();
3086 right_side->Unuse();
3087 dest->Split(cc);
3088 }
3089 }
3090 }
3091 }
3092
3093
3094 // Check that the comparison operand is a number. Jump to the not_numbers
3095 // jump target, passing the left and right results, if it is not.
3096 static void CheckComparisonOperand(MacroAssembler* masm_,
3097 Result* operand,
3098 Result* left_side,
3099 Result* right_side,
3100 JumpTarget* not_numbers) {
3101 // Perform check if operand is not known to be a number.
3102 if (!operand->type_info().IsNumber()) {
3103 Label done;
3104 __ test(operand->reg(), Immediate(kSmiTagMask));
3105 __ j(zero, &done);
3106 __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
3107 Immediate(FACTORY->heap_number_map()));
3108 not_numbers->Branch(not_equal, left_side, right_side, not_taken);
3109 __ bind(&done);
3110 }
3111 }
3112
3113
3114 // Load a comparison operand onto the FPU stack. This assumes that the
3115 // operand has already been checked and is a number.
3116 static void LoadComparisonOperand(MacroAssembler* masm_,
3117 Result* operand) {
3118 Label done;
3119 if (operand->type_info().IsDouble()) {
3120 // Operand is known to be a heap number, just load it.
3121 __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
3122 } else if (operand->type_info().IsSmi()) {
3123 // Operand is known to be a smi. Convert it to double and keep the original
3124 // smi.
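    // (fild can only load from memory, so the untagged value makes a round
    // trip through the stack.)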
3125 __ SmiUntag(operand->reg());
3126 __ push(operand->reg());
3127 __ fild_s(Operand(esp, 0));
3128 __ pop(operand->reg());
3129 __ SmiTag(operand->reg());
3130 } else {
3131     // Operand type not known: check for a smi, otherwise assume a heap number.
3132 Label smi;
3133 __ test(operand->reg(), Immediate(kSmiTagMask));
3134 __ j(zero, &smi);
3135 __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
3136 __ jmp(&done);
3137 __ bind(&smi);
3138 __ SmiUntag(operand->reg());
3139 __ push(operand->reg());
3140 __ fild_s(Operand(esp, 0));
3141 __ pop(operand->reg());
3142 __ SmiTag(operand->reg());
3143 __ jmp(&done);
3144 }
3145 __ bind(&done);
3146 }
3147
3148
3149 // Load a comparison operand into an XMM register. Jump to the not_numbers
3150 // jump target, passing the left and right results, if it is not a number.
3151 static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
3152 Result* operand,
3153 XMMRegister xmm_reg,
3154 Result* left_side,
3155 Result* right_side,
3156 JumpTarget* not_numbers) {
3157 Label done;
3158 if (operand->type_info().IsDouble()) {
3159 // Operand is known to be a heap number, just load it.
3160 __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
3161 } else if (operand->type_info().IsSmi()) {
3162 // Operand is known to be a smi. Convert it to double and keep the original
3163 // smi.
3164 __ SmiUntag(operand->reg());
3165 __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
3166 __ SmiTag(operand->reg());
3167 } else {
3168     // Operand type not known: check for a smi or a heap number.
3169 Label smi;
3170 __ test(operand->reg(), Immediate(kSmiTagMask));
3171 __ j(zero, &smi);
3172 if (!operand->type_info().IsNumber()) {
3173 __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
3174 Immediate(FACTORY->heap_number_map()));
3175 not_numbers->Branch(not_equal, left_side, right_side, taken);
3176 }
3177 __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
3178 __ jmp(&done);
3179
3180 __ bind(&smi);
3181     // Convert the smi to a double and keep the original smi.
3182 __ SmiUntag(operand->reg());
3183 __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
3184 __ SmiTag(operand->reg());
3185 __ jmp(&done);
3186 }
3187 __ bind(&done);
3188 }
3189
3190
3191 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
3192 Result* right_side,
3193 Condition cc,
3194 ControlDestination* dest) {
3195 ASSERT(left_side->is_register());
3196 ASSERT(right_side->is_register());
3197
3198 JumpTarget not_numbers;
3199 if (CpuFeatures::IsSupported(SSE2)) {
3200 CpuFeatures::Scope use_sse2(SSE2);
3201
3202     // Load the left and right operands into xmm0 and xmm1 and compare.
3203 LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
3204 &not_numbers);
3205 LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
3206 &not_numbers);
3207 __ ucomisd(xmm0, xmm1);
3208 } else {
3209 Label check_right, compare;
3210
3211 // Make sure that both comparison operands are numbers.
3212 CheckComparisonOperand(masm_, left_side, left_side, right_side,
3213 &not_numbers);
3214 CheckComparisonOperand(masm_, right_side, left_side, right_side,
3215 &not_numbers);
3216
3217     // Load the right and left operands onto the FPU stack and compare.
3218 LoadComparisonOperand(masm_, right_side);
3219 LoadComparisonOperand(masm_, left_side);
3220 __ FCmp();
3221 }
3222
3223 // Bail out if a NaN is involved.
3224 not_numbers.Branch(parity_even, left_side, right_side, not_taken);
3225
3226 // Split to destination targets based on comparison.
3227 left_side->Unuse();
3228 right_side->Unuse();
3229 dest->true_target()->Branch(DoubleCondition(cc));
3230 dest->false_target()->Jump();
3231
3232 not_numbers.Bind(left_side, right_side);
3233 }
3234
3235
3236 // Call the function just below TOS on the stack with the given
3237 // arguments. The receiver is the TOS.
3238 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
3239 CallFunctionFlags flags,
3240 int position) {
3241 // Push the arguments ("left-to-right") on the stack.
3242 int arg_count = args->length();
3243 for (int i = 0; i < arg_count; i++) {
3244 Load(args->at(i));
3245 frame_->SpillTop();
3246 }
3247
3248 // Record the position for debugging purposes.
3249 CodeForSourcePosition(position);
3250
3251 // Use the shared code stub to call the function.
3252 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3253 CallFunctionStub call_function(arg_count, in_loop, flags);
3254 Result answer = frame_->CallStub(&call_function, arg_count + 1);
3255 // Restore context and replace function on the stack with the
3256 // result of the stub invocation.
3257 frame_->RestoreContextRegister();
3258 frame_->SetElementAt(0, &answer);
3259 }
3260
3261
3262 void CodeGenerator::CallApplyLazy(Expression* applicand,
3263 Expression* receiver,
3264 VariableProxy* arguments,
3265 int position) {
3266 // An optimized implementation of expressions of the form
3267 // x.apply(y, arguments).
3268 // If the arguments object of the scope has not been allocated,
3269 // and x.apply is Function.prototype.apply, this optimization
3270 // just copies y and the arguments of the current function on the
3271 // stack, as receiver and arguments, and calls x.
3272 // In the implementation comments, we call x the applicand
3273 // and y the receiver.
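  // e.g. in  function outer() { return f.apply(this, arguments); }  the
  // arguments object need never be materialized as long as f.apply is still
  // the unmodified Function.prototype.apply.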
3274 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
3275 ASSERT(arguments->IsArguments());
3276
3277 // Load applicand.apply onto the stack. This will usually
3278 // give us a megamorphic load site. Not super, but it works.
3279 Load(applicand);
3280 frame()->Dup();
3281 Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
3282 frame()->Push(name);
3283 Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
3284 __ nop();
3285 frame()->Push(&answer);
3286
3287 // Load the receiver and the existing arguments object onto the
3288 // expression stack. Avoid allocating the arguments object here.
3289 Load(receiver);
3290 LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
3291
3292 // Emit the source position information after having loaded the
3293 // receiver and the arguments.
3294 CodeForSourcePosition(position);
3295 // Contents of frame at this point:
3296 // Frame[0]: arguments object of the current function or the hole.
3297 // Frame[1]: receiver
3298 // Frame[2]: applicand.apply
3299 // Frame[3]: applicand.
3300
3301 // Check if the arguments object has been lazily allocated
3302 // already. If so, just use that instead of copying the arguments
3303 // from the stack. This also deals with cases where a local variable
3304 // named 'arguments' has been introduced.
3305 frame_->Dup();
3306 Result probe = frame_->Pop();
3307 { VirtualFrame::SpilledScope spilled_scope;
3308 Label slow, done;
3309 bool try_lazy = true;
3310 if (probe.is_constant()) {
3311 try_lazy = probe.handle()->IsArgumentsMarker();
3312 } else {
3313 __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
3314 probe.Unuse();
3315 __ j(not_equal, &slow);
3316 }
3317
3318 if (try_lazy) {
3319 Label build_args;
3320 // Get rid of the arguments object probe.
3321 frame_->Drop(); // Can be called on a spilled frame.
3322 // Stack now has 3 elements on it.
3323 // Contents of stack at this point:
3324 // esp[0]: receiver
3325 // esp[1]: applicand.apply
3326 // esp[2]: applicand.
3327
3328 // Check that the receiver really is a JavaScript object.
3329 __ mov(eax, Operand(esp, 0));
3330 __ test(eax, Immediate(kSmiTagMask));
3331 __ j(zero, &build_args);
3332 // We allow all JSObjects including JSFunctions. As long as
3333 // JS_FUNCTION_TYPE is the last instance type and it is right
3334 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
3335 // bound.
3336 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
3337 STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
3338 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
3339 __ j(below, &build_args);
3340
3341 // Check that applicand.apply is Function.prototype.apply.
3342 __ mov(eax, Operand(esp, kPointerSize));
3343 __ test(eax, Immediate(kSmiTagMask));
3344 __ j(zero, &build_args);
3345 __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
3346 __ j(not_equal, &build_args);
3347 __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
3348 __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
3349 Handle<Code> apply_code(masm()->isolate()->builtins()->builtin(
3350 Builtins::kFunctionApply));
3351 __ cmp(Operand(ecx), Immediate(apply_code));
3352 __ j(not_equal, &build_args);
3353
3354 // Check that applicand is a function.
3355 __ mov(edi, Operand(esp, 2 * kPointerSize));
3356 __ test(edi, Immediate(kSmiTagMask));
3357 __ j(zero, &build_args);
3358 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
3359 __ j(not_equal, &build_args);
3360
3361 // Copy the arguments to this function possibly from the
3362 // adaptor frame below it.
3363 Label invoke, adapted;
3364 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3365 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3366 __ cmp(Operand(ecx),
3367 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3368 __ j(equal, &adapted);
3369
3370 // No arguments adaptor frame. Copy fixed number of arguments.
3371 __ mov(eax, Immediate(scope()->num_parameters()));
3372 for (int i = 0; i < scope()->num_parameters(); i++) {
3373 __ push(frame_->ParameterAt(i));
3374 }
3375 __ jmp(&invoke);
3376
3377 // Arguments adaptor frame present. Copy arguments from there, but
3378 // avoid copying too many arguments to avoid stack overflows.
3379 __ bind(&adapted);
3380 static const uint32_t kArgumentsLimit = 1 * KB;
3381 __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3382 __ SmiUntag(eax);
3383 __ mov(ecx, Operand(eax));
3384 __ cmp(eax, kArgumentsLimit);
3385 __ j(above, &build_args);
3386
3387 // Loop through the arguments pushing them onto the execution
3388 // stack. We don't inform the virtual frame of the push, so we don't
3389 // have to worry about getting rid of the elements from the virtual
3390 // frame.
3391 Label loop;
3392 // ecx is a small non-negative integer, due to the test above.
3393 __ test(ecx, Operand(ecx));
3394 __ j(zero, &invoke);
3395 __ bind(&loop);
3396 __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
3397 __ dec(ecx);
3398 __ j(not_zero, &loop);
3399
3400 // Invoke the function.
3401 __ bind(&invoke);
3402 ParameterCount actual(eax);
3403 __ InvokeFunction(edi, actual, CALL_FUNCTION);
3404 // Drop applicand.apply and applicand from the stack, and push
3405 // the result of the function call, but leave the spilled frame
3406 // unchanged, with 3 elements, so it is correct when we compile the
3407 // slow-case code.
3408 __ add(Operand(esp), Immediate(2 * kPointerSize));
3409 __ push(eax);
3410 // Stack now has 1 element:
3411 // esp[0]: result
3412 __ jmp(&done);
3413
3414 // Slow case: allocate the arguments object since we know it isn't
3415 // there, and fall through to the generic code that calls
3416 // applicand.apply.
3417 __ bind(&build_args);
3418 // Stack now has 3 elements; we jumped here from a point where:
3419 // esp[0]: receiver
3420 // esp[1]: applicand.apply
3421 // esp[2]: applicand.
3422
3423 // StoreArgumentsObject requires a correct frame, and may modify it.
3424 Result arguments_object = StoreArgumentsObject(false);
3425 frame_->SpillAll();
3426 arguments_object.ToRegister();
3427 frame_->EmitPush(arguments_object.reg());
3428 arguments_object.Unuse();
3429 // Stack and frame now have 4 elements.
3430 __ bind(&slow);
3431 }
3432
3433 // Generic computation of x.apply(y, args) with no special optimization.
3434 // Flip applicand.apply and applicand on the stack, so
3435 // applicand looks like the receiver of the applicand.apply call.
3436 // Then process it as a normal function call.
3437 __ mov(eax, Operand(esp, 3 * kPointerSize));
3438 __ mov(ebx, Operand(esp, 2 * kPointerSize));
3439 __ mov(Operand(esp, 2 * kPointerSize), eax);
3440 __ mov(Operand(esp, 3 * kPointerSize), ebx);
3441
3442 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
3443 Result res = frame_->CallStub(&call_function, 3);
3444 // The function and its two arguments have been dropped.
3445 frame_->Drop(1); // Drop the receiver as well.
3446 res.ToRegister();
3447 frame_->EmitPush(res.reg());
3448 // Stack now has 1 element:
3449 // esp[0]: result
3450 if (try_lazy) __ bind(&done);
3451 } // End of spilled scope.
3452 // Restore the context register after a call.
3453 frame_->RestoreContextRegister();
3454 }
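
// Illustrative sketch, not part of this patch: the fast path above copies
// the caller's actual arguments straight onto the stack, but gives up and
// builds a real arguments object once the count exceeds kArgumentsLimit
// (1024 arguments). A portable model of that policy, pushing from the last
// argument to the first exactly like the dec loop above:
#include <cstddef>
#include <vector>

static const size_t kSketchArgumentsLimit = 1024;  // mirrors 1 * KB above

static bool SketchTryCopyArguments(const std::vector<int>& caller_args,
                                   std::vector<int>* stack) {
  if (caller_args.size() > kSketchArgumentsLimit) return false;  // build_args
  for (size_t i = caller_args.size(); i > 0; i--) {
    stack->push_back(caller_args[i - 1]);  // push arguments in reverse
  }
  return true;
}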
3455
3456
3457 class DeferredStackCheck: public DeferredCode {
3458 public:
3459 DeferredStackCheck() {
3460 set_comment("[ DeferredStackCheck");
3461 }
3462
3463 virtual void Generate();
3464 };
3465
3466
3467 void DeferredStackCheck::Generate() {
3468 StackCheckStub stub;
3469 __ CallStub(&stub);
3470 }
3471
3472
3473 void CodeGenerator::CheckStack() {
3474 DeferredStackCheck* deferred = new DeferredStackCheck;
3475 ExternalReference stack_limit =
3476 ExternalReference::address_of_stack_limit(masm()->isolate());
3477 __ cmp(esp, Operand::StaticVariable(stack_limit));
3478 deferred->Branch(below);
3479 deferred->BindExit();
3480 }
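
// Illustrative sketch, not part of this patch: the inline check above is a
// single compare of esp against the isolate's stack limit; only the slow
// path, emitted out of line by DeferredStackCheck, calls the stub. Modeled
// with plain addresses:
#include <stdint.h>

static bool SketchNeedsStackCheck(uintptr_t stack_pointer,
                                  uintptr_t stack_limit) {
  return stack_pointer < stack_limit;  // "below": ia32 stacks grow downward
}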
3481
3482
3483 void CodeGenerator::VisitAndSpill(Statement* statement) {
3484 ASSERT(in_spilled_code());
3485 set_in_spilled_code(false);
3486 Visit(statement);
3487 if (frame_ != NULL) {
3488 frame_->SpillAll();
3489 }
3490 set_in_spilled_code(true);
3491 }
3492
3493
3494 void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
3495 #ifdef DEBUG
3496 int original_height = frame_->height();
3497 #endif
3498 ASSERT(in_spilled_code());
3499 set_in_spilled_code(false);
3500 VisitStatements(statements);
3501 if (frame_ != NULL) {
3502 frame_->SpillAll();
3503 }
3504 set_in_spilled_code(true);
3505
3506 ASSERT(!has_valid_frame() || frame_->height() == original_height);
3507 }
3508
3509
3510 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
3511 #ifdef DEBUG
3512 int original_height = frame_->height();
3513 #endif
3514 ASSERT(!in_spilled_code());
3515 for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
3516 Visit(statements->at(i));
3517 }
3518 ASSERT(!has_valid_frame() || frame_->height() == original_height);
3519 }
3520
3521
3522 void CodeGenerator::VisitBlock(Block* node) {
3523 ASSERT(!in_spilled_code());
3524 Comment cmnt(masm_, "[ Block");
3525 CodeForStatementPosition(node);
3526 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3527 VisitStatements(node->statements());
3528 if (node->break_target()->is_linked()) {
3529 node->break_target()->Bind();
3530 }
3531 node->break_target()->Unuse();
3532 }
3533
3534
3535 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
3536 // Call the runtime to declare the globals. The inevitable call
3537 // will sync frame elements to memory anyway, so we do it eagerly to
3538 // allow us to push the arguments directly into place.
3539 frame_->SyncRange(0, frame_->element_count() - 1);
3540
3541 frame_->EmitPush(esi); // The context is the first argument.
3542 frame_->EmitPush(Immediate(pairs));
3543 frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
3544 frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag())));
3545 Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
3546 // Return value is ignored.
3547 }
3548
3549
3550 void CodeGenerator::VisitDeclaration(Declaration* node) {
3551 Comment cmnt(masm_, "[ Declaration");
3552 Variable* var = node->proxy()->var();
3553 ASSERT(var != NULL); // must have been resolved
3554 Slot* slot = var->AsSlot();
3555
3556 // If it was not possible to allocate the variable at compile time,
3557 // we need to "declare" it at runtime to make sure it actually
3558 // exists in the local context.
3559 if (slot != NULL && slot->type() == Slot::LOOKUP) {
3560 // Variables with a "LOOKUP" slot were introduced as non-locals
3561 // during variable resolution and must have mode DYNAMIC.
3562 ASSERT(var->is_dynamic());
3563 // For now, just do a runtime call. Sync the virtual frame eagerly
3564 // so we can simply push the arguments into place.
3565 frame_->SyncRange(0, frame_->element_count() - 1);
3566 frame_->EmitPush(esi);
3567 frame_->EmitPush(Immediate(var->name()));
3568 // Declaration nodes are always introduced in one of two modes.
3569 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
3570 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
3571 frame_->EmitPush(Immediate(Smi::FromInt(attr)));
3572 // Push initial value, if any.
3573 // Note: For variables we must not push an initial value (such as
3574 // 'undefined') because we may have a (legal) redeclaration and we
3575 // must not destroy the current value.
3576 if (node->mode() == Variable::CONST) {
3577 frame_->EmitPush(Immediate(FACTORY->the_hole_value()));
3578 } else if (node->fun() != NULL) {
3579 Load(node->fun());
3580 } else {
3581 frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
3582 }
3583 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
3584 // Ignore the return value (declarations are statements).
3585 return;
3586 }
3587
3588 ASSERT(!var->is_global());
3589
3590 // If we have a function or a constant, we need to initialize the variable.
3591 Expression* val = NULL;
3592 if (node->mode() == Variable::CONST) {
3593 val = new Literal(FACTORY->the_hole_value());
3594 } else {
3595 val = node->fun(); // NULL if we don't have a function
3596 }
3597
3598 if (val != NULL) {
3599 {
3600 // Set the initial value.
3601 Reference target(this, node->proxy());
3602 Load(val);
3603 target.SetValue(NOT_CONST_INIT);
3604 // The reference is removed from the stack (preserving TOS) when
3605 // it goes out of scope.
3606 }
3607 // Get rid of the assigned value (declarations are statements).
3608 frame_->Drop();
3609 }
3610 }
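
// Illustrative sketch, not part of this patch, of the two decisions made on
// the runtime (LOOKUP) path above: the property attributes follow the
// declaration mode, and the initial value is the hole for CONST, the
// function for a function declaration, and a dummy smi otherwise, so a
// legal redeclaration cannot clobber an existing value.
enum SketchMode { SKETCH_VAR, SKETCH_CONST };
enum SketchInit { SKETCH_HOLE, SKETCH_FUNCTION, SKETCH_NO_VALUE };

static bool SketchIsReadOnly(SketchMode mode) {
  return mode == SKETCH_CONST;  // VAR => NONE, CONST => READ_ONLY
}

static SketchInit SketchInitialValue(SketchMode mode, bool has_function) {
  if (mode == SKETCH_CONST) return SKETCH_HOLE;  // the_hole_value
  if (has_function) return SKETCH_FUNCTION;      // Load(node->fun())
  return SKETCH_NO_VALUE;                        // Smi::FromInt(0)
}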
3611
3612
3613 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
3614 ASSERT(!in_spilled_code());
3615 Comment cmnt(masm_, "[ ExpressionStatement");
3616 CodeForStatementPosition(node);
3617 Expression* expression = node->expression();
3618 expression->MarkAsStatement();
3619 Load(expression);
3620 // Remove the lingering expression result from the top of the stack.
3621 frame_->Drop();
3622 }
3623
3624
3625 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
3626 ASSERT(!in_spilled_code());
3627 Comment cmnt(masm_, "// EmptyStatement");
3628 CodeForStatementPosition(node);
3629 // nothing to do
3630 }
3631
3632
3633 void CodeGenerator::VisitIfStatement(IfStatement* node) {
3634 ASSERT(!in_spilled_code());
3635 Comment cmnt(masm_, "[ IfStatement");
3636 // Generate different code depending on which parts of the if statement
3637 // are present or not.
3638 bool has_then_stm = node->HasThenStatement();
3639 bool has_else_stm = node->HasElseStatement();
3640
3641 CodeForStatementPosition(node);
3642 JumpTarget exit;
3643 if (has_then_stm && has_else_stm) {
3644 JumpTarget then;
3645 JumpTarget else_;
3646 ControlDestination dest(&then, &else_, true);
3647 LoadCondition(node->condition(), &dest, true);
3648
3649 if (dest.false_was_fall_through()) {
3650 // The else target was bound, so we compile the else part first.
3651 Visit(node->else_statement());
3652
3653 // We may have dangling jumps to the then part.
3654 if (then.is_linked()) {
3655 if (has_valid_frame()) exit.Jump();
3656 then.Bind();
3657 Visit(node->then_statement());
3658 }
3659 } else {
3660 // The then target was bound, so we compile the then part first.
3661 Visit(node->then_statement());
3662
3663 if (else_.is_linked()) {
3664 if (has_valid_frame()) exit.Jump();
3665 else_.Bind();
3666 Visit(node->else_statement());
3667 }
3668 }
3669
3670 } else if (has_then_stm) {
3671 ASSERT(!has_else_stm);
3672 JumpTarget then;
3673 ControlDestination dest(&then, &exit, true);
3674 LoadCondition(node->condition(), &dest, true);
3675
3676 if (dest.false_was_fall_through()) {
3677 // The exit label was bound. We may have dangling jumps to the
3678 // then part.
3679 if (then.is_linked()) {
3680 exit.Unuse();
3681 exit.Jump();
3682 then.Bind();
3683 Visit(node->then_statement());
3684 }
3685 } else {
3686 // The then label was bound.
3687 Visit(node->then_statement());
3688 }
3689
3690 } else if (has_else_stm) {
3691 ASSERT(!has_then_stm);
3692 JumpTarget else_;
3693 ControlDestination dest(&exit, &else_, false);
3694 LoadCondition(node->condition(), &dest, true);
3695
3696 if (dest.true_was_fall_through()) {
3697 // The exit label was bound. We may have dangling jumps to the
3698 // else part.
3699 if (else_.is_linked()) {
3700 exit.Unuse();
3701 exit.Jump();
3702 else_.Bind();
3703 Visit(node->else_statement());
3704 }
3705 } else {
3706 // The else label was bound.
3707 Visit(node->else_statement());
3708 }
3709
3710 } else {
3711 ASSERT(!has_then_stm && !has_else_stm);
3712 // We only care about the condition's side effects (not its value
3713 // or control flow effect). LoadCondition is called without
3714 // forcing control flow.
3715 ControlDestination dest(&exit, &exit, true);
3716 LoadCondition(node->condition(), &dest, false);
3717 if (!dest.is_used()) {
3718 // We got a value on the frame rather than (or in addition to)
3719 // control flow.
3720 frame_->Drop();
3721 }
3722 }
3723
3724 if (exit.is_linked()) {
3725 exit.Bind();
3726 }
3727 }
3728
3729
3730 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
3731 ASSERT(!in_spilled_code());
3732 Comment cmnt(masm_, "[ ContinueStatement");
3733 CodeForStatementPosition(node);
3734 node->target()->continue_target()->Jump();
3735 }
3736
3737
3738 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
3739 ASSERT(!in_spilled_code());
3740 Comment cmnt(masm_, "[ BreakStatement");
3741 CodeForStatementPosition(node);
3742 node->target()->break_target()->Jump();
3743 }
3744
3745
3746 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
3747 ASSERT(!in_spilled_code());
3748 Comment cmnt(masm_, "[ ReturnStatement");
3749
3750 CodeForStatementPosition(node);
3751 Load(node->expression());
3752 Result return_value = frame_->Pop();
3753 masm()->positions_recorder()->WriteRecordedPositions();
3754 if (function_return_is_shadowed_) {
3755 function_return_.Jump(&return_value);
3756 } else {
3757 frame_->PrepareForReturn();
3758 if (function_return_.is_bound()) {
3759 // If the function return label is already bound we reuse the
3760 // code by jumping to the return site.
3761 function_return_.Jump(&return_value);
3762 } else {
3763 function_return_.Bind(&return_value);
3764 GenerateReturnSequence(&return_value);
3765 }
3766 }
3767 }
3768
3769
3770 void CodeGenerator::GenerateReturnSequence(Result* return_value) {
3771 // The return value is a live (but not currently reference counted)
3772 // reference to eax. This is safe because the current frame does not
3773 // contain a reference to eax (it is prepared for the return by spilling
3774 // all registers).
3775 if (FLAG_trace) {
3776 frame_->Push(return_value);
3777 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
3778 }
3779 return_value->ToRegister(eax);
3780
3781 // Add a label for checking the size of the code used for returning.
3782 #ifdef DEBUG
3783 Label check_exit_codesize;
3784 masm_->bind(&check_exit_codesize);
3785 #endif
3786
3787 // Leave the frame and return popping the arguments and the
3788 // receiver.
3789 frame_->Exit();
3790 int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
3791 __ Ret(arguments_bytes, ecx);
3792 DeleteFrame();
3793
3794 #ifdef ENABLE_DEBUGGER_SUPPORT
3795 // Check that the size of the code used for returning is large enough
3796 // for the debugger's requirements.
3797 ASSERT(Assembler::kJSReturnSequenceLength <=
3798 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
3799 #endif
3800 }
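
// Illustrative sketch, not part of this patch: the sequence above returns
// with "ret n", popping the receiver and all parameters in one instruction;
// the DEBUG label exists only so the emitted length can be checked against
// the debugger's patching requirements.
static int SketchReturnArgumentBytes(int num_parameters) {
  const int kSketchPointerSize = 4;                  // ia32
  return (num_parameters + 1) * kSketchPointerSize;  // +1 for the receiver
}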
3801
3802
3803 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
3804 ASSERT(!in_spilled_code());
3805 Comment cmnt(masm_, "[ WithEnterStatement");
3806 CodeForStatementPosition(node);
3807 Load(node->expression());
3808 Result context;
3809 if (node->is_catch_block()) {
3810 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
3811 } else {
3812 context = frame_->CallRuntime(Runtime::kPushContext, 1);
3813 }
3814
3815 // Update context local.
3816 frame_->SaveContextRegister();
3817
3818 // Verify that the runtime call result and esi agree.
3819 if (FLAG_debug_code) {
3820 __ cmp(context.reg(), Operand(esi));
3821 __ Assert(equal, "Runtime::NewContext should end up in esi");
3822 }
3823 }
3824
3825
3826 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
3827 ASSERT(!in_spilled_code());
3828 Comment cmnt(masm_, "[ WithExitStatement");
3829 CodeForStatementPosition(node);
3830 // Pop context.
3831 __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
3832 // Update context local.
3833 frame_->SaveContextRegister();
3834 }
3835
3836
3837 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
3838 ASSERT(!in_spilled_code());
3839 Comment cmnt(masm_, "[ SwitchStatement");
3840 CodeForStatementPosition(node);
3841 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3842
3843 // Compile the switch value.
3844 Load(node->tag());
3845
3846 ZoneList<CaseClause*>* cases = node->cases();
3847 int length = cases->length();
3848 CaseClause* default_clause = NULL;
3849
3850 JumpTarget next_test;
3851 // Compile the case label expressions and comparisons. Exit early
3852 // if a comparison is unconditionally true. The target next_test is
3853 // bound before the loop in order to indicate control flow to the
3854 // first comparison.
3855 next_test.Bind();
3856 for (int i = 0; i < length && !next_test.is_unused(); i++) {
3857 CaseClause* clause = cases->at(i);
3858 // The default is not a test, but remember it for later.
3859 if (clause->is_default()) {
3860 default_clause = clause;
3861 continue;
3862 }
3863
3864 Comment cmnt(masm_, "[ Case comparison");
3865 // We recycle the same target next_test for each test. Bind it if
3866 // the previous test has not done so and then unuse it for the
3867 // loop.
3868 if (next_test.is_linked()) {
3869 next_test.Bind();
3870 }
3871 next_test.Unuse();
3872
3873 // Duplicate the switch value.
3874 frame_->Dup();
3875
3876 // Compile the label expression.
3877 Load(clause->label());
3878
3879 // Compare and branch to the body if true or the next test if
3880 // false. Prefer the next test as a fall through.
3881 ControlDestination dest(clause->body_target(), &next_test, false);
3882 Comparison(node, equal, true, &dest);
3883
3884 // If the comparison fell through to the true target, jump to the
3885 // actual body.
3886 if (dest.true_was_fall_through()) {
3887 clause->body_target()->Unuse();
3888 clause->body_target()->Jump();
3889 }
3890 }
3891
3892 // If there was control flow to a next test from the last one
3893 // compiled, compile a jump to the default or break target.
3894 if (!next_test.is_unused()) {
3895 if (next_test.is_linked()) {
3896 next_test.Bind();
3897 }
3898 // Drop the switch value.
3899 frame_->Drop();
3900 if (default_clause != NULL) {
3901 default_clause->body_target()->Jump();
3902 } else {
3903 node->break_target()->Jump();
3904 }
3905 }
3906
3907 // The last instruction emitted was a jump, either to the default
3908 // clause or the break target, or else to a case body from the loop
3909 // that compiles the tests.
3910 ASSERT(!has_valid_frame());
3911 // Compile case bodies as needed.
3912 for (int i = 0; i < length; i++) {
3913 CaseClause* clause = cases->at(i);
3914
3915 // There are two ways to reach the body: from the corresponding
3916 // test or as the fall through of the previous body.
3917 if (clause->body_target()->is_linked() || has_valid_frame()) {
3918 if (clause->body_target()->is_linked()) {
3919 if (has_valid_frame()) {
3920 // If we have both a jump from the test and a fall through, put
3921 // a jump on the fall-through path to avoid dropping the
3922 // switch value on the test path. The exception is the
3923 // default which has already had the switch value dropped.
3924 if (clause->is_default()) {
3925 clause->body_target()->Bind();
3926 } else {
3927 JumpTarget body;
3928 body.Jump();
3929 clause->body_target()->Bind();
3930 frame_->Drop();
3931 body.Bind();
3932 }
3933 } else {
3934 // No fall through to worry about.
3935 clause->body_target()->Bind();
3936 if (!clause->is_default()) {
3937 frame_->Drop();
3938 }
3939 }
3940 } else {
3941 // Otherwise, we have only fall through.
3942 ASSERT(has_valid_frame());
3943 }
3944
3945 // We are now prepared to compile the body.
3946 Comment cmnt(masm_, "[ Case body");
3947 VisitStatements(clause->statements());
3948 }
3949 clause->body_target()->Unuse();
3950 }
3951
3952 // We may not have a valid frame here so bind the break target only
3953 // if needed.
3954 if (node->break_target()->is_linked()) {
3955 node->break_target()->Bind();
3956 }
3957 node->break_target()->Unuse();
3958 }
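
// Illustrative sketch, not part of this patch: the switch above is lowered
// into a chain of strict-equality tests against a duplicated tag, with the
// default clause remembered and entered only after every test has failed.
// Fall-through between case bodies is handled by the codegen and is not
// modeled here.
#include <stddef.h>

struct SketchCaseClause {
  bool is_default;
  int label;      // ignored for the default clause
  void (*body)();
};

static void SketchRunSwitch(int tag, const SketchCaseClause* cases,
                            int length) {
  const SketchCaseClause* default_clause = NULL;
  for (int i = 0; i < length; i++) {
    if (cases[i].is_default) { default_clause = &cases[i]; continue; }
    if (tag == cases[i].label) { cases[i].body(); return; }  // body_target
  }
  if (default_clause != NULL) default_clause->body();  // else break target
}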
3959
3960
3961 void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
3962 ASSERT(!in_spilled_code());
3963 Comment cmnt(masm_, "[ DoWhileStatement");
3964 CodeForStatementPosition(node);
3965 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3966 JumpTarget body(JumpTarget::BIDIRECTIONAL);
3967 IncrementLoopNesting();
3968
3969 ConditionAnalysis info = AnalyzeCondition(node->cond());
3970 // Label the top of the loop for the backward jump if necessary.
3971 switch (info) {
3972 case ALWAYS_TRUE:
3973 // Use the continue target.
3974 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3975 node->continue_target()->Bind();
3976 break;
3977 case ALWAYS_FALSE:
3978 // No need to label it.
3979 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3980 break;
3981 case DONT_KNOW:
3982 // Continue is the test, so use the backward body target.
3983 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3984 body.Bind();
3985 break;
3986 }
3987
3988 CheckStack(); // TODO(1222600): ignore if body contains calls.
3989 Visit(node->body());
3990
3991 // Compile the test.
3992 switch (info) {
3993 case ALWAYS_TRUE:
3994 // If control flow can fall off the end of the body, jump back
3995 // to the top and bind the break target at the exit.
3996 if (has_valid_frame()) {
3997 node->continue_target()->Jump();
3998 }
3999 if (node->break_target()->is_linked()) {
4000 node->break_target()->Bind();
4001 }
4002 break;
4003 case ALWAYS_FALSE:
4004 // We may have had continues or breaks in the body.
4005 if (node->continue_target()->is_linked()) {
4006 node->continue_target()->Bind();
4007 }
4008 if (node->break_target()->is_linked()) {
4009 node->break_target()->Bind();
4010 }
4011 break;
4012 case DONT_KNOW:
4013 // We have to compile the test expression if it can be reached by
4014 // control flow falling out of the body or via continue.
4015 if (node->continue_target()->is_linked()) {
4016 node->continue_target()->Bind();
4017 }
4018 if (has_valid_frame()) {
4019 Comment cmnt(masm_, "[ DoWhileCondition");
4020 CodeForDoWhileConditionPosition(node);
4021 ControlDestination dest(&body, node->break_target(), false);
4022 LoadCondition(node->cond(), &dest, true);
4023 }
4024 if (node->break_target()->is_linked()) {
4025 node->break_target()->Bind();
4026 }
4027 break;
4028 }
4029
4030 DecrementLoopNesting();
4031 node->continue_target()->Unuse();
4032 node->break_target()->Unuse();
4033 }
4034
4035
4036 void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
4037 ASSERT(!in_spilled_code());
4038 Comment cmnt(masm_, "[ WhileStatement");
4039 CodeForStatementPosition(node);
4040
4041 // If the condition is always false and has no side effects, we do not
4042 // need to compile anything.
4043 ConditionAnalysis info = AnalyzeCondition(node->cond());
4044 if (info == ALWAYS_FALSE) return;
4045
4046 // Do not duplicate conditions that may have function literal
4047 // subexpressions. This can cause us to compile the function literal
4048 // twice.
4049 bool test_at_bottom = !node->may_have_function_literal();
4050 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
4051 IncrementLoopNesting();
4052 JumpTarget body;
4053 if (test_at_bottom) {
4054 body.set_direction(JumpTarget::BIDIRECTIONAL);
4055 }
4056
4057 // Based on the condition analysis, compile the test as necessary.
4058 switch (info) {
4059 case ALWAYS_TRUE:
4060 // We will not compile the test expression. Label the top of the
4061 // loop with the continue target.
4062 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
4063 node->continue_target()->Bind();
4064 break;
4065 case DONT_KNOW: {
4066 if (test_at_bottom) {
4067 // Continue is the test at the bottom, no need to label the test
4068 // at the top. The body is a backward target.
4069 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
4070 } else {
4071 // Label the test at the top as the continue target. The body
4072 // is a forward-only target.
4073 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
4074 node->continue_target()->Bind();
4075 }
4076 // Compile the test with the body as the true target and preferred
4077 // fall-through and with the break target as the false target.
4078 ControlDestination dest(&body, node->break_target(), true);
4079 LoadCondition(node->cond(), &dest, true);
4080
4081 if (dest.false_was_fall_through()) {
4082 // If we got the break target as fall-through, the test may have
4083 // been unconditionally false (if there are no jumps to the
4084 // body).
4085 if (!body.is_linked()) {
4086 DecrementLoopNesting();
4087 return;
4088 }
4089
4090 // Otherwise, jump around the body on the fall through and then
4091 // bind the body target.
4092 node->break_target()->Unuse();
4093 node->break_target()->Jump();
4094 body.Bind();
4095 }
4096 break;
4097 }
4098 case ALWAYS_FALSE:
4099 UNREACHABLE();
4100 break;
4101 }
4102
4103 CheckStack(); // TODO(1222600): ignore if body contains calls.
4104 Visit(node->body());
4105
4106 // Based on the condition analysis, compile the backward jump as
4107 // necessary.
4108 switch (info) {
4109 case ALWAYS_TRUE:
4110 // The loop body has been labeled with the continue target.
4111 if (has_valid_frame()) {
4112 node->continue_target()->Jump();
4113 }
4114 break;
4115 case DONT_KNOW:
4116 if (test_at_bottom) {
4117 // If we have chosen to recompile the test at the bottom,
4118 // then it is the continue target.
4119 if (node->continue_target()->is_linked()) {
4120 node->continue_target()->Bind();
4121 }
4122 if (has_valid_frame()) {
4123 // The break target is the fall-through (body is a backward
4124 // jump from here and thus an invalid fall-through).
4125 ControlDestination dest(&body, node->break_target(), false);
4126 LoadCondition(node->cond(), &dest, true);
4127 }
4128 } else {
4129 // If we have chosen not to recompile the test at the bottom,
4130 // jump back to the one at the top.
4131 if (has_valid_frame()) {
4132 node->continue_target()->Jump();
4133 }
4134 }
4135 break;
4136 case ALWAYS_FALSE:
4137 UNREACHABLE();
4138 break;
4139 }
4140
4141 // The break target may be already bound (by the condition), or there
4142 // may not be a valid frame. Bind it only if needed.
4143 if (node->break_target()->is_linked()) {
4144 node->break_target()->Bind();
4145 }
4146 DecrementLoopNesting();
4147 }
4148
4149
4150 void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
4151 ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
4152 if (slot->type() == Slot::LOCAL) {
4153 frame_->SetTypeForLocalAt(slot->index(), info);
4154 } else {
4155 frame_->SetTypeForParamAt(slot->index(), info);
4156 }
4157 if (FLAG_debug_code && info.IsSmi()) {
4158 if (slot->type() == Slot::LOCAL) {
4159 frame_->PushLocalAt(slot->index());
4160 } else {
4161 frame_->PushParameterAt(slot->index());
4162 }
4163 Result var = frame_->Pop();
4164 var.ToRegister();
4165 __ AbortIfNotSmi(var.reg());
4166 }
4167 }
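
// Illustrative sketch, not part of this patch: the smi checks used all over
// this file (test against kSmiTagMask, SmiTag/SmiUntag) rely on the ia32
// tagging scheme, where bit 0 of a word is 0 for a small integer and the
// 31-bit payload lives in the upper bits:
#include <stdint.h>

static intptr_t SketchSmiTag(intptr_t value) { return value << 1; }
static intptr_t SketchSmiUntag(intptr_t smi) { return smi >> 1; }
static bool SketchIsSmi(intptr_t word) { return (word & 1) == 0; }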
4168
4169
4170 void CodeGenerator::VisitForStatement(ForStatement* node) {
4171 ASSERT(!in_spilled_code());
4172 Comment cmnt(masm_, "[ ForStatement");
4173 CodeForStatementPosition(node);
4174
4175 // Compile the init expression if present.
4176 if (node->init() != NULL) {
4177 Visit(node->init());
4178 }
4179
4180 // If the condition is always false and has no side effects, we do not
4181 // need to compile anything else.
4182 ConditionAnalysis info = AnalyzeCondition(node->cond());
4183 if (info == ALWAYS_FALSE) return;
4184
4185 // Do not duplicate conditions that may have function literal
4186 // subexpressions. This can cause us to compile the function literal
4187 // twice.
4188 bool test_at_bottom = !node->may_have_function_literal();
4189 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
4190 IncrementLoopNesting();
4191
4192 // Target for backward edge if no test at the bottom, otherwise
4193 // unused.
4194 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
4195
4196 // Target for backward edge if there is a test at the bottom,
4197 // otherwise used as target for test at the top.
4198 JumpTarget body;
4199 if (test_at_bottom) {
4200 body.set_direction(JumpTarget::BIDIRECTIONAL);
4201 }
4202
4203 // Based on the condition analysis, compile the test as necessary.
4204 switch (info) {
4205 case ALWAYS_TRUE:
4206 // We will not compile the test expression. Label the top of the
4207 // loop.
4208 if (node->next() == NULL) {
4209 // Use the continue target if there is no update expression.
4210 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
4211 node->continue_target()->Bind();
4212 } else {
4213 // Otherwise use the backward loop target.
4214 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
4215 loop.Bind();
4216 }
4217 break;
4218 case DONT_KNOW: {
4219 if (test_at_bottom) {
4220 // Continue is either the update expression or the test at the
4221 // bottom, no need to label the test at the top.
4222 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
4223 } else if (node->next() == NULL) {
4224 // We are not recompiling the test at the bottom and there is no
4225 // update expression.
4226 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
4227 node->continue_target()->Bind();
4228 } else {
4229 // We are not recompiling the test at the bottom and there is an
4230 // update expression.
4231 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
4232 loop.Bind();
4233 }
4234
4235 // Compile the test with the body as the true target and preferred
4236 // fall-through and with the break target as the false target.
4237 ControlDestination dest(&body, node->break_target(), true);
4238 LoadCondition(node->cond(), &dest, true);
4239
4240 if (dest.false_was_fall_through()) {
4241 // If we got the break target as fall-through, the test may have
4242 // been unconditionally false (if there are no jumps to the
4243 // body).
4244 if (!body.is_linked()) {
4245 DecrementLoopNesting();
4246 return;
4247 }
4248
4249 // Otherwise, jump around the body on the fall through and then
4250 // bind the body target.
4251 node->break_target()->Unuse();
4252 node->break_target()->Jump();
4253 body.Bind();
4254 }
4255 break;
4256 }
4257 case ALWAYS_FALSE:
4258 UNREACHABLE();
4259 break;
4260 }
4261
4262 CheckStack(); // TODO(1222600): ignore if body contains calls.
4263
4264 // We know that the loop index is a smi if it is not modified in the
4265 // loop body and it is checked against a constant limit in the loop
4266 // condition. In this case, we reset the static type information of the
4267 // loop index to smi before compiling the body, the update expression, and
4268 // the bottom check of the loop condition.
4269 if (node->is_fast_smi_loop()) {
4270 // Set number type of the loop variable to smi.
4271 SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
4272 }
4273
4274 Visit(node->body());
4275
4276 // If there is an update expression, compile it if necessary.
4277 if (node->next() != NULL) {
4278 if (node->continue_target()->is_linked()) {
4279 node->continue_target()->Bind();
4280 }
4281
4282 // Control can reach the update by falling out of the body or by a
4283 // continue.
4284 if (has_valid_frame()) {
4285 // Record the source position of the statement: although this code
4286 // comes after the code for the body, it belongs to the loop
4287 // statement and not to the body.
4288 CodeForStatementPosition(node);
4289 Visit(node->next());
4290 }
4291 }
4292
4293 // Set the type of the loop variable to smi before compiling the test
4294 // expression if we are in a fast smi loop condition.
4295 if (node->is_fast_smi_loop() && has_valid_frame()) {
4296 // Set number type of the loop variable to smi.
4297 SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
4298 }
4299
4300 // Based on the condition analysis, compile the backward jump as
4301 // necessary.
4302 switch (info) {
4303 case ALWAYS_TRUE:
4304 if (has_valid_frame()) {
4305 if (node->next() == NULL) {
4306 node->continue_target()->Jump();
4307 } else {
4308 loop.Jump();
4309 }
4310 }
4311 break;
4312 case DONT_KNOW:
4313 if (test_at_bottom) {
4314 if (node->continue_target()->is_linked()) {
4315 // We can have dangling jumps to the continue target if there
4316 // was no update expression.
4317 node->continue_target()->Bind();
4318 }
4319 // Control can reach the test at the bottom by falling out of
4320 // the body, by a continue in the body, or from the update
4321 // expression.
4322 if (has_valid_frame()) {
4323 // The break target is the fall-through (body is a backward
4324 // jump from here).
4325 ControlDestination dest(&body, node->break_target(), false);
4326 LoadCondition(node->cond(), &dest, true);
4327 }
4328 } else {
4329 // Otherwise, jump back to the test at the top.
4330 if (has_valid_frame()) {
4331 if (node->next() == NULL) {
4332 node->continue_target()->Jump();
4333 } else {
4334 loop.Jump();
4335 }
4336 }
4337 }
4338 break;
4339 case ALWAYS_FALSE:
4340 UNREACHABLE();
4341 break;
4342 }
4343
4344 // The break target may be already bound (by the condition), or there
4345 // may not be a valid frame. Bind it only if needed.
4346 if (node->break_target()->is_linked()) {
4347 node->break_target()->Bind();
4348 }
4349 DecrementLoopNesting();
4350 }
4351
4352
4353 void CodeGenerator::VisitForInStatement(ForInStatement* node) {
4354 ASSERT(!in_spilled_code());
4355 VirtualFrame::SpilledScope spilled_scope;
4356 Comment cmnt(masm_, "[ ForInStatement");
4357 CodeForStatementPosition(node);
4358
4359 JumpTarget primitive;
4360 JumpTarget jsobject;
4361 JumpTarget fixed_array;
4362 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
4363 JumpTarget end_del_check;
4364 JumpTarget exit;
4365
4366 // Get the object to enumerate over (converted to JSObject).
4367 LoadAndSpill(node->enumerable());
4368
4369 // Both SpiderMonkey and KJS ignore null and undefined, in contrast
4370 // to the specification: section 12.6.4 mandates a call to ToObject.
4371 frame_->EmitPop(eax);
4372
4373 // eax: value to be iterated over
4374 __ cmp(eax, FACTORY->undefined_value());
4375 exit.Branch(equal);
4376 __ cmp(eax, FACTORY->null_value());
4377 exit.Branch(equal);
4378
4379 // Stack layout in body:
4380 // [iteration counter (smi)] <- slot 0
4381 // [length of array] <- slot 1
4382 // [FixedArray] <- slot 2
4383 // [Map or 0] <- slot 3
4384 // [Object] <- slot 4
4385
4386 // Check if enumerable is already a JSObject
4387 // eax: value to be iterated over
4388 __ test(eax, Immediate(kSmiTagMask));
4389 primitive.Branch(zero);
4390 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
4391 jsobject.Branch(above_equal);
4392
4393 primitive.Bind();
4394 frame_->EmitPush(eax);
4395 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
4396 // The function call returns the value in eax, which is where we want it below.
4397
4398 jsobject.Bind();
4399 // Get the set of properties (as a FixedArray or Map).
4400 // eax: value to be iterated over
4401 frame_->EmitPush(eax); // Push the object being iterated over.
4402
4403 // Check cache validity in generated code. This is a fast case for
4404 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
4405 // guarantee cache validity, call the runtime system to check cache
4406 // validity or get the property names in a fixed array.
4407 JumpTarget call_runtime;
4408 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
4409 JumpTarget check_prototype;
4410 JumpTarget use_cache;
4411 __ mov(ecx, eax);
4412 loop.Bind();
4413 // Check that there are no elements.
4414 __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
4415 __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
4416 call_runtime.Branch(not_equal);
4417 // Check that instance descriptors are not empty so that we can
4418 // check for an enum cache. Leave the map in ebx for the subsequent
4419 // prototype load.
4420 __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
4421 __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
4422 __ cmp(Operand(edx), Immediate(FACTORY->empty_descriptor_array()));
4423 call_runtime.Branch(equal);
4424 // Check that there is an enum cache in the non-empty instance
4425 // descriptors. This is the case if the next enumeration index
4426 // field does not contain a smi.
4427 __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
4428 __ test(edx, Immediate(kSmiTagMask));
4429 call_runtime.Branch(zero);
4430 // For all objects but the receiver, check that the cache is empty.
4431 __ cmp(ecx, Operand(eax));
4432 check_prototype.Branch(equal);
4433 __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
4434 __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
4435 call_runtime.Branch(not_equal);
4436 check_prototype.Bind();
4437 // Load the prototype from the map and loop if non-null.
4438 __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
4439 __ cmp(Operand(ecx), Immediate(FACTORY->null_value()));
4440 loop.Branch(not_equal);
4441 // The enum cache is valid. Load the map of the object being
4442 // iterated over and use the cache for the iteration.
4443 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
4444 use_cache.Jump();
4445
4446 call_runtime.Bind();
4447 // Call the runtime to get the property names for the object.
4448 frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
4449 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
4450
4451 // If we got a map from the runtime call, we can do a fast
4452 // modification check. Otherwise, we got a fixed array, and we have
4453 // to do a slow check.
4454 // eax: map or fixed array (result from call to
4455 // Runtime::kGetPropertyNamesFast)
4456 __ mov(edx, Operand(eax));
4457 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
4458 __ cmp(ecx, FACTORY->meta_map());
4459 fixed_array.Branch(not_equal);
4460
4461 use_cache.Bind();
4462 // Get enum cache
4463 // eax: map (either the result from a call to
4464 // Runtime::kGetPropertyNamesFast or has been fetched directly from
4465 // the object)
4466 __ mov(ecx, Operand(eax));
4467
4468 __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
4469 // Get the bridge array held in the enumeration index field.
4470 __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
4471 // Get the cache from the bridge array.
4472 __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
4473
4474 frame_->EmitPush(eax); // <- slot 3
4475 frame_->EmitPush(edx); // <- slot 2
4476 __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
4477 frame_->EmitPush(eax); // <- slot 1
4478 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
4479 entry.Jump();
4480
4481 fixed_array.Bind();
4482 // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
4483 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
4484 frame_->EmitPush(eax); // <- slot 2
4485
4486 // Push the length of the array and the initial index onto the stack.
4487 __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
4488 frame_->EmitPush(eax); // <- slot 1
4489 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
4490
4491 // Condition.
4492 entry.Bind();
4493 // Grab the current frame's height for the break and continue
4494 // targets only after all the state is pushed on the frame.
4495 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
4496 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
4497
4498 __ mov(eax, frame_->ElementAt(0)); // load the current count
4499 __ cmp(eax, frame_->ElementAt(1)); // compare to the array length
4500 node->break_target()->Branch(above_equal);
4501
4502 // Get the i'th entry of the array.
4503 __ mov(edx, frame_->ElementAt(2));
4504 __ mov(ebx, FixedArrayElementOperand(edx, eax));
4505
4506 // Get the expected map from the stack or a zero map in the
4507 // permanent slow case.
4508 // eax: current iteration count; ebx: i'th entry of the enum cache.
4509 __ mov(edx, frame_->ElementAt(3));
4510 // Check if the expected map still matches that of the enumerable.
4511 // If not, we have to filter the key.
4512 // eax: current iteration count
4513 // ebx: i'th entry of the enum cache
4514 // edx: expected map value
4515 __ mov(ecx, frame_->ElementAt(4));
4516 __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
4517 __ cmp(ecx, Operand(edx));
4518 end_del_check.Branch(equal);
4519
4520 // Convert the entry to a string (or null if it isn't a property anymore).
4521 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
4522 frame_->EmitPush(ebx); // push entry
4523 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
4524 __ mov(ebx, Operand(eax));
4525
4526 // If the property has been removed while iterating, we just skip it.
4527 __ test(ebx, Operand(ebx));
4528 node->continue_target()->Branch(equal);
4529
4530 end_del_check.Bind();
4531 // Store the entry in the 'each' expression and take another spin in the
4532 // loop. ebx: i'th entry of the enum cache (or the filtered string thereof).
4533 frame_->EmitPush(ebx);
4534 { Reference each(this, node->each());
4535 if (!each.is_illegal()) {
4536 if (each.size() > 0) {
4537 // Loading a reference may leave the frame in an unspilled state.
4538 frame_->SpillAll();
4539 // Get the value (under the reference on the stack) from memory.
4540 frame_->EmitPush(frame_->ElementAt(each.size()));
4541 each.SetValue(NOT_CONST_INIT);
4542 frame_->Drop(2);
4543 } else {
4544 // If the reference was to a slot we rely on the convenient property
4545 // that it doesn't matter whether a value (e.g., ebx pushed above) is
4546 // right on top of or right underneath a zero-sized reference.
4547 each.SetValue(NOT_CONST_INIT);
4548 frame_->Drop();
4549 }
4550 }
4551 }
4552 // Unloading a reference may leave the frame in an unspilled state.
4553 frame_->SpillAll();
4554
4555 // Body.
4556 CheckStack(); // TODO(1222600): ignore if body contains calls.
4557 VisitAndSpill(node->body());
4558
4559 // Next. Reestablish a spilled frame in case we are coming here via
4560 // a continue in the body.
4561 node->continue_target()->Bind();
4562 frame_->SpillAll();
4563 frame_->EmitPop(eax);
4564 __ add(Operand(eax), Immediate(Smi::FromInt(1)));
4565 frame_->EmitPush(eax);
4566 entry.Jump();
4567
4568 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
4569 // any frame.
4570 node->break_target()->Bind();
4571 frame_->Drop(5);
4572
4573 // Exit.
4574 exit.Bind();
4575
4576 node->continue_target()->Unuse();
4577 node->break_target()->Unuse();
4578 }
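
// Illustrative sketch, not part of this patch, of the iteration protocol
// above: the five stack slots hold {index, length, cache, expected map,
// object}, and a key is re-filtered through the FILTER_KEY builtin only
// when the object's map has changed since the cache was captured. All
// names below are hypothetical stand-ins.
#include <string>
#include <vector>

static std::string SketchFilterKey(const std::string& key) {
  return key;  // stand-in: the real builtin returns null for deleted keys
}

static void SketchForIn(const void* current_map, const void* expected_map,
                        const std::vector<std::string>& cache,
                        void (*body)(const std::string&)) {
  for (size_t i = 0; i < cache.size(); i++) {  // slots 0 (index), 1 (length)
    std::string key = cache[i];                // slot 2 (the enum cache)
    if (current_map != expected_map) {         // slot 3 vs. the object's map
      key = SketchFilterKey(key);
      if (key.empty()) continue;               // property removed; skip it
    }
    body(key);
  }
}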
4579
4580
4581 void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
4582 ASSERT(!in_spilled_code());
4583 VirtualFrame::SpilledScope spilled_scope;
4584 Comment cmnt(masm_, "[ TryCatchStatement");
4585 CodeForStatementPosition(node);
4586
4587 JumpTarget try_block;
4588 JumpTarget exit;
4589
4590 try_block.Call();
4591 // --- Catch block ---
4592 frame_->EmitPush(eax);
4593
4594 // Store the caught exception in the catch variable.
4595 Variable* catch_var = node->catch_var()->var();
4596 ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
4597 StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
4598
4599 // Remove the exception from the stack.
4600 frame_->Drop();
4601
4602 VisitStatementsAndSpill(node->catch_block()->statements());
4603 if (has_valid_frame()) {
4604 exit.Jump();
4605 }
4606
4607
4608 // --- Try block ---
4609 try_block.Bind();
4610
4611 frame_->PushTryHandler(TRY_CATCH_HANDLER);
4612 int handler_height = frame_->height();
4613
4614 // Shadow the jump targets for all escapes from the try block, including
4615 // returns. During shadowing, the original target is hidden as the
4616 // ShadowTarget and operations on the original actually affect the
4617 // shadowing target.
4618 //
4619 // We should probably try to unify the escaping targets and the return
4620 // target.
4621 int nof_escapes = node->escaping_targets()->length();
4622 List<ShadowTarget*> shadows(1 + nof_escapes);
4623
4624 // Add the shadow target for the function return.
4625 static const int kReturnShadowIndex = 0;
4626 shadows.Add(new ShadowTarget(&function_return_));
4627 bool function_return_was_shadowed = function_return_is_shadowed_;
4628 function_return_is_shadowed_ = true;
4629 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
4630
4631 // Add the remaining shadow targets.
4632 for (int i = 0; i < nof_escapes; i++) {
4633 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
4634 }
4635
4636 // Generate code for the statements in the try block.
4637 VisitStatementsAndSpill(node->try_block()->statements());
4638
4639 // Stop the introduced shadowing and count the number of required unlinks.
4640 // After shadowing stops, the original targets are unshadowed and the
4641 // ShadowTargets represent the formerly shadowing targets.
4642 bool has_unlinks = false;
4643 for (int i = 0; i < shadows.length(); i++) {
4644 shadows[i]->StopShadowing();
4645 has_unlinks = has_unlinks || shadows[i]->is_linked();
4646 }
4647 function_return_is_shadowed_ = function_return_was_shadowed;
4648
4649 // Get an external reference to the handler address.
4650 ExternalReference handler_address(Isolate::k_handler_address,
4651 masm()->isolate());
4652
4653 // Make sure that there's nothing left on the stack above the
4654 // handler structure.
4655 if (FLAG_debug_code) {
4656 __ mov(eax, Operand::StaticVariable(handler_address));
4657 __ cmp(esp, Operand(eax));
4658 __ Assert(equal, "stack pointer should point to top handler");
4659 }
4660
4661 // If we can fall off the end of the try block, unlink from try chain.
4662 if (has_valid_frame()) {
4663 // The next handler address is on top of the frame. Unlink from
4664 // the handler list and drop the rest of this handler from the
4665 // frame.
4666 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4667 frame_->EmitPop(Operand::StaticVariable(handler_address));
4668 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4669 if (has_unlinks) {
4670 exit.Jump();
4671 }
4672 }
4673
4674 // Generate unlink code for the (formerly) shadowing targets that
4675 // have been jumped to. Deallocate each shadow target.
4676 Result return_value;
4677 for (int i = 0; i < shadows.length(); i++) {
4678 if (shadows[i]->is_linked()) {
4679 // Unlink from try chain; be careful not to destroy the TOS if
4680 // there is one.
4681 if (i == kReturnShadowIndex) {
4682 shadows[i]->Bind(&return_value);
4683 return_value.ToRegister(eax);
4684 } else {
4685 shadows[i]->Bind();
4686 }
4687 // Because we can be jumping here (to spilled code) from
4688 // unspilled code, we need to reestablish a spilled frame at
4689 // this block.
4690 frame_->SpillAll();
4691
4692 // Reload sp from the top handler, because some statements that we
4693 // break from (e.g., for...in) may have left stuff on the stack.
4694 __ mov(esp, Operand::StaticVariable(handler_address));
4695 frame_->Forget(frame_->height() - handler_height);
4696
4697 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4698 frame_->EmitPop(Operand::StaticVariable(handler_address));
4699 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4700
4701 if (i == kReturnShadowIndex) {
4702 if (!function_return_is_shadowed_) frame_->PrepareForReturn();
4703 shadows[i]->other_target()->Jump(&return_value);
4704 } else {
4705 shadows[i]->other_target()->Jump();
4706 }
4707 }
4708 }
4709
4710 exit.Bind();
4711 }
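
// Illustrative sketch, not part of this patch: handler_address points at
// the head of a singly linked chain of stack handlers, and the
// STATIC_ASSERTs above (kNextOffset == 0) guarantee that the link is the
// first word of each handler, so unlinking is a single pop into the head.
#include <stddef.h>

struct SketchStackHandler {
  SketchStackHandler* next;  // must stay at offset 0
  // ... the saved frame state would follow here.
};

static SketchStackHandler* sketch_handler_head = NULL;

static void SketchPushHandler(SketchStackHandler* handler) {
  handler->next = sketch_handler_head;
  sketch_handler_head = handler;
}

static void SketchUnlinkHandler() {
  sketch_handler_head = sketch_handler_head->next;  // the EmitPop above
}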
4712
4713
4714 void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
4715 ASSERT(!in_spilled_code());
4716 VirtualFrame::SpilledScope spilled_scope;
4717 Comment cmnt(masm_, "[ TryFinallyStatement");
4718 CodeForStatementPosition(node);
4719
4720 // State: used to keep track of the reason for entering the finally
4721 // block. Should probably be extended to hold information for
4722 // break/continue from within the try block.
4723 enum { FALLING, THROWING, JUMPING };
4724
4725 JumpTarget try_block;
4726 JumpTarget finally_block;
4727
4728 try_block.Call();
4729
4730 frame_->EmitPush(eax);
4731 // In case of thrown exceptions, this is where we continue.
4732 __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
4733 finally_block.Jump();
4734
4735 // --- Try block ---
4736 try_block.Bind();
4737
4738 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
4739 int handler_height = frame_->height();
4740
4741 // Shadow the jump targets for all escapes from the try block, including
4742 // returns. During shadowing, the original target is hidden as the
4743 // ShadowTarget and operations on the original actually affect the
4744 // shadowing target.
4745 //
4746 // We should probably try to unify the escaping targets and the return
4747 // target.
4748 int nof_escapes = node->escaping_targets()->length();
4749 List<ShadowTarget*> shadows(1 + nof_escapes);
4750
4751 // Add the shadow target for the function return.
4752 static const int kReturnShadowIndex = 0;
4753 shadows.Add(new ShadowTarget(&function_return_));
4754 bool function_return_was_shadowed = function_return_is_shadowed_;
4755 function_return_is_shadowed_ = true;
4756 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
4757
4758 // Add the remaining shadow targets.
4759 for (int i = 0; i < nof_escapes; i++) {
4760 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
4761 }
4762
4763 // Generate code for the statements in the try block.
4764 VisitStatementsAndSpill(node->try_block()->statements());
4765
4766 // Stop the introduced shadowing and count the number of required unlinks.
4767 // After shadowing stops, the original targets are unshadowed and the
4768 // ShadowTargets represent the formerly shadowing targets.
4769 int nof_unlinks = 0;
4770 for (int i = 0; i < shadows.length(); i++) {
4771 shadows[i]->StopShadowing();
4772 if (shadows[i]->is_linked()) nof_unlinks++;
4773 }
4774 function_return_is_shadowed_ = function_return_was_shadowed;
4775
4776 // Get an external reference to the handler address.
4777 ExternalReference handler_address(Isolate::k_handler_address,
4778 masm()->isolate());
4779
4780 // If we can fall off the end of the try block, unlink from the try
4781 // chain and set the state on the frame to FALLING.
4782 if (has_valid_frame()) {
4783 // The next handler address is on top of the frame.
4784 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4785 frame_->EmitPop(Operand::StaticVariable(handler_address));
4786 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4787
4788 // Fake a top of stack value (unneeded when FALLING) and set the
4789 // state in ecx, then jump around the unlink blocks if any.
4790 frame_->EmitPush(Immediate(FACTORY->undefined_value()));
4791 __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
4792 if (nof_unlinks > 0) {
4793 finally_block.Jump();
4794 }
4795 }
4796
4797 // Generate code to unlink and set the state for the (formerly)
4798 // shadowing targets that have been jumped to.
4799 for (int i = 0; i < shadows.length(); i++) {
4800 if (shadows[i]->is_linked()) {
4801 // If we have come from the shadowed return, the return value is
4802 // on the virtual frame. We must preserve it until it is
4803 // pushed.
4804 if (i == kReturnShadowIndex) {
4805 Result return_value;
4806 shadows[i]->Bind(&return_value);
4807 return_value.ToRegister(eax);
4808 } else {
4809 shadows[i]->Bind();
4810 }
4811 // Because we can be jumping here (to spilled code) from
4812 // unspilled code, we need to reestablish a spilled frame at
4813 // this block.
4814 frame_->SpillAll();
4815
4816 // Reload sp from the top handler, because some statements that
4817 // we break from (e.g., for...in) may have left stuff on the
4818 // stack.
4819 __ mov(esp, Operand::StaticVariable(handler_address));
4820 frame_->Forget(frame_->height() - handler_height);
4821
4822 // Unlink this handler and drop it from the frame.
4823 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4824 frame_->EmitPop(Operand::StaticVariable(handler_address));
4825 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4826
4827 if (i == kReturnShadowIndex) {
4828 // If this target shadowed the function return, materialize
4829 // the return value on the stack.
4830 frame_->EmitPush(eax);
4831 } else {
4832 // Fake TOS for targets that shadowed breaks and continues.
4833 frame_->EmitPush(Immediate(FACTORY->undefined_value()));
4834 }
4835 __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
4836 if (--nof_unlinks > 0) {
4837 // If this is not the last unlink block, jump around the next.
4838 finally_block.Jump();
4839 }
4840 }
4841 }
4842
4843 // --- Finally block ---
4844 finally_block.Bind();
4845
4846 // Push the state on the stack.
4847 frame_->EmitPush(ecx);
4848
4849 // We keep two elements on the stack - the (possibly faked) result
4850 // and the state - while evaluating the finally block.
4851 //
4852 // Generate code for the statements in the finally block.
4853 VisitStatementsAndSpill(node->finally_block()->statements());
4854
4855 if (has_valid_frame()) {
4856 // Restore state and return value or faked TOS.
4857 frame_->EmitPop(ecx);
4858 frame_->EmitPop(eax);
4859 }
4860
4861 // Generate code to jump to the right destination for all used
4862 // formerly shadowing targets. Deallocate each shadow target.
4863 for (int i = 0; i < shadows.length(); i++) {
4864 if (has_valid_frame() && shadows[i]->is_bound()) {
4865 BreakTarget* original = shadows[i]->other_target();
4866 __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
4867 if (i == kReturnShadowIndex) {
4868 // The return value is (already) in eax.
4869 Result return_value = allocator_->Allocate(eax);
4870 ASSERT(return_value.is_valid());
4871 if (function_return_is_shadowed_) {
4872 original->Branch(equal, &return_value);
4873 } else {
4874 // Branch around the preparation for return, which may emit
4875 // code.
4876 JumpTarget skip;
4877 skip.Branch(not_equal);
4878 frame_->PrepareForReturn();
4879 original->Jump(&return_value);
4880 skip.Bind();
4881 }
4882 } else {
4883 original->Branch(equal);
4884 }
4885 }
4886 }
4887
4888 if (has_valid_frame()) {
4889 // Check if we need to rethrow the exception.
4890 JumpTarget exit;
4891 __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
4892 exit.Branch(not_equal);
4893
4894 // Rethrow exception.
4895 frame_->EmitPush(eax); // Undo the pop from above.
4896 frame_->CallRuntime(Runtime::kReThrow, 1);
4897
4898 // Done.
4899 exit.Bind();
4900 }
4901 }
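// For orientation, a sketch of the handler unlink performed above
// (illustrative pseudo-code; the authoritative layout is defined by
// StackHandlerConstants, with the 'next' link in the first word):
//
//   *isolate_handler_address = pop();                       // relink chain
//   drop(StackHandlerConstants::kSize / kPointerSize - 1);  // rest of record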
4902
4903
4904 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
4905 ASSERT(!in_spilled_code());
4906 Comment cmnt(masm_, "[ DebuggerStatement");
4907 CodeForStatementPosition(node);
4908 #ifdef ENABLE_DEBUGGER_SUPPORT
4909 // Spill everything, even constants, to the frame.
4910 frame_->SpillAll();
4911
4912 frame_->DebugBreak();
4913 // Ignore the return value.
4914 #endif
4915 }
4916
4917
4918 Result CodeGenerator::InstantiateFunction(
4919 Handle<SharedFunctionInfo> function_info,
4920 bool pretenure) {
4921 // The inevitable call will sync frame elements to memory anyway, so
4922 // we do it eagerly to allow us to push the arguments directly into
4923 // place.
4924 frame()->SyncRange(0, frame()->element_count() - 1);
4925
4926 // Use the fast case closure allocation code that allocates in new
4927 // space for nested functions that don't need literals cloning.
4928 if (!pretenure &&
4929 scope()->is_function_scope() &&
4930 function_info->num_literals() == 0) {
4931 FastNewClosureStub stub(
4932 function_info->strict_mode() ? kStrictMode : kNonStrictMode);
4933 frame()->EmitPush(Immediate(function_info));
4934 return frame()->CallStub(&stub, 1);
4935 } else {
4936 // Call the runtime to instantiate the function based on the
4937 // shared function info.
4938 frame()->EmitPush(esi);
4939 frame()->EmitPush(Immediate(function_info));
4940 frame()->EmitPush(Immediate(pretenure
4941 ? FACTORY->true_value()
4942 : FACTORY->false_value()));
4943 return frame()->CallRuntime(Runtime::kNewClosure, 3);
4944 }
4945 }
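// For illustration, a closure shape that takes the fast-case stub path
// above, assuming a function-scope nesting and no literals to clone:
//
//   function outer() {
//     return function inner(x) { return x + 1; };  // FastNewClosureStub
//   }
//
// A pretenured closure, one with literals, or one not nested inside a
// function falls back to the Runtime::kNewClosure call instead.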
4946
4947
4948 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
4949 Comment cmnt(masm_, "[ FunctionLiteral");
4950 ASSERT(!in_safe_int32_mode());
4951 // Build the function info and instantiate it.
4952 Handle<SharedFunctionInfo> function_info =
4953 Compiler::BuildFunctionInfo(node, script());
4954 // Check for stack-overflow exception.
4955 if (function_info.is_null()) {
4956 SetStackOverflow();
4957 return;
4958 }
4959 Result result = InstantiateFunction(function_info, node->pretenure());
4960 frame()->Push(&result);
4961 }
4962
4963
4964 void CodeGenerator::VisitSharedFunctionInfoLiteral(
4965 SharedFunctionInfoLiteral* node) {
4966 ASSERT(!in_safe_int32_mode());
4967 Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
4968 Result result = InstantiateFunction(node->shared_function_info(), false);
4969 frame()->Push(&result);
4970 }
4971
4972
4973 void CodeGenerator::VisitConditional(Conditional* node) {
4974 Comment cmnt(masm_, "[ Conditional");
4975 ASSERT(!in_safe_int32_mode());
4976 JumpTarget then;
4977 JumpTarget else_;
4978 JumpTarget exit;
4979 ControlDestination dest(&then, &else_, true);
4980 LoadCondition(node->condition(), &dest, true);
4981
4982 if (dest.false_was_fall_through()) {
4983 // The else target was bound, so we compile the else part first.
4984 Load(node->else_expression());
4985
4986 if (then.is_linked()) {
4987 exit.Jump();
4988 then.Bind();
4989 Load(node->then_expression());
4990 }
4991 } else {
4992 // The then target was bound, so we compile the then part first.
4993 Load(node->then_expression());
4994
4995 if (else_.is_linked()) {
4996 exit.Jump();
4997 else_.Bind();
4998 Load(node->else_expression());
4999 }
5000 }
5001
5002 exit.Bind();
5003 }
5004
5005
5006 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
5007 if (slot->type() == Slot::LOOKUP) {
5008 ASSERT(slot->var()->is_dynamic());
5009 JumpTarget slow;
5010 JumpTarget done;
5011 Result value;
5012
5013 // Generate fast case for loading from slots that correspond to
5014 // local/global variables or arguments unless they are shadowed by
5015 // eval-introduced bindings.
5016 EmitDynamicLoadFromSlotFastCase(slot,
5017 typeof_state,
5018 &value,
5019 &slow,
5020 &done);
5021
5022 slow.Bind();
5023 // A runtime call is inevitable. We eagerly sync frame elements
5024 // to memory so that we can push the arguments directly into place
5025 // on top of the frame.
5026 frame()->SyncRange(0, frame()->element_count() - 1);
5027 frame()->EmitPush(esi);
5028 frame()->EmitPush(Immediate(slot->var()->name()));
5029 if (typeof_state == INSIDE_TYPEOF) {
5030 value =
5031 frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
5032 } else {
5033 value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
5034 }
5035
5036 done.Bind(&value);
5037 frame_->Push(&value);
5038
5039 } else if (slot->var()->mode() == Variable::CONST) {
5040 // Const slots may contain 'the hole' value (the constant hasn't been
5041 // initialized yet) which needs to be converted into the 'undefined'
5042 // value.
5043 //
5044 // We currently spill the virtual frame because constants use the
5045 // potentially unsafe direct-frame access of SlotOperand.
5046 VirtualFrame::SpilledScope spilled_scope;
5047 Comment cmnt(masm_, "[ Load const");
5048 Label exit;
5049 __ mov(ecx, SlotOperand(slot, ecx));
5050 __ cmp(ecx, FACTORY->the_hole_value());
5051 __ j(not_equal, &exit);
5052 __ mov(ecx, FACTORY->undefined_value());
5053 __ bind(&exit);
5054 frame()->EmitPush(ecx);
5055
5056 } else if (slot->type() == Slot::PARAMETER) {
5057 frame()->PushParameterAt(slot->index());
5058
5059 } else if (slot->type() == Slot::LOCAL) {
5060 frame()->PushLocalAt(slot->index());
5061
5062 } else {
5063 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
5064 // here.
5065 //
5066 // The use of SlotOperand below is safe for an unspilled frame
5067 // because it will always be a context slot.
5068 ASSERT(slot->type() == Slot::CONTEXT);
5069 Result temp = allocator()->Allocate();
5070 ASSERT(temp.is_valid());
5071 __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
5072 frame()->Push(&temp);
5073 }
5074 }
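// The hole check above gives pre-initialization const reads their
// 'undefined' value; e.g. (illustrative):
//
//   f();                        // runs before the initialization below
//   const c = 1;
//   function f() { return c; }  // sees undefined, never the hole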
5075
5076
5077 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
5078 TypeofState state) {
5079 LoadFromSlot(slot, state);
5080
5081 // Bail out quickly if we're not using lazy arguments allocation.
5082 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
5083
5084 // ... or if the slot isn't a non-parameter arguments slot.
5085 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
5086
5087 // If the loaded value is a constant, we know whether the arguments
5088 // object has been lazily allocated yet.
5089 Result result = frame()->Pop();
5090 if (result.is_constant()) {
5091 if (result.handle()->IsArgumentsMarker()) {
5092 result = StoreArgumentsObject(false);
5093 }
5094 frame()->Push(&result);
5095 return;
5096 }
5097 ASSERT(result.is_register());
5098 // The loaded value is in a register. If it is the sentinel that
5099 // indicates that we haven't loaded the arguments object yet, we
5100 // need to do it now.
5101 JumpTarget exit;
5102 __ cmp(Operand(result.reg()), Immediate(FACTORY->arguments_marker()));
5103 frame()->Push(&result);
5104 exit.Branch(not_equal);
5105
5106 result = StoreArgumentsObject(false);
5107 frame()->SetElementAt(0, &result);
5108 result.Unuse();
5109 exit.Bind();
5110 return;
5111 }
5112
5113
5114 Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
5115 Slot* slot,
5116 TypeofState typeof_state,
5117 JumpTarget* slow) {
5118 ASSERT(!in_safe_int32_mode());
5119 // Check that no extension objects have been created by calls to
5120 // eval from the current scope to the global scope.
5121 Register context = esi;
5122 Result tmp = allocator_->Allocate();
5123 ASSERT(tmp.is_valid()); // All non-reserved registers were available.
5124
5125 Scope* s = scope();
5126 while (s != NULL) {
5127 if (s->num_heap_slots() > 0) {
5128 if (s->calls_eval()) {
5129 // Check that extension is NULL.
5130 __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
5131 Immediate(0));
5132 slow->Branch(not_equal, not_taken);
5133 }
5134 // Load next context in chain.
5135 __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
5136 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
5137 context = tmp.reg();
5138 }
5139 // If no outer scope calls eval, we do not need to check more
5140 // context extensions. If we have reached an eval scope, we check
5141 // all extensions from this point.
5142 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
5143 s = s->outer_scope();
5144 }
5145
5146 if (s != NULL && s->is_eval_scope()) {
5147 // Walk up the context chain. There is no frame effect, so it is
5148 // safe to use raw labels here.
5149 Label next, fast;
5150 if (!context.is(tmp.reg())) {
5151 __ mov(tmp.reg(), context);
5152 }
5153 __ bind(&next);
5154 // Terminate at global context.
5155 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
5156 Immediate(FACTORY->global_context_map()));
5157 __ j(equal, &fast);
5158 // Check that extension is NULL.
5159 __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
5160 slow->Branch(not_equal, not_taken);
5161 // Load next context in chain.
5162 __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
5163 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
5164 __ jmp(&next);
5165 __ bind(&fast);
5166 }
5167 tmp.Unuse();
5168
5169 // All extension objects were empty and it is safe to use a global
5170 // load IC call.
5171 // The register allocator prefers eax if it is free, so the code generator
5172 // will load the global object directly into eax, which is where the LoadIC
5173 // expects it.
5174 frame_->Spill(eax);
5175 LoadGlobal();
5176 frame_->Push(slot->var()->name());
5177 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
5178 ? RelocInfo::CODE_TARGET
5179 : RelocInfo::CODE_TARGET_CONTEXT;
5180 Result answer = frame_->CallLoadIC(mode);
5181 // A test eax instruction following the call signals that the inobject
5182 // property case was inlined. Ensure that there is not a test eax
5183 // instruction here.
5184 __ nop();
5185 return answer;
5186 }
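// A sketch of why the extension checks are needed (illustrative):
//
//   function f() {
//     eval("var x = 0");  // may add 'x' to a context extension object
//     return x;           // must not be short-circuited to the global 'x'
//   }
//
// Only when every extension on the way to the global context is NULL is it
// safe to take the global load IC shortcut.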
5187
5188
5189 void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
5190 TypeofState typeof_state,
5191 Result* result,
5192 JumpTarget* slow,
5193 JumpTarget* done) {
5194 // Generate fast-case code for variables that might be shadowed by
5195 // eval-introduced variables. Eval is often called without
5196 // introducing any variables. In those cases, we do not want to
5197 // perform a runtime call for every variable in the scope
5198 // containing the eval.
5199 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
5200 *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
5201 done->Jump(result);
5202
5203 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
5204 Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
5205 Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
5206 if (potential_slot != NULL) {
5207 // Generate fast case for locals that rewrite to slots.
5208 // Allocate a fresh register to use as a temp in
5209 // ContextSlotOperandCheckExtensions and to hold the result
5210 // value.
5211 *result = allocator()->Allocate();
5212 ASSERT(result->is_valid());
5213 __ mov(result->reg(),
5214 ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
5215 if (potential_slot->var()->mode() == Variable::CONST) {
5216 __ cmp(result->reg(), FACTORY->the_hole_value());
5217 done->Branch(not_equal, result);
5218 __ mov(result->reg(), FACTORY->undefined_value());
5219 }
5220 done->Jump(result);
5221 } else if (rewrite != NULL) {
5222 // Generate fast case for calls of an argument function.
5223 Property* property = rewrite->AsProperty();
5224 if (property != NULL) {
5225 VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
5226 Literal* key_literal = property->key()->AsLiteral();
5227 if (obj_proxy != NULL &&
5228 key_literal != NULL &&
5229 obj_proxy->IsArguments() &&
5230 key_literal->handle()->IsSmi()) {
5231 // Load the arguments object if there are no eval-introduced
5232 // variables. Then load the argument from the arguments
5233 // object using a keyed load.
5234 Result arguments = allocator()->Allocate();
5235 ASSERT(arguments.is_valid());
5236 __ mov(arguments.reg(),
5237 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
5238 arguments,
5239 slow));
5240 frame_->Push(&arguments);
5241 frame_->Push(key_literal->handle());
5242 *result = EmitKeyedLoad();
5243 done->Jump(result);
5244 }
5245 }
5246 }
5247 }
5248 }
5249
5250
5251 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
5252 if (slot->type() == Slot::LOOKUP) {
5253 ASSERT(slot->var()->is_dynamic());
5254
5255 // For now, just do a runtime call. Since the call is inevitable,
5256 // we eagerly sync the virtual frame so we can directly push the
5257 // arguments into place.
5258 frame_->SyncRange(0, frame_->element_count() - 1);
5259
5260 frame_->EmitPush(esi);
5261 frame_->EmitPush(Immediate(slot->var()->name()));
5262
5263 Result value;
5264 if (init_state == CONST_INIT) {
5265 // Same as the case for a normal store, but it ignores the attribute
5266 // (e.g. READ_ONLY) of the context slot so that we can initialize const
5267 // properties (introduced via eval("const foo = (some expr);")). Also, it
5268 // uses the current function context instead of the top context.
5269 //
5270 // Note that we must declare foo on entry to eval(), via a
5271 // context slot declaration, but we cannot initialize it at the same
5272 // time, because the const declaration may be at the end of the eval
5273 // code (sigh...) and the const variable may have been used before
5274 // (where its value is 'undefined'). Thus, we can only do the
5275 // initialization when we actually encounter the expression and when
5276 // the expression operands are defined and valid, which is why we
5277 // split this into two operations: declaration of the context slot followed
5278 // by initialization.
5279 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
5280 } else {
5281 frame_->Push(Smi::FromInt(strict_mode_flag()));
5282 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
5283 }
5284 // Storing a variable must keep the (new) value on the expression
5285 // stack. This is necessary for compiling chained assignment
5286 // expressions.
5287 frame_->Push(&value);
5288
5289 } else {
5290 ASSERT(!slot->var()->is_dynamic());
5291
5292 JumpTarget exit;
5293 if (init_state == CONST_INIT) {
5294 ASSERT(slot->var()->mode() == Variable::CONST);
5295 // Only the first const initialization must be executed (the slot
5296 // still contains 'the hole' value). When the assignment is executed,
5297 // the code is identical to a normal store (see below).
5298 //
5299 // We spill the frame in the code below because the direct-frame
5300 // access of SlotOperand is potentially unsafe with an unspilled
5301 // frame.
5302 VirtualFrame::SpilledScope spilled_scope;
5303 Comment cmnt(masm_, "[ Init const");
5304 __ mov(ecx, SlotOperand(slot, ecx));
5305 __ cmp(ecx, FACTORY->the_hole_value());
5306 exit.Branch(not_equal);
5307 }
5308
5309 // We must execute the store. Storing a variable must keep the (new)
5310 // value on the stack. This is necessary for compiling assignment
5311 // expressions.
5312 //
5313 // Note: We will reach here even with slot->var()->mode() ==
5314 // Variable::CONST because const declarations initialize consts to
5315 // 'the hole' value and, by doing so, end up calling this code.
5316 if (slot->type() == Slot::PARAMETER) {
5317 frame_->StoreToParameterAt(slot->index());
5318 } else if (slot->type() == Slot::LOCAL) {
5319 frame_->StoreToLocalAt(slot->index());
5320 } else {
5321 // The other slot types (LOOKUP and GLOBAL) cannot reach here.
5322 //
5323 // The use of SlotOperand below is safe for an unspilled frame
5324 // because the slot is a context slot.
5325 ASSERT(slot->type() == Slot::CONTEXT);
5326 frame_->Dup();
5327 Result value = frame_->Pop();
5328 value.ToRegister();
5329 Result start = allocator_->Allocate();
5330 ASSERT(start.is_valid());
5331 __ mov(SlotOperand(slot, start.reg()), value.reg());
5332 // RecordWrite may destroy the value registers.
5333 //
5334 // TODO(204): Avoid actually spilling when the value is not
5335 // needed (probably the common case).
5336 frame_->Spill(value.reg());
5337 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
5338 Result temp = allocator_->Allocate();
5339 ASSERT(temp.is_valid());
5340 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
5341 // The results start, value, and temp are released by going out of
5342 // scope.
5343 }
5344
5345 exit.Bind();
5346 }
5347 }
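// Keeping the stored value on the frame is what makes chained assignments
// come out right; e.g. (illustrative):
//
//   a = b = c;  // the inner store leaves 'c' on the stack for the outer one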
5348
5349
5350 void CodeGenerator::VisitSlot(Slot* slot) {
5351 Comment cmnt(masm_, "[ Slot");
5352 if (in_safe_int32_mode()) {
5353 if ((slot->type() == Slot::LOCAL && !slot->is_arguments())) {
5354 frame()->UntaggedPushLocalAt(slot->index());
5355 } else if (slot->type() == Slot::PARAMETER) {
5356 frame()->UntaggedPushParameterAt(slot->index());
5357 } else {
5358 UNREACHABLE();
5359 }
5360 } else {
5361 LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
5362 }
5363 }
5364
5365
5366 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
5367 Comment cmnt(masm_, "[ VariableProxy");
5368 Variable* var = node->var();
5369 Expression* expr = var->rewrite();
5370 if (expr != NULL) {
5371 Visit(expr);
5372 } else {
5373 ASSERT(var->is_global());
5374 ASSERT(!in_safe_int32_mode());
5375 Reference ref(this, node);
5376 ref.GetValue();
5377 }
5378 }
5379
5380
5381 void CodeGenerator::VisitLiteral(Literal* node) {
5382 Comment cmnt(masm_, "[ Literal");
5383 if (frame_->ConstantPoolOverflowed()) {
5384 Result temp = allocator_->Allocate();
5385 ASSERT(temp.is_valid());
5386 if (in_safe_int32_mode()) {
5387 temp.set_untagged_int32(true);
5388 }
5389 __ Set(temp.reg(), Immediate(node->handle()));
5390 frame_->Push(&temp);
5391 } else {
5392 if (in_safe_int32_mode()) {
5393 frame_->PushUntaggedElement(node->handle());
5394 } else {
5395 frame_->Push(node->handle());
5396 }
5397 }
5398 }
5399
5400
5401 void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
5402 ASSERT(value->IsSmi());
5403 int bits = reinterpret_cast<int>(*value);
5404 __ push(Immediate(bits ^ jit_cookie_));
5405 __ xor_(Operand(esp, 0), Immediate(jit_cookie_));
5406 }
5407
5408
5409 void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
5410 ASSERT(value->IsSmi());
5411 int bits = reinterpret_cast<int>(*value);
5412 __ mov(Operand(ebp, offset), Immediate(bits ^ jit_cookie_));
5413 __ xor_(Operand(ebp, offset), Immediate(jit_cookie_));
5414 }
5415
5416
5417 void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
5418 ASSERT(target.is_valid());
5419 ASSERT(value->IsSmi());
5420 int bits = reinterpret_cast<int>(*value);
5421 __ Set(target, Immediate(bits ^ jit_cookie_));
5422 __ xor_(target, jit_cookie_);
5423 }
5424
5425
5426 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
5427 if (!value->IsSmi()) return false;
5428 int int_value = Smi::cast(*value)->value();
5429 return !is_intn(int_value, kMaxSmiInlinedBits);
5430 }
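// A minimal sketch of the jit-cookie masking used above (cookie value is
// illustrative). Because XOR is self-inverse, the smi's bit pattern never
// appears verbatim in the instruction stream:
//
//   int cookie = 0x2BADCAFE;       // stands in for jit_cookie_
//   int masked = bits ^ cookie;    // immediate actually emitted
//   int value  = masked ^ cookie;  // == bits, recovered at run time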
5431
5432
5433 // Materialize the regexp literal 'node' in the literals array
5434 // 'literals' of the function. Leave the regexp boilerplate in
5435 // 'boilerplate'.
5436 class DeferredRegExpLiteral: public DeferredCode {
5437 public:
5438 DeferredRegExpLiteral(Register boilerplate,
5439 Register literals,
5440 RegExpLiteral* node)
5441 : boilerplate_(boilerplate), literals_(literals), node_(node) {
5442 set_comment("[ DeferredRegExpLiteral");
5443 }
5444
5445 void Generate();
5446
5447 private:
5448 Register boilerplate_;
5449 Register literals_;
5450 RegExpLiteral* node_;
5451 };
5452
5453
5454 void DeferredRegExpLiteral::Generate() {
5455 // Since the entry is undefined, we call the runtime system to
5456 // compute the literal.
5457 // Literal array (0).
5458 __ push(literals_);
5459 // Literal index (1).
5460 __ push(Immediate(Smi::FromInt(node_->literal_index())));
5461 // RegExp pattern (2).
5462 __ push(Immediate(node_->pattern()));
5463 // RegExp flags (3).
5464 __ push(Immediate(node_->flags()));
5465 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
5466 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
5467 }
5468
5469
5470 class DeferredAllocateInNewSpace: public DeferredCode {
5471 public:
5472 DeferredAllocateInNewSpace(int size,
5473 Register target,
5474 int registers_to_save = 0)
5475 : size_(size), target_(target), registers_to_save_(registers_to_save) {
5476 ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
5477 ASSERT_EQ(0, registers_to_save & target.bit());
5478 set_comment("[ DeferredAllocateInNewSpace");
5479 }
5480 void Generate();
5481
5482 private:
5483 int size_;
5484 Register target_;
5485 int registers_to_save_;
5486 };
5487
5488
5489 void DeferredAllocateInNewSpace::Generate() {
5490 for (int i = 0; i < kNumRegs; i++) {
5491 if (registers_to_save_ & (1 << i)) {
5492 Register save_register = { i };
5493 __ push(save_register);
5494 }
5495 }
5496 __ push(Immediate(Smi::FromInt(size_)));
5497 __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
5498 if (!target_.is(eax)) {
5499 __ mov(target_, eax);
5500 }
5501 for (int i = kNumRegs - 1; i >= 0; i--) {
5502 if (registers_to_save_ & (1 << i)) {
5503 Register save_register = { i };
5504 __ pop(save_register);
5505 }
5506 }
5507 }
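// Note the mirrored iteration above: registers are pushed in ascending
// index order and popped in descending order, so the pops unwind the
// pushes in LIFO order and each register receives its own saved value.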
5508
5509
5510 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
5511 ASSERT(!in_safe_int32_mode());
5512 Comment cmnt(masm_, "[ RegExp Literal");
5513
5514 // Retrieve the literals array and check the allocated entry. Begin
5515 // with a writable copy of the function of this activation in a
5516 // register.
5517 frame_->PushFunction();
5518 Result literals = frame_->Pop();
5519 literals.ToRegister();
5520 frame_->Spill(literals.reg());
5521
5522 // Load the literals array of the function.
5523 __ mov(literals.reg(),
5524 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
5525
5526 // Load the literal at the index saved in the AST node.
5527 Result boilerplate = allocator_->Allocate();
5528 ASSERT(boilerplate.is_valid());
5529 int literal_offset =
5530 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
5531 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
5532
5533 // Check whether we need to materialize the RegExp object. If so,
5534 // jump to the deferred code passing the literals array.
5535 DeferredRegExpLiteral* deferred =
5536 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
5537 __ cmp(boilerplate.reg(), FACTORY->undefined_value());
5538 deferred->Branch(equal);
5539 deferred->BindExit();
5540
5541 // The boilerplate register now contains the RegExp object.
5542
5543 Result tmp = allocator()->Allocate();
5544 ASSERT(tmp.is_valid());
5545
5546 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5547
5548 DeferredAllocateInNewSpace* allocate_fallback =
5549 new DeferredAllocateInNewSpace(size, literals.reg());
5550 frame_->Push(&boilerplate);
5551 frame_->SpillTop();
5552 __ AllocateInNewSpace(size,
5553 literals.reg(),
5554 tmp.reg(),
5555 no_reg,
5556 allocate_fallback->entry_label(),
5557 TAG_OBJECT);
5558 allocate_fallback->BindExit();
5559 boilerplate = frame_->Pop();
5560 // Copy from the boilerplate to the clone and push the clone.
5561
5562 for (int i = 0; i < size; i += kPointerSize) {
5563 __ mov(tmp.reg(), FieldOperand(boilerplate.reg(), i));
5564 __ mov(FieldOperand(literals.reg(), i), tmp.reg());
5565 }
5566 frame_->Push(&literals);
5567 }
5568
5569
5570 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
5571 ASSERT(!in_safe_int32_mode());
5572 Comment cmnt(masm_, "[ ObjectLiteral");
5573
5574 // Load a writable copy of the function of this activation in a
5575 // register.
5576 frame_->PushFunction();
5577 Result literals = frame_->Pop();
5578 literals.ToRegister();
5579 frame_->Spill(literals.reg());
5580
5581 // Load the literals array of the function.
5582 __ mov(literals.reg(),
5583 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
5584 // Literal array.
5585 frame_->Push(&literals);
5586 // Literal index.
5587 frame_->Push(Smi::FromInt(node->literal_index()));
5588 // Constant properties.
5589 frame_->Push(node->constant_properties());
5590 // Should the object literal have fast elements?
5591 frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
5592 Result clone;
5593 if (node->depth() > 1) {
5594 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
5595 } else {
5596 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
5597 }
5598 frame_->Push(&clone);
5599
5600 // Mark all computed expressions that are bound to a key that
5601 // is shadowed by a later occurrence of the same key. For the
5602 // marked expressions, no store code is emitted.
5603 node->CalculateEmitStore();
5604
5605 for (int i = 0; i < node->properties()->length(); i++) {
5606 ObjectLiteral::Property* property = node->properties()->at(i);
5607 switch (property->kind()) {
5608 case ObjectLiteral::Property::CONSTANT:
5609 break;
5610 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
5611 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
5612 // else fall through.
5613 case ObjectLiteral::Property::COMPUTED: {
5614 Handle<Object> key(property->key()->handle());
5615 if (key->IsSymbol()) {
5616 // Duplicate the object as the IC receiver.
5617 frame_->Dup();
5618 Load(property->value());
5619 if (property->emit_store()) {
5620 Result ignored =
5621 frame_->CallStoreIC(Handle<String>::cast(key), false,
5622 strict_mode_flag());
5623 // A test eax instruction following the store IC call would
5624 // indicate the presence of an inlined version of the
5625 // store. Add a nop to indicate that there is no such
5626 // inlined version.
5627 __ nop();
5628 } else {
5629 frame_->Drop(2);
5630 }
5631 break;
5632 }
5633 // Fall through
5634 }
5635 case ObjectLiteral::Property::PROTOTYPE: {
5636 // Duplicate the object as an argument to the runtime call.
5637 frame_->Dup();
5638 Load(property->key());
5639 Load(property->value());
5640 if (property->emit_store()) {
5641 frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
5642 // Ignore the result.
5643 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
5644 } else {
5645 frame_->Drop(3);
5646 }
5647 break;
5648 }
5649 case ObjectLiteral::Property::SETTER: {
5650 // Duplicate the object as an argument to the runtime call.
5651 frame_->Dup();
5652 Load(property->key());
5653 frame_->Push(Smi::FromInt(1));
5654 Load(property->value());
5655 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
5656 // Ignore the result.
5657 break;
5658 }
5659 case ObjectLiteral::Property::GETTER: {
5660 // Duplicate the object as an argument to the runtime call.
5661 frame_->Dup();
5662 Load(property->key());
5663 frame_->Push(Smi::FromInt(0));
5664 Load(property->value());
5665 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
5666 // Ignore the result.
5667 break;
5668 }
5669 default: UNREACHABLE();
5670 }
5671 }
5672 }
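// CalculateEmitStore() is what makes shadowed keys cheap; e.g.
// (illustrative):
//
//   var o = { x: 1, x: 2 };  // no store is emitted for the shadowed x: 1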
5673
5674
5675 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
5676 ASSERT(!in_safe_int32_mode());
5677 Comment cmnt(masm_, "[ ArrayLiteral");
5678
5679 // Load a writable copy of the function of this activation in a
5680 // register.
5681 frame_->PushFunction();
5682 Result literals = frame_->Pop();
5683 literals.ToRegister();
5684 frame_->Spill(literals.reg());
5685
5686 // Load the literals array of the function.
5687 __ mov(literals.reg(),
5688 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
5689
5690 frame_->Push(&literals);
5691 frame_->Push(Smi::FromInt(node->literal_index()));
5692 frame_->Push(node->constant_elements());
5693 int length = node->values()->length();
5694 Result clone;
5695 if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
5696 FastCloneShallowArrayStub stub(
5697 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
5698 clone = frame_->CallStub(&stub, 3);
5699 Counters* counters = masm()->isolate()->counters();
5700 __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
5701 } else if (node->depth() > 1) {
5702 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
5703 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
5704 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
5705 } else {
5706 FastCloneShallowArrayStub stub(
5707 FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
5708 clone = frame_->CallStub(&stub, 3);
5709 }
5710 frame_->Push(&clone);
5711
5712 // Generate code to set the elements in the array that are not
5713 // literals.
5714 for (int i = 0; i < length; i++) {
5715 Expression* value = node->values()->at(i);
5716
5717 if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
5718 continue;
5719 }
5720
5721 // The property must be set by generated code.
5722 Load(value);
5723
5724 // Get the property value off the stack.
5725 Result prop_value = frame_->Pop();
5726 prop_value.ToRegister();
5727
5728 // Fetch the array literal while leaving a copy on the stack and
5729 // use it to get the elements array.
5730 frame_->Dup();
5731 Result elements = frame_->Pop();
5732 elements.ToRegister();
5733 frame_->Spill(elements.reg());
5734 // Get the elements array.
5735 __ mov(elements.reg(),
5736 FieldOperand(elements.reg(), JSObject::kElementsOffset));
5737
5738 // Write to the indexed properties array.
5739 int offset = i * kPointerSize + FixedArray::kHeaderSize;
5740 __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
5741
5742 // Update the write barrier for the array address.
5743 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
5744 Result scratch = allocator_->Allocate();
5745 ASSERT(scratch.is_valid());
5746 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
5747 }
5748 }
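// For illustration, the clone strategies above correspond to literals like:
//
//   [1, 2, 3]   // constant elements: copy-on-write stub clone
//   [[1], [2]]  // depth > 1: Runtime::kCreateArrayLiteral
//   [x, y]      // shallow and short: FastCloneShallowArrayStub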
5749
5750
5751 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
5752 ASSERT(!in_safe_int32_mode());
5753 ASSERT(!in_spilled_code());
5754 // Call runtime routine to allocate the catch extension object and
5755 // assign the exception value to the catch variable.
5756 Comment cmnt(masm_, "[ CatchExtensionObject");
5757 Load(node->key());
5758 Load(node->value());
5759 Result result =
5760 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
5761 frame_->Push(&result);
5762 }
5763
5764
5765 void CodeGenerator::EmitSlotAssignment(Assignment* node) {
5766 #ifdef DEBUG
5767 int original_height = frame()->height();
5768 #endif
5769 Comment cmnt(masm(), "[ Variable Assignment");
5770 Variable* var = node->target()->AsVariableProxy()->AsVariable();
5771 ASSERT(var != NULL);
5772 Slot* slot = var->AsSlot();
5773 ASSERT(slot != NULL);
5774
5775 // Evaluate the right-hand side.
5776 if (node->is_compound()) {
5777 // For a compound assignment the right-hand side is a binary operation
5778 // between the current property value and the actual right-hand side.
5779 LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
5780 Load(node->value());
5781
5782 // Perform the binary operation.
5783 bool overwrite_value = node->value()->ResultOverwriteAllowed();
5784 // Construct the implicit binary operation.
5785 BinaryOperation expr(node);
5786 GenericBinaryOperation(&expr,
5787 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
5788 } else {
5789 // For non-compound assignment just load the right-hand side.
5790 Load(node->value());
5791 }
5792
5793 // Perform the assignment.
5794 if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
5795 CodeForSourcePosition(node->position());
5796 StoreToSlot(slot,
5797 node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
5798 }
5799 ASSERT(frame()->height() == original_height + 1);
5800 }
5801
5802
5803 void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
5804 #ifdef DEBUG
5805 int original_height = frame()->height();
5806 #endif
5807 Comment cmnt(masm(), "[ Named Property Assignment");
5808 Variable* var = node->target()->AsVariableProxy()->AsVariable();
5809 Property* prop = node->target()->AsProperty();
5810 ASSERT(var == NULL || (prop == NULL && var->is_global()));
5811
5812 // Initialize name and evaluate the receiver sub-expression if necessary. If
5813 // the receiver is trivial, it is not placed on the stack at this point, but
5814 // loaded whenever actually needed.
5815 Handle<String> name;
5816 bool is_trivial_receiver = false;
5817 if (var != NULL) {
5818 name = var->name();
5819 } else {
5820 Literal* lit = prop->key()->AsLiteral();
5821 ASSERT_NOT_NULL(lit);
5822 name = Handle<String>::cast(lit->handle());
5823 // Do not materialize the receiver on the frame if it is trivial.
5824 is_trivial_receiver = prop->obj()->IsTrivial();
5825 if (!is_trivial_receiver) Load(prop->obj());
5826 }
5827
5828 // Change to slow case in the beginning of an initialization block to
5829 // avoid the quadratic behavior of repeatedly adding fast properties.
5830 if (node->starts_initialization_block()) {
5831 // An initialization block consists of assignments of the form
5832 // expr.x = ..., so this can never be an assignment to a variable;
5833 // hence there must be a receiver object.
5834 ASSERT_EQ(NULL, var);
5835 if (is_trivial_receiver) {
5836 frame()->Push(prop->obj());
5837 } else {
5838 frame()->Dup();
5839 }
5840 Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
5841 }
5842
5843 // Change to fast case at the end of an initialization block. To prepare for
5844 // that, add an extra copy of the receiver to the frame, so that it can be
5845 // converted back to fast case after the assignment.
5846 if (node->ends_initialization_block() && !is_trivial_receiver) {
5847 frame()->Dup();
5848 }
5849
5850 // Stack layout:
5851 // [tos] : receiver (only materialized if non-trivial)
5852 // [tos+1] : receiver if at the end of an initialization block
5853
5854 // Evaluate the right-hand side.
5855 if (node->is_compound()) {
5856 // For a compound assignment the right-hand side is a binary operation
5857 // between the current property value and the actual right-hand side.
5858 if (is_trivial_receiver) {
5859 frame()->Push(prop->obj());
5860 } else if (var != NULL) {
5861 // The LoadIC stub expects the object in eax.
5862 // Freeing eax causes the code generator to load the global into it.
5863 frame_->Spill(eax);
5864 LoadGlobal();
5865 } else {
5866 frame()->Dup();
5867 }
5868 Result value = EmitNamedLoad(name, var != NULL);
5869 frame()->Push(&value);
5870 Load(node->value());
5871
5872 bool overwrite_value = node->value()->ResultOverwriteAllowed();
5873 // Construct the implicit binary operation.
5874 BinaryOperation expr(node);
5875 GenericBinaryOperation(&expr,
5876 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
5877 } else {
5878 // For non-compound assignment just load the right-hand side.
5879 Load(node->value());
5880 }
5881
5882 // Stack layout:
5883 // [tos] : value
5884 // [tos+1] : receiver (only materialized if non-trivial)
5885 // [tos+2] : receiver if at the end of an initialization block
5886
5887 // Perform the assignment. It is safe to ignore constants here.
5888 ASSERT(var == NULL || var->mode() != Variable::CONST);
5889 ASSERT_NE(Token::INIT_CONST, node->op());
5890 if (is_trivial_receiver) {
5891 Result value = frame()->Pop();
5892 frame()->Push(prop->obj());
5893 frame()->Push(&value);
5894 }
5895 CodeForSourcePosition(node->position());
5896 bool is_contextual = (var != NULL);
5897 Result answer = EmitNamedStore(name, is_contextual);
5898 frame()->Push(&answer);
5899
5900 // Stack layout:
5901 // [tos] : result
5902 // [tos+1] : receiver if at the end of an initialization block
5903
5904 if (node->ends_initialization_block()) {
5905 ASSERT_EQ(NULL, var);
5906 // The argument to the runtime call is the receiver.
5907 if (is_trivial_receiver) {
5908 frame()->Push(prop->obj());
5909 } else {
5910 // A copy of the receiver is below the value of the assignment. Swap
5911 // the receiver and the value of the assignment expression.
5912 Result result = frame()->Pop();
5913 Result receiver = frame()->Pop();
5914 frame()->Push(&result);
5915 frame()->Push(&receiver);
5916 }
5917 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
5918 }
5919
5920 // Stack layout:
5921 // [tos] : result
5922
5923 ASSERT_EQ(frame()->height(), original_height + 1);
5924 }
5925
5926
5927 void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
5928 #ifdef DEBUG
5929 int original_height = frame()->height();
5930 #endif
5931 Comment cmnt(masm_, "[ Keyed Property Assignment");
5932 Property* prop = node->target()->AsProperty();
5933 ASSERT_NOT_NULL(prop);
5934
5935 // Evaluate the receiver subexpression.
5936 Load(prop->obj());
5937
5938 // Change to slow case in the beginning of an initialization block to
5939 // avoid the quadratic behavior of repeatedly adding fast properties.
5940 if (node->starts_initialization_block()) {
5941 frame_->Dup();
5942 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
5943 }
5944
5945 // Change to fast case at the end of an initialization block. To prepare for
5946 // that, add an extra copy of the receiver to the frame, so that it can be
5947 // converted back to fast case after the assignment.
5948 if (node->ends_initialization_block()) {
5949 frame_->Dup();
5950 }
5951
5952 // Evaluate the key subexpression.
5953 Load(prop->key());
5954
5955 // Stack layout:
5956 // [tos] : key
5957 // [tos+1] : receiver
5958 // [tos+2] : receiver if at the end of an initialization block
5959
5960 // Evaluate the right-hand side.
5961 if (node->is_compound()) {
5962 // For a compound assignment the right-hand side is a binary operation
5963 // between the current property value and the actual right-hand side.
5964 // Duplicate receiver and key for loading the current property value.
5965 frame()->PushElementAt(1);
5966 frame()->PushElementAt(1);
5967 Result value = EmitKeyedLoad();
5968 frame()->Push(&value);
5969 Load(node->value());
5970
5971 // Perform the binary operation.
5972 bool overwrite_value = node->value()->ResultOverwriteAllowed();
5973 BinaryOperation expr(node);
5974 GenericBinaryOperation(&expr,
5975 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
5976 } else {
5977 // For non-compound assignment just load the right-hand side.
5978 Load(node->value());
5979 }
5980
5981 // Stack layout:
5982 // [tos] : value
5983 // [tos+1] : key
5984 // [tos+2] : receiver
5985 // [tos+3] : receiver if at the end of an initialization block
5986
5987 // Perform the assignment. It is safe to ignore constants here.
5988 ASSERT(node->op() != Token::INIT_CONST);
5989 CodeForSourcePosition(node->position());
5990 Result answer = EmitKeyedStore(prop->key()->type());
5991 frame()->Push(&answer);
5992
5993 // Stack layout:
5994 // [tos] : result
5995 // [tos+1] : receiver if at the end of an initialization block
5996
5997 // Change to fast case at the end of an initialization block.
5998 if (node->ends_initialization_block()) {
5999 // The argument to the runtime call is the extra copy of the receiver,
6000 // which is below the value of the assignment. Swap the receiver and
6001 // the value of the assignment expression.
6002 Result result = frame()->Pop();
6003 Result receiver = frame()->Pop();
6004 frame()->Push(&result);
6005 frame()->Push(&receiver);
6006 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
6007 }
6008
6009 // Stack layout:
6010 // [tos] : result
6011
6012 ASSERT(frame()->height() == original_height + 1);
6013 }
6014
6015
6016 void CodeGenerator::VisitAssignment(Assignment* node) {
6017 ASSERT(!in_safe_int32_mode());
6018 #ifdef DEBUG
6019 int original_height = frame()->height();
6020 #endif
6021 Variable* var = node->target()->AsVariableProxy()->AsVariable();
6022 Property* prop = node->target()->AsProperty();
6023
6024 if (var != NULL && !var->is_global()) {
6025 EmitSlotAssignment(node);
6026
6027 } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
6028 (var != NULL && var->is_global())) {
6029 // Properties whose keys are property names and global variables are
6030 // treated as named property references. We do not need to consider
6031 // global 'this' because it is not a valid left-hand side.
6032 EmitNamedPropertyAssignment(node);
6033
6034 } else if (prop != NULL) {
6035 // Other properties (including rewritten parameters for a function that
6036 // uses arguments) are keyed property assignments.
6037 EmitKeyedPropertyAssignment(node);
6038
6039 } else {
6040 // Invalid left-hand side.
6041 Load(node->target());
6042 Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
6043 // The runtime call doesn't actually return, but the code generator
6044 // still emits code after it and expects a certain frame height.
6045 frame()->Push(&result);
6046 }
6047
6048 ASSERT(frame()->height() == original_height + 1);
6049 }
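// The invalid left-hand side arm corresponds to sources like
// (illustrative):
//
//   1 = x;  // compiles, then throws a ReferenceError when executed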
6050
6051
6052 void CodeGenerator::VisitThrow(Throw* node) {
6053 ASSERT(!in_safe_int32_mode());
6054 Comment cmnt(masm_, "[ Throw");
6055 Load(node->exception());
6056 Result result = frame_->CallRuntime(Runtime::kThrow, 1);
6057 frame_->Push(&result);
6058 }
6059
6060
6061 void CodeGenerator::VisitProperty(Property* node) {
6062 ASSERT(!in_safe_int32_mode());
6063 Comment cmnt(masm_, "[ Property");
6064 Reference property(this, node);
6065 property.GetValue();
6066 }
6067
6068
6069 void CodeGenerator::VisitCall(Call* node) {
6070 ASSERT(!in_safe_int32_mode());
6071 Comment cmnt(masm_, "[ Call");
6072
6073 Expression* function = node->expression();
6074 ZoneList<Expression*>* args = node->arguments();
6075
6076 // Check if the function is a variable or a property.
6077 Variable* var = function->AsVariableProxy()->AsVariable();
6078 Property* property = function->AsProperty();
6079
6080 // ------------------------------------------------------------------------
6081 // Fast-case: Use inline caching.
6082 // ---
6083 // According to ECMA-262, section 11.2.3, page 44, the function to call
6084 // must be resolved after the arguments have been evaluated. The IC code
6085 // automatically handles this by loading the arguments before the function
6086 // is resolved in cache misses (this also holds for megamorphic calls).
6087 // ------------------------------------------------------------------------
6088
6089 if (var != NULL && var->is_possibly_eval()) {
6090 // ----------------------------------
6091 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
6092 // ----------------------------------
6093
6094 // In a call to eval, we first call %ResolvePossiblyDirectEval to
6095 // resolve the function we need to call and the receiver of the
6096 // call. Then we call the resolved function using the given
6097 // arguments.
6098
6099 // Prepare the stack for the call to the resolved function.
6100 Load(function);
6101
6102 // Allocate a frame slot for the receiver.
6103 frame_->Push(FACTORY->undefined_value());
6104
6105 // Load the arguments.
6106 int arg_count = args->length();
6107 for (int i = 0; i < arg_count; i++) {
6108 Load(args->at(i));
6109 frame_->SpillTop();
6110 }
6111
6112 // Result to hold the result of the function resolution and the
6113 // final result of the eval call.
6114 Result result;
6115
6116 // If we know that eval can only be shadowed by eval-introduced
6117 // variables, we attempt to load the global eval function directly
6118 // in generated code. If we succeed, there is no need to perform a
6119 // context lookup in the runtime system.
6120 JumpTarget done;
6121 if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
6122 ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
6123 JumpTarget slow;
6124 // Prepare the stack for the call to
6125 // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
6126 // function, the first argument to the eval call and the
6127 // receiver.
6128 Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
6129 NOT_INSIDE_TYPEOF,
6130 &slow);
6131 frame_->Push(&fun);
6132 if (arg_count > 0) {
6133 frame_->PushElementAt(arg_count);
6134 } else {
6135 frame_->Push(FACTORY->undefined_value());
6136 }
6137 frame_->PushParameterAt(-1);
6138
6139 // Push the strict mode flag.
6140 frame_->Push(Smi::FromInt(strict_mode_flag()));
6141
6142 // Resolve the call.
6143 result =
6144 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
6145
6146 done.Jump(&result);
6147 slow.Bind();
6148 }
6149
6150 // Prepare the stack for the call to ResolvePossiblyDirectEval by
6151 // pushing the loaded function, the first argument to the eval
6152 // call and the receiver.
6153 frame_->PushElementAt(arg_count + 1);
6154 if (arg_count > 0) {
6155 frame_->PushElementAt(arg_count);
6156 } else {
6157 frame_->Push(FACTORY->undefined_value());
6158 }
6159 frame_->PushParameterAt(-1);
6160
6161 // Push the strict mode flag.
6162 frame_->Push(Smi::FromInt(strict_mode_flag()));
6163
6164 // Resolve the call.
6165 result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
6166
6167 // If we generated fast-case code bind the jump-target where fast
6168 // and slow case merge.
6169 if (done.is_linked()) done.Bind(&result);
6170
6171 // The runtime call returns a pair of values in eax (function) and
6172 // edx (receiver). Touch up the stack with the right values.
6173 Result receiver = allocator_->Allocate(edx);
6174 frame_->SetElementAt(arg_count + 1, &result);
6175 frame_->SetElementAt(arg_count, &receiver);
6176 receiver.Unuse();
6177
6178 // Call the function.
6179 CodeForSourcePosition(node->position());
6180 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
6181 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
6182 result = frame_->CallStub(&call_function, arg_count + 1);
6183
6184 // Restore the context and overwrite the function on the stack with
6185 // the result.
6186 frame_->RestoreContextRegister();
6187 frame_->SetElementAt(0, &result);
6188
6189 } else if (var != NULL && !var->is_this() && var->is_global()) {
6190 // ----------------------------------
6191 // JavaScript example: 'foo(1, 2, 3)' // foo is global
6192 // ----------------------------------
6193
6194 // Pass the global object as the receiver and let the IC stub
6195 // patch the stack to use the global proxy as 'this' in the
6196 // invoked function.
6197 LoadGlobal();
6198
6199 // Load the arguments.
6200 int arg_count = args->length();
6201 for (int i = 0; i < arg_count; i++) {
6202 Load(args->at(i));
6203 frame_->SpillTop();
6204 }
6205
6206 // Push the name of the function onto the frame.
6207 frame_->Push(var->name());
6208
6209 // Call the IC initialization code.
6210 CodeForSourcePosition(node->position());
6211 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
6212 arg_count,
6213 loop_nesting());
6214 frame_->RestoreContextRegister();
6215 frame_->Push(&result);
6216
6217 } else if (var != NULL && var->AsSlot() != NULL &&
6218 var->AsSlot()->type() == Slot::LOOKUP) {
6219 // ----------------------------------
6220 // JavaScript examples:
6221 //
6222 // with (obj) foo(1, 2, 3) // foo may be in obj.
6223 //
6224 // function f() {};
6225 // function g() {
6226 // eval(...);
6227 // f(); // f could be in extension object.
6228 // }
6229 // ----------------------------------
6230
6231 JumpTarget slow, done;
6232 Result function;
6233
6234 // Generate fast case for loading functions from slots that
6235 // correspond to local/global variables or arguments unless they
6236 // are shadowed by eval-introduced bindings.
6237 EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
6238 NOT_INSIDE_TYPEOF,
6239 &function,
6240 &slow,
6241 &done);
6242
6243 slow.Bind();
6244 // Enter the runtime system to load the function from the context.
6245 // Sync the frame so we can push the arguments directly into
6246 // place.
6247 frame_->SyncRange(0, frame_->element_count() - 1);
6248 frame_->EmitPush(esi);
6249 frame_->EmitPush(Immediate(var->name()));
6250 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
6251 // The runtime call returns a pair of values in eax and edx. The
6252 // looked-up function is in eax and the receiver is in edx. These
6253 // register references are not ref counted here. We spill them
6254 // eagerly since they are arguments to an inevitable call (and are
6255 // not sharable by the arguments).
6256 ASSERT(!allocator()->is_used(eax));
6257 frame_->EmitPush(eax);
6258
6259 // Load the receiver.
6260 ASSERT(!allocator()->is_used(edx));
6261 frame_->EmitPush(edx);
6262
6263 // If fast case code has been generated, emit code to push the
6264 // function and receiver and have the slow path jump around this
6265 // code.
6266 if (done.is_linked()) {
6267 JumpTarget call;
6268 call.Jump();
6269 done.Bind(&function);
6270 frame_->Push(&function);
6271 LoadGlobalReceiver();
6272 call.Bind();
6273 }
6274
6275 // Call the function.
6276 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
6277
6278 } else if (property != NULL) {
6279 // Check if the key is a literal string.
6280 Literal* literal = property->key()->AsLiteral();
6281
6282 if (literal != NULL && literal->handle()->IsSymbol()) {
6283 // ------------------------------------------------------------------
6284 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
6285 // ------------------------------------------------------------------
6286
6287 Handle<String> name = Handle<String>::cast(literal->handle());
6288
6289 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
6290 name->IsEqualTo(CStrVector("apply")) &&
6291 args->length() == 2 &&
6292 args->at(1)->AsVariableProxy() != NULL &&
6293 args->at(1)->AsVariableProxy()->IsArguments()) {
6294 // Use the optimized Function.prototype.apply that avoids
6295 // allocating lazily allocated arguments objects.
6296 CallApplyLazy(property->obj(),
6297 args->at(0),
6298 args->at(1)->AsVariableProxy(),
6299 node->position());
6300
6301 } else {
6302 // Push the receiver onto the frame.
6303 Load(property->obj());
6304
6305 // Load the arguments.
6306 int arg_count = args->length();
6307 for (int i = 0; i < arg_count; i++) {
6308 Load(args->at(i));
6309 frame_->SpillTop();
6310 }
6311
6312 // Push the name of the function onto the frame.
6313 frame_->Push(name);
6314
6315 // Call the IC initialization code.
6316 CodeForSourcePosition(node->position());
6317 Result result =
6318 frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
6319 loop_nesting());
6320 frame_->RestoreContextRegister();
6321 frame_->Push(&result);
6322 }
6323
6324 } else {
6325 // -------------------------------------------
6326 // JavaScript example: 'array[index](1, 2, 3)'
6327 // -------------------------------------------
6328
6329 // Load the function to call from the property through a reference.
6330
6331 // Pass receiver to called function.
6332 if (property->is_synthetic()) {
6333 Reference ref(this, property);
6334 ref.GetValue();
6335 // Use global object as receiver.
6336 LoadGlobalReceiver();
6337 // Call the function.
6338 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
6339 } else {
6340 // Push the receiver onto the frame.
6341 Load(property->obj());
6342
6343 // Load the name of the function.
6344 Load(property->key());
6345
6346 // Swap the name of the function and the receiver on the stack to follow
6347 // the calling convention for call ICs.
6348 Result key = frame_->Pop();
6349 Result receiver = frame_->Pop();
6350 frame_->Push(&key);
6351 frame_->Push(&receiver);
6352 key.Unuse();
6353 receiver.Unuse();
6354
6355 // Load the arguments.
6356 int arg_count = args->length();
6357 for (int i = 0; i < arg_count; i++) {
6358 Load(args->at(i));
6359 frame_->SpillTop();
6360 }
6361
6362 // Place the key on top of the stack and call the IC initialization code.
6363 frame_->PushElementAt(arg_count + 1);
6364 CodeForSourcePosition(node->position());
6365 Result result =
6366 frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
6367 arg_count,
6368 loop_nesting());
6369 frame_->Drop(); // Drop the key still on the stack.
6370 frame_->RestoreContextRegister();
6371 frame_->Push(&result);
6372 }
6373 }
6374
6375 } else {
6376 // ----------------------------------
6377 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
6378 // ----------------------------------
6379
6380 // Load the function.
6381 Load(function);
6382
6383 // Pass the global proxy as the receiver.
6384 LoadGlobalReceiver();
6385
6386 // Call the function.
6387 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
6388 }
6389 }
6390
6391
6392 void CodeGenerator::VisitCallNew(CallNew* node) {
6393 ASSERT(!in_safe_int32_mode());
6394 Comment cmnt(masm_, "[ CallNew");
6395
6396 // According to ECMA-262, section 11.2.2, page 44, the function
6397 // expression in new calls must be evaluated before the
6398 // arguments. This is different from ordinary calls, where the
6399 // actual function to call is resolved after the arguments have been
6400 // evaluated.
6401
6402 // Push the constructor on the stack. If it's not a function it's used
6403 // as the receiver for CALL_NON_FUNCTION; otherwise the value on the
6404 // stack is ignored.
6405 Load(node->expression());
6406
6407 // Push the arguments ("left-to-right") on the stack.
6408 ZoneList<Expression*>* args = node->arguments();
6409 int arg_count = args->length();
6410 for (int i = 0; i < arg_count; i++) {
6411 Load(args->at(i));
6412 }
6413
6414 // Call the construct call builtin that handles allocation and
6415 // constructor invocation.
6416 CodeForSourcePosition(node->position());
6417 Result result = frame_->CallConstructor(arg_count);
6418 frame_->Push(&result);
6419 }
6420
6421
6422 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
6423 ASSERT(args->length() == 1);
6424 Load(args->at(0));
6425 Result value = frame_->Pop();
6426 value.ToRegister();
6427 ASSERT(value.is_valid());
6428 __ test(value.reg(), Immediate(kSmiTagMask));
6429 value.Unuse();
6430 destination()->Split(zero);
6431 }
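// Smi tagging on ia32, for reference: a smi is a 31-bit integer shifted
// left by one with a zero tag bit, so testing against kSmiTagMask sets the
// zero flag exactly for smis. As a sketch:
//
//   bool is_smi = (raw_word & 1) == 0;  // kSmiTag == 0, kSmiTagSize == 1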
6432
6433
6434 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
6435 // Conditionally generate a log call.
6436 // Args:
6437 // 0 (literal string): The type of logging (corresponds to the flags).
6438 // This is used to determine whether or not to generate the log call.
6439 // 1 (string): Format string. Access the string at argument index 2
6440 // with '%2s' (see Logger::LogRuntime for all the formats).
6441 // 2 (array): Arguments to the format string.
6442 ASSERT_EQ(args->length(), 3);
6443 #ifdef ENABLE_LOGGING_AND_PROFILING
6444 if (ShouldGenerateLog(args->at(0))) {
6445 Load(args->at(1));
6446 Load(args->at(2));
6447 frame_->CallRuntime(Runtime::kLog, 2);
6448 }
6449 #endif
6450 // Finally, we're expected to leave a value on the top of the stack.
6451 frame_->Push(FACTORY->undefined_value());
6452 }
6453
6454
6455 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
6456 ASSERT(args->length() == 1);
6457 Load(args->at(0));
6458 Result value = frame_->Pop();
6459 value.ToRegister();
6460 ASSERT(value.is_valid());
6461 __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
6462 value.Unuse();
6463 destination()->Split(zero);
6464 }
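// The combined mask also covers the sign: a non-negative smi has both
// bit 0 (the tag) and bit 31 (the sign) clear, so a single test
// instruction checks both conditions at once.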
6465
6466
6467 class DeferredStringCharCodeAt : public DeferredCode {
6468 public:
6469 DeferredStringCharCodeAt(Register object,
6470 Register index,
6471 Register scratch,
6472 Register result)
6473 : result_(result),
6474 char_code_at_generator_(object,
6475 index,
6476 scratch,
6477 result,
6478 &need_conversion_,
6479 &need_conversion_,
6480 &index_out_of_range_,
6481 STRING_INDEX_IS_NUMBER) {}
6482
6483 StringCharCodeAtGenerator* fast_case_generator() {
6484 return &char_code_at_generator_;
6485 }
6486
6487 virtual void Generate() {
6488 VirtualFrameRuntimeCallHelper call_helper(frame_state());
6489 char_code_at_generator_.GenerateSlow(masm(), call_helper);
6490
6491 __ bind(&need_conversion_);
6492 // Move the undefined value into the result register, which will
6493 // trigger conversion.
6494 __ Set(result_, Immediate(FACTORY->undefined_value()));
6495 __ jmp(exit_label());
6496
6497 __ bind(&index_out_of_range_);
6498 // When the index is out of range, the spec requires us to return
6499 // NaN.
6500 __ Set(result_, Immediate(FACTORY->nan_value()));
6501 __ jmp(exit_label());
6502 }
6503
6504 private:
6505 Register result_;
6506
6507 Label need_conversion_;
6508 Label index_out_of_range_;
6509
6510 StringCharCodeAtGenerator char_code_at_generator_;
6511 };
6512
6513
6514 // This generates code that performs a String.prototype.charCodeAt() call
6515 // or returns undefined in order to trigger conversion.
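// JavaScript example: 'abc'.charCodeAt(1) returns 98, the char code of 'b'.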
6516 void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
6517 Comment(masm_, "[ GenerateStringCharCodeAt");
6518 ASSERT(args->length() == 2);
6519
6520 Load(args->at(0));
6521 Load(args->at(1));
6522 Result index = frame_->Pop();
6523 Result object = frame_->Pop();
6524 object.ToRegister();
6525 index.ToRegister();
6526 // We might mutate the object register.
6527 frame_->Spill(object.reg());
6528
6529 // We need two extra registers.
6530 Result result = allocator()->Allocate();
6531 ASSERT(result.is_valid());
6532 Result scratch = allocator()->Allocate();
6533 ASSERT(scratch.is_valid());
6534
6535 DeferredStringCharCodeAt* deferred =
6536 new DeferredStringCharCodeAt(object.reg(),
6537 index.reg(),
6538 scratch.reg(),
6539 result.reg());
6540 deferred->fast_case_generator()->GenerateFast(masm_);
6541 deferred->BindExit();
6542 frame_->Push(&result);
6543 }
6544
6545
6546 class DeferredStringCharFromCode : public DeferredCode {
6547 public:
6548 DeferredStringCharFromCode(Register code,
6549 Register result)
6550 : char_from_code_generator_(code, result) {}
6551
6552 StringCharFromCodeGenerator* fast_case_generator() {
6553 return &char_from_code_generator_;
6554 }
6555
6556 virtual void Generate() {
6557 VirtualFrameRuntimeCallHelper call_helper(frame_state());
6558 char_from_code_generator_.GenerateSlow(masm(), call_helper);
6559 }
6560
6561 private:
6562 StringCharFromCodeGenerator char_from_code_generator_;
6563 };
6564
6565
6566 // Generates code for creating a one-char string from a char code.
6567 void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
6568 Comment(masm_, "[ GenerateStringCharFromCode");
6569 ASSERT(args->length() == 1);
6570
6571 Load(args->at(0));
6572
6573 Result code = frame_->Pop();
6574 code.ToRegister();
6575 ASSERT(code.is_valid());
6576
6577 Result result = allocator()->Allocate();
6578 ASSERT(result.is_valid());
6579
6580 DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
6581 code.reg(), result.reg());
6582 deferred->fast_case_generator()->GenerateFast(masm_);
6583 deferred->BindExit();
6584 frame_->Push(&result);
6585 }
6586
6587
6588 class DeferredStringCharAt : public DeferredCode {
6589 public:
6590 DeferredStringCharAt(Register object,
6591 Register index,
6592 Register scratch1,
6593 Register scratch2,
6594 Register result)
6595 : result_(result),
6596 char_at_generator_(object,
6597 index,
6598 scratch1,
6599 scratch2,
6600 result,
6601 &need_conversion_,
6602 &need_conversion_,
6603 &index_out_of_range_,
6604 STRING_INDEX_IS_NUMBER) {}
6605
6606 StringCharAtGenerator* fast_case_generator() {
6607 return &char_at_generator_;
6608 }
6609
6610 virtual void Generate() {
6611 VirtualFrameRuntimeCallHelper call_helper(frame_state());
6612 char_at_generator_.GenerateSlow(masm(), call_helper);
6613
6614 __ bind(&need_conversion_);
6615 // Move smi zero into the result register, which will trigger
6616 // conversion.
6617 __ Set(result_, Immediate(Smi::FromInt(0)));
6618 __ jmp(exit_label());
6619
6620 __ bind(&index_out_of_range_);
6621 // When the index is out of range, the spec requires us to return
6622 // the empty string.
6623 __ Set(result_, Immediate(FACTORY->empty_string()));
6624 __ jmp(exit_label());
6625 }
6626
6627 private:
6628 Register result_;
6629
6630 Label need_conversion_;
6631 Label index_out_of_range_;
6632
6633 StringCharAtGenerator char_at_generator_;
6634 };
6635
6636
6637 // This generates code that performs a String.prototype.charAt() call
6638 // or returns a smi in order to trigger conversion.
6639 void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
6640 Comment(masm_, "[ GenerateStringCharAt");
6641 ASSERT(args->length() == 2);
6642
6643 Load(args->at(0));
6644 Load(args->at(1));
6645 Result index = frame_->Pop();
6646 Result object = frame_->Pop();
6647 object.ToRegister();
6648 index.ToRegister();
6649 // We might mutate the object register.
6650 frame_->Spill(object.reg());
6651
6652 // We need three extra registers.
6653 Result result = allocator()->Allocate();
6654 ASSERT(result.is_valid());
6655 Result scratch1 = allocator()->Allocate();
6656 ASSERT(scratch1.is_valid());
6657 Result scratch2 = allocator()->Allocate();
6658 ASSERT(scratch2.is_valid());
6659
6660 DeferredStringCharAt* deferred =
6661 new DeferredStringCharAt(object.reg(),
6662 index.reg(),
6663 scratch1.reg(),
6664 scratch2.reg(),
6665 result.reg());
6666 deferred->fast_case_generator()->GenerateFast(masm_);
6667 deferred->BindExit();
6668 frame_->Push(&result);
6669 }
6670
6671
6672 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
6673 ASSERT(args->length() == 1);
6674 Load(args->at(0));
6675 Result value = frame_->Pop();
6676 value.ToRegister();
6677 ASSERT(value.is_valid());
6678 __ test(value.reg(), Immediate(kSmiTagMask));
6679 destination()->false_target()->Branch(equal);
6680 // It is a heap object - get map.
6681 Result temp = allocator()->Allocate();
6682 ASSERT(temp.is_valid());
6683 // Check if the object is a JS array or not.
6684 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
6685 value.Unuse();
6686 temp.Unuse();
6687 destination()->Split(equal);
6688 }
6689
6690
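// Fast inline implementation of Array.prototype.join for arrays of flat
// ASCII strings, e.g. ['a', 'b', 'c'].join('-'). Bails out, producing
// undefined, for anything else.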
6691 void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
6692 Label bailout, done, one_char_separator, long_separator,
6693 non_trivial_array, not_size_one_array, loop, loop_condition,
6694 loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
6695
6696 ASSERT(args->length() == 2);
6697 // We will leave the separator on the stack until the end of the function.
6698 Load(args->at(1));
6699 // Load the array into eax.
6700 Load(args->at(0));
6701 Result array_result = frame_->Pop();
6702 array_result.ToRegister(eax);
6703 frame_->SpillAll();
6704
6705 // All aliases of the same register have disjoint lifetimes.
6706 Register array = eax;
6707 Register elements = no_reg; // Will be eax.
6708
6709 Register index = edx;
6710
6711 Register string_length = ecx;
6712
6713 Register string = esi;
6714
6715 Register scratch = ebx;
6716
6717 Register array_length = edi;
6718 Register result_pos = no_reg; // Will be edi.
6719
6720 // Separator operand is already pushed.
6721 Operand separator_operand = Operand(esp, 2 * kPointerSize);
6722 Operand result_operand = Operand(esp, 1 * kPointerSize);
6723 Operand array_length_operand = Operand(esp, 0);
6724 __ sub(Operand(esp), Immediate(2 * kPointerSize));
6725 __ cld();
6726 // Check that the array is a JSArray.
6727 __ test(array, Immediate(kSmiTagMask));
6728 __ j(zero, &bailout);
6729 __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
6730 __ j(not_equal, &bailout);
6731
6732 // Check that the array has fast elements.
6733 __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
6734 1 << Map::kHasFastElements);
6735 __ j(zero, &bailout);
6736
6737 // If the array has length zero, return the empty string.
6738 __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
6739 __ sar(array_length, 1);
6740 __ j(not_zero, &non_trivial_array);
6741 __ mov(result_operand, FACTORY->empty_string());
6742 __ jmp(&done);
6743
6744 // Save the array length.
6745 __ bind(&non_trivial_array);
6746 __ mov(array_length_operand, array_length);
6747
6748 // Save the FixedArray containing array's elements.
6749 // End of array's live range.
6750 elements = array;
6751 __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
6752 array = no_reg;
6753
6754
6755 // Check that all array elements are sequential ASCII strings, and
6756 // accumulate the sum of their lengths, as a smi-encoded value.
6757 __ Set(index, Immediate(0));
6758 __ Set(string_length, Immediate(0));
6759 // Loop condition: while (index < length).
6760 // Live loop registers: index, array_length, string,
6761 // scratch, string_length, elements.
6762 __ jmp(&loop_condition);
6763 __ bind(&loop);
6764 __ cmp(index, Operand(array_length));
6765 __ j(greater_equal, &done);
6766
6767 __ mov(string, FieldOperand(elements, index,
6768 times_pointer_size,
6769 FixedArray::kHeaderSize));
6770 __ test(string, Immediate(kSmiTagMask));
6771 __ j(zero, &bailout);
6772 __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
6773 __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
6774 __ and_(scratch, Immediate(
6775 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
6776 __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
6777 __ j(not_equal, &bailout);
6778 __ add(string_length,
6779 FieldOperand(string, SeqAsciiString::kLengthOffset));
6780 __ j(overflow, &bailout);
6781 __ add(Operand(index), Immediate(1));
6782 __ bind(&loop_condition);
6783 __ cmp(index, Operand(array_length));
6784 __ j(less, &loop);
6785
6786 // If array_length is 1, return elements[0], a string.
6787 __ cmp(array_length, 1);
6788 __ j(not_equal, &not_size_one_array);
6789 __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
6790 __ mov(result_operand, scratch);
6791 __ jmp(&done);
6792
6793 __ bind(&not_size_one_array);
6794
6795 // End of array_length live range.
6796 result_pos = array_length;
6797 array_length = no_reg;
6798
6799 // Live registers:
6800 // string_length: Sum of string lengths, as a smi.
6801 // elements: FixedArray of strings.
6802
6803 // Check that the separator is a flat ASCII string.
6804 __ mov(string, separator_operand);
6805 __ test(string, Immediate(kSmiTagMask));
6806 __ j(zero, &bailout);
6807 __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
6808 __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
6809 __ and_(scratch, Immediate(
6810 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
6811 __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
6812 __ j(not_equal, &bailout);
6813
6814 // Add (separator length times array_length) - separator length
6815 // to string_length.
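// That is, string_length += separator_length * (array_length - 1).
// array_length was untagged above, so multiplying it with the smi-tagged
// separator length leaves the product smi-tagged.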
6816 __ mov(scratch, separator_operand);
6817 __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
6818 __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
6819 __ imul(scratch, array_length_operand);
6820 __ j(overflow, &bailout);
6821 __ add(string_length, Operand(scratch));
6822 __ j(overflow, &bailout);
6823
6824 __ shr(string_length, 1);
6825 // Live registers and stack values:
6826 // string_length
6827 // elements
6828 __ AllocateAsciiString(result_pos, string_length, scratch,
6829 index, string, &bailout);
6830 __ mov(result_operand, result_pos);
6831 __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
6832
6833
6834 __ mov(string, separator_operand);
6835 __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
6836 Immediate(Smi::FromInt(1)));
6837 __ j(equal, &one_char_separator);
6838 __ j(greater, &long_separator);
6839
6840
6841 // Empty separator case
6842 __ mov(index, Immediate(0));
6843 __ jmp(&loop_1_condition);
6844 // Loop condition: while (index < length).
6845 __ bind(&loop_1);
6846 // Each iteration of the loop concatenates one string to the result.
6847 // Live values in registers:
6848 // index: which element of the elements array we are adding to the result.
6849 // result_pos: the position to which we are currently copying characters.
6850 // elements: the FixedArray of strings we are joining.
6851
6852 // Get string = array[index].
6853 __ mov(string, FieldOperand(elements, index,
6854 times_pointer_size,
6855 FixedArray::kHeaderSize));
6856 __ mov(string_length,
6857 FieldOperand(string, String::kLengthOffset));
6858 __ shr(string_length, 1);
6859 __ lea(string,
6860 FieldOperand(string, SeqAsciiString::kHeaderSize));
6861 __ CopyBytes(string, result_pos, string_length, scratch);
6862 __ add(Operand(index), Immediate(1));
6863 __ bind(&loop_1_condition);
6864 __ cmp(index, array_length_operand);
6865 __ j(less, &loop_1); // End while (index < length).
6866 __ jmp(&done);
6867
6868
6869
6870 // One-character separator case
6871 __ bind(&one_char_separator);
6872 // Replace separator with its ascii character value.
6873 __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
6874 __ mov_b(separator_operand, scratch);
6875
6876 __ Set(index, Immediate(0));
6877 // Jump into the loop after the code that copies the separator, so the first
6878 // element is not preceded by a separator.
6879 __ jmp(&loop_2_entry);
6880 // Loop condition: while (index < length).
6881 __ bind(&loop_2);
6882 // Each iteration of the loop concatenates one string to the result.
6883 // Live values in registers:
6884 // index: which element of the elements array we are adding to the result.
6885 // result_pos: the position to which we are currently copying characters.
6886
6887 // Copy the separator character to the result.
6888 __ mov_b(scratch, separator_operand);
6889 __ mov_b(Operand(result_pos, 0), scratch);
6890 __ inc(result_pos);
6891
6892 __ bind(&loop_2_entry);
6893 // Get string = array[index].
6894 __ mov(string, FieldOperand(elements, index,
6895 times_pointer_size,
6896 FixedArray::kHeaderSize));
6897 __ mov(string_length,
6898 FieldOperand(string, String::kLengthOffset));
6899 __ shr(string_length, 1);
6900 __ lea(string,
6901 FieldOperand(string, SeqAsciiString::kHeaderSize));
6902 __ CopyBytes(string, result_pos, string_length, scratch);
6903 __ add(Operand(index), Immediate(1));
6904
6905 __ cmp(index, array_length_operand);
6906 __ j(less, &loop_2); // End while (index < length).
6907 __ jmp(&done);
6908
6909
6910 // Long separator case (separator is more than one character).
6911 __ bind(&long_separator);
6912
6913 __ Set(index, Immediate(0));
6914 // Jump into the loop after the code that copies the separator, so the first
6915 // element is not preceded by a separator.
6916 __ jmp(&loop_3_entry);
6917 // Loop condition: while (index < length).
6918 __ bind(&loop_3);
6919 // Each iteration of the loop concatenates one string to the result.
6920 // Live values in registers:
6921 // index: which element of the elements array we are adding to the result.
6922 // result_pos: the position to which we are currently copying characters.
6923
6924 // Copy the separator to the result.
6925 __ mov(string, separator_operand);
6926 __ mov(string_length,
6927 FieldOperand(string, String::kLengthOffset));
6928 __ shr(string_length, 1);
6929 __ lea(string,
6930 FieldOperand(string, SeqAsciiString::kHeaderSize));
6931 __ CopyBytes(string, result_pos, string_length, scratch);
6932
6933 __ bind(&loop_3_entry);
6934 // Get string = array[index].
6935 __ mov(string, FieldOperand(elements, index,
6936 times_pointer_size,
6937 FixedArray::kHeaderSize));
6938 __ mov(string_length,
6939 FieldOperand(string, String::kLengthOffset));
6940 __ shr(string_length, 1);
6941 __ lea(string,
6942 FieldOperand(string, SeqAsciiString::kHeaderSize));
6943 __ CopyBytes(string, result_pos, string_length, scratch);
6944 __ add(Operand(index), Immediate(1));
6945
6946 __ cmp(index, array_length_operand);
6947 __ j(less, &loop_3); // End while (index < length).
6948 __ jmp(&done);
6949
6950
6951 __ bind(&bailout);
6952 __ mov(result_operand, FACTORY->undefined_value());
6953 __ bind(&done);
6954 __ mov(eax, result_operand);
6955 // Drop temp values from the stack, and restore context register.
6956 __ add(Operand(esp), Immediate(2 * kPointerSize));
6957
6958 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
6959 frame_->Drop(1);
6960 frame_->Push(&array_result);
6961 }
6962
6963
6964 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
6965 ASSERT(args->length() == 1);
6966 Load(args->at(0));
6967 Result value = frame_->Pop();
6968 value.ToRegister();
6969 ASSERT(value.is_valid());
6970 __ test(value.reg(), Immediate(kSmiTagMask));
6971 destination()->false_target()->Branch(equal);
6972 // It is a heap object - get map.
6973 Result temp = allocator()->Allocate();
6974 ASSERT(temp.is_valid());
6975 // Check if the object is a regexp.
6976 __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, temp.reg());
6977 value.Unuse();
6978 temp.Unuse();
6979 destination()->Split(equal);
6980 }
6981
6982
6983 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
6984 // This generates a fast version of:
6985 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
6986 ASSERT(args->length() == 1);
6987 Load(args->at(0));
6988 Result obj = frame_->Pop();
6989 obj.ToRegister();
6990
6991 __ test(obj.reg(), Immediate(kSmiTagMask));
6992 destination()->false_target()->Branch(zero);
6993 __ cmp(obj.reg(), FACTORY->null_value());
6994 destination()->true_target()->Branch(equal);
6995
6996 Result map = allocator()->Allocate();
6997 ASSERT(map.is_valid());
6998 __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
6999 // Undetectable objects behave like undefined when tested with typeof.
7000 __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
7001 1 << Map::kIsUndetectable);
7002 destination()->false_target()->Branch(not_zero);
7003 // Do a range test for JSObject type. We can't use
7004 // MacroAssembler::IsInstanceJSObjectType, because we are using a
7005 // ControlDestination, so we copy its implementation here.
7006 __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
7007 __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
7008 __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
7009 obj.Unuse();
7010 map.Unuse();
7011 destination()->Split(below_equal);
7012 }
7013
7014
7015 void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
7016 // This generates a fast version of:
7017 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
7018 // typeof(arg) == 'function').
7019 // It includes undetectable objects (as opposed to IsObject).
7020 ASSERT(args->length() == 1);
7021 Load(args->at(0));
7022 Result value = frame_->Pop();
7023 value.ToRegister();
7024 ASSERT(value.is_valid());
7025 __ test(value.reg(), Immediate(kSmiTagMask));
7026 destination()->false_target()->Branch(equal);
7027
7028 // Check that this is an object.
7029 frame_->Spill(value.reg());
7030 __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
7031 value.Unuse();
7032 destination()->Split(above_equal);
7033 }
7034
7035
7036 // Deferred code to check whether a String wrapper object is safe to use
7037 // with the default valueOf behavior. This code is called after the bit
7038 // caching this information in the map has been checked, with the object's
7039 // map in the map_result_ register. On return the register map_result_
7040 // contains 1 for true and 0 for false.
7041 class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
7042 public:
7043 DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
7044 Register map_result,
7045 Register scratch1,
7046 Register scratch2)
7047 : object_(object),
7048 map_result_(map_result),
7049 scratch1_(scratch1),
7050 scratch2_(scratch2) { }
7051
7052 virtual void Generate() {
7053 Label false_result;
7054
7055 // Check that map is loaded as expected.
7056 if (FLAG_debug_code) {
7057 __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
7058 __ Assert(equal, "Map not in expected register");
7059 }
7060
7061 // Check for fast case object. Generate false result for slow case object.
7062 __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
7063 __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
7064 __ cmp(scratch1_, FACTORY->hash_table_map());
7065 __ j(equal, &false_result);
7066
7067 // Look for valueOf symbol in the descriptor array, and indicate false if
7068 // found. The type is not checked, so if it is a transition it is a false
7069 // negative.
7070 __ mov(map_result_,
7071 FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
7072 __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
7073 // map_result_: descriptor array
7074 // scratch1_: length of descriptor array
7075 // Calculate the end of the descriptor array.
7076 STATIC_ASSERT(kSmiTag == 0);
7077 STATIC_ASSERT(kSmiTagSize == 1);
7078 STATIC_ASSERT(kPointerSize == 4);
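// The length is a smi (already shifted left by one), so scaling it by
// times_2 yields length * kPointerSize, the byte size of the array body.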
7079 __ lea(scratch1_,
7080 Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
7081 // Calculate location of the first key name.
7082 __ add(Operand(map_result_),
7083 Immediate(FixedArray::kHeaderSize +
7084 DescriptorArray::kFirstIndex * kPointerSize));
7085 // Loop through all the keys in the descriptor array. If one of these is the
7086 // symbol valueOf the result is false.
7087 Label entry, loop;
7088 __ jmp(&entry);
7089 __ bind(&loop);
7090 __ mov(scratch2_, FieldOperand(map_result_, 0));
7091 __ cmp(scratch2_, FACTORY->value_of_symbol());
7092 __ j(equal, &false_result);
7093 __ add(Operand(map_result_), Immediate(kPointerSize));
7094 __ bind(&entry);
7095 __ cmp(map_result_, Operand(scratch1_));
7096 __ j(not_equal, &loop);
7097
7098 // Reload map as register map_result_ was used as temporary above.
7099 __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
7100
7101 // If a valueOf property is not found on the object, check that its
7102 // prototype is the unmodified String prototype. If not, the result is false.
7103 __ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
7104 __ test(scratch1_, Immediate(kSmiTagMask));
7105 __ j(zero, &false_result);
7106 __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
7107 __ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
7108 __ mov(scratch2_,
7109 FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
7110 __ cmp(scratch1_,
7111 ContextOperand(scratch2_,
7112 Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
7113 __ j(not_equal, &false_result);
7114 // Set the bit in the map to indicate that it has been checked safe for
7115 // default valueOf and set true result.
7116 __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
7117 Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
7118 __ Set(map_result_, Immediate(1));
7119 __ jmp(exit_label());
7120 __ bind(&false_result);
7121 // Set false result.
7122 __ Set(map_result_, Immediate(0));
7123 }
7124
7125 private:
7126 Register object_;
7127 Register map_result_;
7128 Register scratch1_;
7129 Register scratch2_;
7130 };
7131
7132
7133 void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
7134 ZoneList<Expression*>* args) {
7135 ASSERT(args->length() == 1);
7136 Load(args->at(0));
7137 Result obj = frame_->Pop(); // Pop the string wrapper.
7138 obj.ToRegister();
7139 ASSERT(obj.is_valid());
7140 if (FLAG_debug_code) {
7141 __ AbortIfSmi(obj.reg());
7142 }
7143
7144 // Check whether this map has already been checked to be safe for default
7145 // valueOf.
7146 Result map_result = allocator()->Allocate();
7147 ASSERT(map_result.is_valid());
7148 __ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
7149 __ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset),
7150 1 << Map::kStringWrapperSafeForDefaultValueOf);
7151 destination()->true_target()->Branch(not_zero);
7152
7153 // We need an additional two scratch registers for the deferred code.
7154 Result temp1 = allocator()->Allocate();
7155 ASSERT(temp1.is_valid());
7156 Result temp2 = allocator()->Allocate();
7157 ASSERT(temp2.is_valid());
7158
7159 DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
7160 new DeferredIsStringWrapperSafeForDefaultValueOf(
7161 obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
7162 deferred->Branch(zero);
7163 deferred->BindExit();
7164 __ test(map_result.reg(), Operand(map_result.reg()));
7165 obj.Unuse();
7166 map_result.Unuse();
7167 temp1.Unuse();
7168 temp2.Unuse();
7169 destination()->Split(not_equal);
7170 }
7171
7172
7173 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
7174 // This generates a fast version of:
7175 // (%_ClassOf(arg) === 'Function')
7176 ASSERT(args->length() == 1);
7177 Load(args->at(0));
7178 Result obj = frame_->Pop();
7179 obj.ToRegister();
7180 __ test(obj.reg(), Immediate(kSmiTagMask));
7181 destination()->false_target()->Branch(zero);
7182 Result temp = allocator()->Allocate();
7183 ASSERT(temp.is_valid());
7184 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
7185 obj.Unuse();
7186 temp.Unuse();
7187 destination()->Split(equal);
7188 }
7189
7190
7191 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
7192 ASSERT(args->length() == 1);
7193 Load(args->at(0));
7194 Result obj = frame_->Pop();
7195 obj.ToRegister();
7196 __ test(obj.reg(), Immediate(kSmiTagMask));
7197 destination()->false_target()->Branch(zero);
7198 Result temp = allocator()->Allocate();
7199 ASSERT(temp.is_valid());
7200 __ mov(temp.reg(),
7201 FieldOperand(obj.reg(), HeapObject::kMapOffset));
7202 __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
7203 1 << Map::kIsUndetectable);
7204 obj.Unuse();
7205 temp.Unuse();
7206 destination()->Split(not_zero);
7207 }
7208
7209
7210 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
7211 ASSERT(args->length() == 0);
7212
7213 // Get the frame pointer for the calling frame.
7214 Result fp = allocator()->Allocate();
7215 __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
7216
7217 // Skip the arguments adaptor frame if it exists.
7218 Label check_frame_marker;
7219 __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
7220 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
7221 __ j(not_equal, &check_frame_marker);
7222 __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
7223
7224 // Check the marker in the calling frame.
7225 __ bind(&check_frame_marker);
7226 __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
7227 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
7228 fp.Unuse();
7229 destination()->Split(equal);
7230 }
7231
7232
7233 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
7234 ASSERT(args->length() == 0);
7235
7236 Result fp = allocator_->Allocate();
7237 Result result = allocator_->Allocate();
7238 ASSERT(fp.is_valid() && result.is_valid());
7239
7240 Label exit;
7241
7242 // Get the number of formal parameters.
7243 __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters())));
7244
7245 // Check if the calling frame is an arguments adaptor frame.
7246 __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
7247 __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
7248 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
7249 __ j(not_equal, &exit);
7250
7251 // Arguments adaptor case: Read the arguments length from the
7252 // adaptor frame.
7253 __ mov(result.reg(),
7254 Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
7255
7256 __ bind(&exit);
7257 result.set_type_info(TypeInfo::Smi());
7258 if (FLAG_debug_code) __ AbortIfNotSmi(result.reg());
7259 frame_->Push(&result);
7260 }
7261
7262
7263 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
7264 ASSERT(args->length() == 1);
7265 JumpTarget leave, null, function, non_function_constructor;
7266 Load(args->at(0)); // Load the object.
7267 Result obj = frame_->Pop();
7268 obj.ToRegister();
7269 frame_->Spill(obj.reg());
7270
7271 // If the object is a smi, we return null.
7272 __ test(obj.reg(), Immediate(kSmiTagMask));
7273 null.Branch(zero);
7274
7275 // Check that the object is a JS object but take special care of JS
7276 // functions to make sure they have 'Function' as their class.
7277 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
7278 null.Branch(below);
7279
7280 // As long as JS_FUNCTION_TYPE is the last instance type and it is
7281 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
7282 // LAST_JS_OBJECT_TYPE.
7283 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
7284 STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
7285 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
7286 function.Branch(equal);
7287
7288 // Check if the constructor in the map is a function.
7289 { Result tmp = allocator()->Allocate();
7290 __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
7291 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
7292 non_function_constructor.Branch(not_equal);
7293 }
7294
7295 // The map register now contains the constructor function. Grab the
7296 // instance class name from there.
7297 __ mov(obj.reg(),
7298 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
7299 __ mov(obj.reg(),
7300 FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
7301 frame_->Push(&obj);
7302 leave.Jump();
7303
7304 // Functions have class 'Function'.
7305 function.Bind();
7306 frame_->Push(FACTORY->function_class_symbol());
7307 leave.Jump();
7308
7309 // Objects with a non-function constructor have class 'Object'.
7310 non_function_constructor.Bind();
7311 frame_->Push(FACTORY->Object_symbol());
7312 leave.Jump();
7313
7314 // Non-JS objects have class null.
7315 null.Bind();
7316 frame_->Push(FACTORY->null_value());
7317
7318 // All done.
7319 leave.Bind();
7320 }
7321
7322
7323 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
7324 ASSERT(args->length() == 1);
7325 JumpTarget leave;
7326 Load(args->at(0)); // Load the object.
7327 frame_->Dup();
7328 Result object = frame_->Pop();
7329 object.ToRegister();
7330 ASSERT(object.is_valid());
7331 // if (object->IsSmi()) return object.
7332 __ test(object.reg(), Immediate(kSmiTagMask));
7333 leave.Branch(zero, taken);
7334 // It is a heap object - get map.
7335 Result temp = allocator()->Allocate();
7336 ASSERT(temp.is_valid());
7337 // if (!object->IsJSValue()) return object.
7338 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
7339 leave.Branch(not_equal, not_taken);
7340 __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
7341 object.Unuse();
7342 frame_->SetElementAt(0, &temp);
7343 leave.Bind();
7344 }
7345
7346
7347 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
7348 ASSERT(args->length() == 2);
7349 JumpTarget leave;
7350 Load(args->at(0)); // Load the object.
7351 Load(args->at(1)); // Load the value.
7352 Result value = frame_->Pop();
7353 Result object = frame_->Pop();
7354 value.ToRegister();
7355 object.ToRegister();
7356
7357 // if (object->IsSmi()) return value.
7358 __ test(object.reg(), Immediate(kSmiTagMask));
7359 leave.Branch(zero, &value, taken);
7360
7361 // It is a heap object - get its map.
7362 Result scratch = allocator_->Allocate();
7363 ASSERT(scratch.is_valid());
7364 // if (!object->IsJSValue()) return value.
7365 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
7366 leave.Branch(not_equal, &value, not_taken);
7367
7368 // Store the value.
7369 __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
7370 // Update the write barrier. Save the value as it will be
7371 // overwritten by the write barrier code and is needed afterward.
7372 Result duplicate_value = allocator_->Allocate();
7373 ASSERT(duplicate_value.is_valid());
7374 __ mov(duplicate_value.reg(), value.reg());
7375 // The object register is also overwritten by the write barrier and
7376 // possibly aliased in the frame.
7377 frame_->Spill(object.reg());
7378 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
7379 scratch.reg());
7380 object.Unuse();
7381 scratch.Unuse();
7382 duplicate_value.Unuse();
7383
7384 // Leave.
7385 leave.Bind(&value);
7386 frame_->Push(&value);
7387 }
7388
7389
7390 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
7391 ASSERT(args->length() == 1);
7392
7393 // ArgumentsAccessStub expects the key in edx and the formal
7394 // parameter count in eax.
7395 Load(args->at(0));
7396 Result key = frame_->Pop();
7397 // Explicitly create a constant result.
7398 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
7399 // Call the shared stub to get to arguments[key].
7400 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
7401 Result result = frame_->CallStub(&stub, &key, &count);
7402 frame_->Push(&result);
7403 }
7404
7405
7406 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
7407 ASSERT(args->length() == 2);
7408
7409 // Load the two objects into registers and perform the comparison.
7410 Load(args->at(0));
7411 Load(args->at(1));
7412 Result right = frame_->Pop();
7413 Result left = frame_->Pop();
7414 right.ToRegister();
7415 left.ToRegister();
7416 __ cmp(right.reg(), Operand(left.reg()));
7417 right.Unuse();
7418 left.Unuse();
7419 destination()->Split(equal);
7420 }
7421
7422
7423 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
7424 ASSERT(args->length() == 0);
7425 STATIC_ASSERT(kSmiTag == 0); // EBP value is aligned, so it looks like a Smi.
7426 Result ebp_as_smi = allocator_->Allocate();
7427 ASSERT(ebp_as_smi.is_valid());
7428 __ mov(ebp_as_smi.reg(), Operand(ebp));
7429 frame_->Push(&ebp_as_smi);
7430 }
7431
7432
7433 void CodeGenerator::GenerateRandomHeapNumber(
7434 ZoneList<Expression*>* args) {
7435 ASSERT(args->length() == 0);
7436 frame_->SpillAll();
7437
7438 Label slow_allocate_heapnumber;
7439 Label heapnumber_allocated;
7440
7441 __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
7442 __ jmp(&heapnumber_allocated);
7443
7444 __ bind(&slow_allocate_heapnumber);
7445 // Allocate a heap number.
7446 __ CallRuntime(Runtime::kNumberAlloc, 0);
7447 __ mov(edi, eax);
7448
7449 __ bind(&heapnumber_allocated);
7450
7451 __ PrepareCallCFunction(1, ebx);
7452 __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
7453 __ CallCFunction(ExternalReference::random_uint32_function(masm()->isolate()),
7454 1);
7455
7456 // Convert 32 random bits in eax to 0.(32 random bits) in a double
7457 // by computing:
7458 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
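// Concretely, the mantissa becomes 20 zero bits followed by the 32 random
// bits r, so the double equals 2^20 + r * 2^-32; subtracting 2^20 leaves
// r * 2^-32, a uniform value in [0, 1).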
7459 // This is implemented on both SSE2 and FPU.
7460 if (CpuFeatures::IsSupported(SSE2)) {
7461 CpuFeatures::Scope fscope(SSE2);
7462 __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
7463 __ movd(xmm1, Operand(ebx));
7464 __ movd(xmm0, Operand(eax));
7465 __ cvtss2sd(xmm1, xmm1);
7466 __ pxor(xmm0, xmm1);
7467 __ subsd(xmm0, xmm1);
7468 __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
7469 } else {
7470 // 0x4130000000000000 is 1.0 x 2^20 as a double.
7471 __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
7472 Immediate(0x41300000));
7473 __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
7474 __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
7475 __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
7476 __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
7477 __ fsubp(1);
7478 __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
7479 }
7480 __ mov(eax, edi);
7481
7482 Result result = allocator_->Allocate(eax);
7483 frame_->Push(&result);
7484 }
7485
7486
7487 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
7488 ASSERT_EQ(2, args->length());
7489
7490 Load(args->at(0));
7491 Load(args->at(1));
7492
7493 StringAddStub stub(NO_STRING_ADD_FLAGS);
7494 Result answer = frame_->CallStub(&stub, 2);
7495 frame_->Push(&answer);
7496 }
7497
7498
7499 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
7500 ASSERT_EQ(3, args->length());
7501
7502 Load(args->at(0));
7503 Load(args->at(1));
7504 Load(args->at(2));
7505
7506 SubStringStub stub;
7507 Result answer = frame_->CallStub(&stub, 3);
7508 frame_->Push(&answer);
7509 }
7510
7511
7512 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
7513 ASSERT_EQ(2, args->length());
7514
7515 Load(args->at(0));
7516 Load(args->at(1));
7517
7518 StringCompareStub stub;
7519 Result answer = frame_->CallStub(&stub, 2);
7520 frame_->Push(&answer);
7521 }
7522
7523
7524 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
7525 ASSERT_EQ(4, args->length());
7526
7527 // Load the arguments on the stack and call the stub.
7528 Load(args->at(0));
7529 Load(args->at(1));
7530 Load(args->at(2));
7531 Load(args->at(3));
7532
7533 RegExpExecStub stub;
7534 Result result = frame_->CallStub(&stub, 4);
7535 frame_->Push(&result);
7536 }
7537
7538
7539 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
7540 ASSERT_EQ(3, args->length());
7541
7542 Load(args->at(0)); // Size of array, smi.
7543 Load(args->at(1)); // "index" property value.
7544 Load(args->at(2)); // "input" property value.
7545
7546 RegExpConstructResultStub stub;
7547 Result result = frame_->CallStub(&stub, 3);
7548 frame_->Push(&result);
7549 }
7550
7551
7552 class DeferredSearchCache: public DeferredCode {
7553 public:
7554 DeferredSearchCache(Register dst, Register cache, Register key)
7555 : dst_(dst), cache_(cache), key_(key) {
7556 set_comment("[ DeferredSearchCache");
7557 }
7558
7559 virtual void Generate();
7560
7561 private:
7562 Register dst_; // On invocation holds the smi-tagged finger index;
7563 // on exit holds the value that was looked up.
7564 Register cache_; // Instance of JSFunctionResultCache.
7565 Register key_; // The key being looked up.
7566 };
7567
7568
7569 void DeferredSearchCache::Generate() {
7570 Label first_loop, search_further, second_loop, cache_miss;
7571
7572 // Smi-tagging is equivalent to multiplying by 2.
7573 STATIC_ASSERT(kSmiTag == 0);
7574 STATIC_ASSERT(kSmiTagSize == 1);
7575
7576 Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize);
7577 Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex);
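// Cache layout (assumed from JSFunctionResultCache): the finger is a
// smi-tagged index into the backing FixedArray, and (key, value) pairs of
// kEntrySize elements each start at kEntriesIndex.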
7578
7579 // Check the cache from finger to start of the cache.
7580 __ bind(&first_loop);
7581 __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
7582 __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
7583 __ j(less, &search_further);
7584
7585 __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
7586 __ j(not_equal, &first_loop);
7587
7588 __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
7589 __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
7590 __ jmp(exit_label());
7591
7592 __ bind(&search_further);
7593
7594 // Check the cache from end of cache up to finger.
7595 __ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
7596
7597 __ bind(&second_loop);
7598 __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
7599 // Consider prefetching into some reg.
7600 __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
7601 __ j(less_equal, &cache_miss);
7602
7603 __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
7604 __ j(not_equal, &second_loop);
7605
7606 __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
7607 __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
7608 __ jmp(exit_label());
7609
7610 __ bind(&cache_miss);
7611 __ push(cache_); // store a reference to cache
7612 __ push(key_); // store a key
7613 __ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
7614 __ push(key_);
7615 // On ia32 function must be in edi.
7616 __ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
7617 ParameterCount expected(1);
7618 __ InvokeFunction(edi, expected, CALL_FUNCTION);
7619
7620 // Find a place to put the new cached value.
7621 Label add_new_entry, update_cache;
7622 __ mov(ecx, Operand(esp, kPointerSize)); // restore the cache
7623 // Possible optimization: the cache size is constant for a given cache,
7624 // so technically we could use a constant here. However, on a cache
7625 // miss this optimization would hardly matter.
7626
7627 // Check if we can add a new entry to the cache.
7628 __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
7629 __ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
7630 __ j(greater, &add_new_entry);
7631
7632 // Check if we can evict the entry after the finger.
7633 __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
7634 __ add(Operand(edx), Immediate(kEntrySizeSmi));
7635 __ cmp(ebx, Operand(edx));
7636 __ j(greater, &update_cache);
7637
7638 // Need to wrap over the cache.
7639 __ mov(edx, Immediate(kEntriesIndexSmi));
7640 __ jmp(&update_cache);
7641
7642 __ bind(&add_new_entry);
7643 __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
7644 __ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1));
7645 __ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx);
7646
7647 // Update the cache itself.
7648 // edx holds the index.
7649 __ bind(&update_cache);
7650 __ pop(ebx); // restore the key
7651 __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
7652 // Store key.
7653 __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
7654 __ RecordWrite(ecx, 0, ebx, edx);
7655
7656 // Store value.
7657 __ pop(ecx); // restore the cache.
7658 __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
7659 __ add(Operand(edx), Immediate(Smi::FromInt(1)));
7660 __ mov(ebx, eax);
7661 __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
7662 __ RecordWrite(ecx, 0, ebx, edx);
7663
7664 if (!dst_.is(eax)) {
7665 __ mov(dst_, eax);
7666 }
7667 }
7668
7669
7670 void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
7671 ASSERT_EQ(2, args->length());
7672
7673 ASSERT_NE(NULL, args->at(0)->AsLiteral());
7674 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
7675
7676 Handle<FixedArray> jsfunction_result_caches(
7677 masm()->isolate()->global_context()->jsfunction_result_caches());
7678 if (jsfunction_result_caches->length() <= cache_id) {
7679 __ Abort("Attempt to use undefined cache.");
7680 frame_->Push(FACTORY->undefined_value());
7681 return;
7682 }
7683
7684 Load(args->at(1));
7685 Result key = frame_->Pop();
7686 key.ToRegister();
7687
7688 Result cache = allocator()->Allocate();
7689 ASSERT(cache.is_valid());
7690 __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
7691 __ mov(cache.reg(),
7692 FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
7693 __ mov(cache.reg(),
7694 ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
7695 __ mov(cache.reg(),
7696 FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
7697
7698 Result tmp = allocator()->Allocate();
7699 ASSERT(tmp.is_valid());
7700
7701 DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
7702 cache.reg(),
7703 key.reg());
7704
7705 // tmp.reg() now holds finger offset as a smi.
7706 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
7707 __ mov(tmp.reg(), FieldOperand(cache.reg(),
7708 JSFunctionResultCache::kFingerOffset));
7709 __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
7710 deferred->Branch(not_equal);
7711
7712 __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
7713
7714 deferred->BindExit();
7715 frame_->Push(&tmp);
7716 }
7717
7718
7719 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
7720 ASSERT_EQ(args->length(), 1);
7721
7722 // Load the argument on the stack and call the stub.
7723 Load(args->at(0));
7724 NumberToStringStub stub;
7725 Result result = frame_->CallStub(&stub, 1);
7726 frame_->Push(&result);
7727 }
7728
7729
7730 class DeferredSwapElements: public DeferredCode {
7731 public:
7732 DeferredSwapElements(Register object, Register index1, Register index2)
7733 : object_(object), index1_(index1), index2_(index2) {
7734 set_comment("[ DeferredSwapElements");
7735 }
7736
7737 virtual void Generate();
7738
7739 private:
7740 Register object_, index1_, index2_;
7741 };
7742
7743
7744 void DeferredSwapElements::Generate() {
7745 __ push(object_);
7746 __ push(index1_);
7747 __ push(index2_);
7748 __ CallRuntime(Runtime::kSwapElements, 3);
7749 }
7750
7751
7752 void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
7753 // Note: this code assumes that the indices passed are within the
7754 // elements' bounds and refer to valid (not hole) values.
7755 Comment cmnt(masm_, "[ GenerateSwapElements");
7756
7757 ASSERT_EQ(3, args->length());
7758
7759 Load(args->at(0));
7760 Load(args->at(1));
7761 Load(args->at(2));
7762
7763 Result index2 = frame_->Pop();
7764 index2.ToRegister();
7765
7766 Result index1 = frame_->Pop();
7767 index1.ToRegister();
7768
7769 Result object = frame_->Pop();
7770 object.ToRegister();
7771
7772 Result tmp1 = allocator()->Allocate();
7773 tmp1.ToRegister();
7774 Result tmp2 = allocator()->Allocate();
7775 tmp2.ToRegister();
7776
7777 frame_->Spill(object.reg());
7778 frame_->Spill(index1.reg());
7779 frame_->Spill(index2.reg());
7780
7781 DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
7782 index1.reg(),
7783 index2.reg());
7784
7785 // Fetch the map and check if array is in fast case.
7786 // Check that object doesn't require security checks and
7787 // has no indexed interceptor.
7788 __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
7789 deferred->Branch(below);
7790 __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
7791 KeyedLoadIC::kSlowCaseBitFieldMask);
7792 deferred->Branch(not_zero);
7793
7794 // Check the object's elements are in fast case and writable.
7795 __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
7796 __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
7797 Immediate(FACTORY->fixed_array_map()));
7798 deferred->Branch(not_equal);
7799
7800 // Smi-tagging is equivalent to multiplying by 2.
7801 STATIC_ASSERT(kSmiTag == 0);
7802 STATIC_ASSERT(kSmiTagSize == 1);
7803
7804 // Check that both indices are smis.
7805 __ mov(tmp2.reg(), index1.reg());
7806 __ or_(tmp2.reg(), Operand(index2.reg()));
7807 __ test(tmp2.reg(), Immediate(kSmiTagMask));
7808 deferred->Branch(not_zero);
7809
7810 // Check that both indices are valid.
7811 __ mov(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
7812 __ cmp(tmp2.reg(), Operand(index1.reg()));
7813 deferred->Branch(below_equal);
7814 __ cmp(tmp2.reg(), Operand(index2.reg()));
7815 deferred->Branch(below_equal);
7816
7817 // Bring addresses into index1 and index2.
7818 __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
7819 __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
7820
7821 // Swap elements.
7822 __ mov(object.reg(), Operand(index1.reg(), 0));
7823 __ mov(tmp2.reg(), Operand(index2.reg(), 0));
7824 __ mov(Operand(index2.reg(), 0), object.reg());
7825 __ mov(Operand(index1.reg(), 0), tmp2.reg());
7826
7827 Label done;
7828 __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
7829 // Possible optimization: do a check that both values are Smis
7830 // (OR them together and test against the Smi mask).
7831
7832 __ mov(tmp2.reg(), tmp1.reg());
7833 __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
7834 __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
7835 __ bind(&done);
7836
7837 deferred->BindExit();
7838 frame_->Push(FACTORY->undefined_value());
7839 }
7840
7841
7842 void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
7843 Comment cmnt(masm_, "[ GenerateCallFunction");
7844
7845 ASSERT(args->length() >= 2);
7846
7847 int n_args = args->length() - 2; // for receiver and function.
7848 Load(args->at(0)); // receiver
7849 for (int i = 0; i < n_args; i++) {
7850 Load(args->at(i + 1));
7851 }
7852 Load(args->at(n_args + 1)); // function
7853 Result result = frame_->CallJSFunction(n_args);
7854 frame_->Push(&result);
7855 }
7856
7857
7858 // Generates the Math.pow method. Only handles special cases and
7859 // branches to the runtime system for everything else. Please note
7860 // that this function assumes that the callsite has executed ToNumber
7861 // on both arguments.
7862 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
7863 ASSERT(args->length() == 2);
7864 Load(args->at(0));
7865 Load(args->at(1));
7866 if (!CpuFeatures::IsSupported(SSE2)) {
7867 Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
7868 frame_->Push(&res);
7869 } else {
7870 CpuFeatures::Scope use_sse2(SSE2);
7871 Label allocate_return;
7872 // Load the two operands while leaving the values on the frame.
7873 frame()->Dup();
7874 Result exponent = frame()->Pop();
7875 exponent.ToRegister();
7876 frame()->Spill(exponent.reg());
7877 frame()->PushElementAt(1);
7878 Result base = frame()->Pop();
7879 base.ToRegister();
7880 frame()->Spill(base.reg());
7881
7882 Result answer = allocator()->Allocate();
7883 ASSERT(answer.is_valid());
7884 ASSERT(!exponent.reg().is(base.reg()));
7885 JumpTarget call_runtime;
7886
7887 // Save 1 in xmm3 - we need this several times later on.
7888 __ mov(answer.reg(), Immediate(1));
7889 __ cvtsi2sd(xmm3, Operand(answer.reg()));
7890
7891 Label exponent_nonsmi;
7892 Label base_nonsmi;
7893 // If the exponent is a heap number go to that specific case.
7894 __ test(exponent.reg(), Immediate(kSmiTagMask));
7895 __ j(not_zero, &exponent_nonsmi);
7896 __ test(base.reg(), Immediate(kSmiTagMask));
7897 __ j(not_zero, &base_nonsmi);
7898
7899 // Optimized version when y is an integer.
7900 Label powi;
7901 __ SmiUntag(base.reg());
7902 __ cvtsi2sd(xmm0, Operand(base.reg()));
7903 __ jmp(&powi);
7904 // The exponent is a smi and the base is a heap number.
7905 __ bind(&base_nonsmi);
7906 __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
7907 FACTORY->heap_number_map());
7908 call_runtime.Branch(not_equal);
7909
7910 __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
7911
7912 // Optimized version of pow if y is an integer.
7913 __ bind(&powi);
7914 __ SmiUntag(exponent.reg());
7915
7916 // Save exponent in base as we need to check if exponent is negative later.
7917 // We know that base and exponent are in different registers.
7918 __ mov(base.reg(), exponent.reg());
7919
7920 // Get absolute value of exponent.
7921 Label no_neg;
7922 __ cmp(exponent.reg(), 0);
7923 __ j(greater_equal, &no_neg);
7924 __ neg(exponent.reg());
7925 __ bind(&no_neg);
7926
7927 // Load xmm1 with 1.
7928 __ movsd(xmm1, xmm3);
7929 Label while_true;
7930 Label no_multiply;
7931
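// Binary exponentiation: each shift moves the exponent's low bit into the
// carry flag; when it is set, the current power of the base (xmm0) is
// multiplied into the accumulator (xmm1). E.g. for exponent 6 (110b) the
// result is x^2 * x^4.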
7932 __ bind(&while_true);
7933 __ shr(exponent.reg(), 1);
7934 __ j(not_carry, &no_multiply);
7935 __ mulsd(xmm1, xmm0);
7936 __ bind(&no_multiply);
7937 __ test(exponent.reg(), Operand(exponent.reg()));
7938 __ mulsd(xmm0, xmm0);
7939 __ j(not_zero, &while_true);
7940
7941 // base.reg() holds the original exponent - if it was negative, return 1/result.
7942 __ test(base.reg(), Operand(base.reg()));
7943 __ j(positive, &allocate_return);
7944 // Special case if xmm1 has reached infinity.
7945 __ mov(answer.reg(), Immediate(0x7FB00000));
7946 __ movd(xmm0, Operand(answer.reg()));
7947 __ cvtss2sd(xmm0, xmm0);
7948 __ ucomisd(xmm0, xmm1);
7949 call_runtime.Branch(equal);
7950 __ divsd(xmm3, xmm1);
7951 __ movsd(xmm1, xmm3);
7952 __ jmp(&allocate_return);
7953
7954 // The exponent (or both operands) is a heap number - either way we now
7955 // work on doubles.
7956 __ bind(&exponent_nonsmi);
7957 __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
7958 FACTORY->heap_number_map());
7959 call_runtime.Branch(not_equal);
7960 __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
7961 // Test if exponent is nan.
7962 __ ucomisd(xmm1, xmm1);
7963 call_runtime.Branch(parity_even);
7964
7965 Label base_not_smi;
7966 Label handle_special_cases;
7967 __ test(base.reg(), Immediate(kSmiTagMask));
7968 __ j(not_zero, &base_not_smi);
7969 __ SmiUntag(base.reg());
7970 __ cvtsi2sd(xmm0, Operand(base.reg()));
7971 __ jmp(&handle_special_cases);
7972 __ bind(&base_not_smi);
7973 __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
7974 FACTORY->heap_number_map());
7975 call_runtime.Branch(not_equal);
7976 __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
7977 __ and_(answer.reg(), HeapNumber::kExponentMask);
7978 __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
7979 // base is NaN or +/-Infinity
7980 call_runtime.Branch(greater_equal);
7981 __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
7982
7983 // base is in xmm0 and exponent is in xmm1.
7984 __ bind(&handle_special_cases);
7985 Label not_minus_half;
7986 // Test for -0.5.
7987 // Load xmm2 with -0.5.
7988 __ mov(answer.reg(), Immediate(0xBF000000));
7989 __ movd(xmm2, Operand(answer.reg()));
7990 __ cvtss2sd(xmm2, xmm2);
7991 // xmm2 now has -0.5.
7992 __ ucomisd(xmm2, xmm1);
7993 __ j(not_equal, &not_minus_half);
7994
7995 // Calculates reciprocal of square root.
7996 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
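// Adding +0.0 first turns a -0.0 input into +0.0 (IEEE 754: -0 + +0 == +0).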
7997 __ xorpd(xmm1, xmm1);
7998 __ addsd(xmm1, xmm0);
7999 __ sqrtsd(xmm1, xmm1);
8000 __ divsd(xmm3, xmm1);
8001 __ movsd(xmm1, xmm3);
8002 __ jmp(&allocate_return);
8003
8004 // Test for 0.5.
8005 __ bind(&not_minus_half);
8006 // Load xmm2 with 0.5.
8007 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
8008 __ addsd(xmm2, xmm3);
8009 // xmm2 now has 0.5.
8010 __ ucomisd(xmm2, xmm1);
8011 call_runtime.Branch(not_equal);
8012 // Calculates square root.
8013 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
8014 __ xorpd(xmm1, xmm1);
8015 __ addsd(xmm1, xmm0);
8016 __ sqrtsd(xmm1, xmm1);
8017
8018 JumpTarget done;
8019 Label failure, success;
8020 __ bind(&allocate_return);
8021 // Make a copy of the frame to enable us to handle allocation
8022 // failure after the JumpTarget jump.
8023 VirtualFrame* clone = new VirtualFrame(frame());
8024 __ AllocateHeapNumber(answer.reg(), exponent.reg(),
8025 base.reg(), &failure);
8026 __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
8027 // Remove the two original values from the frame - we only need those
8028 // in the case where we branch to runtime.
8029 frame()->Drop(2);
8030 exponent.Unuse();
8031 base.Unuse();
8032 done.Jump(&answer);
8033 // Use the copy of the original frame as our current frame.
8034 RegisterFile empty_regs;
8035 SetFrame(clone, &empty_regs);
8036 // If we experience an allocation failure we branch to runtime.
8037 __ bind(&failure);
8038 call_runtime.Bind();
8039 answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
8040
8041 done.Bind(&answer);
8042 frame()->Push(&answer);
8043 }
8044 }
8045
8046
8047 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
8048 ASSERT_EQ(args->length(), 1);
8049 Load(args->at(0));
8050 TranscendentalCacheStub stub(TranscendentalCache::SIN,
8051 TranscendentalCacheStub::TAGGED);
8052 Result result = frame_->CallStub(&stub, 1);
8053 frame_->Push(&result);
8054 }
8055
8056
8057 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
8058 ASSERT_EQ(args->length(), 1);
8059 Load(args->at(0));
8060 TranscendentalCacheStub stub(TranscendentalCache::COS,
8061 TranscendentalCacheStub::TAGGED);
8062 Result result = frame_->CallStub(&stub, 1);
8063 frame_->Push(&result);
8064 }
8065
8066
8067 void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
8068 ASSERT_EQ(args->length(), 1);
8069 Load(args->at(0));
8070 TranscendentalCacheStub stub(TranscendentalCache::LOG,
8071 TranscendentalCacheStub::TAGGED);
8072 Result result = frame_->CallStub(&stub, 1);
8073 frame_->Push(&result);
8074 }
8075
8076
8077 // Generates the Math.sqrt method. Please note - this function assumes that
8078 // the callsite has executed ToNumber on the argument.
8079 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
8080 ASSERT_EQ(args->length(), 1);
8081 Load(args->at(0));
8082
8083 if (!CpuFeatures::IsSupported(SSE2)) {
8084 Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
8085 frame()->Push(&result);
8086 } else {
8087 CpuFeatures::Scope use_sse2(SSE2);
8088 // Leave original value on the frame if we need to call runtime.
8089 frame()->Dup();
8090 Result result = frame()->Pop();
8091 result.ToRegister();
8092 frame()->Spill(result.reg());
8093 Label runtime;
8094 Label non_smi;
8095 Label load_done;
8096 JumpTarget end;
8097
8098 __ test(result.reg(), Immediate(kSmiTagMask));
8099 __ j(not_zero, &non_smi);
8100 __ SmiUntag(result.reg());
8101 __ cvtsi2sd(xmm0, Operand(result.reg()));
8102 __ jmp(&load_done);
8103 __ bind(&non_smi);
8104 __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
8105 FACTORY->heap_number_map());
8106 __ j(not_equal, &runtime);
8107 __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
8108
8109 __ bind(&load_done);
8110 __ sqrtsd(xmm0, xmm0);
8111     // Make a copy of the virtual frame so we can still go to the runtime
8112     // after the JumpTarget jump.
8113 Result scratch = allocator()->Allocate();
8114 VirtualFrame* clone = new VirtualFrame(frame());
8115 __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
8116
8117 __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
8118 frame()->Drop(1);
8119 scratch.Unuse();
8120 end.Jump(&result);
8121 // We only branch to runtime if we have an allocation error.
8122 // Use the copy of the original frame as our current frame.
8123 RegisterFile empty_regs;
8124 SetFrame(clone, &empty_regs);
8125 __ bind(&runtime);
8126 result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
8127
8128 end.Bind(&result);
8129 frame()->Push(&result);
8130 }
8131 }
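
// The smi check and SmiUntag above rely on the ia32 tagging scheme: a small
// integer is stored shifted left one bit with a zero tag bit, while heap
// pointers carry a one in the same bit. A standalone C++ sketch (the
// constants mirror the ia32 values but are redeclared here so the sketch
// compiles on its own):

#include <cstdint>
#include <cstdio>

const intptr_t kSmiTag = 0;
const intptr_t kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

static bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == kSmiTag; }
static intptr_t SmiTag(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;
}
static int32_t SmiUntag(intptr_t value) {
  return static_cast<int32_t>(value >> kSmiTagSize);  // Arithmetic shift.
}

int main() {
  intptr_t tagged = SmiTag(42);
  if (IsSmi(tagged)) std::printf("%d\n", SmiUntag(tagged));  // Prints: 42
  return 0;
}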
8132
8133
8134 void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
8135 ASSERT_EQ(2, args->length());
8136 Load(args->at(0));
8137 Load(args->at(1));
8138 Result right_res = frame_->Pop();
8139 Result left_res = frame_->Pop();
8140 right_res.ToRegister();
8141 left_res.ToRegister();
8142 Result tmp_res = allocator()->Allocate();
8143 ASSERT(tmp_res.is_valid());
8144 Register right = right_res.reg();
8145 Register left = left_res.reg();
8146 Register tmp = tmp_res.reg();
8147 right_res.Unuse();
8148 left_res.Unuse();
8149 tmp_res.Unuse();
8150 __ cmp(left, Operand(right));
8151 destination()->true_target()->Branch(equal);
8152 // Fail if either is a non-HeapObject.
8153 __ mov(tmp, left);
8154 __ and_(Operand(tmp), right);
8155 __ test(Operand(tmp), Immediate(kSmiTagMask));
8156 destination()->false_target()->Branch(equal);
8157 __ CmpObjectType(left, JS_REGEXP_TYPE, tmp);
8158 destination()->false_target()->Branch(not_equal);
8159 __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
8160 destination()->false_target()->Branch(not_equal);
8161 __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
8162 __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
8163 destination()->Split(equal);
8164 }
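
// The and_/test pair above folds two smi checks into one: ANDing two tagged
// words leaves the tag bit set only when both operands are heap objects, so
// a single TEST rejects the case where either side is a smi. A standalone
// C++ sketch (the fake pointer below is purely illustrative):

#include <cstdint>
#include <cstdio>

const intptr_t kSmiTagMask = 1;  // Low bit: 0 for smis, 1 for heap objects.

static bool BothHeapObjects(intptr_t left, intptr_t right) {
  return ((left & right) & kSmiTagMask) != 0;
}

int main() {
  intptr_t smi = 42 << 1;        // Tagged smi: low bit clear.
  intptr_t object = 0x1000 | 1;  // Stand-in heap pointer: low bit set.
  std::printf("%d %d\n",
              BothHeapObjects(object, object),  // 1
              BothHeapObjects(smi, object));    // 0
  return 0;
}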
8165
8166
8167 void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
8168 ASSERT(args->length() == 1);
8169 Load(args->at(0));
8170 Result value = frame_->Pop();
8171 value.ToRegister();
8172 ASSERT(value.is_valid());
8173 if (FLAG_debug_code) {
8174 __ AbortIfNotString(value.reg());
8175 }
8176
8177 __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
8178 Immediate(String::kContainsCachedArrayIndexMask));
8179
8180 value.Unuse();
8181 destination()->Split(zero);
8182 }
8183
8184
8185 void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
8186 ASSERT(args->length() == 1);
8187 Load(args->at(0));
8188 Result string = frame_->Pop();
8189 string.ToRegister();
8190 if (FLAG_debug_code) {
8191 __ AbortIfNotString(string.reg());
8192 }
8193
8194 Result number = allocator()->Allocate();
8195 ASSERT(number.is_valid());
8196 __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
8197 __ IndexFromHash(number.reg(), number.reg());
8198 string.Unuse();
8199 frame_->Push(&number);
8200 }
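
// Both helpers above read the string's hash field, which caches whether the
// string is a valid array index and, if so, the index value itself, making
// IndexFromHash a shift rather than a reparse of the string. A standalone
// C++ sketch with a simplified, hypothetical bit layout (the real field
// packs more state than this):

#include <cstdint>
#include <cstdio>

const uint32_t kIsNotArrayIndexMask = 1u << 1;  // Hypothetical flag bit.
const uint32_t kIndexShift = 2;                 // Hypothetical index position.

static bool HasCachedArrayIndex(uint32_t hash_field) {
  return (hash_field & kIsNotArrayIndexMask) == 0;
}
static uint32_t IndexFromHash(uint32_t hash_field) {
  return hash_field >> kIndexShift;
}

int main() {
  uint32_t field = 7u << kIndexShift;  // The index 7, cached in the field.
  if (HasCachedArrayIndex(field)) {
    std::printf("%u\n", IndexFromHash(field));  // Prints: 7
  }
  return 0;
}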
8201
8202
8203 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
8204 ASSERT(!in_safe_int32_mode());
8205 if (CheckForInlineRuntimeCall(node)) {
8206 return;
8207 }
8208
8209 ZoneList<Expression*>* args = node->arguments();
8210 Comment cmnt(masm_, "[ CallRuntime");
8211 const Runtime::Function* function = node->function();
8212
8213 if (function == NULL) {
8214 // Push the builtins object found in the current global object.
8215 Result temp = allocator()->Allocate();
8216 ASSERT(temp.is_valid());
8217 __ mov(temp.reg(), GlobalObjectOperand());
8218 __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
8219 frame_->Push(&temp);
8220 }
8221
8222 // Push the arguments ("left-to-right").
8223 int arg_count = args->length();
8224 for (int i = 0; i < arg_count; i++) {
8225 Load(args->at(i));
8226 }
8227
8228 if (function == NULL) {
8229 // Call the JS runtime function.
8230 frame_->Push(node->name());
8231 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
8232 arg_count,
8233 loop_nesting_);
8234 frame_->RestoreContextRegister();
8235 frame_->Push(&answer);
8236 } else {
8237 // Call the C runtime function.
8238 Result answer = frame_->CallRuntime(function, arg_count);
8239 frame_->Push(&answer);
8240 }
8241 }
8242
8243
8244 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
8245 Comment cmnt(masm_, "[ UnaryOperation");
8246
8247 Token::Value op = node->op();
8248
8249 if (op == Token::NOT) {
8250 // Swap the true and false targets but keep the same actual label
8251 // as the fall through.
8252 destination()->Invert();
8253 LoadCondition(node->expression(), destination(), true);
8254 // Swap the labels back.
8255 destination()->Invert();
8256
8257 } else if (op == Token::DELETE) {
8258 Property* property = node->expression()->AsProperty();
8259 if (property != NULL) {
8260 Load(property->obj());
8261 Load(property->key());
8262 frame_->Push(Smi::FromInt(strict_mode_flag()));
8263 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
8264 frame_->Push(&answer);
8265 return;
8266 }
8267
8268 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
8269 if (variable != NULL) {
8270       // Deleting an unqualified identifier is disallowed in strict mode,
8271       // but "delete this" is allowed.
8272 ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
8273 Slot* slot = variable->AsSlot();
8274 if (variable->is_global()) {
8275 LoadGlobal();
8276 frame_->Push(variable->name());
8277 frame_->Push(Smi::FromInt(kNonStrictMode));
8278 Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
8279 CALL_FUNCTION, 3);
8280 frame_->Push(&answer);
8281
8282 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
8283 // Call the runtime to delete from the context holding the named
8284 // variable. Sync the virtual frame eagerly so we can push the
8285 // arguments directly into place.
8286 frame_->SyncRange(0, frame_->element_count() - 1);
8287 frame_->EmitPush(esi);
8288 frame_->EmitPush(Immediate(variable->name()));
8289 Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
8290 frame_->Push(&answer);
8291 } else {
8292         // Default: the result of deleting a non-global variable that was
8293         // not dynamically introduced is false.
8294 frame_->Push(FACTORY->false_value());
8295 }
8296 } else {
8297       // Default: the result of deleting an expression is true.
8298 Load(node->expression()); // may have side-effects
8299 frame_->SetElementAt(0, FACTORY->true_value());
8300 }
8301
8302 } else if (op == Token::TYPEOF) {
8303 // Special case for loading the typeof expression; see comment on
8304 // LoadTypeofExpression().
8305 LoadTypeofExpression(node->expression());
8306 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
8307 frame_->Push(&answer);
8308
8309 } else if (op == Token::VOID) {
8310 Expression* expression = node->expression();
8311 if (expression && expression->AsLiteral() && (
8312 expression->AsLiteral()->IsTrue() ||
8313 expression->AsLiteral()->IsFalse() ||
8314 expression->AsLiteral()->handle()->IsNumber() ||
8315 expression->AsLiteral()->handle()->IsString() ||
8316 expression->AsLiteral()->handle()->IsJSRegExp() ||
8317 expression->AsLiteral()->IsNull())) {
8318 // Omit evaluating the value of the primitive literal.
8319 // It will be discarded anyway, and can have no side effect.
8320 frame_->Push(FACTORY->undefined_value());
8321 } else {
8322 Load(node->expression());
8323 frame_->SetElementAt(0, FACTORY->undefined_value());
8324 }
8325
8326 } else {
8327 if (in_safe_int32_mode()) {
8328 Visit(node->expression());
8329 Result value = frame_->Pop();
8330 ASSERT(value.is_untagged_int32());
8331       // Registers containing an int32 value are not used anywhere else.
8332 ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
8333 value.ToRegister();
8334 switch (op) {
8335 case Token::SUB: {
8336 __ neg(value.reg());
8337 frame_->Push(&value);
8338 if (node->no_negative_zero()) {
8339 // -MIN_INT is MIN_INT with the overflow flag set.
8340 unsafe_bailout_->Branch(overflow);
8341 } else {
8342             // MIN_INT and 0 both have bad negations; both have all-zero low 31 bits.
8343 __ test(value.reg(), Immediate(0x7FFFFFFF));
8344 unsafe_bailout_->Branch(zero);
8345 }
8346 break;
8347 }
8348 case Token::BIT_NOT: {
8349 __ not_(value.reg());
8350 frame_->Push(&value);
8351 break;
8352 }
8353 case Token::ADD: {
8354 // Unary plus has no effect on int32 values.
8355 frame_->Push(&value);
8356 break;
8357 }
8358 default:
8359 UNREACHABLE();
8360 break;
8361 }
8362 } else {
8363 Load(node->expression());
8364 bool can_overwrite = node->expression()->ResultOverwriteAllowed();
8365 UnaryOverwriteMode overwrite =
8366 can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
8367 bool no_negative_zero = node->expression()->no_negative_zero();
8368 switch (op) {
8369 case Token::NOT:
8370 case Token::DELETE:
8371 case Token::TYPEOF:
8372 UNREACHABLE(); // handled above
8373 break;
8374
8375 case Token::SUB: {
8376 GenericUnaryOpStub stub(
8377 Token::SUB,
8378 overwrite,
8379 NO_UNARY_FLAGS,
8380 no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
8381 Result operand = frame_->Pop();
8382 Result answer = frame_->CallStub(&stub, &operand);
8383 answer.set_type_info(TypeInfo::Number());
8384 frame_->Push(&answer);
8385 break;
8386 }
8387 case Token::BIT_NOT: {
8388 // Smi check.
8389 JumpTarget smi_label;
8390 JumpTarget continue_label;
8391 Result operand = frame_->Pop();
8392 TypeInfo operand_info = operand.type_info();
8393 operand.ToRegister();
8394 if (operand_info.IsSmi()) {
8395 if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg());
8396 frame_->Spill(operand.reg());
8397 // Set smi tag bit. It will be reset by the not operation.
8398 __ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
8399 __ not_(operand.reg());
8400 Result answer = operand;
8401 answer.set_type_info(TypeInfo::Smi());
8402 frame_->Push(&answer);
8403 } else {
8404 __ test(operand.reg(), Immediate(kSmiTagMask));
8405 smi_label.Branch(zero, &operand, taken);
8406
8407 GenericUnaryOpStub stub(Token::BIT_NOT,
8408 overwrite,
8409 NO_UNARY_SMI_CODE_IN_STUB);
8410 Result answer = frame_->CallStub(&stub, &operand);
8411 continue_label.Jump(&answer);
8412
8413 smi_label.Bind(&answer);
8414 answer.ToRegister();
8415 frame_->Spill(answer.reg());
8416 // Set smi tag bit. It will be reset by the not operation.
8417 __ lea(answer.reg(), Operand(answer.reg(), kSmiTagMask));
8418 __ not_(answer.reg());
8419
8420 continue_label.Bind(&answer);
8421 answer.set_type_info(TypeInfo::Integer32());
8422 frame_->Push(&answer);
8423 }
8424 break;
8425 }
8426 case Token::ADD: {
8427 // Smi check.
8428 JumpTarget continue_label;
8429 Result operand = frame_->Pop();
8430 TypeInfo operand_info = operand.type_info();
8431 operand.ToRegister();
8432 __ test(operand.reg(), Immediate(kSmiTagMask));
8433 continue_label.Branch(zero, &operand, taken);
8434
8435 frame_->Push(&operand);
8436 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
8437 CALL_FUNCTION, 1);
8438
8439 continue_label.Bind(&answer);
8440 if (operand_info.IsSmi()) {
8441 answer.set_type_info(TypeInfo::Smi());
8442 } else if (operand_info.IsInteger32()) {
8443 answer.set_type_info(TypeInfo::Integer32());
8444 } else {
8445 answer.set_type_info(TypeInfo::Number());
8446 }
8447 frame_->Push(&answer);
8448 break;
8449 }
8450 default:
8451 UNREACHABLE();
8452 }
8453 }
8454 }
8455 }
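
// Two of the bit tricks above, restated as a standalone C++ sketch (not part
// of this file). First, after negating an int32, the only results that need
// a bailout are MIN_INT (overflow) and 0 (a possible -0), and they are
// exactly the values whose low 31 bits are all zero. Second, BIT_NOT can be
// computed directly on a tagged smi t == 2*x: setting the tag bit and
// complementing gives ~(2x + 1) == 2*(~x), which is again a correctly
// tagged smi.

#include <cstdint>
#include <cstdio>

static bool NegationNeedsBailout(int32_t negated) {
  return (negated & 0x7FFFFFFF) == 0;  // True only for MIN_INT and 0.
}

static int32_t SmiBitNot(int32_t tagged) {  // tagged == value << 1
  return ~(tagged | 1);                     // == (~value) << 1, tag bit clear.
}

int main() {
  std::printf("%d %d\n",
              NegationNeedsBailout(-5),          // 0
              NegationNeedsBailout(0));          // 1
  std::printf("%d\n", SmiBitNot(42 << 1) >> 1);  // ~42 == -43
  return 0;
}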
8456
8457
8458 // The value in dst was optimistically incremented or decremented. The
8459 // result overflowed or was not smi tagged. Undo the operation, call
8460 // into the runtime to convert the argument to a number, and call the
8461 // specialized add or subtract stub. The result is left in dst.
8462 class DeferredPrefixCountOperation: public DeferredCode {
8463 public:
8464 DeferredPrefixCountOperation(Register dst,
8465 bool is_increment,
8466 TypeInfo input_type)
8467 : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
8468 set_comment("[ DeferredCountOperation");
8469 }
8470
8471 virtual void Generate();
8472
8473 private:
8474 Register dst_;
8475 bool is_increment_;
8476 TypeInfo input_type_;
8477 };
8478
8479
8480 void DeferredPrefixCountOperation::Generate() {
8481 // Undo the optimistic smi operation.
8482 if (is_increment_) {
8483 __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
8484 } else {
8485 __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
8486 }
8487 Register left;
8488 if (input_type_.IsNumber()) {
8489 left = dst_;
8490 } else {
8491 __ push(dst_);
8492 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
8493 left = eax;
8494 }
8495
8496 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
8497 NO_OVERWRITE,
8498 NO_GENERIC_BINARY_FLAGS,
8499 TypeInfo::Number());
8500 stub.GenerateCall(masm_, left, Smi::FromInt(1));
8501
8502 if (!dst_.is(eax)) __ mov(dst_, eax);
8503 }
8504
8505
8506 // The value in dst was optimistically incremented or decremented. The
8507 // result overflowed or was not smi tagged. Undo the operation and call
8508 // into the runtime to convert the argument to a number. Update the
8509 // original value in old. Call the specialized add or subtract stub.
8510 // The result is left in dst.
8511 class DeferredPostfixCountOperation: public DeferredCode {
8512 public:
8513 DeferredPostfixCountOperation(Register dst,
8514 Register old,
8515 bool is_increment,
8516 TypeInfo input_type)
8517 : dst_(dst),
8518 old_(old),
8519 is_increment_(is_increment),
8520 input_type_(input_type) {
8521 set_comment("[ DeferredCountOperation");
8522 }
8523
8524 virtual void Generate();
8525
8526 private:
8527 Register dst_;
8528 Register old_;
8529 bool is_increment_;
8530 TypeInfo input_type_;
8531 };
8532
8533
8534 void DeferredPostfixCountOperation::Generate() {
8535 // Undo the optimistic smi operation.
8536 if (is_increment_) {
8537 __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
8538 } else {
8539 __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
8540 }
8541 Register left;
8542 if (input_type_.IsNumber()) {
8543 __ push(dst_); // Save the input to use as the old value.
8544 left = dst_;
8545 } else {
8546 __ push(dst_);
8547 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
8548 __ push(eax); // Save the result of ToNumber to use as the old value.
8549 left = eax;
8550 }
8551
8552 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
8553 NO_OVERWRITE,
8554 NO_GENERIC_BINARY_FLAGS,
8555 TypeInfo::Number());
8556 stub.GenerateCall(masm_, left, Smi::FromInt(1));
8557
8558 if (!dst_.is(eax)) __ mov(dst_, eax);
8559 __ pop(old_);
8560 }
8561
8562
8563 void CodeGenerator::VisitCountOperation(CountOperation* node) {
8564 ASSERT(!in_safe_int32_mode());
8565 Comment cmnt(masm_, "[ CountOperation");
8566
8567 bool is_postfix = node->is_postfix();
8568 bool is_increment = node->op() == Token::INC;
8569
8570 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
8571 bool is_const = (var != NULL && var->mode() == Variable::CONST);
8572
8573   // Postfix operations need a stack slot under the reference to hold
8574   // the old value while the new value is being stored. This ensures
8575   // that if storing the new value requires a call, the old value is
8576   // on the frame where it can be spilled.
8577 if (is_postfix) frame_->Push(Smi::FromInt(0));
8578
8579   // A constant reference is never stored to, so it cannot be a
8580   // compound assignment reference.
8581 { Reference target(this, node->expression(), !is_const);
8582 if (target.is_illegal()) {
8583 // Spoof the virtual frame to have the expected height (one higher
8584 // than on entry).
8585 if (!is_postfix) frame_->Push(Smi::FromInt(0));
8586 return;
8587 }
8588 target.TakeValue();
8589
8590 Result new_value = frame_->Pop();
8591 new_value.ToRegister();
8592
8593 Result old_value; // Only allocated in the postfix case.
8594 if (is_postfix) {
8595 // Allocate a temporary to preserve the old value.
8596 old_value = allocator_->Allocate();
8597 ASSERT(old_value.is_valid());
8598 __ mov(old_value.reg(), new_value.reg());
8599
8600 // The return value for postfix operations is ToNumber(input).
8601 // Keep more precise type info if the input is some kind of
8602 // number already. If the input is not a number we have to wait
8603 // for the deferred code to convert it.
8604 if (new_value.type_info().IsNumber()) {
8605 old_value.set_type_info(new_value.type_info());
8606 }
8607 }
8608
8609 // Ensure the new value is writable.
8610 frame_->Spill(new_value.reg());
8611
8612 Result tmp;
8613 if (new_value.is_smi()) {
8614 if (FLAG_debug_code) __ AbortIfNotSmi(new_value.reg());
8615 } else {
8616 // We don't know statically if the input is a smi.
8617 // In order to combine the overflow and the smi tag check, we need
8618 // to be able to allocate a byte register. We attempt to do so
8619 // without spilling. If we fail, we will generate separate overflow
8620 // and smi tag checks.
8621 // We allocate and clear a temporary byte register before performing
8622 // the count operation since clearing the register using xor will clear
8623 // the overflow flag.
8624 tmp = allocator_->AllocateByteRegisterWithoutSpilling();
8625 if (tmp.is_valid()) {
8626 __ Set(tmp.reg(), Immediate(0));
8627 }
8628 }
8629
8630 if (is_increment) {
8631 __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
8632 } else {
8633 __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
8634 }
8635
8636 DeferredCode* deferred = NULL;
8637 if (is_postfix) {
8638 deferred = new DeferredPostfixCountOperation(new_value.reg(),
8639 old_value.reg(),
8640 is_increment,
8641 new_value.type_info());
8642 } else {
8643 deferred = new DeferredPrefixCountOperation(new_value.reg(),
8644 is_increment,
8645 new_value.type_info());
8646 }
8647
8648 if (new_value.is_smi()) {
8649 // In case we have a smi as input just check for overflow.
8650 deferred->Branch(overflow);
8651 } else {
8652 // If the count operation didn't overflow and the result is a valid
8653 // smi, we're done. Otherwise, we jump to the deferred slow-case
8654 // code.
8655 // We combine the overflow and the smi tag check if we could
8656 // successfully allocate a temporary byte register.
8657 if (tmp.is_valid()) {
8658 __ setcc(overflow, tmp.reg());
8659 __ or_(Operand(tmp.reg()), new_value.reg());
8660 __ test(tmp.reg(), Immediate(kSmiTagMask));
8661 tmp.Unuse();
8662 deferred->Branch(not_zero);
8663 } else {
8664 // Otherwise we test separately for overflow and smi tag.
8665 deferred->Branch(overflow);
8666 __ test(new_value.reg(), Immediate(kSmiTagMask));
8667 deferred->Branch(not_zero);
8668 }
8669 }
8670 deferred->BindExit();
8671
8672 // Postfix count operations return their input converted to
8673 // number. The case when the input is already a number is covered
8674 // above in the allocation code for old_value.
8675 if (is_postfix && !new_value.type_info().IsNumber()) {
8676 old_value.set_type_info(TypeInfo::Number());
8677 }
8678
8679 // The result of ++ or -- is an Integer32 if the
8680 // input is a smi. Otherwise it is a number.
8681 if (new_value.is_smi()) {
8682 new_value.set_type_info(TypeInfo::Integer32());
8683 } else {
8684 new_value.set_type_info(TypeInfo::Number());
8685 }
8686
8687 // Postfix: store the old value in the allocated slot under the
8688 // reference.
8689 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
8690
8691 frame_->Push(&new_value);
8692 // Non-constant: update the reference.
8693 if (!is_const) target.SetValue(NOT_CONST_INIT);
8694 }
8695
8696 // Postfix: drop the new value and use the old.
8697 if (is_postfix) frame_->Drop();
8698 }
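
// The setcc/or_/test sequence above merges two bailout conditions into one
// branch: the overflow flag is materialized as a 0/1 byte, ORed into a copy
// of the result, and a single TEST of the smi tag bit then fires for both
// "the increment overflowed" and "the result is not a smi". A standalone
// C++ sketch of the combined predicate:

#include <cstdint>
#include <cstdio>

const int32_t kSmiTagMask = 1;

static bool NeedsSlowCase(int32_t result, bool overflowed) {
  int32_t tmp = overflowed ? 1 : 0;  // setcc(overflow, tmp)
  tmp |= result;                     // or_(tmp, result)
  return (tmp & kSmiTagMask) != 0;   // test(tmp, Immediate(kSmiTagMask))
}

int main() {
  std::printf("%d %d %d\n",
              NeedsSlowCase(42 << 1, false),        // 0: smi, no overflow
              NeedsSlowCase((42 << 1) | 1, false),  // 1: not a smi
              NeedsSlowCase(42 << 1, true));        // 1: overflowed
  return 0;
}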
8699
8700
8701 void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
8702 Token::Value op = node->op();
8703 Comment cmnt(masm_, "[ Int32BinaryOperation");
8704 ASSERT(in_safe_int32_mode());
8705 ASSERT(safe_int32_mode_enabled());
8706 ASSERT(FLAG_safe_int32_compiler);
8707
8708 if (op == Token::COMMA) {
8709 // Discard left value.
8710 frame_->Nip(1);
8711 return;
8712 }
8713
8714 Result right = frame_->Pop();
8715 Result left = frame_->Pop();
8716
8717 ASSERT(right.is_untagged_int32());
8718 ASSERT(left.is_untagged_int32());
8719   // Registers containing an int32 value are not used anywhere else.
8720 ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
8721 ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
8722
8723 switch (op) {
8724 case Token::COMMA:
8725 case Token::OR:
8726 case Token::AND:
8727 UNREACHABLE();
8728 break;
8729 case Token::BIT_OR:
8730 case Token::BIT_XOR:
8731 case Token::BIT_AND:
8732 if (left.is_constant() || right.is_constant()) {
8733 int32_t value; // Put constant in value, non-constant in left.
8734 // Constants are known to be int32 values, from static analysis,
8735 // or else will be converted to int32 by implicit ECMA [[ToInt32]].
8736 if (left.is_constant()) {
8737 ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
8738 value = NumberToInt32(*left.handle());
8739 left = right;
8740 } else {
8741 ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
8742 value = NumberToInt32(*right.handle());
8743 }
8744
8745 left.ToRegister();
8746 if (op == Token::BIT_OR) {
8747 __ or_(Operand(left.reg()), Immediate(value));
8748 } else if (op == Token::BIT_XOR) {
8749 __ xor_(Operand(left.reg()), Immediate(value));
8750 } else {
8751 ASSERT(op == Token::BIT_AND);
8752 __ and_(Operand(left.reg()), Immediate(value));
8753 }
8754 } else {
8755 ASSERT(left.is_register());
8756 ASSERT(right.is_register());
8757 if (op == Token::BIT_OR) {
8758 __ or_(left.reg(), Operand(right.reg()));
8759 } else if (op == Token::BIT_XOR) {
8760 __ xor_(left.reg(), Operand(right.reg()));
8761 } else {
8762 ASSERT(op == Token::BIT_AND);
8763 __ and_(left.reg(), Operand(right.reg()));
8764 }
8765 }
8766 frame_->Push(&left);
8767 right.Unuse();
8768 break;
8769 case Token::SAR:
8770 case Token::SHL:
8771 case Token::SHR: {
8772 bool test_shr_overflow = false;
8773 left.ToRegister();
8774 if (right.is_constant()) {
8775 ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
8776 int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
8777 if (op == Token::SAR) {
8778 __ sar(left.reg(), shift_amount);
8779 } else if (op == Token::SHL) {
8780 __ shl(left.reg(), shift_amount);
8781 } else {
8782 ASSERT(op == Token::SHR);
8783 __ shr(left.reg(), shift_amount);
8784 if (shift_amount == 0) test_shr_overflow = true;
8785 }
8786 } else {
8787         // Move right into ecx.
8788 if (left.is_register() && left.reg().is(ecx)) {
8789 right.ToRegister();
8790 __ xchg(left.reg(), right.reg());
8791 left = right; // Left is unused here, copy of right unused by Push.
8792 } else {
8793 right.ToRegister(ecx);
8794 left.ToRegister();
8795 }
8796 if (op == Token::SAR) {
8797 __ sar_cl(left.reg());
8798 } else if (op == Token::SHL) {
8799 __ shl_cl(left.reg());
8800 } else {
8801 ASSERT(op == Token::SHR);
8802 __ shr_cl(left.reg());
8803 test_shr_overflow = true;
8804 }
8805 }
8806 {
8807 Register left_reg = left.reg();
8808 frame_->Push(&left);
8809 right.Unuse();
8810 if (test_shr_overflow && !node->to_int32()) {
8811 // Uint32 results with top bit set are not Int32 values.
8812 // If they will be forced to Int32, skip the test.
8813           // A test is needed because shr with shift amount 0 does not set flags.
8814 __ test(left_reg, Operand(left_reg));
8815 unsafe_bailout_->Branch(sign);
8816 }
8817 }
8818 break;
8819 }
8820 case Token::ADD:
8821 case Token::SUB:
8822 case Token::MUL:
8823 if ((left.is_constant() && op != Token::SUB) || right.is_constant()) {
8824 int32_t value; // Put constant in value, non-constant in left.
8825 if (right.is_constant()) {
8826 ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
8827 value = NumberToInt32(*right.handle());
8828 } else {
8829 ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
8830 value = NumberToInt32(*left.handle());
8831 left = right;
8832 }
8833
8834 left.ToRegister();
8835 if (op == Token::ADD) {
8836 __ add(Operand(left.reg()), Immediate(value));
8837 } else if (op == Token::SUB) {
8838 __ sub(Operand(left.reg()), Immediate(value));
8839 } else {
8840 ASSERT(op == Token::MUL);
8841 __ imul(left.reg(), left.reg(), value);
8842 }
8843 } else {
8844 left.ToRegister();
8845 ASSERT(left.is_register());
8846 ASSERT(right.is_register());
8847 if (op == Token::ADD) {
8848 __ add(left.reg(), Operand(right.reg()));
8849 } else if (op == Token::SUB) {
8850 __ sub(left.reg(), Operand(right.reg()));
8851 } else {
8852 ASSERT(op == Token::MUL);
8853 // We have statically verified that a negative zero can be ignored.
8854 __ imul(left.reg(), Operand(right.reg()));
8855 }
8856 }
8857 right.Unuse();
8858 frame_->Push(&left);
8859 if (!node->to_int32() || op == Token::MUL) {
8860 // If ToInt32 is called on the result of ADD, SUB, we don't
8861 // care about overflows.
8862         // The result of MUL may not be precisely representable as a
8863         // double, so we have to check for overflow.
8864 unsafe_bailout_->Branch(overflow);
8865 }
8866 break;
8867 case Token::DIV:
8868 case Token::MOD: {
8869 if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
8870 if (left.is_register() && left.reg().is(edi)) {
8871 right.ToRegister(ebx);
8872 } else {
8873 right.ToRegister(edi);
8874 }
8875 }
8876 left.ToRegister(eax);
8877 Result edx_reg = allocator_->Allocate(edx);
8878 right.ToRegister();
8879       // The results are marked unused here because BreakTarget::Branch
8880       // cannot handle live results.
8881 Register right_reg = right.reg();
8882 left.Unuse();
8883 right.Unuse();
8884 edx_reg.Unuse();
8885 __ cmp(right_reg, 0);
8886 // Ensure divisor is positive: no chance of non-int32 or -0 result.
8887 unsafe_bailout_->Branch(less_equal);
8888 __ cdq(); // Sign-extend eax into edx:eax
8889 __ idiv(right_reg);
8890 if (op == Token::MOD) {
8891         // Negative zero can arise from a negative dividend with a zero result.
8892 if (!node->no_negative_zero()) {
8893 Label not_negative_zero;
8894 __ test(edx, Operand(edx));
8895 __ j(not_zero, &not_negative_zero);
8896 __ test(eax, Operand(eax));
8897 unsafe_bailout_->Branch(negative);
8898 __ bind(&not_negative_zero);
8899 }
8900 Result edx_result(edx, TypeInfo::Integer32());
8901 edx_result.set_untagged_int32(true);
8902 frame_->Push(&edx_result);
8903 } else {
8904 ASSERT(op == Token::DIV);
8905 __ test(edx, Operand(edx));
8906 unsafe_bailout_->Branch(not_equal);
8907 Result eax_result(eax, TypeInfo::Integer32());
8908 eax_result.set_untagged_int32(true);
8909 frame_->Push(&eax_result);
8910 }
8911 break;
8912 }
8913 default:
8914 UNREACHABLE();
8915 break;
8916 }
8917 }
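
// The shift cases above encode two ECMA details: shift counts are taken
// modulo 32 (the & 0x1F), and SHR produces a uint32 whose top bit may be
// set, a value that is not representable as an int32 and so forces a
// bailout unless the consumer applies ToInt32 anyway. A standalone C++
// sketch:

#include <cstdint>
#include <cstdio>

static uint32_t ShrJs(uint32_t value, int32_t count) {
  return value >> (count & 0x1F);  // Shift amount masked to five bits.
}

int main() {
  uint32_t r = ShrJs(0x80000000u, 0);  // Top bit still set after the shift.
  bool fits_int32 = static_cast<int32_t>(r) >= 0;
  std::printf("%u fits_int32=%d\n", r, fits_int32);
  // Prints: 2147483648 fits_int32=0
  return 0;
}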
8918
8919
8920 void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
8921 // According to ECMA-262 section 11.11, page 58, the binary logical
8922 // operators must yield the result of one of the two expressions
8923 // before any ToBoolean() conversions. This means that the value
8924 // produced by a && or || operator is not necessarily a boolean.
8925
8926 // NOTE: If the left hand side produces a materialized value (not
8927 // control flow), we force the right hand side to do the same. This
8928 // is necessary because we assume that if we get control flow on the
8929 // last path out of an expression we got it on all paths.
8930 if (node->op() == Token::AND) {
8931 ASSERT(!in_safe_int32_mode());
8932 JumpTarget is_true;
8933 ControlDestination dest(&is_true, destination()->false_target(), true);
8934 LoadCondition(node->left(), &dest, false);
8935
8936 if (dest.false_was_fall_through()) {
8937 // The current false target was used as the fall-through. If
8938 // there are no dangling jumps to is_true then the left
8939 // subexpression was unconditionally false. Otherwise we have
8940 // paths where we do have to evaluate the right subexpression.
8941 if (is_true.is_linked()) {
8942 // We need to compile the right subexpression. If the jump to
8943 // the current false target was a forward jump then we have a
8944 // valid frame, we have just bound the false target, and we
8945 // have to jump around the code for the right subexpression.
8946 if (has_valid_frame()) {
8947 destination()->false_target()->Unuse();
8948 destination()->false_target()->Jump();
8949 }
8950 is_true.Bind();
8951 // The left subexpression compiled to control flow, so the
8952 // right one is free to do so as well.
8953 LoadCondition(node->right(), destination(), false);
8954 } else {
8955 // We have actually just jumped to or bound the current false
8956 // target but the current control destination is not marked as
8957 // used.
8958 destination()->Use(false);
8959 }
8960
8961 } else if (dest.is_used()) {
8962 // The left subexpression compiled to control flow (and is_true
8963 // was just bound), so the right is free to do so as well.
8964 LoadCondition(node->right(), destination(), false);
8965
8966 } else {
8967 // We have a materialized value on the frame, so we exit with
8968 // one on all paths. There are possibly also jumps to is_true
8969 // from nested subexpressions.
8970 JumpTarget pop_and_continue;
8971 JumpTarget exit;
8972
8973 // Avoid popping the result if it converts to 'false' using the
8974 // standard ToBoolean() conversion as described in ECMA-262,
8975 // section 9.2, page 30.
8976 //
8977 // Duplicate the TOS value. The duplicate will be popped by
8978 // ToBoolean.
8979 frame_->Dup();
8980 ControlDestination dest(&pop_and_continue, &exit, true);
8981 ToBoolean(&dest);
8982
8983 // Pop the result of evaluating the first part.
8984 frame_->Drop();
8985
8986 // Compile right side expression.
8987 is_true.Bind();
8988 Load(node->right());
8989
8990 // Exit (always with a materialized value).
8991 exit.Bind();
8992 }
8993
8994 } else {
8995 ASSERT(node->op() == Token::OR);
8996 ASSERT(!in_safe_int32_mode());
8997 JumpTarget is_false;
8998 ControlDestination dest(destination()->true_target(), &is_false, false);
8999 LoadCondition(node->left(), &dest, false);
9000
9001 if (dest.true_was_fall_through()) {
9002 // The current true target was used as the fall-through. If
9003 // there are no dangling jumps to is_false then the left
9004 // subexpression was unconditionally true. Otherwise we have
9005 // paths where we do have to evaluate the right subexpression.
9006 if (is_false.is_linked()) {
9007 // We need to compile the right subexpression. If the jump to
9008 // the current true target was a forward jump then we have a
9009 // valid frame, we have just bound the true target, and we
9010 // have to jump around the code for the right subexpression.
9011 if (has_valid_frame()) {
9012 destination()->true_target()->Unuse();
9013 destination()->true_target()->Jump();
9014 }
9015 is_false.Bind();
9016 // The left subexpression compiled to control flow, so the
9017 // right one is free to do so as well.
9018 LoadCondition(node->right(), destination(), false);
9019 } else {
9020 // We have just jumped to or bound the current true target but
9021 // the current control destination is not marked as used.
9022 destination()->Use(true);
9023 }
9024
9025 } else if (dest.is_used()) {
9026 // The left subexpression compiled to control flow (and is_false
9027 // was just bound), so the right is free to do so as well.
9028 LoadCondition(node->right(), destination(), false);
9029
9030 } else {
9031 // We have a materialized value on the frame, so we exit with
9032 // one on all paths. There are possibly also jumps to is_false
9033 // from nested subexpressions.
9034 JumpTarget pop_and_continue;
9035 JumpTarget exit;
9036
9037 // Avoid popping the result if it converts to 'true' using the
9038 // standard ToBoolean() conversion as described in ECMA-262,
9039 // section 9.2, page 30.
9040 //
9041 // Duplicate the TOS value. The duplicate will be popped by
9042 // ToBoolean.
9043 frame_->Dup();
9044 ControlDestination dest(&exit, &pop_and_continue, false);
9045 ToBoolean(&dest);
9046
9047 // Pop the result of evaluating the first part.
9048 frame_->Drop();
9049
9050 // Compile right side expression.
9051 is_false.Bind();
9052 Load(node->right());
9053
9054 // Exit (always with a materialized value).
9055 exit.Bind();
9056 }
9057 }
9058 }
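
// A standalone C++ sketch of the ECMA-262 section 11.11 semantics
// implemented above (a toy value type, not V8's): '&&' and '||' yield one of
// the operand values, not a boolean, and the right side is evaluated only
// when ToBoolean of the left side does not already decide the result.

#include <cstdio>
#include <functional>

struct Value { double num; };  // Toy stand-in for a JavaScript value.

static bool ToBoolean(Value v) { return v.num != 0; }

static Value LogicalAnd(Value left, std::function<Value()> right) {
  return ToBoolean(left) ? right() : left;  // 0 && x evaluates to 0.
}
static Value LogicalOr(Value left, std::function<Value()> right) {
  return ToBoolean(left) ? left : right();  // 7 || x evaluates to 7.
}

int main() {
  Value zero = {0}, seven = {7};
  std::printf("%g %g\n",
              LogicalAnd(zero, [&] { return seven; }).num,  // 0
              LogicalOr(seven, [&] { return zero; }).num);  // 7
  return 0;
}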
9059
9060
9061 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
9062 Comment cmnt(masm_, "[ BinaryOperation");
9063
9064 if (node->op() == Token::AND || node->op() == Token::OR) {
9065 GenerateLogicalBooleanOperation(node);
9066 } else if (in_safe_int32_mode()) {
9067 Visit(node->left());
9068 Visit(node->right());
9069 Int32BinaryOperation(node);
9070 } else {
9071 // NOTE: The code below assumes that the slow cases (calls to runtime)
9072 // never return a constant/immutable object.
9073 OverwriteMode overwrite_mode = NO_OVERWRITE;
9074 if (node->left()->ResultOverwriteAllowed()) {
9075 overwrite_mode = OVERWRITE_LEFT;
9076 } else if (node->right()->ResultOverwriteAllowed()) {
9077 overwrite_mode = OVERWRITE_RIGHT;
9078 }
9079
9080 if (node->left()->IsTrivial()) {
9081 Load(node->right());
9082 Result right = frame_->Pop();
9083 frame_->Push(node->left());
9084 frame_->Push(&right);
9085 } else {
9086 Load(node->left());
9087 Load(node->right());
9088 }
9089 GenericBinaryOperation(node, overwrite_mode);
9090 }
9091 }
9092
9093
9094 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
9095 ASSERT(!in_safe_int32_mode());
9096 frame_->PushFunction();
9097 }
9098
9099
9100 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
9101 ASSERT(!in_safe_int32_mode());
9102 Comment cmnt(masm_, "[ CompareOperation");
9103
9104 bool left_already_loaded = false;
9105
9106 // Get the expressions from the node.
9107 Expression* left = node->left();
9108 Expression* right = node->right();
9109 Token::Value op = node->op();
9110 // To make typeof testing for natives implemented in JavaScript really
9111 // efficient, we generate special code for expressions of the form:
9112 // 'typeof <expression> == <string>'.
9113 UnaryOperation* operation = left->AsUnaryOperation();
9114 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
9115 (operation != NULL && operation->op() == Token::TYPEOF) &&
9116 (right->AsLiteral() != NULL &&
9117 right->AsLiteral()->handle()->IsString())) {
9118 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
9119
9120 // Load the operand and move it to a register.
9121 LoadTypeofExpression(operation->expression());
9122 Result answer = frame_->Pop();
9123 answer.ToRegister();
9124
9125 if (check->Equals(HEAP->number_symbol())) {
9126 __ test(answer.reg(), Immediate(kSmiTagMask));
9127 destination()->true_target()->Branch(zero);
9128 frame_->Spill(answer.reg());
9129 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
9130 __ cmp(answer.reg(), FACTORY->heap_number_map());
9131 answer.Unuse();
9132 destination()->Split(equal);
9133
9134 } else if (check->Equals(HEAP->string_symbol())) {
9135 __ test(answer.reg(), Immediate(kSmiTagMask));
9136 destination()->false_target()->Branch(zero);
9137
9138 // It can be an undetectable string object.
9139 Result temp = allocator()->Allocate();
9140 ASSERT(temp.is_valid());
9141 __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
9142 __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
9143 1 << Map::kIsUndetectable);
9144 destination()->false_target()->Branch(not_zero);
9145 __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
9146 temp.Unuse();
9147 answer.Unuse();
9148 destination()->Split(below);
9149
9150 } else if (check->Equals(HEAP->boolean_symbol())) {
9151 __ cmp(answer.reg(), FACTORY->true_value());
9152 destination()->true_target()->Branch(equal);
9153 __ cmp(answer.reg(), FACTORY->false_value());
9154 answer.Unuse();
9155 destination()->Split(equal);
9156
9157 } else if (check->Equals(HEAP->undefined_symbol())) {
9158 __ cmp(answer.reg(), FACTORY->undefined_value());
9159 destination()->true_target()->Branch(equal);
9160
9161 __ test(answer.reg(), Immediate(kSmiTagMask));
9162 destination()->false_target()->Branch(zero);
9163
9164 // It can be an undetectable object.
9165 frame_->Spill(answer.reg());
9166 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
9167 __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
9168 1 << Map::kIsUndetectable);
9169 answer.Unuse();
9170 destination()->Split(not_zero);
9171
9172 } else if (check->Equals(HEAP->function_symbol())) {
9173 __ test(answer.reg(), Immediate(kSmiTagMask));
9174 destination()->false_target()->Branch(zero);
9175 frame_->Spill(answer.reg());
9176 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
9177 destination()->true_target()->Branch(equal);
9178 // Regular expressions are callable so typeof == 'function'.
9179 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
9180 answer.Unuse();
9181 destination()->Split(equal);
9182 } else if (check->Equals(HEAP->object_symbol())) {
9183 __ test(answer.reg(), Immediate(kSmiTagMask));
9184 destination()->false_target()->Branch(zero);
9185 __ cmp(answer.reg(), FACTORY->null_value());
9186 destination()->true_target()->Branch(equal);
9187
9188 Result map = allocator()->Allocate();
9189 ASSERT(map.is_valid());
9190 // Regular expressions are typeof == 'function', not 'object'.
9191 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
9192 destination()->false_target()->Branch(equal);
9193
9194 // It can be an undetectable object.
9195 __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
9196 1 << Map::kIsUndetectable);
9197 destination()->false_target()->Branch(not_zero);
9198 // Do a range test for JSObject type. We can't use
9199 // MacroAssembler::IsInstanceJSObjectType, because we are using a
9200 // ControlDestination, so we copy its implementation here.
9201 __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
9202 __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
9203 __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
9204 answer.Unuse();
9205 map.Unuse();
9206 destination()->Split(below_equal);
9207 } else {
9208 // Uncommon case: typeof testing against a string literal that is
9209 // never returned from the typeof operator.
9210 answer.Unuse();
9211 destination()->Goto(false);
9212 }
9213 return;
9214 } else if (op == Token::LT &&
9215 right->AsLiteral() != NULL &&
9216 right->AsLiteral()->handle()->IsHeapNumber()) {
9217 Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
9218 if (check->value() == 2147483648.0) { // 0x80000000.
9219 Load(left);
9220 left_already_loaded = true;
9221 Result lhs = frame_->Pop();
9222 lhs.ToRegister();
9223 __ test(lhs.reg(), Immediate(kSmiTagMask));
9224 destination()->true_target()->Branch(zero); // All Smis are less.
9225 Result scratch = allocator()->Allocate();
9226 ASSERT(scratch.is_valid());
9227 __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
9228 __ cmp(scratch.reg(), FACTORY->heap_number_map());
9229 JumpTarget not_a_number;
9230 not_a_number.Branch(not_equal, &lhs);
9231 __ mov(scratch.reg(),
9232 FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
9233 __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
9234 not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf.
9235 const uint32_t borderline_exponent =
9236 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
9237 __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
9238 scratch.Unuse();
9239 lhs.Unuse();
9240 destination()->true_target()->Branch(less);
9241 destination()->false_target()->Jump();
9242
9243 not_a_number.Bind(&lhs);
9244 frame_->Push(&lhs);
9245 }
9246 }
9247
9248 Condition cc = no_condition;
9249 bool strict = false;
9250 switch (op) {
9251 case Token::EQ_STRICT:
9252 strict = true;
9253 // Fall through
9254 case Token::EQ:
9255 cc = equal;
9256 break;
9257 case Token::LT:
9258 cc = less;
9259 break;
9260 case Token::GT:
9261 cc = greater;
9262 break;
9263 case Token::LTE:
9264 cc = less_equal;
9265 break;
9266 case Token::GTE:
9267 cc = greater_equal;
9268 break;
9269 case Token::IN: {
9270 if (!left_already_loaded) Load(left);
9271 Load(right);
9272 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
9273 frame_->Push(&answer); // push the result
9274 return;
9275 }
9276 case Token::INSTANCEOF: {
9277 if (!left_already_loaded) Load(left);
9278 Load(right);
9279 InstanceofStub stub(InstanceofStub::kNoFlags);
9280 Result answer = frame_->CallStub(&stub, 2);
9281 answer.ToRegister();
9282 __ test(answer.reg(), Operand(answer.reg()));
9283 answer.Unuse();
9284 destination()->Split(zero);
9285 return;
9286 }
9287 default:
9288 UNREACHABLE();
9289 }
9290
9291 if (left->IsTrivial()) {
9292 if (!left_already_loaded) {
9293 Load(right);
9294 Result right_result = frame_->Pop();
9295 frame_->Push(left);
9296 frame_->Push(&right_result);
9297 } else {
9298 Load(right);
9299 }
9300 } else {
9301 if (!left_already_loaded) Load(left);
9302 Load(right);
9303 }
9304 Comparison(node, cc, strict, destination());
9305 }
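
// The 'x < 2147483648.0' fast path above inspects only the high 32 bits of
// the IEEE-754 double: any value whose biased exponent is below
// kExponentBias + 31 has magnitude < 2^31, and a signed compare of the whole
// high word lets every negative number (sign bit set) pass as well, once
// negative NaN and -Inf (high word >= 0xfff00000 unsigned) have been routed
// to the generic comparison. A standalone C++ sketch of the same test:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

const int kExponentBias = 1023;
const int kExponentShift = 20;  // Exponent position within the high word.

enum Outcome { kLess, kNotLess, kGenericCompare };

static Outcome LessThanTwoTo31(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  if (high >= 0xfff00000u) return kGenericCompare;  // -NaN or -Inf.
  const uint32_t borderline = (kExponentBias + 31) << kExponentShift;
  return static_cast<int32_t>(high) < static_cast<int32_t>(borderline)
             ? kLess
             : kNotLess;
}

int main() {
  std::printf("%d %d %d %d\n",
      LessThanTwoTo31(2147483647.0),                               // 0
      LessThanTwoTo31(2147483648.0),                               // 1
      LessThanTwoTo31(-1e300),                                     // 0
      LessThanTwoTo31(-std::numeric_limits<double>::infinity()));  // 2
  return 0;
}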
9306
9307
9308 void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
9309 ASSERT(!in_safe_int32_mode());
9310 Comment cmnt(masm_, "[ CompareToNull");
9311
9312 Load(node->expression());
9313 Result operand = frame_->Pop();
9314 operand.ToRegister();
9315 __ cmp(operand.reg(), FACTORY->null_value());
9316 if (node->is_strict()) {
9317 operand.Unuse();
9318 destination()->Split(equal);
9319 } else {
9320 // The 'null' value is only equal to 'undefined' if using non-strict
9321 // comparisons.
9322 destination()->true_target()->Branch(equal);
9323 __ cmp(operand.reg(), FACTORY->undefined_value());
9324 destination()->true_target()->Branch(equal);
9325 __ test(operand.reg(), Immediate(kSmiTagMask));
9326 destination()->false_target()->Branch(equal);
9327
9328 // It can be an undetectable object.
9329 // Use a scratch register in preference to spilling operand.reg().
9330 Result temp = allocator()->Allocate();
9331 ASSERT(temp.is_valid());
9332 __ mov(temp.reg(),
9333 FieldOperand(operand.reg(), HeapObject::kMapOffset));
9334 __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
9335 1 << Map::kIsUndetectable);
9336 temp.Unuse();
9337 operand.Unuse();
9338 destination()->Split(not_zero);
9339 }
9340 }
9341
9342
9343 #ifdef DEBUG
9344 bool CodeGenerator::HasValidEntryRegisters() {
9345 return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
9346 && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
9347 && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
9348 && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
9349 && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
9350 }
9351 #endif
9352
9353
9354 // Emit a LoadIC call to get the value from receiver and leave it in
9355 // dst.
9356 class DeferredReferenceGetNamedValue: public DeferredCode {
9357 public:
9358 DeferredReferenceGetNamedValue(Register dst,
9359 Register receiver,
9360 Handle<String> name,
9361 bool is_contextual)
9362 : dst_(dst),
9363 receiver_(receiver),
9364 name_(name),
9365 is_contextual_(is_contextual),
9366 is_dont_delete_(false) {
9367 set_comment(is_contextual
9368 ? "[ DeferredReferenceGetNamedValue (contextual)"
9369 : "[ DeferredReferenceGetNamedValue");
9370 }
9371
9372 virtual void Generate();
9373
9374 Label* patch_site() { return &patch_site_; }
9375
9376 void set_is_dont_delete(bool value) {
9377 ASSERT(is_contextual_);
9378 is_dont_delete_ = value;
9379 }
9380
9381 private:
9382 Label patch_site_;
9383 Register dst_;
9384 Register receiver_;
9385 Handle<String> name_;
9386 bool is_contextual_;
9387 bool is_dont_delete_;
9388 };
9389
9390
9391 void DeferredReferenceGetNamedValue::Generate() {
9392 if (!receiver_.is(eax)) {
9393 __ mov(eax, receiver_);
9394 }
9395 __ Set(ecx, Immediate(name_));
9396 Handle<Code> ic(masm()->isolate()->builtins()->builtin(
9397 Builtins::kLoadIC_Initialize));
9398 RelocInfo::Mode mode = is_contextual_
9399 ? RelocInfo::CODE_TARGET_CONTEXT
9400 : RelocInfo::CODE_TARGET;
9401 __ call(ic, mode);
9402 // The call must be followed by:
9403 // - a test eax instruction to indicate that the inobject property
9404 // case was inlined.
9405 // - a mov ecx or mov edx instruction to indicate that the
9406 // contextual property load was inlined.
9407 //
9408 // Store the delta to the map check instruction here in the test
9409 // instruction. Use masm_-> instead of the __ macro since the
9410 // latter can't return a value.
9411 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
9412 // Here we use masm_-> instead of the __ macro because this is the
9413 // instruction that gets patched and coverage code gets in the way.
9414 Counters* counters = masm()->isolate()->counters();
9415 if (is_contextual_) {
9416 masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
9417 __ IncrementCounter(counters->named_load_global_inline_miss(), 1);
9418 if (is_dont_delete_) {
9419 __ IncrementCounter(counters->dont_delete_hint_miss(), 1);
9420 }
9421 } else {
9422 masm_->test(eax, Immediate(-delta_to_patch_site));
9423 __ IncrementCounter(counters->named_load_inline_miss(), 1);
9424 }
9425
9426 if (!dst_.is(eax)) __ mov(dst_, eax);
9427 }
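
// The marker instruction emitted above carries data, not behavior: its
// 32-bit immediate is minus the distance from the inlined map-check cmp to
// the marker, so the IC miss handler can walk back from its return address
// to the instruction it needs to patch. A standalone C++ sketch of the
// address arithmetic, with plain integers standing in for code addresses:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t patch_site = 0x1000;  // Address of the inlined map-check cmp.
  uint32_t after_call = 0x1040;  // Return address of the IC call, where the
                                 // marker instruction is found.
  int32_t immediate = -static_cast<int32_t>(after_call - patch_site);

  // Later, in the miss handler: read the immediate out of the marker and
  // recover the address of the cmp to patch.
  uint32_t recovered = after_call + immediate;
  std::printf("0x%x\n", recovered);  // Prints: 0x1000
  return 0;
}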
9428
9429
9430 class DeferredReferenceGetKeyedValue: public DeferredCode {
9431 public:
9432 explicit DeferredReferenceGetKeyedValue(Register dst,
9433 Register receiver,
9434 Register key)
9435 : dst_(dst), receiver_(receiver), key_(key) {
9436 set_comment("[ DeferredReferenceGetKeyedValue");
9437 }
9438
9439 virtual void Generate();
9440
9441 Label* patch_site() { return &patch_site_; }
9442
9443 private:
9444 Label patch_site_;
9445 Register dst_;
9446 Register receiver_;
9447 Register key_;
9448 };
9449
9450
9451 void DeferredReferenceGetKeyedValue::Generate() {
9452 if (!receiver_.is(eax)) {
9453 // Register eax is available for key.
9454 if (!key_.is(eax)) {
9455 __ mov(eax, key_);
9456 }
9457 if (!receiver_.is(edx)) {
9458 __ mov(edx, receiver_);
9459 }
9460 } else if (!key_.is(edx)) {
9461 // Register edx is available for receiver.
9462 if (!receiver_.is(edx)) {
9463 __ mov(edx, receiver_);
9464 }
9465 if (!key_.is(eax)) {
9466 __ mov(eax, key_);
9467 }
9468 } else {
9469 __ xchg(edx, eax);
9470 }
9471 // Calculate the delta from the IC call instruction to the map check
9472 // cmp instruction in the inlined version. This delta is stored in
9473 // a test(eax, delta) instruction after the call so that we can find
9474 // it in the IC initialization code and patch the cmp instruction.
9475 // This means that we cannot allow test instructions after calls to
9476 // KeyedLoadIC stubs in other places.
9477 Handle<Code> ic(masm()->isolate()->builtins()->builtin(
9478 Builtins::kKeyedLoadIC_Initialize));
9479 __ call(ic, RelocInfo::CODE_TARGET);
9480 // The delta from the start of the map-compare instruction to the
9481 // test instruction. We use masm_-> directly here instead of the __
9482 // macro because the macro sometimes uses macro expansion to turn
9483 // into something that can't return a value. This is encountered
9484 // when doing generated code coverage tests.
9485 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
9486 // Here we use masm_-> instead of the __ macro because this is the
9487 // instruction that gets patched and coverage code gets in the way.
9488 masm_->test(eax, Immediate(-delta_to_patch_site));
9489 Counters* counters = masm()->isolate()->counters();
9490 __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
9491
9492 if (!dst_.is(eax)) __ mov(dst_, eax);
9493 }
9494
9495
9496 class DeferredReferenceSetKeyedValue: public DeferredCode {
9497 public:
9498 DeferredReferenceSetKeyedValue(Register value,
9499 Register key,
9500 Register receiver,
9501 Register scratch,
9502 StrictModeFlag strict_mode)
9503 : value_(value),
9504 key_(key),
9505 receiver_(receiver),
9506 scratch_(scratch),
9507 strict_mode_(strict_mode) {
9508 set_comment("[ DeferredReferenceSetKeyedValue");
9509 }
9510
9511 virtual void Generate();
9512
9513 Label* patch_site() { return &patch_site_; }
9514
9515 private:
9516 Register value_;
9517 Register key_;
9518 Register receiver_;
9519 Register scratch_;
9520 Label patch_site_;
9521 StrictModeFlag strict_mode_;
9522 };
9523
9524
9525 void DeferredReferenceSetKeyedValue::Generate() {
9526 Counters* counters = masm()->isolate()->counters();
9527 __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
9528 // Move value_ to eax, key_ to ecx, and receiver_ to edx.
9529 Register old_value = value_;
9530
9531 // First, move value to eax.
9532 if (!value_.is(eax)) {
9533 if (key_.is(eax)) {
9534 // Move key_ out of eax, preferably to ecx.
9535 if (!value_.is(ecx) && !receiver_.is(ecx)) {
9536 __ mov(ecx, key_);
9537 key_ = ecx;
9538 } else {
9539 __ mov(scratch_, key_);
9540 key_ = scratch_;
9541 }
9542 }
9543 if (receiver_.is(eax)) {
9544 // Move receiver_ out of eax, preferably to edx.
9545 if (!value_.is(edx) && !key_.is(edx)) {
9546 __ mov(edx, receiver_);
9547 receiver_ = edx;
9548 } else {
9549         // Both moves to scratch are from eax; no valid path hits both.
9550 __ mov(scratch_, receiver_);
9551 receiver_ = scratch_;
9552 }
9553 }
9554 __ mov(eax, value_);
9555 value_ = eax;
9556 }
9557
9558 // Now value_ is in eax. Move the other two to the right positions.
9559 // We do not update the variables key_ and receiver_ to ecx and edx.
9560 if (key_.is(ecx)) {
9561 if (!receiver_.is(edx)) {
9562 __ mov(edx, receiver_);
9563 }
9564 } else if (key_.is(edx)) {
9565 if (receiver_.is(ecx)) {
9566 __ xchg(edx, ecx);
9567 } else {
9568 __ mov(ecx, key_);
9569 if (!receiver_.is(edx)) {
9570 __ mov(edx, receiver_);
9571 }
9572 }
9573 } else { // Key is not in edx or ecx.
9574 if (!receiver_.is(edx)) {
9575 __ mov(edx, receiver_);
9576 }
9577 __ mov(ecx, key_);
9578 }
9579
9580 // Call the IC stub.
9581 Handle<Code> ic(masm()->isolate()->builtins()->builtin(
9582 (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
9583 : Builtins::kKeyedStoreIC_Initialize));
9584 __ call(ic, RelocInfo::CODE_TARGET);
9585 // The delta from the start of the map-compare instruction to the
9586 // test instruction. We use masm_-> directly here instead of the
9587 // __ macro because the macro sometimes uses macro expansion to turn
9588 // into something that can't return a value. This is encountered
9589 // when doing generated code coverage tests.
9590 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
9591 // Here we use masm_-> instead of the __ macro because this is the
9592 // instruction that gets patched and coverage code gets in the way.
9593 masm_->test(eax, Immediate(-delta_to_patch_site));
9594 // Restore value (returned from store IC) register.
9595 if (!old_value.is(eax)) __ mov(old_value, eax);
9596 }
9597
9598
9599 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
9600 #ifdef DEBUG
9601 int original_height = frame()->height();
9602 #endif
9603
9604 Isolate* isolate = masm()->isolate();
9605 Factory* factory = isolate->factory();
9606 Counters* counters = isolate->counters();
9607
9608 bool contextual_load_in_builtin =
9609 is_contextual &&
9610 (isolate->bootstrapper()->IsActive() ||
9611 (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
9612
9613 Result result;
9614   // Do not inline in global code or outside of a loop.
9615 if (scope()->is_global_scope() ||
9616 loop_nesting() == 0 ||
9617 contextual_load_in_builtin) {
9618 Comment cmnt(masm(), "[ Load from named Property");
9619 frame()->Push(name);
9620
9621 RelocInfo::Mode mode = is_contextual
9622 ? RelocInfo::CODE_TARGET_CONTEXT
9623 : RelocInfo::CODE_TARGET;
9624 result = frame()->CallLoadIC(mode);
9625 // A test eax instruction following the call signals that the inobject
9626 // property case was inlined. Ensure that there is not a test eax
9627 // instruction here.
9628 __ nop();
9629 } else {
9630 // Inline the property load.
9631 Comment cmnt(masm(), is_contextual
9632 ? "[ Inlined contextual property load"
9633 : "[ Inlined named property load");
9634 Result receiver = frame()->Pop();
9635 receiver.ToRegister();
9636
9637 result = allocator()->Allocate();
9638 ASSERT(result.is_valid());
9639 DeferredReferenceGetNamedValue* deferred =
9640 new DeferredReferenceGetNamedValue(result.reg(),
9641 receiver.reg(),
9642 name,
9643 is_contextual);
9644
9645 if (!is_contextual) {
9646 // Check that the receiver is a heap object.
9647 __ test(receiver.reg(), Immediate(kSmiTagMask));
9648 deferred->Branch(zero);
9649 }
9650
9651 __ bind(deferred->patch_site());
9652 // This is the map check instruction that will be patched (so we can't
9653 // use the double underscore macro that may insert instructions).
9654 // Initially use an invalid map to force a failure.
9655 masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
9656 Immediate(factory->null_value()));
9657 // This branch is always a forwards branch so it's always a fixed size
9658 // which allows the assert below to succeed and patching to work.
9659 deferred->Branch(not_equal);
9660
9661 // The delta from the patch label to the actual load must be
9662 // statically known.
9663 ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
9664 LoadIC::kOffsetToLoadInstruction);
9665
9666 if (is_contextual) {
9667       // Load the (initially invalid) cell and get its value.
9668 masm()->mov(result.reg(), factory->null_value());
9669 if (FLAG_debug_code) {
9670 __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
9671 factory->global_property_cell_map());
9672 __ Assert(equal, "Uninitialized inlined contextual load");
9673 }
9674 __ mov(result.reg(),
9675 FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
9676 __ cmp(result.reg(), factory->the_hole_value());
9677 deferred->Branch(equal);
9678 bool is_dont_delete = false;
9679 if (!info_->closure().is_null()) {
9680 // When doing lazy compilation we can check if the global cell
9681 // already exists and use its "don't delete" status as a hint.
9682 AssertNoAllocation no_gc;
9683 v8::internal::GlobalObject* global_object =
9684 info_->closure()->context()->global();
9685 LookupResult lookup;
9686 global_object->LocalLookupRealNamedProperty(*name, &lookup);
9687 if (lookup.IsProperty() && lookup.type() == NORMAL) {
9688 ASSERT(lookup.holder() == global_object);
9689 ASSERT(global_object->property_dictionary()->ValueAt(
9690 lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
9691 is_dont_delete = lookup.IsDontDelete();
9692 }
9693 }
9694 deferred->set_is_dont_delete(is_dont_delete);
9695 if (!is_dont_delete) {
9696 __ cmp(result.reg(), factory->the_hole_value());
9697 deferred->Branch(equal);
9698 } else if (FLAG_debug_code) {
9699 __ cmp(result.reg(), factory->the_hole_value());
9700 __ Check(not_equal, "DontDelete cells can't contain the hole");
9701 }
9702 __ IncrementCounter(counters->named_load_global_inline(), 1);
9703 if (is_dont_delete) {
9704 __ IncrementCounter(counters->dont_delete_hint_hit(), 1);
9705 }
9706 } else {
9707 // The initial (invalid) offset has to be large enough to force a 32-bit
9708 // instruction encoding to allow patching with an arbitrary offset. Use
9709 // kMaxInt (minus kHeapObjectTag).
9710 int offset = kMaxInt;
9711 masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
9712 __ IncrementCounter(counters->named_load_inline(), 1);
9713 }
9714
9715 deferred->BindExit();
9716 }
9717 ASSERT(frame()->height() == original_height - 1);
9718 return result;
9719 }
9720
9721
9722 Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
9723 #ifdef DEBUG
9724 int expected_height = frame()->height() - (is_contextual ? 1 : 2);
9725 #endif
9726
9727 Result result;
9728 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
9729 result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
9730 // A test eax instruction following the call signals that the inobject
9731 // property case was inlined. Ensure that there is not a test eax
9732 // instruction here.
9733 __ nop();
9734 } else {
9735 // Inline the in-object property case.
9736 JumpTarget slow, done;
9737 Label patch_site;
9738
9739 // Get the value and receiver from the stack.
9740 Result value = frame()->Pop();
9741 value.ToRegister();
9742 Result receiver = frame()->Pop();
9743 receiver.ToRegister();
9744
9745 // Allocate result register.
9746 result = allocator()->Allocate();
9747 ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
9748
9749 // Check that the receiver is a heap object.
9750 __ test(receiver.reg(), Immediate(kSmiTagMask));
9751 slow.Branch(zero, &value, &receiver);
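// Hedged note: with kSmiTag == 0 and kSmiTagMask == 1, `test` sets ZF
// exactly when the low tag bit is clear, i.e. when the receiver is a
// smi; only heap objects (tag bit set) continue on the inlined path.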
9752
9753 // This is the map check instruction that will be patched (so we can't
9754 // use the double underscore macro that may insert instructions).
9755 // Initially use an invalid map to force a failure.
9756 __ bind(&patch_site);
9757 masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
9758 Immediate(FACTORY->null_value()));
9759 // This branch is always a forwards branch so it's always a fixed size
9760 // which allows the assert below to succeed and patching to work.
9761 slow.Branch(not_equal, &value, &receiver);
9762
9763 // The delta from the patch label to the store offset must be
9764 // statically known.
9765 ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
9766 StoreIC::kOffsetToStoreInstruction);
9767
9768 // The initial (invalid) offset has to be large enough to force a 32-bit
9769 // instruction encoding to allow patching with an arbitrary offset. Use
9770 // kMaxInt (minus kHeapObjectTag).
9771 int offset = kMaxInt;
9772 __ mov(FieldOperand(receiver.reg(), offset), value.reg());
9773 __ mov(result.reg(), Operand(value.reg()));
9774
9775 // Allocate scratch register for write barrier.
9776 Result scratch = allocator()->Allocate();
9777 ASSERT(scratch.is_valid());
9778
9779 // The write barrier clobbers all input registers, so spill the
9780 // receiver and the value.
9781 frame_->Spill(receiver.reg());
9782 frame_->Spill(value.reg());
9783
9784 // If the receiver and the value share a register, allocate a new
9785 // register for the receiver.
9786 if (receiver.reg().is(value.reg())) {
9787 receiver = allocator()->Allocate();
9788 ASSERT(receiver.is_valid());
9789 __ mov(receiver.reg(), Operand(value.reg()));
9790 }
9791
9792 // Update the write barrier. To save instructions in the inlined
9793 // version we do not filter smis.
9794 Label skip_write_barrier;
9795 __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
9796 int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
9797 __ lea(scratch.reg(), Operand(receiver.reg(), offset));
9798 __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
9799 if (FLAG_debug_code) {
9800 __ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
9801 __ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
9802 __ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
9803 }
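// Hedged note: all three registers are dead after the barrier; in
// debug builds they are overwritten with kZapValue, a recognizable
// garbage constant, so any accidental later use fails loudly.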
9804 __ bind(&skip_write_barrier);
9805 value.Unuse();
9806 scratch.Unuse();
9807 receiver.Unuse();
9808 done.Jump(&result);
9809
9810 slow.Bind(&value, &receiver);
9811 frame()->Push(&receiver);
9812 frame()->Push(&value);
9813 result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
9814 // Encode the offset to the map check instruction and the offset
9815 // to the write barrier store address computation in a test eax
9816 // instruction.
9817 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
9818 __ test(eax,
9819 Immediate((delta_to_record_write << 16) | delta_to_patch_site));
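// Sketch of the packing (unpacking code is assumed, for illustration;
// both deltas must fit in 16 bits for the encoding to be lossless):
//   int32_t imm = (delta_to_record_write << 16) | delta_to_patch_site;
//   int to_patch_site   = imm & 0xFFFF;
//   int to_record_write = imm >> 16;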
9820 done.Bind(&result);
9821 }
9822
9823 ASSERT_EQ(expected_height, frame()->height());
9824 return result;
9825 }
9826
9827
9828 Result CodeGenerator::EmitKeyedLoad() {
9829 #ifdef DEBUG
9830 int original_height = frame()->height();
9831 #endif
9832 Result result;
9833 // Inline the array load code if inside a loop. We do not know the
9834 // receiver map yet, so we initially generate the code with a check
9835 // against an invalid map. In the inline cache code, we patch the map
9836 // check if appropriate.
9837 if (loop_nesting() > 0) {
9838 Comment cmnt(masm_, "[ Inlined load from keyed Property");
9839
9840 // Use a fresh temporary to load the elements without destroying
9841 // the receiver which is needed for the deferred slow case.
9842 Result elements = allocator()->Allocate();
9843 ASSERT(elements.is_valid());
9844
9845 Result key = frame_->Pop();
9846 Result receiver = frame_->Pop();
9847 key.ToRegister();
9848 receiver.ToRegister();
9849
9850 // If key and receiver are shared registers on the frame, their values will
9851 // be automatically saved and restored when going to deferred code.
9852 // The result is in elements, which is guaranteed non-shared.
9853 DeferredReferenceGetKeyedValue* deferred =
9854 new DeferredReferenceGetKeyedValue(elements.reg(),
9855 receiver.reg(),
9856 key.reg());
9857
9858 __ test(receiver.reg(), Immediate(kSmiTagMask));
9859 deferred->Branch(zero);
9860
9861 // Check that the receiver has the expected map.
9862 // Initially, use an invalid map. The map is patched in the IC
9863 // initialization code.
9864 __ bind(deferred->patch_site());
9865 // Use masm-> here instead of the double underscore macro since extra
9866 // coverage code can interfere with the patching.
9867 masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
9868 Immediate(FACTORY->null_value()));
9869 deferred->Branch(not_equal);
9870
9871 // Check that the key is a smi.
9872 if (!key.is_smi()) {
9873 __ test(key.reg(), Immediate(kSmiTagMask));
9874 deferred->Branch(not_zero);
9875 } else {
9876 if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
9877 }
9878
9879 // Get the elements array from the receiver.
9880 __ mov(elements.reg(),
9881 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
9882 __ AssertFastElements(elements.reg());
9883
9884 // Check that the key is within bounds.
9885 __ cmp(key.reg(),
9886 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
9887 deferred->Branch(above_equal);
9888
9889 // Load and check that the result is not the hole.
9890 // Key holds a smi.
9891 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
9892 __ mov(elements.reg(),
9893 FieldOperand(elements.reg(),
9894 key.reg(),
9895 times_2,
9896 FixedArray::kHeaderSize));
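// Sketch of the scaling (assuming 32-bit tagged values): a smi holds
// its integer shifted left by kSmiTagSize (1), so the key register is
// already index * 2, and the times_2 scale yields index * 4, i.e.
// index * kPointerSize on ia32:
//   address = elements + kHeaderSize - kHeapObjectTag + (key << 1)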
9897 result = elements;
9898 __ cmp(Operand(result.reg()), Immediate(FACTORY->the_hole_value()));
9899 deferred->Branch(equal);
9900 __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(), 1);
9901
9902 deferred->BindExit();
9903 } else {
9904 Comment cmnt(masm_, "[ Load from keyed Property");
9905 result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
9906 // Make sure that we do not have a test instruction after the
9907 // call. A test instruction after the call is used to
9908 // indicate that we have generated an inline version of the
9909 // keyed load. The explicit nop instruction is here because
9910 // the push that follows might be peep-hole optimized away.
9911 __ nop();
9912 }
9913 ASSERT(frame()->height() == original_height - 2);
9914 return result;
9915 }
9916
9917
9918 Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
9919 #ifdef DEBUG
9920 int original_height = frame()->height();
9921 #endif
9922 Result result;
9923 // Generate an inlined version of the keyed store if the code is in a loop
9924 // and the key is likely to be a smi.
9925 if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
9926 Comment cmnt(masm(), "[ Inlined store to keyed Property");
9927
9928 // Get the receiver, key and value into registers.
9929 result = frame()->Pop();
9930 Result key = frame()->Pop();
9931 Result receiver = frame()->Pop();
9932
9933 Result tmp = allocator_->Allocate();
9934 ASSERT(tmp.is_valid());
9935 Result tmp2 = allocator_->Allocate();
9936 ASSERT(tmp2.is_valid());
9937
9938 // Determine whether the value is a constant before putting it in a
9939 // register.
9940 bool value_is_constant = result.is_constant();
9941
9942 // Make sure that value, key and receiver are in registers.
9943 result.ToRegister();
9944 key.ToRegister();
9945 receiver.ToRegister();
9946
9947 DeferredReferenceSetKeyedValue* deferred =
9948 new DeferredReferenceSetKeyedValue(result.reg(),
9949 key.reg(),
9950 receiver.reg(),
9951 tmp.reg(),
9952 strict_mode_flag());
9953
9954 // Check that the receiver is not a smi.
9955 __ test(receiver.reg(), Immediate(kSmiTagMask));
9956 deferred->Branch(zero);
9957
9958 // Check that the key is a smi.
9959 if (!key.is_smi()) {
9960 __ test(key.reg(), Immediate(kSmiTagMask));
9961 deferred->Branch(not_zero);
9962 } else {
9963 if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
9964 }
9965
9966 // Check that the receiver is a JSArray.
9967 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
9968 deferred->Branch(not_equal);
9969
9970 // Get the elements array from the receiver and check that it is not a
9971 // dictionary.
9972 __ mov(tmp.reg(),
9973 FieldOperand(receiver.reg(), JSArray::kElementsOffset));
9974
9975 // Check whether it is possible to omit the write barrier. If the elements
9976 // array is in new space or the value written is a smi, we can safely
9977 // update the elements array without a write barrier.
9978 Label in_new_space;
9979 __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
9980 if (!value_is_constant) {
9981 __ test(result.reg(), Immediate(kSmiTagMask));
9982 deferred->Branch(not_zero);
9983 }
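// Hedged rationale: the write barrier only needs to record pointers
// from old space into new space. If the elements array is itself in
// new space, or the stored value is a smi rather than a pointer, no
// record is needed; any remaining case branches to the deferred IC,
// which performs the store with a full write barrier.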
9984
9985 __ bind(&in_new_space);
9986 // Bind the deferred code patch site to be able to locate the fixed
9987 // array map comparison. When debugging, we patch this comparison to
9988 // always fail so that we will hit the IC call in the deferred code
9989 // which will allow the debugger to break for fast case stores.
9990 __ bind(deferred->patch_site());
9991 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
9992 Immediate(FACTORY->fixed_array_map()));
9993 deferred->Branch(not_equal);
9994
9995 // Check that the key is within bounds. Both the key and the length of
9996 // the JSArray are smis (because the fixed array check above ensures the
9997 // elements are in fast case). Use unsigned comparison to handle negative
9998 // keys.
9999 __ cmp(key.reg(),
10000 FieldOperand(receiver.reg(), JSArray::kLengthOffset));
10001 deferred->Branch(above_equal);
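// Sketch of the bounds trick (two's-complement reading assumed): a
// negative smi key has its sign bit set, so interpreted unsigned it
// compares above any valid length; the single `above_equal` branch
// therefore rejects both out-of-range and negative keys.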
10002
10003 // Store the value.
10004 __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
10005 __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(), 1);
10006
10007 deferred->BindExit();
10008 } else {
10009 result = frame()->CallKeyedStoreIC(strict_mode_flag());
10010 // Make sure that we do not have a test instruction after the
10011 // call. A test instruction after the call is used to
10012 // indicate that we have generated an inline version of the
10013 // keyed store.
10014 __ nop();
10015 }
10016 ASSERT(frame()->height() == original_height - 3);
10017 return result;
10018 }
10019
10020
10021 #undef __
10022 #define __ ACCESS_MASM(masm)
10023
10024
10025 Handle<String> Reference::GetName() {
10026 ASSERT(type_ == NAMED);
10027 Property* property = expression_->AsProperty();
10028 if (property == NULL) {
10029 // Global variable reference treated as a named property reference.
10030 VariableProxy* proxy = expression_->AsVariableProxy();
10031 ASSERT(proxy->AsVariable() != NULL);
10032 ASSERT(proxy->AsVariable()->is_global());
10033 return proxy->name();
10034 } else {
10035 Literal* raw_name = property->key()->AsLiteral();
10036 ASSERT(raw_name != NULL);
10037 return Handle<String>::cast(raw_name->handle());
10038 }
10039 }
10040
10041
10042 void Reference::GetValue() {
10043 ASSERT(!cgen_->in_spilled_code());
10044 ASSERT(cgen_->HasValidEntryRegisters());
10045 ASSERT(!is_illegal());
10046 MacroAssembler* masm = cgen_->masm();
10047
10048 // Record the source position for the property load.
10049 Property* property = expression_->AsProperty();
10050 if (property != NULL) {
10051 cgen_->CodeForSourcePosition(property->position());
10052 }
10053
10054 switch (type_) {
10055 case SLOT: {
10056 Comment cmnt(masm, "[ Load from Slot");
10057 Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
10058 ASSERT(slot != NULL);
10059 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
10060 if (!persist_after_get_) set_unloaded();
10061 break;
10062 }
10063
10064 case NAMED: {
10065 Variable* var = expression_->AsVariableProxy()->AsVariable();
10066 bool is_global = var != NULL;
10067 ASSERT(!is_global || var->is_global());
10068 if (persist_after_get_) cgen_->frame()->Dup();
10069 Result result = cgen_->EmitNamedLoad(GetName(), is_global);
10070 if (!persist_after_get_) set_unloaded();
10071 cgen_->frame()->Push(&result);
10072 break;
10073 }
10074
10075 case KEYED: {
10076 if (persist_after_get_) {
10077 cgen_->frame()->PushElementAt(1);
10078 cgen_->frame()->PushElementAt(1);
10079 }
10080 Result value = cgen_->EmitKeyedLoad();
10081 cgen_->frame()->Push(&value);
10082 if (!persist_after_get_) set_unloaded();
10083 break;
10084 }
10085
10086 default:
10087 UNREACHABLE();
10088 }
10089 }
10090
10091
10092 void Reference::TakeValue() {
10093 // For non-constant frame-allocated slots, we invalidate the value in the
10094 // slot. For all others, we fall back on GetValue.
10095 ASSERT(!cgen_->in_spilled_code());
10096 ASSERT(!is_illegal());
10097 if (type_ != SLOT) {
10098 GetValue();
10099 return;
10100 }
10101
10102 Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
10103 ASSERT(slot != NULL);
10104 if (slot->type() == Slot::LOOKUP ||
10105 slot->type() == Slot::CONTEXT ||
10106 slot->var()->mode() == Variable::CONST ||
10107 slot->is_arguments()) {
10108 GetValue();
10109 return;
10110 }
10111
10112 // Only non-constant, frame-allocated parameters and locals can
10113 // reach here. Be careful not to use the optimizations for arguments
10114 // object access since it may not have been initialized yet.
10115 ASSERT(!slot->is_arguments());
10116 if (slot->type() == Slot::PARAMETER) {
10117 cgen_->frame()->TakeParameterAt(slot->index());
10118 } else {
10119 ASSERT(slot->type() == Slot::LOCAL);
10120 cgen_->frame()->TakeLocalAt(slot->index());
10121 }
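// Hedged reading: TakeParameterAt/TakeLocalAt move the slot's value
// into a register and invalidate the frame slot, so the SetValue that
// follows can overwrite the slot without preserving a stale copy.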
10122
10123 ASSERT(persist_after_get_);
10124 // Do not unload the reference, because it is used in SetValue.
10125 }
10126
10127
10128 void Reference::SetValue(InitState init_state) {
10129 ASSERT(cgen_->HasValidEntryRegisters());
10130 ASSERT(!is_illegal());
10131 MacroAssembler* masm = cgen_->masm();
10132 switch (type_) {
10133 case SLOT: {
10134 Comment cmnt(masm, "[ Store to Slot");
10135 Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
10136 ASSERT(slot != NULL);
10137 cgen_->StoreToSlot(slot, init_state);
10138 set_unloaded();
10139 break;
10140 }
10141
10142 case NAMED: {
10143 Comment cmnt(masm, "[ Store to named Property");
10144 Result answer = cgen_->EmitNamedStore(GetName(), false);
10145 cgen_->frame()->Push(&answer);
10146 set_unloaded();
10147 break;
10148 }
10149
10150 case KEYED: {
10151 Comment cmnt(masm, "[ Store to keyed Property");
10152 Property* property = expression()->AsProperty();
10153 ASSERT(property != NULL);
10154
10155 Result answer = cgen_->EmitKeyedStore(property->key()->type());
10156 cgen_->frame()->Push(&answer);
10157 set_unloaded();
10158 break;
10159 }
10160
10161 case UNLOADED:
10162 case ILLEGAL:
10163 UNREACHABLE();
10164 }
10165 }
10166
10167
10168 #undef __
10169
10170 #define __ masm. 51 #define __ masm.
10171 52
10172
10173 static void MemCopyWrapper(void* dest, const void* src, size_t size) { 53 static void MemCopyWrapper(void* dest, const void* src, size_t size) {
10174 memcpy(dest, src, size); 54 memcpy(dest, src, size);
10175 } 55 }
10176 56
10177 57
10178 OS::MemCopyFunction CreateMemCopyFunction() { 58 OS::MemCopyFunction CreateMemCopyFunction() {
10179 size_t actual_size; 59 size_t actual_size;
10180 // Allocate buffer in executable space. 60 // Allocate buffer in executable space.
10181 byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, 61 byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
10182 &actual_size, 62 &actual_size,
(...skipping 193 matching lines...)
10376 256
10377 CPU::FlushICache(buffer, actual_size); 257 CPU::FlushICache(buffer, actual_size);
10378 return FUNCTION_CAST<OS::MemCopyFunction>(buffer); 258 return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
10379 } 259 }
10380 260
10381 #undef __ 261 #undef __
10382 262
10383 } } // namespace v8::internal 263 } } // namespace v8::internal
10384 264
10385 #endif // V8_TARGET_ARCH_IA32 265 #endif // V8_TARGET_ARCH_IA32