Chromium Code Reviews

Side by Side Diff: src/arm/codegen-arm.cc

Issue 6811012: Remove some dead code. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 9 years, 8 months ago
OLD | NEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 10 matching lines...) Expand all
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #if defined(V8_TARGET_ARCH_ARM) 30 #if defined(V8_TARGET_ARCH_ARM)
31 31
32 #include "bootstrapper.h" 32 #include "codegen.h"
33 #include "code-stubs.h"
34 #include "codegen-inl.h"
35 #include "compiler.h"
36 #include "debug.h"
37 #include "ic-inl.h"
38 #include "jsregexp.h"
39 #include "jump-target-inl.h"
40 #include "parser.h"
41 #include "regexp-macro-assembler.h"
42 #include "regexp-stack.h"
43 #include "register-allocator-inl.h"
44 #include "runtime.h"
45 #include "scopes.h"
46 #include "stub-cache.h"
47 #include "virtual-frame-inl.h"
48 #include "virtual-frame-arm-inl.h"
49 33
50 namespace v8 { 34 namespace v8 {
51 namespace internal { 35 namespace internal {
52 36
53
54 #define __ ACCESS_MASM(masm_)
55
56 // -------------------------------------------------------------------------
57 // Platform-specific DeferredCode functions.
58
59 void DeferredCode::SaveRegisters() {
60 // On ARM you either have a completely spilled frame or you
61 // handle it yourself, but at the moment there's no automation
62 // of registers and deferred code.
63 }
64
65
66 void DeferredCode::RestoreRegisters() {
67 }
68
69
70 // ------------------------------------------------------------------------- 37 // -------------------------------------------------------------------------
71 // Platform-specific RuntimeCallHelper functions. 38 // Platform-specific RuntimeCallHelper functions.
72 39
73 void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
74 frame_state_->frame()->AssertIsSpilled();
75 }
76
77
78 void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
79 }
80
81
82 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { 40 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
83 masm->EnterInternalFrame(); 41 masm->EnterInternalFrame();
84 } 42 }
85 43
86 44
87 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { 45 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
88 masm->LeaveInternalFrame(); 46 masm->LeaveInternalFrame();
89 } 47 }
90 48
91 49
92 // -------------------------------------------------------------------------
93 // CodeGenState implementation.
94
95 CodeGenState::CodeGenState(CodeGenerator* owner)
96 : owner_(owner),
97 previous_(owner->state()) {
98 owner->set_state(this);
99 }
100
101
102 ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
103 JumpTarget* true_target,
104 JumpTarget* false_target)
105 : CodeGenState(owner),
106 true_target_(true_target),
107 false_target_(false_target) {
108 owner->set_state(this);
109 }
110
111
112 TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
113 Slot* slot,
114 TypeInfo type_info)
115 : CodeGenState(owner),
116 slot_(slot) {
117 owner->set_state(this);
118 old_type_info_ = owner->set_type_info(slot, type_info);
119 }
120
121
122 CodeGenState::~CodeGenState() {
123 ASSERT(owner_->state() == this);
124 owner_->set_state(previous_);
125 }
126
127
128 TypeInfoCodeGenState::~TypeInfoCodeGenState() {
129 owner()->set_type_info(slot_, old_type_info_);
130 }
131
132 // -------------------------------------------------------------------------
133 // CodeGenerator implementation
134
135 CodeGenerator::CodeGenerator(MacroAssembler* masm)
136 : deferred_(8),
137 masm_(masm),
138 info_(NULL),
139 frame_(NULL),
140 allocator_(NULL),
141 cc_reg_(al),
142 state_(NULL),
143 loop_nesting_(0),
144 type_info_(NULL),
145 function_return_(JumpTarget::BIDIRECTIONAL),
146 function_return_is_shadowed_(false) {
147 }
148
149
150 // Calling conventions:
151 // fp: caller's frame pointer
152 // sp: stack pointer
153 // r1: called JS function
154 // cp: callee's context
155
156 void CodeGenerator::Generate(CompilationInfo* info) {
157 // Record the position for debugging purposes.
158 CodeForFunctionPosition(info->function());
159 Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
160
161 // Initialize state.
162 info_ = info;
163
164 int slots = scope()->num_parameters() + scope()->num_stack_slots();
165 ScopedVector<TypeInfo> type_info_array(slots);
166 for (int i = 0; i < slots; i++) {
167 type_info_array[i] = TypeInfo::Unknown();
168 }
169 type_info_ = &type_info_array;
170
171 ASSERT(allocator_ == NULL);
172 RegisterAllocator register_allocator(this);
173 allocator_ = &register_allocator;
174 ASSERT(frame_ == NULL);
175 frame_ = new VirtualFrame();
176 cc_reg_ = al;
177
178 // Adjust for function-level loop nesting.
179 ASSERT_EQ(0, loop_nesting_);
180 loop_nesting_ = info->is_in_loop() ? 1 : 0;
181
182 {
183 CodeGenState state(this);
184
185 // Entry:
186 // Stack: receiver, arguments
187 // lr: return address
188 // fp: caller's frame pointer
189 // sp: stack pointer
190 // r1: called JS function
191 // cp: callee's context
192 allocator_->Initialize();
193
194 #ifdef DEBUG
195 if (strlen(FLAG_stop_at) > 0 &&
196 info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
197 frame_->SpillAll();
198 __ stop("stop-at");
199 }
200 #endif
201
202 frame_->Enter();
203 // tos: code slot
204
205 // Allocate space for locals and initialize them. This also checks
206 // for stack overflow.
207 frame_->AllocateStackSlots();
208
209 frame_->AssertIsSpilled();
210 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
211 if (heap_slots > 0) {
212 // Allocate local context.
213 // Get outer context and create a new context based on it.
214 __ ldr(r0, frame_->Function());
215 frame_->EmitPush(r0);
216 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
217 FastNewContextStub stub(heap_slots);
218 frame_->CallStub(&stub, 1);
219 } else {
220 frame_->CallRuntime(Runtime::kNewContext, 1);
221 }
222
223 #ifdef DEBUG
224 JumpTarget verified_true;
225 __ cmp(r0, cp);
226 verified_true.Branch(eq);
227 __ stop("NewContext: r0 is expected to be the same as cp");
228 verified_true.Bind();
229 #endif
230 // Update context local.
231 __ str(cp, frame_->Context());
232 }
233
234 // TODO(1241774): Improve this code:
235 // 1) only needed if we have a context
236 // 2) no need to recompute context ptr every single time
237 // 3) don't copy parameter operand code from SlotOperand!
238 {
239 Comment cmnt2(masm_, "[ copy context parameters into .context");
240 // Note that iteration order is relevant here! If we have the same
241 // parameter twice (e.g., function (x, y, x)), and that parameter
242 // needs to be copied into the context, it must be the last argument
243 // passed to the parameter that needs to be copied. This is a rare
244 // case so we don't check for it, instead we rely on the copying
245 // order: such a parameter is copied repeatedly into the same
246 // context location and thus the last value is what is seen inside
247 // the function.
248 frame_->AssertIsSpilled();
249 for (int i = 0; i < scope()->num_parameters(); i++) {
250 Variable* par = scope()->parameter(i);
251 Slot* slot = par->AsSlot();
252 if (slot != NULL && slot->type() == Slot::CONTEXT) {
253 ASSERT(!scope()->is_global_scope()); // No params in global scope.
254 __ ldr(r1, frame_->ParameterAt(i));
255 // Loads r2 with context; used below in RecordWrite.
256 __ str(r1, SlotOperand(slot, r2));
257 // Load the offset into r3.
258 int slot_offset =
259 FixedArray::kHeaderSize + slot->index() * kPointerSize;
260 __ RecordWrite(r2, Operand(slot_offset), r3, r1);
261 }
262 }
263 }
264
265 // Store the arguments object. This must happen after context
266 // initialization because the arguments object may be stored in
267 // the context.
268 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
269 StoreArgumentsObject(true);
270 }
271
272 // Initialize ThisFunction reference if present.
273 if (scope()->is_function_scope() && scope()->function() != NULL) {
274 frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
275 StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
276 }
277
278 // Initialize the function return target after the locals are set
279 // up, because it needs the expected frame height from the frame.
280 function_return_.SetExpectedHeight();
281 function_return_is_shadowed_ = false;
282
283 // Generate code to 'execute' declarations and initialize functions
284 // (source elements). In case of an illegal redeclaration we need to
285 // handle that instead of processing the declarations.
286 if (scope()->HasIllegalRedeclaration()) {
287 Comment cmnt(masm_, "[ illegal redeclarations");
288 scope()->VisitIllegalRedeclaration(this);
289 } else {
290 Comment cmnt(masm_, "[ declarations");
291 ProcessDeclarations(scope()->declarations());
292 // Bail out if a stack-overflow exception occurred when processing
293 // declarations.
294 if (HasStackOverflow()) return;
295 }
296
297 if (FLAG_trace) {
298 frame_->CallRuntime(Runtime::kTraceEnter, 0);
299 // Ignore the return value.
300 }
301
302 // Compile the body of the function in a vanilla state. Don't
303 // bother compiling all the code if the scope has an illegal
304 // redeclaration.
305 if (!scope()->HasIllegalRedeclaration()) {
306 Comment cmnt(masm_, "[ function body");
307 #ifdef DEBUG
308 bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
309 bool should_trace =
310 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
311 if (should_trace) {
312 frame_->CallRuntime(Runtime::kDebugTrace, 0);
313 // Ignore the return value.
314 }
315 #endif
316 VisitStatements(info->function()->body());
317 }
318 }
319
320 // Handle the return from the function.
321 if (has_valid_frame()) {
322 // If there is a valid frame, control flow can fall off the end of
323 // the body. In that case there is an implicit return statement.
324 ASSERT(!function_return_is_shadowed_);
325 frame_->PrepareForReturn();
326 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
327 if (function_return_.is_bound()) {
328 function_return_.Jump();
329 } else {
330 function_return_.Bind();
331 GenerateReturnSequence();
332 }
333 } else if (function_return_.is_linked()) {
334 // If the return target has dangling jumps to it, then we have not
335 // yet generated the return sequence. This can happen when (a)
336 // control does not flow off the end of the body so we did not
337 // compile an artificial return statement just above, and (b) there
338 // are return statements in the body but (c) they are all shadowed.
339 function_return_.Bind();
340 GenerateReturnSequence();
341 }
342
343 // Adjust for function-level loop nesting.
344 ASSERT(loop_nesting_ == info->is_in_loop()? 1 : 0);
345 loop_nesting_ = 0;
346
347 // Code generation state must be reset.
348 ASSERT(!has_cc());
349 ASSERT(state_ == NULL);
350 ASSERT(loop_nesting() == 0);
351 ASSERT(!function_return_is_shadowed_);
352 function_return_.Unuse();
353 DeleteFrame();
354
355 // Process any deferred code using the register allocator.
356 if (!HasStackOverflow()) {
357 ProcessDeferred();
358 }
359
360 allocator_ = NULL;
361 type_info_ = NULL;
362 }
363
364
365 int CodeGenerator::NumberOfSlot(Slot* slot) {
366 if (slot == NULL) return kInvalidSlotNumber;
367 switch (slot->type()) {
368 case Slot::PARAMETER:
369 return slot->index();
370 case Slot::LOCAL:
371 return slot->index() + scope()->num_parameters();
372 default:
373 break;
374 }
375 return kInvalidSlotNumber;
376 }
377
378
379 MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
380 // Currently, this assertion will fail if we try to assign to
381 // a constant variable that is constant because it is read-only
382 // (such as the variable referring to a named function expression).
383 // We need to implement assignments to read-only variables.
384 // Ideally, we should do this during AST generation (by converting
385 // such assignments into expression statements); however, in general
386 // we may not be able to make the decision until past AST generation,
387 // that is when the entire program is known.
388 ASSERT(slot != NULL);
389 int index = slot->index();
390 switch (slot->type()) {
391 case Slot::PARAMETER:
392 return frame_->ParameterAt(index);
393
394 case Slot::LOCAL:
395 return frame_->LocalAt(index);
396
397 case Slot::CONTEXT: {
398 // Follow the context chain if necessary.
399 ASSERT(!tmp.is(cp)); // do not overwrite context register
400 Register context = cp;
401 int chain_length = scope()->ContextChainLength(slot->var()->scope());
402 for (int i = 0; i < chain_length; i++) {
403 // Load the closure.
404 // (All contexts, even 'with' contexts, have a closure,
405 // and it is the same for all contexts inside a function.
406 // There is no need to go to the function context first.)
407 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
408 // Load the function context (which is the incoming, outer context).
409 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
410 context = tmp;
411 }
412 // We may have a 'with' context now. Get the function context.
413 // (In fact this mov may never be needed, since the scope analysis
414 // may not permit a direct context access in this case and thus we are
415 // always at a function context. However it is safe to dereference be-
416 // cause the function context of a function context is itself. Before
417 // deleting this mov we should try to create a counter-example first,
418 // though...)
419 __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
420 return ContextOperand(tmp, index);
421 }
422
423 default:
424 UNREACHABLE();
425 return MemOperand(r0, 0);
426 }
427 }
428
429
430 MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
431 Slot* slot,
432 Register tmp,
433 Register tmp2,
434 JumpTarget* slow) {
435 ASSERT(slot->type() == Slot::CONTEXT);
436 Register context = cp;
437
438 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
439 if (s->num_heap_slots() > 0) {
440 if (s->calls_eval()) {
441 // Check that extension is NULL.
442 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
443 __ tst(tmp2, tmp2);
444 slow->Branch(ne);
445 }
446 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
447 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
448 context = tmp;
449 }
450 }
451 // Check that last extension is NULL.
452 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
453 __ tst(tmp2, tmp2);
454 slow->Branch(ne);
455 __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
456 return ContextOperand(tmp, slot->index());
457 }
458
459
460 // Loads a value on TOS. If it is a boolean value, the result may have been
461 // (partially) translated into branches, or it may have set the condition
462 // code register. If force_cc is set, the value is forced to set the
463 // condition code register and no value is pushed. If the condition code
464 // register was set, has_cc() is true and cc_reg_ contains the condition to
465 // test for 'true'.
466 void CodeGenerator::LoadCondition(Expression* x,
467 JumpTarget* true_target,
468 JumpTarget* false_target,
469 bool force_cc) {
470 ASSERT(!has_cc());
471 int original_height = frame_->height();
472
473 { ConditionCodeGenState new_state(this, true_target, false_target);
474 Visit(x);
475
476 // If we hit a stack overflow, we may not have actually visited
477 // the expression. In that case, we ensure that we have a
478 // valid-looking frame state because we will continue to generate
479 // code as we unwind the C++ stack.
480 //
481 // It's possible to have both a stack overflow and a valid frame
482 // state (eg, a subexpression overflowed, visiting it returned
483 // with a dummied frame state, and visiting this expression
484 // returned with a normal-looking state).
485 if (HasStackOverflow() &&
486 has_valid_frame() &&
487 !has_cc() &&
488 frame_->height() == original_height) {
489 true_target->Jump();
490 }
491 }
492 if (force_cc && frame_ != NULL && !has_cc()) {
493 // Convert the TOS value to a boolean in the condition code register.
494 ToBoolean(true_target, false_target);
495 }
496 ASSERT(!force_cc || !has_valid_frame() || has_cc());
497 ASSERT(!has_valid_frame() ||
498 (has_cc() && frame_->height() == original_height) ||
499 (!has_cc() && frame_->height() == original_height + 1));
500 }
501
502
503 void CodeGenerator::Load(Expression* expr) {
504 // We generally assume that we are not in a spilled scope for most
505 // of the code generator. A failure to ensure this caused issue 815
506 // and this assert is designed to catch similar issues.
507 frame_->AssertIsNotSpilled();
508 #ifdef DEBUG
509 int original_height = frame_->height();
510 #endif
511 JumpTarget true_target;
512 JumpTarget false_target;
513 LoadCondition(expr, &true_target, &false_target, false);
514
515 if (has_cc()) {
516 // Convert cc_reg_ into a boolean value.
517 JumpTarget loaded;
518 JumpTarget materialize_true;
519 materialize_true.Branch(cc_reg_);
520 frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
521 loaded.Jump();
522 materialize_true.Bind();
523 frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
524 loaded.Bind();
525 cc_reg_ = al;
526 }
527
528 if (true_target.is_linked() || false_target.is_linked()) {
529 // We have at least one condition value that has been "translated"
530 // into a branch, thus it needs to be loaded explicitly.
531 JumpTarget loaded;
532 if (frame_ != NULL) {
533 loaded.Jump(); // Don't lose the current TOS.
534 }
535 bool both = true_target.is_linked() && false_target.is_linked();
536 // Load "true" if necessary.
537 if (true_target.is_linked()) {
538 true_target.Bind();
539 frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
540 }
541 // If both "true" and "false" need to be loaded jump across the code for
542 // "false".
543 if (both) {
544 loaded.Jump();
545 }
546 // Load "false" if necessary.
547 if (false_target.is_linked()) {
548 false_target.Bind();
549 frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
550 }
551 // A value is loaded on all paths reaching this point.
552 loaded.Bind();
553 }
554 ASSERT(has_valid_frame());
555 ASSERT(!has_cc());
556 ASSERT_EQ(original_height + 1, frame_->height());
557 }
558
559
560 void CodeGenerator::LoadGlobal() {
561 Register reg = frame_->GetTOSRegister();
562 __ ldr(reg, GlobalObjectOperand());
563 frame_->EmitPush(reg);
564 }
565
566
567 void CodeGenerator::LoadGlobalReceiver(Register scratch) {
568 Register reg = frame_->GetTOSRegister();
569 __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
570 __ ldr(reg,
571 FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
572 frame_->EmitPush(reg);
573 }
574
575
576 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
577 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
578
579 // In strict mode there is no need for shadow arguments.
580 ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
581 // We don't want to do lazy arguments allocation for functions that
582 // have heap-allocated contexts, because it interferes with the
583 // uninitialized const tracking in the context objects.
584 return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
585 ? EAGER_ARGUMENTS_ALLOCATION
586 : LAZY_ARGUMENTS_ALLOCATION;
587 }
588
589
590 void CodeGenerator::StoreArgumentsObject(bool initial) {
591 ArgumentsAllocationMode mode = ArgumentsMode();
592 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
593
594 Comment cmnt(masm_, "[ store arguments object");
595 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
596 // When using lazy arguments allocation, we store the hole value
597 // as a sentinel indicating that the arguments object hasn't been
598 // allocated yet.
599 frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
600 } else {
601 frame_->SpillAll();
602 ArgumentsAccessStub stub(is_strict_mode()
603 ? ArgumentsAccessStub::NEW_STRICT
604 : ArgumentsAccessStub::NEW_NON_STRICT);
605 __ ldr(r2, frame_->Function());
606 // The receiver is below the arguments, the return address, and the
607 // frame pointer on the stack.
608 const int kReceiverDisplacement = 2 + scope()->num_parameters();
609 __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
610 __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
611 frame_->Adjust(3);
612 __ Push(r2, r1, r0);
613 frame_->CallStub(&stub, 3);
614 frame_->EmitPush(r0);
615 }
616
617 Variable* arguments = scope()->arguments();
618 Variable* shadow = scope()->arguments_shadow();
619 ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
620 ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
621 scope()->is_strict_mode());
622
623 JumpTarget done;
624 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
625 // We have to skip storing into the arguments slot if it has
626 // already been written to. This can happen if a function
627 // has a local variable named 'arguments'.
628 LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
629 Register arguments = frame_->PopToRegister();
630 __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
631 __ cmp(arguments, ip);
632 done.Branch(ne);
633 }
634 StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
635 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
636 if (shadow != NULL) {
637 StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
638 }
639 }
640
641
642 void CodeGenerator::LoadTypeofExpression(Expression* expr) {
643 // Special handling of identifiers as subexpressions of typeof.
644 Variable* variable = expr->AsVariableProxy()->AsVariable();
645 if (variable != NULL && !variable->is_this() && variable->is_global()) {
646 // For a global variable we build the property reference
647 // <global>.<variable> and perform a (regular non-contextual) property
648 // load to make sure we do not get reference errors.
649 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
650 Literal key(variable->name());
651 Property property(&global, &key, RelocInfo::kNoPosition);
652 Reference ref(this, &property);
653 ref.GetValue();
654 } else if (variable != NULL && variable->AsSlot() != NULL) {
655 // For a variable that rewrites to a slot, we signal it is the immediate
656 // subexpression of a typeof.
657 LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
658 } else {
659 // Anything else can be handled normally.
660 Load(expr);
661 }
662 }
663
664
665 Reference::Reference(CodeGenerator* cgen,
666 Expression* expression,
667 bool persist_after_get)
668 : cgen_(cgen),
669 expression_(expression),
670 type_(ILLEGAL),
671 persist_after_get_(persist_after_get) {
672 // We generally assume that we are not in a spilled scope for most
673 // of the code generator. A failure to ensure this caused issue 815
674 // and this assert is designed to catch similar issues.
675 cgen->frame()->AssertIsNotSpilled();
676 cgen->LoadReference(this);
677 }
678
679
680 Reference::~Reference() {
681 ASSERT(is_unloaded() || is_illegal());
682 }
683
684
685 void CodeGenerator::LoadReference(Reference* ref) {
686 Comment cmnt(masm_, "[ LoadReference");
687 Expression* e = ref->expression();
688 Property* property = e->AsProperty();
689 Variable* var = e->AsVariableProxy()->AsVariable();
690
691 if (property != NULL) {
692 // The expression is either a property or a variable proxy that rewrites
693 // to a property.
694 Load(property->obj());
695 if (property->key()->IsPropertyName()) {
696 ref->set_type(Reference::NAMED);
697 } else {
698 Load(property->key());
699 ref->set_type(Reference::KEYED);
700 }
701 } else if (var != NULL) {
702 // The expression is a variable proxy that does not rewrite to a
703 // property. Global variables are treated as named property references.
704 if (var->is_global()) {
705 LoadGlobal();
706 ref->set_type(Reference::NAMED);
707 } else {
708 ASSERT(var->AsSlot() != NULL);
709 ref->set_type(Reference::SLOT);
710 }
711 } else {
712 // Anything else is a runtime error.
713 Load(e);
714 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
715 }
716 }
717
718
719 void CodeGenerator::UnloadReference(Reference* ref) {
720 int size = ref->size();
721 ref->set_unloaded();
722 if (size == 0) return;
723
724 // Pop a reference from the stack while preserving TOS.
725 VirtualFrame::RegisterAllocationScope scope(this);
726 Comment cmnt(masm_, "[ UnloadReference");
727 if (size > 0) {
728 Register tos = frame_->PopToRegister();
729 frame_->Drop(size);
730 frame_->EmitPush(tos);
731 }
732 }
733
734
735 // ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
736 // register to a boolean in the condition code register. The code
737 // may jump to 'false_target' in case the register converts to 'false'.
738 void CodeGenerator::ToBoolean(JumpTarget* true_target,
739 JumpTarget* false_target) {
740 // Note: The generated code snippet does not change stack variables.
741 // Only the condition code should be set.
742 bool known_smi = frame_->KnownSmiAt(0);
743 Register tos = frame_->PopToRegister();
744
745 // Fast case checks
746
747 // Check if the value is 'false'.
748 if (!known_smi) {
749 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
750 __ cmp(tos, ip);
751 false_target->Branch(eq);
752
753 // Check if the value is 'true'.
754 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
755 __ cmp(tos, ip);
756 true_target->Branch(eq);
757
758 // Check if the value is 'undefined'.
759 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
760 __ cmp(tos, ip);
761 false_target->Branch(eq);
762 }
763
764 // Check if the value is a smi.
765 __ cmp(tos, Operand(Smi::FromInt(0)));
766
767 if (!known_smi) {
768 false_target->Branch(eq);
769 __ tst(tos, Operand(kSmiTagMask));
770 true_target->Branch(eq);
771
772 // Slow case.
773 if (CpuFeatures::IsSupported(VFP3)) {
774 CpuFeatures::Scope scope(VFP3);
775 // Implements the slow case by using ToBooleanStub.
776 // The ToBooleanStub takes a single argument, and
777 // returns a non-zero value for true, or zero for false.
778 // Both the argument value and the return value use the
779 // register assigned to tos_
780 ToBooleanStub stub(tos);
781 frame_->CallStub(&stub, 0);
782 // Convert the result in "tos" to a condition code.
783 __ cmp(tos, Operand(0, RelocInfo::NONE));
784 } else {
785 // Implements slow case by calling the runtime.
786 frame_->EmitPush(tos);
787 frame_->CallRuntime(Runtime::kToBool, 1);
788 // Convert the result (r0) to a condition code.
789 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
790 __ cmp(r0, ip);
791 }
792 }
793
794 cc_reg_ = ne;
795 }
796
797
798 void CodeGenerator::GenericBinaryOperation(Token::Value op,
799 OverwriteMode overwrite_mode,
800 GenerateInlineSmi inline_smi,
801 int constant_rhs) {
802 // top of virtual frame: y
803 // 2nd elt. on virtual frame : x
804 // result : top of virtual frame
805
806 // Stub is entered with a call: 'return address' is in lr.
807 switch (op) {
808 case Token::ADD:
809 case Token::SUB:
810 if (inline_smi) {
811 JumpTarget done;
812 Register rhs = frame_->PopToRegister();
813 Register lhs = frame_->PopToRegister(rhs);
814 Register scratch = VirtualFrame::scratch0();
815 __ orr(scratch, rhs, Operand(lhs));
816 // Check they are both small and positive.
817 __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
818 ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
819 STATIC_ASSERT(kSmiTag == 0);
820 if (op == Token::ADD) {
821 __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
822 } else {
823 __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
824 }
825 done.Branch(eq);
826 GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
827 frame_->SpillAll();
828 frame_->CallStub(&stub, 0);
829 done.Bind();
830 frame_->EmitPush(r0);
831 break;
832 } else {
833 // Fall through!
834 }
835 case Token::BIT_OR:
836 case Token::BIT_AND:
837 case Token::BIT_XOR:
838 if (inline_smi) {
839 bool rhs_is_smi = frame_->KnownSmiAt(0);
840 bool lhs_is_smi = frame_->KnownSmiAt(1);
841 Register rhs = frame_->PopToRegister();
842 Register lhs = frame_->PopToRegister(rhs);
843 Register smi_test_reg;
844 Condition cond;
845 if (!rhs_is_smi || !lhs_is_smi) {
846 if (rhs_is_smi) {
847 smi_test_reg = lhs;
848 } else if (lhs_is_smi) {
849 smi_test_reg = rhs;
850 } else {
851 smi_test_reg = VirtualFrame::scratch0();
852 __ orr(smi_test_reg, rhs, Operand(lhs));
853 }
854 // Check they are both Smis.
855 __ tst(smi_test_reg, Operand(kSmiTagMask));
856 cond = eq;
857 } else {
858 cond = al;
859 }
860 ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
861 if (op == Token::BIT_OR) {
862 __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
863 } else if (op == Token::BIT_AND) {
864 __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
865 } else {
866 ASSERT(op == Token::BIT_XOR);
867 STATIC_ASSERT(kSmiTag == 0);
868 __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
869 }
870 if (cond != al) {
871 JumpTarget done;
872 done.Branch(cond);
873 GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
874 frame_->SpillAll();
875 frame_->CallStub(&stub, 0);
876 done.Bind();
877 }
878 frame_->EmitPush(r0);
879 break;
880 } else {
881 // Fall through!
882 }
883 case Token::MUL:
884 case Token::DIV:
885 case Token::MOD:
886 case Token::SHL:
887 case Token::SHR:
888 case Token::SAR: {
889 Register rhs = frame_->PopToRegister();
890 Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
891 GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
892 frame_->SpillAll();
893 frame_->CallStub(&stub, 0);
894 frame_->EmitPush(r0);
895 break;
896 }
897
898 case Token::COMMA: {
899 Register scratch = frame_->PopToRegister();
900 // Simply discard left value.
901 frame_->Drop();
902 frame_->EmitPush(scratch);
903 break;
904 }
905
906 default:
907 // Other cases should have been handled before this point.
908 UNREACHABLE();
909 break;
910 }
911 }
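// Illustrative sketch (not part of the original file): the inline ADD/SUB
// fast path above leans on the smi representation. On 32-bit V8, kSmiTag == 0
// and kSmiTagSize == 1, so a small integer n is stored as the word n << 1 and
// kSmiTagMask selects the tag bit. Adding two tagged words therefore yields
// the correctly tagged sum, and the single test
//
//     __ orr(scratch, rhs, Operand(lhs));
//     __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
//
// passes (eq) only when both operands are smis (bit 0 clear) and both
// untagged values are non-negative and below 2^30 (bits 31 and 30 clear), so
// the optimistic add or sub cannot overflow. For example 3 and 4 are tagged
// as 6 and 8; 6 | 8 = 0b1110 passes the mask and 6 + 8 = 14, which is the
// smi 7.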
912
913
914 class DeferredInlineSmiOperation: public DeferredCode {
915 public:
916 DeferredInlineSmiOperation(Token::Value op,
917 int value,
918 bool reversed,
919 OverwriteMode overwrite_mode,
920 Register tos)
921 : op_(op),
922 value_(value),
923 reversed_(reversed),
924 overwrite_mode_(overwrite_mode),
925 tos_register_(tos) {
926 set_comment("[ DeferredInlinedSmiOperation");
927 }
928
929 virtual void Generate();
930 // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
931 // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
932 // methods, it is the responsibility of the deferred code to save and restore
933 // registers.
934 virtual bool AutoSaveAndRestore() { return false; }
935
936 void JumpToNonSmiInput(Condition cond);
937 void JumpToAnswerOutOfRange(Condition cond);
938
939 private:
940 void GenerateNonSmiInput();
941 void GenerateAnswerOutOfRange();
942 void WriteNonSmiAnswer(Register answer,
943 Register heap_number,
944 Register scratch);
945
946 Token::Value op_;
947 int value_;
948 bool reversed_;
949 OverwriteMode overwrite_mode_;
950 Register tos_register_;
951 Label non_smi_input_;
952 Label answer_out_of_range_;
953 };
954
955
956 // For bit operations we try harder and handle the case where the input is not
957 // a Smi but a 32-bit integer without calling the generic stub.
958 void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
959 ASSERT(Token::IsBitOp(op_));
960
961 __ b(cond, &non_smi_input_);
962 }
963
964
965 // For bit operations the result is always 32 bits, so we handle the case where
966 // the result does not fit in a Smi without calling the generic stub.
967 void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
968 ASSERT(Token::IsBitOp(op_));
969
970 if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
971 // >>> requires an unsigned to double conversion and the non VFP code
972 // does not support this conversion.
973 __ b(cond, entry_label());
974 } else {
975 __ b(cond, &answer_out_of_range_);
976 }
977 }
978
979
980 // On entry the non-constant side of the binary operation is in tos_register_
981 // and the constant smi side is nowhere. The tos_register_ is not used by the
982 // virtual frame. On exit the answer is in the tos_register_ and the virtual
983 // frame is unchanged.
984 void DeferredInlineSmiOperation::Generate() {
985 VirtualFrame copied_frame(*frame_state()->frame());
986 copied_frame.SpillAll();
987
988 Register lhs = r1;
989 Register rhs = r0;
990 switch (op_) {
991 case Token::ADD: {
992 // Revert optimistic add.
993 if (reversed_) {
994 __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
995 __ mov(r1, Operand(Smi::FromInt(value_)));
996 } else {
997 __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
998 __ mov(r0, Operand(Smi::FromInt(value_)));
999 }
1000 break;
1001 }
1002
1003 case Token::SUB: {
1004 // Revert optimistic sub.
1005 if (reversed_) {
1006 __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
1007 __ mov(r1, Operand(Smi::FromInt(value_)));
1008 } else {
1009 __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
1010 __ mov(r0, Operand(Smi::FromInt(value_)));
1011 }
1012 break;
1013 }
1014
1015 // For these operations there is no optimistic operation that needs to be
1016 // reverted.
1017 case Token::MUL:
1018 case Token::MOD:
1019 case Token::BIT_OR:
1020 case Token::BIT_XOR:
1021 case Token::BIT_AND:
1022 case Token::SHL:
1023 case Token::SHR:
1024 case Token::SAR: {
1025 if (tos_register_.is(r1)) {
1026 __ mov(r0, Operand(Smi::FromInt(value_)));
1027 } else {
1028 ASSERT(tos_register_.is(r0));
1029 __ mov(r1, Operand(Smi::FromInt(value_)));
1030 }
1031 if (reversed_ == tos_register_.is(r1)) {
1032 lhs = r0;
1033 rhs = r1;
1034 }
1035 break;
1036 }
1037
1038 default:
1039 // Other cases should have been handled before this point.
1040 UNREACHABLE();
1041 break;
1042 }
1043
1044 GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
1045 __ CallStub(&stub);
1046
1047 // The generic stub returns its value in r0, but that's not
1048 // necessarily what we want. We want whatever the inlined code
1049 // expected, which is that the answer is in the same register as
1050 // the operand was.
1051 __ Move(tos_register_, r0);
1052
1053 // The tos register was not in use for the virtual frame that we
1054 // came into this function with, so we can merge back to that frame
1055 // without trashing it.
1056 copied_frame.MergeTo(frame_state()->frame());
1057
1058 Exit();
1059
1060 if (non_smi_input_.is_linked()) {
1061 GenerateNonSmiInput();
1062 }
1063
1064 if (answer_out_of_range_.is_linked()) {
1065 GenerateAnswerOutOfRange();
1066 }
1067 }
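// Illustrative sketch (not part of the original file): the "revert optimistic
// add/sub" above undoes work done by SmiOperation(), which emits the tagged
// operation before it has proved that tos holds a smi, roughly:
//
//     __ add(tos, tos, Operand(value), SetCC);  // optimistic add
//     deferred->Branch(vs);                     // overflow -> deferred code
//     __ tst(tos, Operand(kSmiTagMask));
//     deferred->Branch(ne);                     // not a smi -> deferred code
//
// By the time this deferred code runs, tos_register_ already holds the sum,
// so the original operand is recovered with a matching sub (or add, for SUB).
// This is exact even when the optimistic operation overflowed, since 32-bit
// addition and subtraction are inverses modulo 2^32.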
1068
1069
1070 // Convert and write the integer answer into heap_number.
1071 void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
1072 Register heap_number,
1073 Register scratch) {
1074 if (CpuFeatures::IsSupported(VFP3)) {
1075 CpuFeatures::Scope scope(VFP3);
1076 __ vmov(s0, answer);
1077 if (op_ == Token::SHR) {
1078 __ vcvt_f64_u32(d0, s0);
1079 } else {
1080 __ vcvt_f64_s32(d0, s0);
1081 }
1082 __ sub(scratch, heap_number, Operand(kHeapObjectTag));
1083 __ vstr(d0, scratch, HeapNumber::kValueOffset);
1084 } else {
1085 WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
1086 __ CallStub(&stub);
1087 }
1088 }
1089
1090
1091 void DeferredInlineSmiOperation::GenerateNonSmiInput() {
1092 // We know the left hand side is not a Smi and the right hand side is an
1093 // immediate value (value_) which can be represented as a Smi. We only
1094 // handle bit operations.
1095 ASSERT(Token::IsBitOp(op_));
1096
1097 if (FLAG_debug_code) {
1098 __ Abort("Should not fall through!");
1099 }
1100
1101 __ bind(&non_smi_input_);
1102 if (FLAG_debug_code) {
1103 __ AbortIfSmi(tos_register_);
1104 }
1105
1106 // This routine uses the registers from r2 to r6. At the moment they are
1107 // not used by the register allocator, but when they are it should use
1108 // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
1109
1110 Register heap_number_map = r7;
1111 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1112 __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
1113 __ cmp(r3, heap_number_map);
1114 // Not a number, fall back to the GenericBinaryOpStub.
1115 __ b(ne, entry_label());
1116
1117 Register int32 = r2;
1118 // Not a 32bits signed int, fall back to the GenericBinaryOpStub.
1119 __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
1120
1121 // tos_register_ (r0 or r1): Original heap number.
1122 // int32: signed 32bits int.
1123
1124 Label result_not_a_smi;
1125 int shift_value = value_ & 0x1f;
1126 switch (op_) {
1127 case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break;
1128 case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
1129 case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
1130 case Token::SAR:
1131 ASSERT(!reversed_);
1132 if (shift_value != 0) {
1133 __ mov(int32, Operand(int32, ASR, shift_value));
1134 }
1135 break;
1136 case Token::SHR:
1137 ASSERT(!reversed_);
1138 if (shift_value != 0) {
1139 __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
1140 } else {
1141 // SHR is special because it is required to produce a positive answer.
1142 __ cmp(int32, Operand(0, RelocInfo::NONE));
1143 }
1144 if (CpuFeatures::IsSupported(VFP3)) {
1145 __ b(mi, &result_not_a_smi);
1146 } else {
1147 // Non VFP code cannot convert from unsigned to double, so fall back
1148 // to GenericBinaryOpStub.
1149 __ b(mi, entry_label());
1150 }
1151 break;
1152 case Token::SHL:
1153 ASSERT(!reversed_);
1154 if (shift_value != 0) {
1155 __ mov(int32, Operand(int32, LSL, shift_value));
1156 }
1157 break;
1158 default: UNREACHABLE();
1159 }
1160 // Check that the *signed* result fits in a smi. Not necessary for AND, SAR
1161 // if the shift is more than 0, or SHR if the shift is more than 1.
1162 if (!( (op_ == Token::AND && value_ >= 0) ||
1163 ((op_ == Token::SAR) && (shift_value > 0)) ||
1164 ((op_ == Token::SHR) && (shift_value > 1)))) {
1165 __ add(r3, int32, Operand(0x40000000), SetCC);
1166 __ b(mi, &result_not_a_smi);
1167 }
1168 __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
1169 Exit();
1170
1171 if (result_not_a_smi.is_linked()) {
1172 __ bind(&result_not_a_smi);
1173 if (overwrite_mode_ != OVERWRITE_LEFT) {
1174 ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
1175 (overwrite_mode_ == OVERWRITE_RIGHT));
1176 // If the allocation fails, fall back to the GenericBinaryOpStub.
1177 __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
1178 // Nothing can go wrong now, so overwrite tos.
1179 __ mov(tos_register_, Operand(r4));
1180 }
1181
1182 // int32: answer as signed 32bits integer.
1183 // tos_register_: Heap number to write the answer into.
1184 WriteNonSmiAnswer(int32, tos_register_, r3);
1185
1186 Exit();
1187 }
1188 }
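// Illustrative sketch (not part of the original file): the range check
//
//     __ add(r3, int32, Operand(0x40000000), SetCC);
//     __ b(mi, &result_not_a_smi);
//
// used above works because a smi holds a signed 31-bit value, i.e. the range
// [-0x40000000, 0x3fffffff]. Adding 0x40000000 maps exactly that range onto
// [0x00000000, 0x7fffffff], so the sum is negative (condition mi) precisely
// when the value does not fit in a smi. For example 0x40000000, one past the
// maximum, becomes 0x80000000 and takes the mi branch, while the minimum smi
// -0x40000000 becomes 0 and falls through.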
1189
1190
1191 void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
1192 // The inputs to a bitwise operation were Smis but the result cannot fit
1193 // into a Smi, so we store it into a heap number. VirtualFrame::scratch0()
1194 // holds the untagged result to be converted. tos_register_ contains the
1195 // input. See the calls to JumpToAnswerOutOfRange to see how we got here.
1196 ASSERT(Token::IsBitOp(op_));
1197 ASSERT(!reversed_);
1198
1199 Register untagged_result = VirtualFrame::scratch0();
1200
1201 if (FLAG_debug_code) {
1202 __ Abort("Should not fall through!");
1203 }
1204
1205 __ bind(&answer_out_of_range_);
1206 if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
1207 // >>> 0 is a special case where the untagged_result register is not set up
1208 // yet. We untag the input to get it.
1209 __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize));
1210 }
1211
1212 // This routine uses the registers from r2 to r6. At the moment they are
1213 // not used by the register allocator, but when they are it should use
1214 // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
1215
1216 // Allocate the result heap number.
1217 Register heap_number_map = VirtualFrame::scratch1();
1218 Register heap_number = r4;
1219 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1220 // If the allocation fails, fall back to the GenericBinaryOpStub.
1221 __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
1222 WriteNonSmiAnswer(untagged_result, heap_number, r3);
1223 __ mov(tos_register_, Operand(heap_number));
1224
1225 Exit();
1226 }
1227
1228
1229 static bool PopCountLessThanEqual2(unsigned int x) {
1230 x &= x - 1;
1231 return (x & (x - 1)) == 0;
1232 }
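// Illustrative sketch (not part of the original file): x &= x - 1 clears the
// lowest set bit, so after that single step (x & (x - 1)) == 0 holds exactly
// when at most one bit remains, i.e. the original value had a population
// count of at most two. For example:
//
//     PopCountLessThanEqual2(10);  // 0b1010 -> 0b1000 -> true  (two bits)
//     PopCountLessThanEqual2(7);   // 0b0111 -> 0b0110 -> false (three bits)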
1233
1234
1235 // Returns the index of the lowest bit set.
1236 static int BitPosition(unsigned x) {
1237 int bit_posn = 0;
1238 while ((x & 0xf) == 0) {
1239 bit_posn += 4;
1240 x >>= 4;
1241 }
1242 while ((x & 1) == 0) {
1243 bit_posn++;
1244 x >>= 1;
1245 }
1246 return bit_posn;
1247 }
1248
1249
1250 // Can we multiply by x with at most two shifts and an add?
1251 // This answers yes to all integers from 2 to 10.
1252 static bool IsEasyToMultiplyBy(int x) {
1253 if (x < 2) return false; // Avoid special cases.
1254 if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
1255 if (IsPowerOf2(x)) return true; // Simple shift.
1256 if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
1257 if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
1258 return false;
1259 }
1260
1261
1262 // Can multiply by anything that IsEasyToMultiplyBy returns true for.
1263 // Source and destination may be the same register. This routine does
1264 // not set carry and overflow the way a mul instruction would.
1265 static void InlineMultiplyByKnownInt(MacroAssembler* masm,
1266 Register source,
1267 Register destination,
1268 int known_int) {
1269 if (IsPowerOf2(known_int)) {
1270 masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
1271 } else if (PopCountLessThanEqual2(known_int)) {
1272 int first_bit = BitPosition(known_int);
1273 int second_bit = BitPosition(known_int ^ (1 << first_bit));
1274 masm->add(destination, source,
1275 Operand(source, LSL, second_bit - first_bit));
1276 if (first_bit != 0) {
1277 masm->mov(destination, Operand(destination, LSL, first_bit));
1278 }
1279 } else {
1280 ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
1281 int the_bit = BitPosition(known_int + 1);
1282 masm->rsb(destination, source, Operand(source, LSL, the_bit));
1283 }
1284 }
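// Illustrative sketch (not part of the original file): concrete instruction
// sequences InlineMultiplyByKnownInt emits for a few of the "easy"
// multipliers (dst and src stand for any two registers):
//
//     // known_int == 8, a power of two: a single shift.
//     masm->mov(dst, Operand(src, LSL, 3));        // dst = src * 8
//
//     // known_int == 10 (0b1010, two bits set): shift-add, then shift.
//     masm->add(dst, src, Operand(src, LSL, 2));   // dst = src * 5
//     masm->mov(dst, Operand(dst, LSL, 1));        // dst = src * 10
//
//     // known_int == 7 (0b111, i.e. 8 - 1): reverse-subtract from a shift.
//     masm->rsb(dst, src, Operand(src, LSL, 3));   // dst = src * 8 - src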
1285
1286
1287 void CodeGenerator::SmiOperation(Token::Value op,
1288 Handle<Object> value,
1289 bool reversed,
1290 OverwriteMode mode) {
1291 int int_value = Smi::cast(*value)->value();
1292
1293 bool both_sides_are_smi = frame_->KnownSmiAt(0);
1294
1295 bool something_to_inline;
1296 switch (op) {
1297 case Token::ADD:
1298 case Token::SUB:
1299 case Token::BIT_AND:
1300 case Token::BIT_OR:
1301 case Token::BIT_XOR: {
1302 something_to_inline = true;
1303 break;
1304 }
1305 case Token::SHL: {
1306 something_to_inline = (both_sides_are_smi || !reversed);
1307 break;
1308 }
1309 case Token::SHR:
1310 case Token::SAR: {
1311 if (reversed) {
1312 something_to_inline = false;
1313 } else {
1314 something_to_inline = true;
1315 }
1316 break;
1317 }
1318 case Token::MOD: {
1319 if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
1320 something_to_inline = false;
1321 } else {
1322 something_to_inline = true;
1323 }
1324 break;
1325 }
1326 case Token::MUL: {
1327 if (!IsEasyToMultiplyBy(int_value)) {
1328 something_to_inline = false;
1329 } else {
1330 something_to_inline = true;
1331 }
1332 break;
1333 }
1334 default: {
1335 something_to_inline = false;
1336 break;
1337 }
1338 }
1339
1340 if (!something_to_inline) {
1341 if (!reversed) {
1342 // Push the rhs onto the virtual frame by putting it in a TOS register.
1343 Register rhs = frame_->GetTOSRegister();
1344 __ mov(rhs, Operand(value));
1345 frame_->EmitPush(rhs, TypeInfo::Smi());
1346 GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
1347 } else {
1348 // Pop the rhs, then push lhs and rhs in the right order. Only performs
1349 // at most one pop, the rest takes place in TOS registers.
1350 Register lhs = frame_->GetTOSRegister(); // Get reg for pushing.
1351 Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this.
1352 __ mov(lhs, Operand(value));
1353 frame_->EmitPush(lhs, TypeInfo::Smi());
1354 TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
1355 frame_->EmitPush(rhs, t);
1356 GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
1357 GenericBinaryOpStub::kUnknownIntValue);
1358 }
1359 return;
1360 }
1361
1362 // We move the top of stack to a register (normally no move is involved).
1363 Register tos = frame_->PopToRegister();
1364 switch (op) {
1365 case Token::ADD: {
1366 DeferredCode* deferred =
1367 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1368
1369 __ add(tos, tos, Operand(value), SetCC);
1370 deferred->Branch(vs);
1371 if (!both_sides_are_smi) {
1372 __ tst(tos, Operand(kSmiTagMask));
1373 deferred->Branch(ne);
1374 }
1375 deferred->BindExit();
1376 frame_->EmitPush(tos);
1377 break;
1378 }
1379
1380 case Token::SUB: {
1381 DeferredCode* deferred =
1382 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1383
1384 if (reversed) {
1385 __ rsb(tos, tos, Operand(value), SetCC);
1386 } else {
1387 __ sub(tos, tos, Operand(value), SetCC);
1388 }
1389 deferred->Branch(vs);
1390 if (!both_sides_are_smi) {
1391 __ tst(tos, Operand(kSmiTagMask));
1392 deferred->Branch(ne);
1393 }
1394 deferred->BindExit();
1395 frame_->EmitPush(tos);
1396 break;
1397 }
1398
1399
1400 case Token::BIT_OR:
1401 case Token::BIT_XOR:
1402 case Token::BIT_AND: {
1403 if (both_sides_are_smi) {
1404 switch (op) {
1405 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
1406 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
1407 case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
1408 default: UNREACHABLE();
1409 }
1410 frame_->EmitPush(tos, TypeInfo::Smi());
1411 } else {
1412 DeferredInlineSmiOperation* deferred =
1413 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1414 __ tst(tos, Operand(kSmiTagMask));
1415 deferred->JumpToNonSmiInput(ne);
1416 switch (op) {
1417 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
1418 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
1419 case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
1420 default: UNREACHABLE();
1421 }
1422 deferred->BindExit();
1423 TypeInfo result_type = TypeInfo::Integer32();
1424 if (op == Token::BIT_AND && int_value >= 0) {
1425 result_type = TypeInfo::Smi();
1426 }
1427 frame_->EmitPush(tos, result_type);
1428 }
1429 break;
1430 }
1431
1432 case Token::SHL:
1433 if (reversed) {
1434 ASSERT(both_sides_are_smi);
1435 int max_shift = 0;
1436 int max_result = int_value == 0 ? 1 : int_value;
1437 while (Smi::IsValid(max_result << 1)) {
1438 max_shift++;
1439 max_result <<= 1;
1440 }
1441 DeferredCode* deferred =
1442 new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
1443 // Mask off the last 5 bits of the shift operand (rhs). This is part
1444 // of the definition of shift in JS and we know we have a Smi so we
1445 // can safely do this. The masked version gets passed to the
1446 // deferred code, but that makes no difference.
1447 __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
1448 __ cmp(tos, Operand(Smi::FromInt(max_shift)));
1449 deferred->Branch(ge);
1450 Register scratch = VirtualFrame::scratch0();
1451 __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag.
1452 __ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant.
1453 __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant.
1454 deferred->BindExit();
1455 TypeInfo result = TypeInfo::Integer32();
1456 frame_->EmitPush(tos, result);
1457 break;
1458 }
1459 // Fall through!
1460 case Token::SHR:
1461 case Token::SAR: {
1462 ASSERT(!reversed);
1463 int shift_value = int_value & 0x1f;
1464 TypeInfo result = TypeInfo::Number();
1465
1466 if (op == Token::SHR) {
1467 if (shift_value > 1) {
1468 result = TypeInfo::Smi();
1469 } else if (shift_value > 0) {
1470 result = TypeInfo::Integer32();
1471 }
1472 } else if (op == Token::SAR) {
1473 if (shift_value > 0) {
1474 result = TypeInfo::Smi();
1475 } else {
1476 result = TypeInfo::Integer32();
1477 }
1478 } else {
1479 ASSERT(op == Token::SHL);
1480 result = TypeInfo::Integer32();
1481 }
1482
1483 DeferredInlineSmiOperation* deferred =
1484 new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
1485 if (!both_sides_are_smi) {
1486 __ tst(tos, Operand(kSmiTagMask));
1487 deferred->JumpToNonSmiInput(ne);
1488 }
1489 switch (op) {
1490 case Token::SHL: {
1491 if (shift_value != 0) {
1492 Register untagged_result = VirtualFrame::scratch0();
1493 Register scratch = VirtualFrame::scratch1();
1494 int adjusted_shift = shift_value - kSmiTagSize;
1495 ASSERT(adjusted_shift >= 0);
1496
1497 if (adjusted_shift != 0) {
1498 __ mov(untagged_result, Operand(tos, LSL, adjusted_shift));
1499 } else {
1500 __ mov(untagged_result, Operand(tos));
1501 }
1502 // Check that the *signed* result fits in a smi.
1503 __ add(scratch, untagged_result, Operand(0x40000000), SetCC);
1504 deferred->JumpToAnswerOutOfRange(mi);
1505 __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
1506 }
1507 break;
1508 }
1509 case Token::SHR: {
1510 if (shift_value != 0) {
1511 Register untagged_result = VirtualFrame::scratch0();
1512 // Remove tag.
1513 __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize));
1514 __ mov(untagged_result, Operand(untagged_result, LSR, shift_value));
1515 if (shift_value == 1) {
1516 // Check that the *unsigned* result fits in a smi.
1517 // Neither of the two high-order bits can be set:
1518 // - 0x80000000: high bit would be lost when smi tagging
1519 // - 0x40000000: this number would convert to negative when Smi
1520 // tagging.
1521 // These two cases can only happen with shifts by 0 or 1 when
1522 // handed a valid smi.
1523 __ tst(untagged_result, Operand(0xc0000000));
1524 deferred->JumpToAnswerOutOfRange(ne);
1525 }
1526 __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
1527 } else {
1528 __ cmp(tos, Operand(0, RelocInfo::NONE));
1529 deferred->JumpToAnswerOutOfRange(mi);
1530 }
1531 break;
1532 }
1533 case Token::SAR: {
1534 if (shift_value != 0) {
1535 // Do the shift and the tag removal in one operation. If the shift
1536 // is 31 bits (the highest possible value) then we emit the
1537 // instruction as a shift by 0 which in the ARM ISA means shift
1538 // arithmetically by 32.
1539 __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
1540 __ mov(tos, Operand(tos, LSL, kSmiTagSize));
1541 }
1542 break;
1543 }
1544 default: UNREACHABLE();
1545 }
1546 deferred->BindExit();
1547 frame_->EmitPush(tos, result);
1548 break;
1549 }
1550
1551 case Token::MOD: {
1552 ASSERT(!reversed);
1553 ASSERT(int_value >= 2);
1554 ASSERT(IsPowerOf2(int_value));
1555 DeferredCode* deferred =
1556 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1557 unsigned mask = (0x80000000u | kSmiTagMask);
1558 __ tst(tos, Operand(mask));
1559 deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
1560 mask = (int_value << kSmiTagSize) - 1;
1561 __ and_(tos, tos, Operand(mask));
1562 deferred->BindExit();
1563 // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
1564 frame_->EmitPush(
1565 tos,
1566 both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
1567 break;
1568 }
1569
1570 case Token::MUL: {
1571 ASSERT(IsEasyToMultiplyBy(int_value));
1572 DeferredCode* deferred =
1573 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1574 unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
1575 max_smi_that_wont_overflow <<= kSmiTagSize;
1576 unsigned mask = 0x80000000u;
1577 while ((mask & max_smi_that_wont_overflow) == 0) {
1578 mask |= mask >> 1;
1579 }
1580 mask |= kSmiTagMask;
1581 // This does a single mask that checks for a too high value in a
1582 // conservative way and for a non-Smi. It also filters out negative
1583 // numbers, unfortunately, but since this code is inline we prefer
1584 // brevity to comprehensiveness.
1585 __ tst(tos, Operand(mask));
1586 deferred->Branch(ne);
1587 InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
1588 deferred->BindExit();
1589 frame_->EmitPush(tos);
1590 break;
1591 }
1592
1593 default:
1594 UNREACHABLE();
1595 break;
1596 }
1597 }
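// Illustrative sketch (not part of the original file): the inline SAR case
// above folds the untag, the shift and the retag into two instructions: ASR
// by (kSmiTagSize + shift_value) removes the tag bit while shifting, and LSL
// by kSmiTagSize retags the result. Computing 12 >> 2, for instance: the smi
// 12 is stored as the word 24 (0b11000); ASR #3 gives 3 and LSL #1 gives 6,
// which is the smi 3, the expected answer. Masking the shift amount with 0x1f
// handles shift_value == 31, where the resulting ASR #0 is the ARM encoding
// of an arithmetic shift by 32.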
1598
1599
1600 void CodeGenerator::Comparison(Condition cond,
1601 Expression* left,
1602 Expression* right,
1603 bool strict) {
1604 VirtualFrame::RegisterAllocationScope scope(this);
1605
1606 if (left != NULL) Load(left);
1607 if (right != NULL) Load(right);
1608
1609 // sp[0] : y
1610 // sp[1] : x
1611 // result : cc register
1612
1613 // Strict only makes sense for equality comparisons.
1614 ASSERT(!strict || cond == eq);
1615
1616 Register lhs;
1617 Register rhs;
1618
1619 bool lhs_is_smi;
1620 bool rhs_is_smi;
1621
1622 // We load the top two stack positions into registers chosen by the virtual
1623 // frame. This should keep the register shuffling to a minimum.
1624 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
1625 if (cond == gt || cond == le) {
1626 cond = ReverseCondition(cond);
1627 lhs_is_smi = frame_->KnownSmiAt(0);
1628 rhs_is_smi = frame_->KnownSmiAt(1);
1629 lhs = frame_->PopToRegister();
1630 rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
1631 } else {
1632 rhs_is_smi = frame_->KnownSmiAt(0);
1633 lhs_is_smi = frame_->KnownSmiAt(1);
1634 rhs = frame_->PopToRegister();
1635 lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
1636 }
1637
1638 bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
1639
1640 ASSERT(rhs.is(r0) || rhs.is(r1));
1641 ASSERT(lhs.is(r0) || lhs.is(r1));
1642
1643 JumpTarget exit;
1644
1645 if (!both_sides_are_smi) {
1646 // Now we have the two sides in r0 and r1. We flush any other registers
1647 // because the stub doesn't know about register allocation.
1648 frame_->SpillAll();
1649 Register scratch = VirtualFrame::scratch0();
1650 Register smi_test_reg;
1651 if (lhs_is_smi) {
1652 smi_test_reg = rhs;
1653 } else if (rhs_is_smi) {
1654 smi_test_reg = lhs;
1655 } else {
1656 __ orr(scratch, lhs, Operand(rhs));
1657 smi_test_reg = scratch;
1658 }
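// If either value is a non-Smi its tag bit is set, so testing the or'ed
// (or single remaining) value against kSmiTagMask falls through to the
// stub path exactly when at least one operand is not a Smi.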
1659 __ tst(smi_test_reg, Operand(kSmiTagMask));
1660 JumpTarget smi;
1661 smi.Branch(eq);
1662
1663 // Perform non-smi comparison by stub.
1664 // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
1665 // We call with 0 args because there are 0 on the stack.
1666 CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
1667 frame_->CallStub(&stub, 0);
1668 __ cmp(r0, Operand(0, RelocInfo::NONE));
1669 exit.Jump();
1670
1671 smi.Bind();
1672 }
1673
1674 // Do smi comparisons by pointer comparison.
1675 __ cmp(lhs, Operand(rhs));
1676
1677 exit.Bind();
1678 cc_reg_ = cond;
1679 }
1680
1681
1682 // Call the function on the stack with the given arguments.
1683 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
1684 CallFunctionFlags flags,
1685 int position) {
1686 // Push the arguments ("left-to-right") on the stack.
1687 int arg_count = args->length();
1688 for (int i = 0; i < arg_count; i++) {
1689 Load(args->at(i));
1690 }
1691
1692 // Record the position for debugging purposes.
1693 CodeForSourcePosition(position);
1694
1695 // Use the shared code stub to call the function.
1696 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
1697 CallFunctionStub call_function(arg_count, in_loop, flags);
1698 frame_->CallStub(&call_function, arg_count + 1);
1699
1700 // Restore context and pop function from the stack.
1701 __ ldr(cp, frame_->Context());
1702 frame_->Drop(); // discard the TOS
1703 }
1704
1705
1706 void CodeGenerator::CallApplyLazy(Expression* applicand,
1707 Expression* receiver,
1708 VariableProxy* arguments,
1709 int position) {
1710 // An optimized implementation of expressions of the form
1711 // x.apply(y, arguments).
1712 // If the arguments object of the scope has not been allocated,
1713 // and x.apply is Function.prototype.apply, this optimization
1714 // just copies y and the arguments of the current function on the
1715 // stack, as receiver and arguments, and calls x.
1716 // In the implementation comments, we call x the applicand
1717 // and y the receiver.
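// As a rough illustration of the intended effect, for code such as
//   function outer() { return f.apply(this, arguments); }
// the fast path below pushes 'this' and the caller's actual parameters
// directly and invokes f, never materializing an arguments object. If any
// check fails (the arguments object already exists, f.apply is not
// Function.prototype.apply, f is not a function, or there are too many
// arguments), the code falls back to the generic call sequence at the end.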
1718
1719 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
1720 ASSERT(arguments->IsArguments());
1721
1722 // Load applicand.apply onto the stack. This will usually
1723 // give us a megamorphic load site. Not super, but it works.
1724 Load(applicand);
1725 Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
1726 frame_->Dup();
1727 frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
1728 frame_->EmitPush(r0);
1729
1730 // Load the receiver and the existing arguments object onto the
1731 // expression stack. Avoid allocating the arguments object here.
1732 Load(receiver);
1733 LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
1734
1735 // At this point the top two stack elements are probably in registers
1736 // since they were just loaded. Make sure they are in registers and
1737 // obtain those registers.
1738 Register receiver_reg = frame_->Peek2();
1739 Register arguments_reg = frame_->Peek();
1740
1741 // From now on the frame is spilled.
1742 frame_->SpillAll();
1743
1744 // Emit the source position information after having loaded the
1745 // receiver and the arguments.
1746 CodeForSourcePosition(position);
1747 // Contents of the stack at this point:
1748 // sp[0]: arguments object of the current function or the hole.
1749 // sp[1]: receiver
1750 // sp[2]: applicand.apply
1751 // sp[3]: applicand.
1752
1753 // Check if the arguments object has been lazily allocated
1754 // already. If so, just use that instead of copying the arguments
1755 // from the stack. This also deals with cases where a local variable
1756 // named 'arguments' has been introduced.
1757 JumpTarget slow;
1758 Label done;
1759 __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
1760 __ cmp(ip, arguments_reg);
1761 slow.Branch(ne);
1762
1763 Label build_args;
1764 // Get rid of the arguments object probe.
1765 frame_->Drop();
1766 // Stack now has 3 elements on it.
1767 // Contents of stack at this point:
1768 // sp[0]: receiver - in the receiver_reg register.
1769 // sp[1]: applicand.apply
1770 // sp[2]: applicand.
1771
1772 // Check that the receiver really is a JavaScript object.
1773 __ JumpIfSmi(receiver_reg, &build_args);
1774 // We allow all JSObjects including JSFunctions. As long as
1775 // JS_FUNCTION_TYPE is the last instance type and it is right
1776 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
1777 // bound.
1778 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1779 STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1780 __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
1781 __ b(lt, &build_args);
1782
1783 // Check that applicand.apply is Function.prototype.apply.
1784 __ ldr(r0, MemOperand(sp, kPointerSize));
1785 __ JumpIfSmi(r0, &build_args);
1786 __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
1787 __ b(ne, &build_args);
1788 Handle<Code> apply_code(
1789 Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply));
1790 __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
1791 __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
1792 __ cmp(r1, Operand(apply_code));
1793 __ b(ne, &build_args);
1794
1795 // Check that applicand is a function.
1796 __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1797 __ JumpIfSmi(r1, &build_args);
1798 __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
1799 __ b(ne, &build_args);
1800
1801 // Copy the arguments to this function possibly from the
1802 // adaptor frame below it.
1803 Label invoke, adapted;
1804 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1805 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1806 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1807 __ b(eq, &adapted);
1808
1809 // No arguments adaptor frame. Copy fixed number of arguments.
1810 __ mov(r0, Operand(scope()->num_parameters()));
1811 for (int i = 0; i < scope()->num_parameters(); i++) {
1812 __ ldr(r2, frame_->ParameterAt(i));
1813 __ push(r2);
1814 }
1815 __ jmp(&invoke);
1816
1817 // Arguments adaptor frame present. Copy arguments from there, but cap
1818 // the number of arguments copied to avoid stack overflows.
1819 __ bind(&adapted);
1820 static const uint32_t kArgumentsLimit = 1 * KB;
1821 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1822 __ mov(r0, Operand(r0, LSR, kSmiTagSize));
1823 __ mov(r3, r0);
1824 __ cmp(r0, Operand(kArgumentsLimit));
1825 __ b(gt, &build_args);
1826
1827 // Loop through the arguments pushing them onto the execution
1828 // stack. We don't inform the virtual frame of the push, so we don't
1829 // have to worry about getting rid of the elements from the virtual
1830 // frame.
1831 Label loop;
1832 // r3 is a small non-negative integer, due to the test above.
1833 __ cmp(r3, Operand(0, RelocInfo::NONE));
1834 __ b(eq, &invoke);
1835 // Compute the address of the first argument.
1836 __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
1837 __ add(r2, r2, Operand(kPointerSize));
1838 __ bind(&loop);
1839 // Post-decrement argument address by kPointerSize on each iteration.
1840 __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
1841 __ push(r4);
1842 __ sub(r3, r3, Operand(1), SetCC);
1843 __ b(gt, &loop);
1844
1845 // Invoke the function.
1846 __ bind(&invoke);
1847 ParameterCount actual(r0);
1848 __ InvokeFunction(r1, actual, CALL_FUNCTION);
1849 // Drop applicand.apply and applicand from the stack, and push
1850 // the result of the function call, but leave the spilled frame
1851 // unchanged, with 3 elements, so it is correct when we compile the
1852 // slow-case code.
1853 __ add(sp, sp, Operand(2 * kPointerSize));
1854 __ push(r0);
1855 // Stack now has 1 element:
1856 // sp[0]: result
1857 __ jmp(&done);
1858
1859 // Slow-case: Allocate the arguments object since we know it isn't
1860 // there, and fall through to the slow-case code where we call
1861 // applicand.apply.
1862 __ bind(&build_args);
1863 // Stack now has 3 elements, because we jumped here from a point where:
1864 // sp[0]: receiver
1865 // sp[1]: applicand.apply
1866 // sp[2]: applicand.
1867 StoreArgumentsObject(false);
1868
1869 // Stack and frame now have 4 elements.
1870 slow.Bind();
1871
1872 // Generic computation of x.apply(y, args) with no special optimization.
1873 // Flip applicand.apply and applicand on the stack, so
1874 // applicand looks like the receiver of the applicand.apply call.
1875 // Then process it as a normal function call.
1876 __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
1877 __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1878 __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
1879
1880 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
1881 frame_->CallStub(&call_function, 3);
1882 // The function and its two arguments have been dropped.
1883 frame_->Drop(); // Drop the receiver as well.
1884 frame_->EmitPush(r0);
1885 frame_->SpillAll(); // The fast path jumps to 'done' with a spilled frame, so match it here.
1886 // Stack now has 1 element:
1887 // sp[0]: result
1888 __ bind(&done);
1889
1890 // Restore the context register after a call.
1891 __ ldr(cp, frame_->Context());
1892 }
1893
1894
1895 void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
1896 ASSERT(has_cc());
1897 Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
1898 target->Branch(cond);
1899 cc_reg_ = al;
1900 }
1901
1902
1903 void CodeGenerator::CheckStack() {
1904 frame_->SpillAll();
1905 Comment cmnt(masm_, "[ check stack");
1906 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
1907 masm_->cmp(sp, Operand(ip));
1908 StackCheckStub stub;
1909 // Call the stub if lower.
1910 masm_->mov(ip,
1911 Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
1912 RelocInfo::CODE_TARGET),
1913 LeaveCC,
1914 lo);
1915 masm_->Call(ip, lo);
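// Both the mov and the Call above are predicated on 'lo', so the stack
// check stub is only invoked when sp has dropped below the stack limit;
// otherwise the two instructions fall through without effect.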
1916 }
1917
1918
1919 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
1920 #ifdef DEBUG
1921 int original_height = frame_->height();
1922 #endif
1923 for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
1924 Visit(statements->at(i));
1925 }
1926 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1927 }
1928
1929
1930 void CodeGenerator::VisitBlock(Block* node) {
1931 #ifdef DEBUG
1932 int original_height = frame_->height();
1933 #endif
1934 Comment cmnt(masm_, "[ Block");
1935 CodeForStatementPosition(node);
1936 node->break_target()->SetExpectedHeight();
1937 VisitStatements(node->statements());
1938 if (node->break_target()->is_linked()) {
1939 node->break_target()->Bind();
1940 }
1941 node->break_target()->Unuse();
1942 ASSERT(!has_valid_frame() || frame_->height() == original_height);
1943 }
1944
1945
1946 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
1947 frame_->EmitPush(cp);
1948 frame_->EmitPush(Operand(pairs));
1949 frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
1950 frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
1951
1952 frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
1953 // The result is discarded.
1954 }
1955
1956
1957 void CodeGenerator::VisitDeclaration(Declaration* node) {
1958 #ifdef DEBUG
1959 int original_height = frame_->height();
1960 #endif
1961 Comment cmnt(masm_, "[ Declaration");
1962 Variable* var = node->proxy()->var();
1963 ASSERT(var != NULL); // must have been resolved
1964 Slot* slot = var->AsSlot();
1965
1966 // If it was not possible to allocate the variable at compile time,
1967 // we need to "declare" it at runtime to make sure it actually
1968 // exists in the local context.
1969 if (slot != NULL && slot->type() == Slot::LOOKUP) {
1970 // Variables with a "LOOKUP" slot were introduced as non-locals
1971 // during variable resolution and must have mode DYNAMIC.
1972 ASSERT(var->is_dynamic());
1973 // For now, just do a runtime call.
1974 frame_->EmitPush(cp);
1975 frame_->EmitPush(Operand(var->name()));
1976 // Declaration nodes are always declared in only two modes.
1977 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1978 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
1979 frame_->EmitPush(Operand(Smi::FromInt(attr)));
1980 // Push initial value, if any.
1981 // Note: For variables we must not push an initial value (such as
1982 // 'undefined') because we may have a (legal) redeclaration and we
1983 // must not destroy the current value.
1984 if (node->mode() == Variable::CONST) {
1985 frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
1986 } else if (node->fun() != NULL) {
1987 Load(node->fun());
1988 } else {
1989 frame_->EmitPush(Operand(0, RelocInfo::NONE));
1990 }
1991
1992 frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1993 // Ignore the return value (declarations are statements).
1994
1995 ASSERT(frame_->height() == original_height);
1996 return;
1997 }
1998
1999 ASSERT(!var->is_global());
2000
2001 // If we have a function or a constant, we need to initialize the variable.
2002 Expression* val = NULL;
2003 if (node->mode() == Variable::CONST) {
2004 val = new Literal(FACTORY->the_hole_value());
2005 } else {
2006 val = node->fun(); // NULL if we don't have a function
2007 }
2008
2009
2010 if (val != NULL) {
2011 WriteBarrierCharacter wb_info =
2012 val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
2013 if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
2014 // Set initial value.
2015 Reference target(this, node->proxy());
2016 Load(val);
2017 target.SetValue(NOT_CONST_INIT, wb_info);
2018
2019 // Get rid of the assigned value (declarations are statements).
2020 frame_->Drop();
2021 }
2022 ASSERT(frame_->height() == original_height);
2023 }
2024
2025
2026 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
2027 #ifdef DEBUG
2028 int original_height = frame_->height();
2029 #endif
2030 Comment cmnt(masm_, "[ ExpressionStatement");
2031 CodeForStatementPosition(node);
2032 Expression* expression = node->expression();
2033 expression->MarkAsStatement();
2034 Load(expression);
2035 frame_->Drop();
2036 ASSERT(frame_->height() == original_height);
2037 }
2038
2039
2040 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
2041 #ifdef DEBUG
2042 int original_height = frame_->height();
2043 #endif
2044 Comment cmnt(masm_, "// EmptyStatement");
2045 CodeForStatementPosition(node);
2046 // nothing to do
2047 ASSERT(frame_->height() == original_height);
2048 }
2049
2050
2051 void CodeGenerator::VisitIfStatement(IfStatement* node) {
2052 #ifdef DEBUG
2053 int original_height = frame_->height();
2054 #endif
2055 Comment cmnt(masm_, "[ IfStatement");
2056 // Generate different code depending on which parts of the if statement
2057 // are present or not.
2058 bool has_then_stm = node->HasThenStatement();
2059 bool has_else_stm = node->HasElseStatement();
2060
2061 CodeForStatementPosition(node);
2062
2063 JumpTarget exit;
2064 if (has_then_stm && has_else_stm) {
2065 Comment cmnt(masm_, "[ IfThenElse");
2066 JumpTarget then;
2067 JumpTarget else_;
2068 // if (cond)
2069 LoadCondition(node->condition(), &then, &else_, true);
2070 if (frame_ != NULL) {
2071 Branch(false, &else_);
2072 }
2073 // then
2074 if (frame_ != NULL || then.is_linked()) {
2075 then.Bind();
2076 Visit(node->then_statement());
2077 }
2078 if (frame_ != NULL) {
2079 exit.Jump();
2080 }
2081 // else
2082 if (else_.is_linked()) {
2083 else_.Bind();
2084 Visit(node->else_statement());
2085 }
2086
2087 } else if (has_then_stm) {
2088 Comment cmnt(masm_, "[ IfThen");
2089 ASSERT(!has_else_stm);
2090 JumpTarget then;
2091 // if (cond)
2092 LoadCondition(node->condition(), &then, &exit, true);
2093 if (frame_ != NULL) {
2094 Branch(false, &exit);
2095 }
2096 // then
2097 if (frame_ != NULL || then.is_linked()) {
2098 then.Bind();
2099 Visit(node->then_statement());
2100 }
2101
2102 } else if (has_else_stm) {
2103 Comment cmnt(masm_, "[ IfElse");
2104 ASSERT(!has_then_stm);
2105 JumpTarget else_;
2106 // if (!cond)
2107 LoadCondition(node->condition(), &exit, &else_, true);
2108 if (frame_ != NULL) {
2109 Branch(true, &exit);
2110 }
2111 // else
2112 if (frame_ != NULL || else_.is_linked()) {
2113 else_.Bind();
2114 Visit(node->else_statement());
2115 }
2116
2117 } else {
2118 Comment cmnt(masm_, "[ If");
2119 ASSERT(!has_then_stm && !has_else_stm);
2120 // if (cond)
2121 LoadCondition(node->condition(), &exit, &exit, false);
2122 if (frame_ != NULL) {
2123 if (has_cc()) {
2124 cc_reg_ = al;
2125 } else {
2126 frame_->Drop();
2127 }
2128 }
2129 }
2130
2131 // end
2132 if (exit.is_linked()) {
2133 exit.Bind();
2134 }
2135 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2136 }
2137
2138
2139 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
2140 Comment cmnt(masm_, "[ ContinueStatement");
2141 CodeForStatementPosition(node);
2142 node->target()->continue_target()->Jump();
2143 }
2144
2145
2146 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
2147 Comment cmnt(masm_, "[ BreakStatement");
2148 CodeForStatementPosition(node);
2149 node->target()->break_target()->Jump();
2150 }
2151
2152
2153 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
2154 Comment cmnt(masm_, "[ ReturnStatement");
2155
2156 CodeForStatementPosition(node);
2157 Load(node->expression());
2158 frame_->PopToR0();
2159 frame_->PrepareForReturn();
2160 if (function_return_is_shadowed_) {
2161 function_return_.Jump();
2162 } else {
2163 // Pop the result from the frame and prepare the frame for
2164 // returning, thus making it easier to merge.
2165 if (function_return_.is_bound()) {
2166 // If the function return label is already bound we reuse the
2167 // code by jumping to the return site.
2168 function_return_.Jump();
2169 } else {
2170 function_return_.Bind();
2171 GenerateReturnSequence();
2172 }
2173 }
2174 }
2175
2176
2177 void CodeGenerator::GenerateReturnSequence() {
2178 if (FLAG_trace) {
2179 // Push the return value on the stack as the parameter.
2180 // Runtime::TraceExit returns the parameter as it is.
2181 frame_->EmitPush(r0);
2182 frame_->CallRuntime(Runtime::kTraceExit, 1);
2183 }
2184
2185 #ifdef DEBUG
2186 // Add a label for checking the size of the code used for returning.
2187 Label check_exit_codesize;
2188 masm_->bind(&check_exit_codesize);
2189 #endif
2190 // Make sure that the constant pool is not emitted inside of the return
2191 // sequence.
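// The return sequence must keep a known, fixed size so that the debugger
// can patch it in place (see the assert below); an interleaved constant
// pool would change that size.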
2192 { Assembler::BlockConstPoolScope block_const_pool(masm_);
2193 // Tear down the frame which will restore the caller's frame pointer and
2194 // the link register.
2195 frame_->Exit();
2196
2197 // Here we use masm_-> instead of the __ macro to keep the code coverage
2198 // tool from instrumenting this code, as we rely on the exact code size here.
2199 int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
2200 masm_->add(sp, sp, Operand(sp_delta));
2201 masm_->Jump(lr);
2202 DeleteFrame();
2203
2204 #ifdef DEBUG
2205 // Check that the size of the code used for returning is large enough
2206 // for the debugger's requirements.
2207 ASSERT(Assembler::kJSReturnSequenceInstructions <=
2208 masm_->InstructionsGeneratedSince(&check_exit_codesize));
2209 #endif
2210 }
2211 }
2212
2213
2214 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
2215 #ifdef DEBUG
2216 int original_height = frame_->height();
2217 #endif
2218 Comment cmnt(masm_, "[ WithEnterStatement");
2219 CodeForStatementPosition(node);
2220 Load(node->expression());
2221 if (node->is_catch_block()) {
2222 frame_->CallRuntime(Runtime::kPushCatchContext, 1);
2223 } else {
2224 frame_->CallRuntime(Runtime::kPushContext, 1);
2225 }
2226 #ifdef DEBUG
2227 JumpTarget verified_true;
2228 __ cmp(r0, cp);
2229 verified_true.Branch(eq);
2230 __ stop("PushContext: r0 is expected to be the same as cp");
2231 verified_true.Bind();
2232 #endif
2233 // Update context local.
2234 __ str(cp, frame_->Context());
2235 ASSERT(frame_->height() == original_height);
2236 }
2237
2238
2239 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
2240 #ifdef DEBUG
2241 int original_height = frame_->height();
2242 #endif
2243 Comment cmnt(masm_, "[ WithExitStatement");
2244 CodeForStatementPosition(node);
2245 // Pop context.
2246 __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
2247 // Update context local.
2248 __ str(cp, frame_->Context());
2249 ASSERT(frame_->height() == original_height);
2250 }
2251
2252
2253 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
2254 #ifdef DEBUG
2255 int original_height = frame_->height();
2256 #endif
2257 Comment cmnt(masm_, "[ SwitchStatement");
2258 CodeForStatementPosition(node);
2259 node->break_target()->SetExpectedHeight();
2260
2261 Load(node->tag());
2262
2263 JumpTarget next_test;
2264 JumpTarget fall_through;
2265 JumpTarget default_entry;
2266 JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
2267 ZoneList<CaseClause*>* cases = node->cases();
2268 int length = cases->length();
2269 CaseClause* default_clause = NULL;
2270
2271 for (int i = 0; i < length; i++) {
2272 CaseClause* clause = cases->at(i);
2273 if (clause->is_default()) {
2274 // Remember the default clause and compile it at the end.
2275 default_clause = clause;
2276 continue;
2277 }
2278
2279 Comment cmnt(masm_, "[ Case clause");
2280 // Compile the test.
2281 next_test.Bind();
2282 next_test.Unuse();
2283 // Duplicate TOS.
2284 frame_->Dup();
2285 Comparison(eq, NULL, clause->label(), true);
2286 Branch(false, &next_test);
2287
2288 // Before entering the body from the test, remove the switch value from
2289 // the stack.
2290 frame_->Drop();
2291
2292 // Label the body so that fall through is enabled.
2293 if (i > 0 && cases->at(i - 1)->is_default()) {
2294 default_exit.Bind();
2295 } else {
2296 fall_through.Bind();
2297 fall_through.Unuse();
2298 }
2299 VisitStatements(clause->statements());
2300
2301 // If control flow can fall through from the body, jump to the next body
2302 // or the end of the statement.
2303 if (frame_ != NULL) {
2304 if (i < length - 1 && cases->at(i + 1)->is_default()) {
2305 default_entry.Jump();
2306 } else {
2307 fall_through.Jump();
2308 }
2309 }
2310 }
2311
2312 // The final "test" removes the switch value.
2313 next_test.Bind();
2314 frame_->Drop();
2315
2316 // If there is a default clause, compile it.
2317 if (default_clause != NULL) {
2318 Comment cmnt(masm_, "[ Default clause");
2319 default_entry.Bind();
2320 VisitStatements(default_clause->statements());
2321 // If control flow can fall out of the default and there is a case after
2322 // it, jump to that case's body.
2323 if (frame_ != NULL && default_exit.is_bound()) {
2324 default_exit.Jump();
2325 }
2326 }
2327
2328 if (fall_through.is_linked()) {
2329 fall_through.Bind();
2330 }
2331
2332 if (node->break_target()->is_linked()) {
2333 node->break_target()->Bind();
2334 }
2335 node->break_target()->Unuse();
2336 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2337 }
2338
2339
2340 void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
2341 #ifdef DEBUG
2342 int original_height = frame_->height();
2343 #endif
2344 Comment cmnt(masm_, "[ DoWhileStatement");
2345 CodeForStatementPosition(node);
2346 node->break_target()->SetExpectedHeight();
2347 JumpTarget body(JumpTarget::BIDIRECTIONAL);
2348 IncrementLoopNesting();
2349
2350 // Label the top of the loop for the backward CFG edge. If the test
2351 // is always true we can use the continue target, and if the test is
2352 // always false there is no need.
2353 ConditionAnalysis info = AnalyzeCondition(node->cond());
2354 switch (info) {
2355 case ALWAYS_TRUE:
2356 node->continue_target()->SetExpectedHeight();
2357 node->continue_target()->Bind();
2358 break;
2359 case ALWAYS_FALSE:
2360 node->continue_target()->SetExpectedHeight();
2361 break;
2362 case DONT_KNOW:
2363 node->continue_target()->SetExpectedHeight();
2364 body.Bind();
2365 break;
2366 }
2367
2368 CheckStack(); // TODO(1222600): ignore if body contains calls.
2369 Visit(node->body());
2370
2371 // Compile the test.
2372 switch (info) {
2373 case ALWAYS_TRUE:
2374 // If control can fall off the end of the body, jump back to the
2375 // top.
2376 if (has_valid_frame()) {
2377 node->continue_target()->Jump();
2378 }
2379 break;
2380 case ALWAYS_FALSE:
2381 // If we have a continue in the body, we only have to bind its
2382 // jump target.
2383 if (node->continue_target()->is_linked()) {
2384 node->continue_target()->Bind();
2385 }
2386 break;
2387 case DONT_KNOW:
2388 // We have to compile the test expression if it can be reached by
2389 // control flow falling out of the body or via continue.
2390 if (node->continue_target()->is_linked()) {
2391 node->continue_target()->Bind();
2392 }
2393 if (has_valid_frame()) {
2394 Comment cmnt(masm_, "[ DoWhileCondition");
2395 CodeForDoWhileConditionPosition(node);
2396 LoadCondition(node->cond(), &body, node->break_target(), true);
2397 if (has_valid_frame()) {
2398 // An invalid frame here indicates that control did not
2399 // fall out of the test expression.
2400 Branch(true, &body);
2401 }
2402 }
2403 break;
2404 }
2405
2406 if (node->break_target()->is_linked()) {
2407 node->break_target()->Bind();
2408 }
2409 DecrementLoopNesting();
2410 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2411 }
2412
2413
2414 void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
2415 #ifdef DEBUG
2416 int original_height = frame_->height();
2417 #endif
2418 Comment cmnt(masm_, "[ WhileStatement");
2419 CodeForStatementPosition(node);
2420
2421 // If the test is never true and has no side effects there is no need
2422 // to compile the test or body.
2423 ConditionAnalysis info = AnalyzeCondition(node->cond());
2424 if (info == ALWAYS_FALSE) return;
2425
2426 node->break_target()->SetExpectedHeight();
2427 IncrementLoopNesting();
2428
2429 // Label the top of the loop with the continue target for the backward
2430 // CFG edge.
2431 node->continue_target()->SetExpectedHeight();
2432 node->continue_target()->Bind();
2433
2434 if (info == DONT_KNOW) {
2435 JumpTarget body(JumpTarget::BIDIRECTIONAL);
2436 LoadCondition(node->cond(), &body, node->break_target(), true);
2437 if (has_valid_frame()) {
2438 // A NULL frame indicates that control did not fall out of the
2439 // test expression.
2440 Branch(false, node->break_target());
2441 }
2442 if (has_valid_frame() || body.is_linked()) {
2443 body.Bind();
2444 }
2445 }
2446
2447 if (has_valid_frame()) {
2448 CheckStack(); // TODO(1222600): ignore if body contains calls.
2449 Visit(node->body());
2450
2451 // If control flow can fall out of the body, jump back to the top.
2452 if (has_valid_frame()) {
2453 node->continue_target()->Jump();
2454 }
2455 }
2456 if (node->break_target()->is_linked()) {
2457 node->break_target()->Bind();
2458 }
2459 DecrementLoopNesting();
2460 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2461 }
2462
2463
2464 void CodeGenerator::VisitForStatement(ForStatement* node) {
2465 #ifdef DEBUG
2466 int original_height = frame_->height();
2467 #endif
2468 Comment cmnt(masm_, "[ ForStatement");
2469 CodeForStatementPosition(node);
2470 if (node->init() != NULL) {
2471 Visit(node->init());
2472 }
2473
2474 // If the test is never true there is no need to compile the test or
2475 // body.
2476 ConditionAnalysis info = AnalyzeCondition(node->cond());
2477 if (info == ALWAYS_FALSE) return;
2478
2479 node->break_target()->SetExpectedHeight();
2480 IncrementLoopNesting();
2481
2482 // We know that the loop index is a smi if it is not modified in the
2483 // loop body and it is checked against a constant limit in the loop
2484 // condition. In this case, we reset the static type information of the
2485 // loop index to smi before compiling the body, the update expression, and
2486 // the bottom check of the loop condition.
2487 TypeInfoCodeGenState type_info_scope(this,
2488 node->is_fast_smi_loop() ?
2489 node->loop_variable()->AsSlot() :
2490 NULL,
2491 TypeInfo::Smi());
2492
2493 // If there is no update statement, label the top of the loop with the
2494 // continue target, otherwise with the loop target.
2495 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2496 if (node->next() == NULL) {
2497 node->continue_target()->SetExpectedHeight();
2498 node->continue_target()->Bind();
2499 } else {
2500 node->continue_target()->SetExpectedHeight();
2501 loop.Bind();
2502 }
2503
2504 // If the test is always true, there is no need to compile it.
2505 if (info == DONT_KNOW) {
2506 JumpTarget body;
2507 LoadCondition(node->cond(), &body, node->break_target(), true);
2508 if (has_valid_frame()) {
2509 Branch(false, node->break_target());
2510 }
2511 if (has_valid_frame() || body.is_linked()) {
2512 body.Bind();
2513 }
2514 }
2515
2516 if (has_valid_frame()) {
2517 CheckStack(); // TODO(1222600): ignore if body contains calls.
2518 Visit(node->body());
2519
2520 if (node->next() == NULL) {
2521 // If there is no update statement and control flow can fall out
2522 // of the loop, jump directly to the continue label.
2523 if (has_valid_frame()) {
2524 node->continue_target()->Jump();
2525 }
2526 } else {
2527 // If there is an update statement and control flow can reach it
2528 // via falling out of the body of the loop or continuing, we
2529 // compile the update statement.
2530 if (node->continue_target()->is_linked()) {
2531 node->continue_target()->Bind();
2532 }
2533 if (has_valid_frame()) {
2534 // Record the source position of the statement; this code, which comes
2535 // after the code for the body, actually belongs to the loop statement
2536 // and not the body.
2537 CodeForStatementPosition(node);
2538 Visit(node->next());
2539 loop.Jump();
2540 }
2541 }
2542 }
2543 if (node->break_target()->is_linked()) {
2544 node->break_target()->Bind();
2545 }
2546 DecrementLoopNesting();
2547 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2548 }
2549
2550
2551 void CodeGenerator::VisitForInStatement(ForInStatement* node) {
2552 #ifdef DEBUG
2553 int original_height = frame_->height();
2554 #endif
2555 Comment cmnt(masm_, "[ ForInStatement");
2556 CodeForStatementPosition(node);
2557
2558 JumpTarget primitive;
2559 JumpTarget jsobject;
2560 JumpTarget fixed_array;
2561 JumpTarget entry(JumpTarget::BIDIRECTIONAL);
2562 JumpTarget end_del_check;
2563 JumpTarget exit;
2564
2565 // Get the object to enumerate over (converted to JSObject).
2566 Load(node->enumerable());
2567
2568 VirtualFrame::SpilledScope spilled_scope(frame_);
2569 // Both SpiderMonkey and kjs ignore null and undefined in contrast
2570 // to the specification. 12.6.4 mandates a call to ToObject.
2571 frame_->EmitPop(r0);
2572 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2573 __ cmp(r0, ip);
2574 exit.Branch(eq);
2575 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2576 __ cmp(r0, ip);
2577 exit.Branch(eq);
2578
2579 // Stack layout in body:
2580 // [iteration counter (Smi)]
2581 // [length of array]
2582 // [FixedArray]
2583 // [Map or 0]
2584 // [Object]
2585
2586 // Check if enumerable is already a JSObject
2587 __ tst(r0, Operand(kSmiTagMask));
2588 primitive.Branch(eq);
2589 __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
2590 jsobject.Branch(hs);
2591
2592 primitive.Bind();
2593 frame_->EmitPush(r0);
2594 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
2595
2596 jsobject.Bind();
2597 // Get the set of properties (as a FixedArray or Map).
2598 // r0: value to be iterated over
2599 frame_->EmitPush(r0); // Push the object being iterated over.
2600
2601 // Check cache validity in generated code. This is a fast case for
2602 // the JSObject::IsSimpleEnum cache validity checks. If we cannot
2603 // guarantee cache validity, call the runtime system to check cache
2604 // validity or get the property names in a fixed array.
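// The loop below walks the prototype chain starting at the receiver (r1),
// requiring every object on the chain to have no elements and a non-empty
// descriptor array with an enum cache; only the receiver itself may have a
// non-empty enum cache bridge. If any check fails we go to the runtime.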
2605 JumpTarget call_runtime;
2606 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2607 JumpTarget check_prototype;
2608 JumpTarget use_cache;
2609 __ mov(r1, Operand(r0));
2610 loop.Bind();
2611 // Check that there are no elements.
2612 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
2613 __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
2614 __ cmp(r2, r4);
2615 call_runtime.Branch(ne);
2616 // Check that instance descriptors are not empty so that we can
2617 // check for an enum cache. Leave the map in r3 for the subsequent
2618 // prototype load.
2619 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2620 __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
2621 __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
2622 __ cmp(r2, ip);
2623 call_runtime.Branch(eq);
2624 // Check that there is an enum cache in the non-empty instance
2625 // descriptors. This is the case if the next enumeration index
2626 // field does not contain a smi.
2627 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
2628 __ tst(r2, Operand(kSmiTagMask));
2629 call_runtime.Branch(eq);
2630 // For all objects but the receiver, check that the cache is empty.
2631 // r4: empty fixed array root.
2632 __ cmp(r1, r0);
2633 check_prototype.Branch(eq);
2634 __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
2635 __ cmp(r2, r4);
2636 call_runtime.Branch(ne);
2637 check_prototype.Bind();
2638 // Load the prototype from the map and loop if non-null.
2639 __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
2640 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2641 __ cmp(r1, ip);
2642 loop.Branch(ne);
2643 // The enum cache is valid. Load the map of the object being
2644 // iterated over and use the cache for the iteration.
2645 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
2646 use_cache.Jump();
2647
2648 call_runtime.Bind();
2649 // Call the runtime to get the property names for the object.
2650 frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
2651 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
2652
2653 // If we got a map from the runtime call, we can do a fast
2654 // modification check. Otherwise, we got a fixed array, and we have
2655 // to do a slow check.
2656 // r0: map or fixed array (result from call to
2657 // Runtime::kGetPropertyNamesFast)
2658 __ mov(r2, Operand(r0));
2659 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
2660 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
2661 __ cmp(r1, ip);
2662 fixed_array.Branch(ne);
2663
2664 use_cache.Bind();
2665 // Get enum cache
2666 // r0: map (either the result from a call to
2667 // Runtime::kGetPropertyNamesFast or has been fetched directly from
2668 // the object)
2669 __ mov(r1, Operand(r0));
2670 __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
2671 __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
2672 __ ldr(r2,
2673 FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
2674
2675 frame_->EmitPush(r0); // map
2676 frame_->EmitPush(r2); // enum cache bridge cache
2677 __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
2678 frame_->EmitPush(r0);
2679 __ mov(r0, Operand(Smi::FromInt(0)));
2680 frame_->EmitPush(r0);
2681 entry.Jump();
2682
2683 fixed_array.Bind();
2684 __ mov(r1, Operand(Smi::FromInt(0)));
2685 frame_->EmitPush(r1); // insert 0 in place of Map
2686 frame_->EmitPush(r0);
2687
2688 // Push the length of the array and the initial index onto the stack.
2689 __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
2690 frame_->EmitPush(r0);
2691 __ mov(r0, Operand(Smi::FromInt(0))); // init index
2692 frame_->EmitPush(r0);
2693
2694 // Condition.
2695 entry.Bind();
2696 // sp[0] : index
2697 // sp[1] : array/enum cache length
2698 // sp[2] : array or enum cache
2699 // sp[3] : 0 or map
2700 // sp[4] : enumerable
2701 // Grab the current frame's height for the break and continue
2702 // targets only after all the state is pushed on the frame.
2703 node->break_target()->SetExpectedHeight();
2704 node->continue_target()->SetExpectedHeight();
2705
2706 // Load the current count to r0, load the length to r1.
2707 __ Ldrd(r0, r1, frame_->ElementAt(0));
2708 __ cmp(r0, r1); // compare to the array length
2709 node->break_target()->Branch(hs);
2710
2711 // Get the i'th entry of the array.
2712 __ ldr(r2, frame_->ElementAt(2));
2713 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2714 __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
2715
2716 // Get Map or 0.
2717 __ ldr(r2, frame_->ElementAt(3));
2718 // Check if this (still) matches the map of the enumerable.
2719 // If not, we have to filter the key.
2720 __ ldr(r1, frame_->ElementAt(4));
2721 __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
2722 __ cmp(r1, Operand(r2));
2723 end_del_check.Branch(eq);
2724
2725 // Convert the entry to a string (or null if it isn't a property anymore).
2726 __ ldr(r0, frame_->ElementAt(4)); // push enumerable
2727 frame_->EmitPush(r0);
2728 frame_->EmitPush(r3); // push entry
2729 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
2730 __ mov(r3, Operand(r0), SetCC);
2731 // If the property has been removed while iterating, we just skip it.
2732 node->continue_target()->Branch(eq);
2733
2734 end_del_check.Bind();
2735 // Store the entry in the 'each' expression and take another spin in the
2736 // loop. r3: i'th entry of the enum cache (or string thereof).
2737 frame_->EmitPush(r3); // push entry
2738 { VirtualFrame::RegisterAllocationScope scope(this);
2739 Reference each(this, node->each());
2740 if (!each.is_illegal()) {
2741 if (each.size() > 0) {
2742 // Loading a reference may leave the frame in an unspilled state.
2743 frame_->SpillAll(); // Sync stack to memory.
2744 // Get the value (under the reference on the stack) from memory.
2745 __ ldr(r0, frame_->ElementAt(each.size()));
2746 frame_->EmitPush(r0);
2747 each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
2748 frame_->Drop(2); // The result of the set and the extra pushed value.
2749 } else {
2750 // If the reference was to a slot we rely on the convenient property
2751 // that it doesn't matter whether a value (eg, r3 pushed above) is
2752 // right on top of or right underneath a zero-sized reference.
2753 each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
2754 frame_->Drop(1); // Drop the result of the set operation.
2755 }
2756 }
2757 }
2758 // Body.
2759 CheckStack(); // TODO(1222600): ignore if body contains calls.
2760 { VirtualFrame::RegisterAllocationScope scope(this);
2761 Visit(node->body());
2762 }
2763
2764 // Next. Reestablish a spilled frame in case we are coming here via
2765 // a continue in the body.
2766 node->continue_target()->Bind();
2767 frame_->SpillAll();
2768 frame_->EmitPop(r0);
2769 __ add(r0, r0, Operand(Smi::FromInt(1)));
2770 frame_->EmitPush(r0);
2771 entry.Jump();
2772
2773 // Cleanup. No need to spill because VirtualFrame::Drop is safe for
2774 // any frame.
2775 node->break_target()->Bind();
2776 frame_->Drop(5);
2777
2778 // Exit.
2779 exit.Bind();
2780 node->continue_target()->Unuse();
2781 node->break_target()->Unuse();
2782 ASSERT(frame_->height() == original_height);
2783 }
2784
2785
2786 void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
2787 #ifdef DEBUG
2788 int original_height = frame_->height();
2789 #endif
2790 VirtualFrame::SpilledScope spilled_scope(frame_);
2791 Comment cmnt(masm_, "[ TryCatchStatement");
2792 CodeForStatementPosition(node);
2793
2794 JumpTarget try_block;
2795 JumpTarget exit;
2796
2797 try_block.Call();
2798 // --- Catch block ---
2799 frame_->EmitPush(r0);
2800
2801 // Store the caught exception in the catch variable.
2802 Variable* catch_var = node->catch_var()->var();
2803 ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
2804 StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
2805
2806 // Remove the exception from the stack.
2807 frame_->Drop();
2808
2809 { VirtualFrame::RegisterAllocationScope scope(this);
2810 VisitStatements(node->catch_block()->statements());
2811 }
2812 if (frame_ != NULL) {
2813 exit.Jump();
2814 }
2815
2816
2817 // --- Try block ---
2818 try_block.Bind();
2819
2820 frame_->PushTryHandler(TRY_CATCH_HANDLER);
2821 int handler_height = frame_->height();
2822
2823 // Shadow the labels for all escapes from the try block, including
2824 // returns. During shadowing, the original label is hidden as the
2825 // LabelShadow and operations on the original actually affect the
2826 // shadowing label.
2827 //
2828 // We should probably try to unify the escaping labels and the return
2829 // label.
2830 int nof_escapes = node->escaping_targets()->length();
2831 List<ShadowTarget*> shadows(1 + nof_escapes);
2832
2833 // Add the shadow target for the function return.
2834 static const int kReturnShadowIndex = 0;
2835 shadows.Add(new ShadowTarget(&function_return_));
2836 bool function_return_was_shadowed = function_return_is_shadowed_;
2837 function_return_is_shadowed_ = true;
2838 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2839
2840 // Add the remaining shadow targets.
2841 for (int i = 0; i < nof_escapes; i++) {
2842 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2843 }
2844
2845 // Generate code for the statements in the try block.
2846 { VirtualFrame::RegisterAllocationScope scope(this);
2847 VisitStatements(node->try_block()->statements());
2848 }
2849
2850 // Stop the introduced shadowing and count the number of required unlinks.
2851 // After shadowing stops, the original labels are unshadowed and the
2852 // LabelShadows represent the formerly shadowing labels.
2853 bool has_unlinks = false;
2854 for (int i = 0; i < shadows.length(); i++) {
2855 shadows[i]->StopShadowing();
2856 has_unlinks = has_unlinks || shadows[i]->is_linked();
2857 }
2858 function_return_is_shadowed_ = function_return_was_shadowed;
2859
2860 // Get an external reference to the handler address.
2861 ExternalReference handler_address(Isolate::k_handler_address, isolate());
2862
2863 // If we can fall off the end of the try block, unlink from try chain.
2864 if (has_valid_frame()) {
2865 // The next handler address is on top of the frame. Unlink from
2866 // the handler list and drop the rest of this handler from the
2867 // frame.
2868 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2869 frame_->EmitPop(r1); // r0 can contain the return value.
2870 __ mov(r3, Operand(handler_address));
2871 __ str(r1, MemOperand(r3));
2872 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2873 if (has_unlinks) {
2874 exit.Jump();
2875 }
2876 }
2877
2878 // Generate unlink code for the (formerly) shadowing labels that have been
2879 // jumped to. Deallocate each shadow target.
2880 for (int i = 0; i < shadows.length(); i++) {
2881 if (shadows[i]->is_linked()) {
2882 // Unlink from the try chain.
2883 shadows[i]->Bind();
2884 // Because we can be jumping here (to spilled code) from unspilled
2885 // code, we need to reestablish a spilled frame at this block.
2886 frame_->SpillAll();
2887
2888 // Reload sp from the top handler, because some statements that we
2889 // break from (eg, for...in) may have left stuff on the stack.
2890 __ mov(r3, Operand(handler_address));
2891 __ ldr(sp, MemOperand(r3));
2892 frame_->Forget(frame_->height() - handler_height);
2893
2894 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2895 frame_->EmitPop(r1); // r0 can contain the return value.
2896 __ str(r1, MemOperand(r3));
2897 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2898
2899 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2900 frame_->PrepareForReturn();
2901 }
2902 shadows[i]->other_target()->Jump();
2903 }
2904 }
2905
2906 exit.Bind();
2907 ASSERT(!has_valid_frame() || frame_->height() == original_height);
2908 }
2909
2910
2911 void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
2912 #ifdef DEBUG
2913 int original_height = frame_->height();
2914 #endif
2915 VirtualFrame::SpilledScope spilled_scope(frame_);
2916 Comment cmnt(masm_, "[ TryFinallyStatement");
2917 CodeForStatementPosition(node);
2918
2919 // State: Used to keep track of the reason for entering the finally
2920 // block. Should probably be extended to hold information for
2921 // break/continue from within the try block.
2922 enum { FALLING, THROWING, JUMPING };
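// r2 carries the state into the finally block: FALLING when the try block
// completed normally, THROWING when an exception was caught, and
// JUMPING + i when control left the try block through the i'th shadowed
// target (i == 0 being the function return).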
2923
2924 JumpTarget try_block;
2925 JumpTarget finally_block;
2926
2927 try_block.Call();
2928
2929 frame_->EmitPush(r0); // save exception object on the stack
2930 // In case of thrown exceptions, this is where we continue.
2931 __ mov(r2, Operand(Smi::FromInt(THROWING)));
2932 finally_block.Jump();
2933
2934 // --- Try block ---
2935 try_block.Bind();
2936
2937 frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2938 int handler_height = frame_->height();
2939
2940 // Shadow the labels for all escapes from the try block, including
2941 // returns. Shadowing hides the original label as the LabelShadow and
2942 // operations on the original actually affect the shadowing label.
2943 //
2944 // We should probably try to unify the escaping labels and the return
2945 // label.
2946 int nof_escapes = node->escaping_targets()->length();
2947 List<ShadowTarget*> shadows(1 + nof_escapes);
2948
2949 // Add the shadow target for the function return.
2950 static const int kReturnShadowIndex = 0;
2951 shadows.Add(new ShadowTarget(&function_return_));
2952 bool function_return_was_shadowed = function_return_is_shadowed_;
2953 function_return_is_shadowed_ = true;
2954 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2955
2956 // Add the remaining shadow targets.
2957 for (int i = 0; i < nof_escapes; i++) {
2958 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2959 }
2960
2961 // Generate code for the statements in the try block.
2962 { VirtualFrame::RegisterAllocationScope scope(this);
2963 VisitStatements(node->try_block()->statements());
2964 }
2965
2966 // Stop the introduced shadowing and count the number of required unlinks.
2967 // After shadowing stops, the original labels are unshadowed and the
2968 // LabelShadows represent the formerly shadowing labels.
2969 int nof_unlinks = 0;
2970 for (int i = 0; i < shadows.length(); i++) {
2971 shadows[i]->StopShadowing();
2972 if (shadows[i]->is_linked()) nof_unlinks++;
2973 }
2974 function_return_is_shadowed_ = function_return_was_shadowed;
2975
2976 // Get an external reference to the handler address.
2977 ExternalReference handler_address(Isolate::k_handler_address, isolate());
2978
2979 // If we can fall off the end of the try block, unlink from the try
2980 // chain and set the state on the frame to FALLING.
2981 if (has_valid_frame()) {
2982 // The next handler address is on top of the frame.
2983 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2984 frame_->EmitPop(r1);
2985 __ mov(r3, Operand(handler_address));
2986 __ str(r1, MemOperand(r3));
2987 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2988
2989 // Fake a top of stack value (unneeded when FALLING) and set the
2990 // state in r2, then jump around the unlink blocks if any.
2991 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2992 frame_->EmitPush(r0);
2993 __ mov(r2, Operand(Smi::FromInt(FALLING)));
2994 if (nof_unlinks > 0) {
2995 finally_block.Jump();
2996 }
2997 }
2998
2999 // Generate code to unlink and set the state for the (formerly)
3000 // shadowing targets that have been jumped to.
3001 for (int i = 0; i < shadows.length(); i++) {
3002 if (shadows[i]->is_linked()) {
3003 // If we have come from the shadowed return, the return value is
3004 // in (a non-refcounted reference to) r0. We must preserve it
3005 // until it is pushed.
3006 //
3007 // Because we can be jumping here (to spilled code) from
3008 // unspilled code, we need to reestablish a spilled frame at
3009 // this block.
3010 shadows[i]->Bind();
3011 frame_->SpillAll();
3012
3013 // Reload sp from the top handler, because some statements that
3014 // we break from (eg, for...in) may have left stuff on the
3015 // stack.
3016 __ mov(r3, Operand(handler_address));
3017 __ ldr(sp, MemOperand(r3));
3018 frame_->Forget(frame_->height() - handler_height);
3019
3020 // Unlink this handler and drop it from the frame. The next
3021 // handler address is currently on top of the frame.
3022 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3023 frame_->EmitPop(r1);
3024 __ str(r1, MemOperand(r3));
3025 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3026
3027 if (i == kReturnShadowIndex) {
3028 // If this label shadowed the function return, materialize the
3029 // return value on the stack.
3030 frame_->EmitPush(r0);
3031 } else {
3032 // Fake TOS for targets that shadowed breaks and continues.
3033 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3034 frame_->EmitPush(r0);
3035 }
3036 __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
3037 if (--nof_unlinks > 0) {
3038 // If this is not the last unlink block, jump around the next.
3039 finally_block.Jump();
3040 }
3041 }
3042 }
3043
3044 // --- Finally block ---
3045 finally_block.Bind();
3046
3047 // Push the state on the stack.
3048 frame_->EmitPush(r2);
3049
3050 // We keep two elements on the stack - the (possibly faked) result
3051 // and the state - while evaluating the finally block.
3052 //
3053 // Generate code for the statements in the finally block.
3054 { VirtualFrame::RegisterAllocationScope scope(this);
3055 VisitStatements(node->finally_block()->statements());
3056 }
3057
3058 if (has_valid_frame()) {
3059 // Restore state and return value or faked TOS.
3060 frame_->EmitPop(r2);
3061 frame_->EmitPop(r0);
3062 }
3063
3064 // Generate code to jump to the right destination for all used
3065 // formerly shadowing targets. Deallocate each shadow target.
3066 for (int i = 0; i < shadows.length(); i++) {
3067 if (has_valid_frame() && shadows[i]->is_bound()) {
3068 JumpTarget* original = shadows[i]->other_target();
3069 __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
3070 if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
3071 JumpTarget skip;
3072 skip.Branch(ne);
3073 frame_->PrepareForReturn();
3074 original->Jump();
3075 skip.Bind();
3076 } else {
3077 original->Branch(eq);
3078 }
3079 }
3080 }
3081
3082 if (has_valid_frame()) {
3083 // Check if we need to rethrow the exception.
3084 JumpTarget exit;
3085 __ cmp(r2, Operand(Smi::FromInt(THROWING)));
3086 exit.Branch(ne);
3087
3088 // Rethrow exception.
3089 frame_->EmitPush(r0);
3090 frame_->CallRuntime(Runtime::kReThrow, 1);
3091
3092 // Done.
3093 exit.Bind();
3094 }
3095 ASSERT(!has_valid_frame() || frame_->height() == original_height);
3096 }
3097
3098
3099 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
3100 #ifdef DEBUG
3101 int original_height = frame_->height();
3102 #endif
3103 Comment cmnt(masm_, "[ DebuggerStatement");
3104 CodeForStatementPosition(node);
3105 #ifdef ENABLE_DEBUGGER_SUPPORT
3106 frame_->DebugBreak();
3107 #endif
3108 // Ignore the return value.
3109 ASSERT(frame_->height() == original_height);
3110 }
3111
3112
3113 void CodeGenerator::InstantiateFunction(
3114 Handle<SharedFunctionInfo> function_info,
3115 bool pretenure) {
3116 // Use the fast case closure allocation code that allocates in new
3117 // space for nested functions that don't need literals cloning.
3118 if (!pretenure &&
3119 scope()->is_function_scope() &&
3120 function_info->num_literals() == 0) {
3121 FastNewClosureStub stub(
3122 function_info->strict_mode() ? kStrictMode : kNonStrictMode);
3123 frame_->EmitPush(Operand(function_info));
3124 frame_->SpillAll();
3125 frame_->CallStub(&stub, 1);
3126 frame_->EmitPush(r0);
3127 } else {
3128 // Create a new closure.
3129 frame_->EmitPush(cp);
3130 frame_->EmitPush(Operand(function_info));
3131 frame_->EmitPush(Operand(pretenure
3132 ? FACTORY->true_value()
3133 : FACTORY->false_value()));
3134 frame_->CallRuntime(Runtime::kNewClosure, 3);
3135 frame_->EmitPush(r0);
3136 }
3137 }
3138
3139
3140 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
3141 #ifdef DEBUG
3142 int original_height = frame_->height();
3143 #endif
3144 Comment cmnt(masm_, "[ FunctionLiteral");
3145
3146 // Build the function info and instantiate it.
3147 Handle<SharedFunctionInfo> function_info =
3148 Compiler::BuildFunctionInfo(node, script());
3149 if (function_info.is_null()) {
3150 SetStackOverflow();
3151 ASSERT(frame_->height() == original_height);
3152 return;
3153 }
3154 InstantiateFunction(function_info, node->pretenure());
3155 ASSERT_EQ(original_height + 1, frame_->height());
3156 }
3157
3158
3159 void CodeGenerator::VisitSharedFunctionInfoLiteral(
3160 SharedFunctionInfoLiteral* node) {
3161 #ifdef DEBUG
3162 int original_height = frame_->height();
3163 #endif
3164 Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
3165 InstantiateFunction(node->shared_function_info(), false);
3166 ASSERT_EQ(original_height + 1, frame_->height());
3167 }
3168
3169
3170 void CodeGenerator::VisitConditional(Conditional* node) {
3171 #ifdef DEBUG
3172 int original_height = frame_->height();
3173 #endif
3174 Comment cmnt(masm_, "[ Conditional");
3175 JumpTarget then;
3176 JumpTarget else_;
3177 LoadCondition(node->condition(), &then, &else_, true);
3178 if (has_valid_frame()) {
3179 Branch(false, &else_);
3180 }
3181 if (has_valid_frame() || then.is_linked()) {
3182 then.Bind();
3183 Load(node->then_expression());
3184 }
3185 if (else_.is_linked()) {
3186 JumpTarget exit;
3187 if (has_valid_frame()) exit.Jump();
3188 else_.Bind();
3189 Load(node->else_expression());
3190 if (exit.is_linked()) exit.Bind();
3191 }
3192 ASSERT_EQ(original_height + 1, frame_->height());
3193 }
3194
3195
3196 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
3197 if (slot->type() == Slot::LOOKUP) {
3198 ASSERT(slot->var()->is_dynamic());
3199
3200 // JumpTargets do not yet support merging frames so the frame must be
3201 // spilled when jumping to these targets.
3202 JumpTarget slow;
3203 JumpTarget done;
3204
3205 // Generate fast case for loading from slots that correspond to
3206 // local/global variables or arguments unless they are shadowed by
3207 // eval-introduced bindings.
3208 EmitDynamicLoadFromSlotFastCase(slot,
3209 typeof_state,
3210 &slow,
3211 &done);
3212
3213 slow.Bind();
3214 frame_->EmitPush(cp);
3215 frame_->EmitPush(Operand(slot->var()->name()));
3216
3217 if (typeof_state == INSIDE_TYPEOF) {
3218 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
3219 } else {
3220 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3221 }
3222
3223 done.Bind();
3224 frame_->EmitPush(r0);
3225
3226 } else {
3227 Register scratch = VirtualFrame::scratch0();
3228 TypeInfo info = type_info(slot);
3229 frame_->EmitPush(SlotOperand(slot, scratch), info);
3230
3231 if (slot->var()->mode() == Variable::CONST) {
3232 // Const slots may contain 'the hole' value (the constant hasn't been
3233 // initialized yet) which needs to be converted into the 'undefined'
3234 // value.
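// For example (legacy const semantics), in code like 'use(c); const c = 1;'
// the read of c happens while the slot still holds the hole and must yield
// undefined rather than the hole.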
3235 Comment cmnt(masm_, "[ Unhole const");
3236 Register tos = frame_->PopToRegister();
3237 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3238 __ cmp(tos, ip);
3239 __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
3240 frame_->EmitPush(tos);
3241 }
3242 }
3243 }
3244
3245
3246 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
3247 TypeofState state) {
3248 VirtualFrame::RegisterAllocationScope scope(this);
3249 LoadFromSlot(slot, state);
3250
3251 // Bail out quickly if we're not using lazy arguments allocation.
3252 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
3253
3254 // ... or if the slot isn't a non-parameter arguments slot.
3255 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
3256
3257 // Get the loaded value from the stack into a register, but leave it on
3258 // the stack.
3259 Register tos = frame_->Peek();
3260
3261 // If the loaded value is the sentinel that indicates that we
3262 // haven't loaded the arguments object yet, we need to do it now.
3263 JumpTarget exit;
3264 __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
3265 __ cmp(tos, ip);
3266 exit.Branch(ne);
3267 frame_->Drop();
3268 StoreArgumentsObject(false);
3269 exit.Bind();
3270 }
3271
3272
3273 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
3274 ASSERT(slot != NULL);
3275 VirtualFrame::RegisterAllocationScope scope(this);
3276 if (slot->type() == Slot::LOOKUP) {
3277 ASSERT(slot->var()->is_dynamic());
3278
3279 // For now, just do a runtime call.
3280 frame_->EmitPush(cp);
3281 frame_->EmitPush(Operand(slot->var()->name()));
3282
3283 if (init_state == CONST_INIT) {
3284 // Same as the case for a normal store, but ignores the attributes
3285 // (e.g. READ_ONLY) of the context slot so that we can initialize
3286 // const properties (introduced via eval("const foo = (some
3287 // expr);")). Also, uses the current function context instead of
3288 // the top context.
3289 //
3290 // Note that we must declare foo upon entry to eval(), via a
3291 // context slot declaration, but we cannot initialize it at the
3292 // same time, because the const declaration may be at the end of
3293 // the eval code (sigh...) and the const variable may have been
3294 // used before (where its value is 'undefined'). Thus, we can only
3295 // do the initialization when we actually encounter the expression
3296 // and when its operands are defined and valid, which is why this is
3297 // split into two operations: declaration of the context slot
3298 // followed by initialization.
3299 frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
3300 } else {
3301 frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
3302 frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
3303 }
3304 // Storing a variable must keep the (new) value on the expression
3305 // stack. This is necessary for compiling assignment expressions.
3306 frame_->EmitPush(r0);
3307
3308 } else {
3309 ASSERT(!slot->var()->is_dynamic());
3310 Register scratch = VirtualFrame::scratch0();
3311 Register scratch2 = VirtualFrame::scratch1();
3312
3313 // The frame must be spilled when branching to this target.
3314 JumpTarget exit;
3315
3316 if (init_state == CONST_INIT) {
3317 ASSERT(slot->var()->mode() == Variable::CONST);
3318 // Only the first const initialization must be executed (the slot
3319 // still contains 'the hole' value). When the assignment is
3320 // executed, the code is identical to a normal store (see below).
3321 Comment cmnt(masm_, "[ Init const");
3322 __ ldr(scratch, SlotOperand(slot, scratch));
3323 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3324 __ cmp(scratch, ip);
3325 exit.Branch(ne);
3326 }
3327
3328 // We must execute the store. Storing a variable must keep the
3329 // (new) value on the stack. This is necessary for compiling
3330 // assignment expressions.
3331 //
3332 // Note: We will reach here even with slot->var()->mode() ==
3333 // Variable::CONST because of const declarations which will
3334 // initialize consts to 'the hole' value and by doing so, end up
3335 // calling this code. r2 may be loaded with context; used below in
3336 // RecordWrite.
3337 Register tos = frame_->Peek();
3338 __ str(tos, SlotOperand(slot, scratch));
3339 if (slot->type() == Slot::CONTEXT) {
3340 // Skip write barrier if the written value is a smi.
3341 __ tst(tos, Operand(kSmiTagMask));
3342 // We don't use tos any more after here.
3343 exit.Branch(eq);
3344 // scratch is loaded with context when calling SlotOperand above.
3345 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
3346 // We need an extra register. Until we have a way to do that in the
3347 // virtual frame we will cheat and ask for a free TOS register.
3348 Register scratch3 = frame_->GetTOSRegister();
3349 __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
3350 }
3351 // If we definitely did not jump over the assignment, we do not need
3352 // to bind the exit label. Doing so can defeat peephole
3353 // optimization.
3354 if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
3355 exit.Bind();
3356 }
3357 }
3358 }
3359
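The tst/Branch(eq) pair in the context-slot case above skips the write barrier whenever the stored value is a smi. A small sketch of that test, assuming the usual one-bit pointer tagging where smis carry tag 0 and heap object pointers tag 1 (the constant name here is illustrative):

  #include <stdint.h>

  // Illustrative tag mask; smis have the low bit clear, heap object
  // pointers have it set.
  static const uintptr_t kSmiTagMaskSketch = 1;

  // Only heap pointers can create references the GC must track, so only
  // they need the RecordWrite barrier; smis are immediate values.
  bool NeedsWriteBarrier(uintptr_t tagged_value) {
    return (tagged_value & kSmiTagMaskSketch) != 0;
  }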
3360
3361 void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
3362 TypeofState typeof_state,
3363 JumpTarget* slow) {
3364 // Check that no extension objects have been created by calls to
3365 // eval from the current scope to the global scope.
3366 Register tmp = frame_->scratch0();
3367 Register tmp2 = frame_->scratch1();
3368 Register context = cp;
3369 Scope* s = scope();
3370 while (s != NULL) {
3371 if (s->num_heap_slots() > 0) {
3372 if (s->calls_eval()) {
3373 frame_->SpillAll();
3374 // Check that extension is NULL.
3375 __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
3376 __ tst(tmp2, tmp2);
3377 slow->Branch(ne);
3378 }
3379 // Load next context in chain.
3380 __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
3381 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
3382 context = tmp;
3383 }
3384 // If no outer scope calls eval, we do not need to check more
3385 // context extensions.
3386 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
3387 s = s->outer_scope();
3388 }
3389
3390 if (s->is_eval_scope()) {
3391 frame_->SpillAll();
3392 Label next, fast;
3393 __ Move(tmp, context);
3394 __ bind(&next);
3395 // Terminate at global context.
3396 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
3397 __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
3398 __ cmp(tmp2, ip);
3399 __ b(eq, &fast);
3400 // Check that extension is NULL.
3401 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
3402 __ tst(tmp2, tmp2);
3403 slow->Branch(ne);
3404 // Load next context in chain.
3405 __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
3406 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
3407 __ b(&next);
3408 __ bind(&fast);
3409 }
3410
3411 // Load the global object.
3412 LoadGlobal();
3413 // Set up the name register and call the load IC.
3414 frame_->CallLoadIC(slot->var()->name(),
3415 typeof_state == INSIDE_TYPEOF
3416 ? RelocInfo::CODE_TARGET
3417 : RelocInfo::CODE_TARGET_CONTEXT);
3418 }
3419
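The loop above walks the context chain and bails to the slow path as soon as any intervening context carries an extension object, since such an object could shadow the global binding. The same walk over a hypothetical context record, with names that are illustrative rather than V8's:

  #include <cstddef>

  // Hypothetical context record: an optional extension object (introduced by
  // a direct eval or a with statement) and a link to the enclosing context.
  struct ContextSketch {
    const void* extension;       // NULL when no extension object exists
    const ContextSketch* outer;  // NULL at the global end of the chain
  };

  // True when no context between here and the global context has an
  // extension object, so the global load may use the IC fast path.
  bool ChainIsExtensionFree(const ContextSketch* context) {
    for (; context != NULL; context = context->outer) {
      if (context->extension != NULL) return false;
    }
    return true;
  }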
3420
3421 void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
3422 TypeofState typeof_state,
3423 JumpTarget* slow,
3424 JumpTarget* done) {
3425 // Generate fast-case code for variables that might be shadowed by
3426 // eval-introduced variables. Eval is used a lot without
3427 // introducing variables. In those cases, we do not want to
3428 // perform a runtime call for all variables in the scope
3429 // containing the eval.
3430 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
3431 LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
3432 frame_->SpillAll();
3433 done->Jump();
3434
3435 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
3436 frame_->SpillAll();
3437 Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
3438 Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
3439 if (potential_slot != NULL) {
3440 // Generate fast case for locals that rewrite to slots.
3441 __ ldr(r0,
3442 ContextSlotOperandCheckExtensions(potential_slot,
3443 r1,
3444 r2,
3445 slow));
3446 if (potential_slot->var()->mode() == Variable::CONST) {
3447 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3448 __ cmp(r0, ip);
3449 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
3450 }
3451 done->Jump();
3452 } else if (rewrite != NULL) {
3453 // Generate fast case for argument loads.
3454 Property* property = rewrite->AsProperty();
3455 if (property != NULL) {
3456 VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
3457 Literal* key_literal = property->key()->AsLiteral();
3458 if (obj_proxy != NULL &&
3459 key_literal != NULL &&
3460 obj_proxy->IsArguments() &&
3461 key_literal->handle()->IsSmi()) {
3462 // Load arguments object if there are no eval-introduced
3463 // variables. Then load the argument from the arguments
3464 // object using keyed load.
3465 __ ldr(r0,
3466 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
3467 r1,
3468 r2,
3469 slow));
3470 frame_->EmitPush(r0);
3471 __ mov(r1, Operand(key_literal->handle()));
3472 frame_->EmitPush(r1);
3473 EmitKeyedLoad();
3474 done->Jump();
3475 }
3476 }
3477 }
3478 }
3479 }
3480
3481
3482 void CodeGenerator::VisitSlot(Slot* node) {
3483 #ifdef DEBUG
3484 int original_height = frame_->height();
3485 #endif
3486 Comment cmnt(masm_, "[ Slot");
3487 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
3488 ASSERT_EQ(original_height + 1, frame_->height());
3489 }
3490
3491
3492 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
3493 #ifdef DEBUG
3494 int original_height = frame_->height();
3495 #endif
3496 Comment cmnt(masm_, "[ VariableProxy");
3497
3498 Variable* var = node->var();
3499 Expression* expr = var->rewrite();
3500 if (expr != NULL) {
3501 Visit(expr);
3502 } else {
3503 ASSERT(var->is_global());
3504 Reference ref(this, node);
3505 ref.GetValue();
3506 }
3507 ASSERT_EQ(original_height + 1, frame_->height());
3508 }
3509
3510
3511 void CodeGenerator::VisitLiteral(Literal* node) {
3512 #ifdef DEBUG
3513 int original_height = frame_->height();
3514 #endif
3515 Comment cmnt(masm_, "[ Literal");
3516 Register reg = frame_->GetTOSRegister();
3517 bool is_smi = node->handle()->IsSmi();
3518 __ mov(reg, Operand(node->handle()));
3519 frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
3520 ASSERT_EQ(original_height + 1, frame_->height());
3521 }
3522
3523
3524 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
3525 #ifdef DEBUG
3526 int original_height = frame_->height();
3527 #endif
3528 Comment cmnt(masm_, "[ RegExp Literal");
3529
3530 Register tmp = VirtualFrame::scratch0();
3531 // Free up a TOS register that can be used to push the literal.
3532 Register literal = frame_->GetTOSRegister();
3533
3534 // Retrieve the literal array and check the allocated entry.
3535
3536 // Load the function of this activation.
3537 __ ldr(tmp, frame_->Function());
3538
3539 // Load the literals array of the function.
3540 __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
3541
3542 // Load the literal at the index saved in the AST node.
3543 int literal_offset =
3544 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
3545 __ ldr(literal, FieldMemOperand(tmp, literal_offset));
3546
3547 JumpTarget materialized;
3548 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3549 __ cmp(literal, ip);
3550 // This branch locks the virtual frame at the materialized label to match
3551 // the one we have here, where the literal register is not on the stack and
3552 // nothing is spilled.
3553 materialized.Branch(ne);
3554
3555 // If the entry is undefined we call the runtime system to compute
3556 // the literal.
3557 // literal array (0)
3558 frame_->EmitPush(tmp);
3559 // literal index (1)
3560 frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3561 // RegExp pattern (2)
3562 frame_->EmitPush(Operand(node->pattern()));
3563 // RegExp flags (3)
3564 frame_->EmitPush(Operand(node->flags()));
3565 frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
3566 __ Move(literal, r0);
3567
3568 materialized.Bind();
3569
3570 frame_->EmitPush(literal);
3571 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
3572 frame_->EmitPush(Operand(Smi::FromInt(size)));
3573 frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1);
3574 // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime.
3575 // r0 is newly allocated space.
3576
3577 // Reuse literal variable with (possibly) a new register, still holding
3578 // the materialized boilerplate.
3579 literal = frame_->PopToRegister(r0);
3580
3581 __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize);
3582
3583 // Push the clone.
3584 frame_->EmitPush(r0);
3585 ASSERT_EQ(original_height + 1, frame_->height());
3586 }
3587
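The sequence above checks the literal slot against 'undefined', calls the runtime to build the boilerplate only on the first evaluation, and then copies the boilerplate field by field into freshly allocated space. The same materialize-once-then-clone shape in a standalone sketch (the names and the memcpy-style copy are illustrative, not V8's heap layout):

  #include <cstring>
  #include <vector>

  // Illustrative boilerplate object; the real one is a JSRegExp on the heap.
  struct RegExpSketch { char fields[32]; };

  // Materializes the boilerplate for a literal slot at most once, then hands
  // out a shallow, field-wise copy for each evaluation of the literal.
  RegExpSketch CloneRegExpLiteral(std::vector<RegExpSketch*>& literals,
                                  std::size_t literal_index,
                                  RegExpSketch* (*materialize)()) {
    RegExpSketch*& slot = literals[literal_index];
    if (slot == NULL) slot = materialize();      // first evaluation only
    RegExpSketch clone;
    std::memcpy(&clone, slot, sizeof(clone));    // CopyFields-style copy
    return clone;
  }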
3588
3589 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
3590 #ifdef DEBUG
3591 int original_height = frame_->height();
3592 #endif
3593 Comment cmnt(masm_, "[ ObjectLiteral");
3594
3595 Register literal = frame_->GetTOSRegister();
3596 // Load the function of this activation.
3597 __ ldr(literal, frame_->Function());
3598 // Literal array.
3599 __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
3600 frame_->EmitPush(literal);
3601 // Literal index.
3602 frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3603 // Constant properties.
3604 frame_->EmitPush(Operand(node->constant_properties()));
3605 // Should the object literal have fast elements?
3606 frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
3607 if (node->depth() > 1) {
3608 frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
3609 } else {
3610 frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
3611 }
3612 frame_->EmitPush(r0); // save the result
3613
3614 // Mark all computed expressions that are bound to a key that
3615 // is shadowed by a later occurrence of the same key. For the
3616 // marked expressions, no store code is emitted.
3617 node->CalculateEmitStore();
3618
3619 for (int i = 0; i < node->properties()->length(); i++) {
3620 // At the start of each iteration, the top of stack contains
3621 // the newly created object literal.
3622 ObjectLiteral::Property* property = node->properties()->at(i);
3623 Literal* key = property->key();
3624 Expression* value = property->value();
3625 switch (property->kind()) {
3626 case ObjectLiteral::Property::CONSTANT:
3627 break;
3628 case ObjectLiteral::Property::MATERIALIZED_LITERAL:
3629 if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
3630 // else fall through
3631 case ObjectLiteral::Property::COMPUTED:
3632 if (key->handle()->IsSymbol()) {
3633 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
3634 Builtins::kStoreIC_Initialize));
3635 Load(value);
3636 if (property->emit_store()) {
3637 frame_->PopToR0();
3638 // Fetch the object literal.
3639 frame_->SpillAllButCopyTOSToR1();
3640 __ mov(r2, Operand(key->handle()));
3641 frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
3642 } else {
3643 frame_->Drop();
3644 }
3645 break;
3646 }
3647 // else fall through
3648 case ObjectLiteral::Property::PROTOTYPE: {
3649 frame_->Dup();
3650 Load(key);
3651 Load(value);
3652 if (property->emit_store()) {
3653 frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes
3654 frame_->CallRuntime(Runtime::kSetProperty, 4);
3655 } else {
3656 frame_->Drop(3);
3657 }
3658 break;
3659 }
3660 case ObjectLiteral::Property::SETTER: {
3661 frame_->Dup();
3662 Load(key);
3663 frame_->EmitPush(Operand(Smi::FromInt(1)));
3664 Load(value);
3665 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
3666 break;
3667 }
3668 case ObjectLiteral::Property::GETTER: {
3669 frame_->Dup();
3670 Load(key);
3671 frame_->EmitPush(Operand(Smi::FromInt(0)));
3672 Load(value);
3673 frame_->CallRuntime(Runtime::kDefineAccessor, 4);
3674 break;
3675 }
3676 }
3677 }
3678 ASSERT_EQ(original_height + 1, frame_->height());
3679 }
3680
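CalculateEmitStore, as the comment above says, marks properties whose key reappears later so that no store is emitted for them and only the last write to a duplicate key survives. A sketch of that marking pass over a hypothetical property list:

  #include <cstddef>
  #include <map>
  #include <string>
  #include <vector>

  // Illustrative property record: a key plus the flag the code generator
  // consults through property->emit_store().
  struct PropertySketch {
    std::string key;
    bool emit_store;
  };

  // For each key, only the last occurrence keeps emit_store == true; earlier
  // occurrences are shadowed and their stores are skipped.
  void CalculateEmitStoreSketch(std::vector<PropertySketch>& properties) {
    std::map<std::string, PropertySketch*> last_seen;
    for (std::size_t i = 0; i < properties.size(); i++) {
      PropertySketch* property = &properties[i];
      property->emit_store = true;
      std::map<std::string, PropertySketch*>::iterator previous =
          last_seen.find(property->key);
      if (previous != last_seen.end()) previous->second->emit_store = false;
      last_seen[property->key] = property;
    }
  }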
3681
3682 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
3683 #ifdef DEBUG
3684 int original_height = frame_->height();
3685 #endif
3686 Comment cmnt(masm_, "[ ArrayLiteral");
3687
3688 Register tos = frame_->GetTOSRegister();
3689 // Load the function of this activation.
3690 __ ldr(tos, frame_->Function());
3691 // Load the literals array of the function.
3692 __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
3693 frame_->EmitPush(tos);
3694 frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3695 frame_->EmitPush(Operand(node->constant_elements()));
3696 int length = node->values()->length();
3697 if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
3698 FastCloneShallowArrayStub stub(
3699 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
3700 frame_->CallStub(&stub, 3);
3701 __ IncrementCounter(masm_->isolate()->counters()->cow_arrays_created_stub(),
3702 1, r1, r2);
3703 } else if (node->depth() > 1) {
3704 frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
3705 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
3706 frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
3707 } else {
3708 FastCloneShallowArrayStub stub(
3709 FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
3710 frame_->CallStub(&stub, 3);
3711 }
3712 frame_->EmitPush(r0); // save the result
3713 // r0: created object literal
3714
3715 // Generate code to set the elements in the array that are not
3716 // literals.
3717 for (int i = 0; i < node->values()->length(); i++) {
3718 Expression* value = node->values()->at(i);
3719
3720 // If value is a literal the property value is already set in the
3721 // boilerplate object.
3722 if (value->AsLiteral() != NULL) continue;
3723 // If value is a materialized literal the property value is already set
3724 // in the boilerplate object if it is simple.
3725 if (CompileTimeValue::IsCompileTimeValue(value)) continue;
3726
3727 // The property must be set by generated code.
3728 Load(value);
3729 frame_->PopToR0();
3730 // Fetch the object literal.
3731 frame_->SpillAllButCopyTOSToR1();
3732
3733 // Get the elements array.
3734 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
3735
3736 // Write to the indexed properties array.
3737 int offset = i * kPointerSize + FixedArray::kHeaderSize;
3738 __ str(r0, FieldMemOperand(r1, offset));
3739
3740 // Update the write barrier for the array address.
3741 __ RecordWrite(r1, Operand(offset), r3, r2);
3742 }
3743 ASSERT_EQ(original_height + 1, frame_->height());
3744 }
3745
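The indexed store above computes each element's location as "FixedArray header + i * pointer size", and FieldMemOperand folds in the heap-object tag. A worked version of that address arithmetic with illustrative constants (V8's actual values depend on the target word size):

  #include <stdint.h>

  // Illustrative 32-bit layout: a two-word FixedArray header (map + length)
  // and a heap-object tag of 1 on every object pointer.
  static const int kPointerSizeSketch = 4;
  static const int kFixedArrayHeaderSizeSketch = 2 * kPointerSizeSketch;
  static const int kHeapObjectTagSketch = 1;

  // Untagged address of element i of a FixedArray whose tagged pointer is
  // 'elements', mirroring offset = i * kPointerSize + FixedArray::kHeaderSize.
  uintptr_t ElementAddressSketch(uintptr_t elements, int i) {
    int offset = i * kPointerSizeSketch + kFixedArrayHeaderSizeSketch;
    return elements - kHeapObjectTagSketch + offset;
  }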
3746
3747 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
3748 #ifdef DEBUG
3749 int original_height = frame_->height();
3750 #endif
3751 // Call runtime routine to allocate the catch extension object and
3752 // assign the exception value to the catch variable.
3753 Comment cmnt(masm_, "[ CatchExtensionObject");
3754 Load(node->key());
3755 Load(node->value());
3756 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
3757 frame_->EmitPush(r0);
3758 ASSERT_EQ(original_height + 1, frame_->height());
3759 }
3760
3761
3762 void CodeGenerator::EmitSlotAssignment(Assignment* node) {
3763 #ifdef DEBUG
3764 int original_height = frame_->height();
3765 #endif
3766 Comment cmnt(masm(), "[ Variable Assignment");
3767 Variable* var = node->target()->AsVariableProxy()->AsVariable();
3768 ASSERT(var != NULL);
3769 Slot* slot = var->AsSlot();
3770 ASSERT(slot != NULL);
3771
3772 // Evaluate the right-hand side.
3773 if (node->is_compound()) {
3774 // For a compound assignment the right-hand side is a binary operation
3775 // between the current property value and the actual right-hand side.
3776 LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
3777
3778 // Perform the binary operation.
3779 Literal* literal = node->value()->AsLiteral();
3780 bool overwrite_value = node->value()->ResultOverwriteAllowed();
3781 if (literal != NULL && literal->handle()->IsSmi()) {
3782 SmiOperation(node->binary_op(),
3783 literal->handle(),
3784 false,
3785 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3786 } else {
3787 GenerateInlineSmi inline_smi =
3788 loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3789 if (literal != NULL) {
3790 ASSERT(!literal->handle()->IsSmi());
3791 inline_smi = DONT_GENERATE_INLINE_SMI;
3792 }
3793 Load(node->value());
3794 GenericBinaryOperation(node->binary_op(),
3795 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3796 inline_smi);
3797 }
3798 } else {
3799 Load(node->value());
3800 }
3801
3802 // Perform the assignment.
3803 if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
3804 CodeForSourcePosition(node->position());
3805 StoreToSlot(slot,
3806 node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
3807 }
3808 ASSERT_EQ(original_height + 1, frame_->height());
3809 }
3810
3811
3812 void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
3813 #ifdef DEBUG
3814 int original_height = frame_->height();
3815 #endif
3816 Comment cmnt(masm(), "[ Named Property Assignment");
3817 Variable* var = node->target()->AsVariableProxy()->AsVariable();
3818 Property* prop = node->target()->AsProperty();
3819 ASSERT(var == NULL || (prop == NULL && var->is_global()));
3820
3821 // Initialize name and evaluate the receiver sub-expression if necessary. If
3822 // the receiver is trivial it is not placed on the stack at this point, but
3823 // loaded whenever actually needed.
3824 Handle<String> name;
3825 bool is_trivial_receiver = false;
3826 if (var != NULL) {
3827 name = var->name();
3828 } else {
3829 Literal* lit = prop->key()->AsLiteral();
3830 ASSERT_NOT_NULL(lit);
3831 name = Handle<String>::cast(lit->handle());
3832 // Do not materialize the receiver on the frame if it is trivial.
3833 is_trivial_receiver = prop->obj()->IsTrivial();
3834 if (!is_trivial_receiver) Load(prop->obj());
3835 }
3836
3837 // Change to slow case in the beginning of an initialization block to
3838 // avoid the quadratic behavior of repeatedly adding fast properties.
3839 if (node->starts_initialization_block()) {
3840 // An initialization block consists of assignments of the form expr.x = ...,
3841 // so this will never be an assignment to a variable and there must be a
3842 // receiver object.
3843 ASSERT_EQ(NULL, var);
3844 if (is_trivial_receiver) {
3845 Load(prop->obj());
3846 } else {
3847 frame_->Dup();
3848 }
3849 frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3850 }
3851
3852 // Change to fast case at the end of an initialization block. To prepare for
3853 // that add an extra copy of the receiver to the frame, so that it can be
3854 // converted back to fast case after the assignment.
3855 if (node->ends_initialization_block() && !is_trivial_receiver) {
3856 frame_->Dup();
3857 }
3858
3859 // Stack layout:
3860 // [tos] : receiver (only materialized if non-trivial)
3861 // [tos+1] : receiver if at the end of an initialization block
3862
3863 // Evaluate the right-hand side.
3864 if (node->is_compound()) {
3865 // For a compound assignment the right-hand side is a binary operation
3866 // between the current property value and the actual right-hand side.
3867 if (is_trivial_receiver) {
3868 Load(prop->obj());
3869 } else if (var != NULL) {
3870 LoadGlobal();
3871 } else {
3872 frame_->Dup();
3873 }
3874 EmitNamedLoad(name, var != NULL);
3875
3876 // Perform the binary operation.
3877 Literal* literal = node->value()->AsLiteral();
3878 bool overwrite_value = node->value()->ResultOverwriteAllowed();
3879 if (literal != NULL && literal->handle()->IsSmi()) {
3880 SmiOperation(node->binary_op(),
3881 literal->handle(),
3882 false,
3883 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3884 } else {
3885 GenerateInlineSmi inline_smi =
3886 loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3887 if (literal != NULL) {
3888 ASSERT(!literal->handle()->IsSmi());
3889 inline_smi = DONT_GENERATE_INLINE_SMI;
3890 }
3891 Load(node->value());
3892 GenericBinaryOperation(node->binary_op(),
3893 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3894 inline_smi);
3895 }
3896 } else {
3897 // For non-compound assignment just load the right-hand side.
3898 Load(node->value());
3899 }
3900
3901 // Stack layout:
3902 // [tos] : value
3903 // [tos+1] : receiver (only materialized if non-trivial)
3904 // [tos+2] : receiver if at the end of an initialization block
3905
3906 // Perform the assignment. It is safe to ignore constants here.
3907 ASSERT(var == NULL || var->mode() != Variable::CONST);
3908 ASSERT_NE(Token::INIT_CONST, node->op());
3909 if (is_trivial_receiver) {
3910 // Load the receiver and swap with the value.
3911 Load(prop->obj());
3912 Register t0 = frame_->PopToRegister();
3913 Register t1 = frame_->PopToRegister(t0);
3914 frame_->EmitPush(t0);
3915 frame_->EmitPush(t1);
3916 }
3917 CodeForSourcePosition(node->position());
3918 bool is_contextual = (var != NULL);
3919 EmitNamedStore(name, is_contextual);
3920 frame_->EmitPush(r0);
3921
3922 // Change to fast case at the end of an initialization block.
3923 if (node->ends_initialization_block()) {
3924 ASSERT_EQ(NULL, var);
3925 // The argument to the runtime call is the receiver.
3926 if (is_trivial_receiver) {
3927 Load(prop->obj());
3928 } else {
3929 // A copy of the receiver is below the value of the assignment. Swap
3930 // the receiver and the value of the assignment expression.
3931 Register t0 = frame_->PopToRegister();
3932 Register t1 = frame_->PopToRegister(t0);
3933 frame_->EmitPush(t0);
3934 frame_->EmitPush(t1);
3935 }
3936 frame_->CallRuntime(Runtime::kToFastProperties, 1);
3937 }
3938
3939 // Stack layout:
3940 // [tos] : result
3941
3942 ASSERT_EQ(original_height + 1, frame_->height());
3943 }
3944
3945
3946 void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
3947 #ifdef DEBUG
3948 int original_height = frame_->height();
3949 #endif
3950 Comment cmnt(masm_, "[ Keyed Property Assignment");
3951 Property* prop = node->target()->AsProperty();
3952 ASSERT_NOT_NULL(prop);
3953
3954 // Evaluate the receiver subexpression.
3955 Load(prop->obj());
3956
3957 WriteBarrierCharacter wb_info;
3958
3959 // Change to slow case in the beginning of an initialization block to
3960 // avoid the quadratic behavior of repeatedly adding fast properties.
3961 if (node->starts_initialization_block()) {
3962 frame_->Dup();
3963 frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3964 }
3965
3966 // Change to fast case at the end of an initialization block. To prepare for
3967 // that add an extra copy of the receiver to the frame, so that it can be
3968 // converted back to fast case after the assignment.
3969 if (node->ends_initialization_block()) {
3970 frame_->Dup();
3971 }
3972
3973 // Evaluate the key subexpression.
3974 Load(prop->key());
3975
3976 // Stack layout:
3977 // [tos] : key
3978 // [tos+1] : receiver
3979 // [tos+2] : receiver if at the end of an initialization block
3980 //
3981 // Evaluate the right-hand side.
3982 if (node->is_compound()) {
3983 // For a compound assignment the right-hand side is a binary operation
3984 // between the current property value and the actual right-hand side.
3985 // Duplicate receiver and key for loading the current property value.
3986 frame_->Dup2();
3987 EmitKeyedLoad();
3988 frame_->EmitPush(r0);
3989
3990 // Perform the binary operation.
3991 Literal* literal = node->value()->AsLiteral();
3992 bool overwrite_value = node->value()->ResultOverwriteAllowed();
3993 if (literal != NULL && literal->handle()->IsSmi()) {
3994 SmiOperation(node->binary_op(),
3995 literal->handle(),
3996 false,
3997 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3998 } else {
3999 GenerateInlineSmi inline_smi =
4000 loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
4001 if (literal != NULL) {
4002 ASSERT(!literal->handle()->IsSmi());
4003 inline_smi = DONT_GENERATE_INLINE_SMI;
4004 }
4005 Load(node->value());
4006 GenericBinaryOperation(node->binary_op(),
4007 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
4008 inline_smi);
4009 }
4010 wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
4011 } else {
4012 // For non-compound assignment just load the right-hand side.
4013 Load(node->value());
4014 wb_info = node->value()->AsLiteral() != NULL ?
4015 NEVER_NEWSPACE :
4016 (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
4017 }
4018
4019 // Stack layout:
4020 // [tos] : value
4021 // [tos+1] : key
4022 // [tos+2] : receiver
4023 // [tos+3] : receiver if at the end of an initialization block
4024
4025 // Perform the assignment. It is safe to ignore constants here.
4026 ASSERT(node->op() != Token::INIT_CONST);
4027 CodeForSourcePosition(node->position());
4028 EmitKeyedStore(prop->key()->type(), wb_info);
4029 frame_->EmitPush(r0);
4030
4031 // Stack layout:
4032 // [tos] : result
4033 // [tos+1] : receiver if at the end of an initialization block
4034
4035 // Change to fast case at the end of an initialization block.
4036 if (node->ends_initialization_block()) {
4037 // The argument to the runtime call is the extra copy of the receiver,
4038 // which is below the value of the assignment. Swap the receiver and
4039 // the value of the assignment expression.
4040 Register t0 = frame_->PopToRegister();
4041 Register t1 = frame_->PopToRegister(t0);
4042 frame_->EmitPush(t1);
4043 frame_->EmitPush(t0);
4044 frame_->CallRuntime(Runtime::kToFastProperties, 1);
4045 }
4046
4047 // Stack layout:
4048 // [tos] : result
4049
4050 ASSERT_EQ(original_height + 1, frame_->height());
4051 }
4052
4053
4054 void CodeGenerator::VisitAssignment(Assignment* node) {
4055 VirtualFrame::RegisterAllocationScope scope(this);
4056 #ifdef DEBUG
4057 int original_height = frame_->height();
4058 #endif
4059 Comment cmnt(masm_, "[ Assignment");
4060
4061 Variable* var = node->target()->AsVariableProxy()->AsVariable();
4062 Property* prop = node->target()->AsProperty();
4063
4064 if (var != NULL && !var->is_global()) {
4065 EmitSlotAssignment(node);
4066
4067 } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
4068 (var != NULL && var->is_global())) {
4069 // Properties whose keys are property names and global variables are
4070 // treated as named property references. We do not need to consider
4071 // global 'this' because it is not a valid left-hand side.
4072 EmitNamedPropertyAssignment(node);
4073
4074 } else if (prop != NULL) {
4075 // Other properties (including rewritten parameters for a function that
4076 // uses arguments) are keyed property assignments.
4077 EmitKeyedPropertyAssignment(node);
4078
4079 } else {
4080 // Invalid left-hand side.
4081 Load(node->target());
4082 frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
4083 // The runtime call doesn't actually return but the code generator will
4084 // still generate code and expects a certain frame height.
4085 frame_->EmitPush(r0);
4086 }
4087 ASSERT_EQ(original_height + 1, frame_->height());
4088 }
4089
4090
4091 void CodeGenerator::VisitThrow(Throw* node) {
4092 #ifdef DEBUG
4093 int original_height = frame_->height();
4094 #endif
4095 Comment cmnt(masm_, "[ Throw");
4096
4097 Load(node->exception());
4098 CodeForSourcePosition(node->position());
4099 frame_->CallRuntime(Runtime::kThrow, 1);
4100 frame_->EmitPush(r0);
4101 ASSERT_EQ(original_height + 1, frame_->height());
4102 }
4103
4104
4105 void CodeGenerator::VisitProperty(Property* node) {
4106 #ifdef DEBUG
4107 int original_height = frame_->height();
4108 #endif
4109 Comment cmnt(masm_, "[ Property");
4110
4111 { Reference property(this, node);
4112 property.GetValue();
4113 }
4114 ASSERT_EQ(original_height + 1, frame_->height());
4115 }
4116
4117
4118 void CodeGenerator::VisitCall(Call* node) {
4119 #ifdef DEBUG
4120 int original_height = frame_->height();
4121 #endif
4122 Comment cmnt(masm_, "[ Call");
4123
4124 Expression* function = node->expression();
4125 ZoneList<Expression*>* args = node->arguments();
4126
4127 // Standard function call.
4128 // Check if the function is a variable or a property.
4129 Variable* var = function->AsVariableProxy()->AsVariable();
4130 Property* property = function->AsProperty();
4131
4132 // ------------------------------------------------------------------------
4133 // Fast-case: Use inline caching.
4134 // ---
4135 // According to ECMA-262, section 11.2.3, page 44, the function to call
4136 // must be resolved after the arguments have been evaluated. The IC code
4137 // automatically handles this by loading the arguments before the function
4138 // is resolved in cache misses (this also holds for megamorphic calls).
4139 // ------------------------------------------------------------------------
4140
4141 if (var != NULL && var->is_possibly_eval()) {
4142 // ----------------------------------
4143 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
4144 // ----------------------------------
4145
4146 // In a call to eval, we first call %ResolvePossiblyDirectEval to
4147 // resolve the function we need to call and the receiver of the
4148 // call. Then we call the resolved function using the given
4149 // arguments.
4150
4151 // Prepare stack for call to resolved function.
4152 Load(function);
4153
4154 // Allocate a frame slot for the receiver.
4155 frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
4156
4157 // Load the arguments.
4158 int arg_count = args->length();
4159 for (int i = 0; i < arg_count; i++) {
4160 Load(args->at(i));
4161 }
4162
4163 VirtualFrame::SpilledScope spilled_scope(frame_);
4164
4165 // If we know that eval can only be shadowed by eval-introduced
4166 // variables we attempt to load the global eval function directly
4167 // in generated code. If we succeed, there is no need to perform a
4168 // context lookup in the runtime system.
4169 JumpTarget done;
4170 if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
4171 ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
4172 JumpTarget slow;
4173 // Prepare the stack for the call to
4174 // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
4175 // function, the first argument to the eval call and the
4176 // receiver.
4177 LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
4178 NOT_INSIDE_TYPEOF,
4179 &slow);
4180 frame_->EmitPush(r0);
4181 if (arg_count > 0) {
4182 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
4183 frame_->EmitPush(r1);
4184 } else {
4185 frame_->EmitPush(r2);
4186 }
4187 __ ldr(r1, frame_->Receiver());
4188 frame_->EmitPush(r1);
4189
4190 // Push the strict mode flag.
4191 frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
4192
4193 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
4194
4195 done.Jump();
4196 slow.Bind();
4197 }
4198
4199 // Prepare the stack for the call to ResolvePossiblyDirectEval by
4200 // pushing the loaded function, the first argument to the eval
4201 // call and the receiver.
4202 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
4203 frame_->EmitPush(r1);
4204 if (arg_count > 0) {
4205 __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
4206 frame_->EmitPush(r1);
4207 } else {
4208 frame_->EmitPush(r2);
4209 }
4210 __ ldr(r1, frame_->Receiver());
4211 frame_->EmitPush(r1);
4212
4213 // Push the strict mode flag.
4214 frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
4215
4216 // Resolve the call.
4217 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
4218
4219 // If we generated fast-case code bind the jump-target where fast
4220 // and slow case merge.
4221 if (done.is_linked()) done.Bind();
4222
4223 // Touch up stack with the right values for the function and the receiver.
4224 __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
4225 __ str(r1, MemOperand(sp, arg_count * kPointerSize));
4226
4227 // Call the function.
4228 CodeForSourcePosition(node->position());
4229
4230 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4231 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
4232 frame_->CallStub(&call_function, arg_count + 1);
4233
4234 __ ldr(cp, frame_->Context());
4235 // Remove the function from the stack.
4236 frame_->Drop();
4237 frame_->EmitPush(r0);
4238
4239 } else if (var != NULL && !var->is_this() && var->is_global()) {
4240 // ----------------------------------
4241 // JavaScript example: 'foo(1, 2, 3)' // foo is global
4242 // ----------------------------------
4243 // Pass the global object as the receiver and let the IC stub
4244 // patch the stack to use the global proxy as 'this' in the
4245 // invoked function.
4246 LoadGlobal();
4247
4248 // Load the arguments.
4249 int arg_count = args->length();
4250 for (int i = 0; i < arg_count; i++) {
4251 Load(args->at(i));
4252 }
4253
4254 VirtualFrame::SpilledScope spilled_scope(frame_);
4255 // Set up the name register and call the IC initialization code.
4256 __ mov(r2, Operand(var->name()));
4257 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4258 Handle<Code> stub =
4259 ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
4260 CodeForSourcePosition(node->position());
4261 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
4262 arg_count + 1);
4263 __ ldr(cp, frame_->Context());
4264 frame_->EmitPush(r0);
4265
4266 } else if (var != NULL && var->AsSlot() != NULL &&
4267 var->AsSlot()->type() == Slot::LOOKUP) {
4268 // ----------------------------------
4269 // JavaScript examples:
4270 //
4271 // with (obj) foo(1, 2, 3) // foo may be in obj.
4272 //
4273 // function f() {};
4274 // function g() {
4275 // eval(...);
4276 // f(); // f could be in extension object.
4277 // }
4278 // ----------------------------------
4279
4280 JumpTarget slow, done;
4281
4282 // Generate fast case for loading functions from slots that
4283 // correspond to local/global variables or arguments unless they
4284 // are shadowed by eval-introduced bindings.
4285 EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
4286 NOT_INSIDE_TYPEOF,
4287 &slow,
4288 &done);
4289
4290 slow.Bind();
4291 // Load the function
4292 frame_->EmitPush(cp);
4293 frame_->EmitPush(Operand(var->name()));
4294 frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4295 // r0: slot value; r1: receiver
4296
4297 // Load the receiver.
4298 frame_->EmitPush(r0); // function
4299 frame_->EmitPush(r1); // receiver
4300
4301 // If fast case code has been generated, emit code to push the
4302 // function and receiver and have the slow path jump around this
4303 // code.
4304 if (done.is_linked()) {
4305 JumpTarget call;
4306 call.Jump();
4307 done.Bind();
4308 frame_->EmitPush(r0); // function
4309 LoadGlobalReceiver(VirtualFrame::scratch0()); // receiver
4310 call.Bind();
4311 }
4312
4313 // Call the function. At this point, everything is spilled but the
4314 // function and receiver are in r0 and r1.
4315 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4316 frame_->EmitPush(r0);
4317
4318 } else if (property != NULL) {
4319 // Check if the key is a literal string.
4320 Literal* literal = property->key()->AsLiteral();
4321
4322 if (literal != NULL && literal->handle()->IsSymbol()) {
4323 // ------------------------------------------------------------------
4324 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
4325 // ------------------------------------------------------------------
4326
4327 Handle<String> name = Handle<String>::cast(literal->handle());
4328
4329 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
4330 name->IsEqualTo(CStrVector("apply")) &&
4331 args->length() == 2 &&
4332 args->at(1)->AsVariableProxy() != NULL &&
4333 args->at(1)->AsVariableProxy()->IsArguments()) {
4334 // Use the optimized Function.prototype.apply that avoids
4335 // allocating lazily allocated arguments objects.
4336 CallApplyLazy(property->obj(),
4337 args->at(0),
4338 args->at(1)->AsVariableProxy(),
4339 node->position());
4340
4341 } else {
4342 Load(property->obj()); // Receiver.
4343 // Load the arguments.
4344 int arg_count = args->length();
4345 for (int i = 0; i < arg_count; i++) {
4346 Load(args->at(i));
4347 }
4348
4349 VirtualFrame::SpilledScope spilled_scope(frame_);
4350 // Set the name register and call the IC initialization code.
4351 __ mov(r2, Operand(name));
4352 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4353 Handle<Code> stub =
4354 ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
4355 CodeForSourcePosition(node->position());
4356 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4357 __ ldr(cp, frame_->Context());
4358 frame_->EmitPush(r0);
4359 }
4360
4361 } else {
4362 // -------------------------------------------
4363 // JavaScript example: 'array[index](1, 2, 3)'
4364 // -------------------------------------------
4365
4366 // Load the receiver and name of the function.
4367 Load(property->obj());
4368 Load(property->key());
4369
4370 if (property->is_synthetic()) {
4371 EmitKeyedLoad();
4372 // Put the function below the receiver.
4373 // Use the global receiver.
4374 frame_->EmitPush(r0); // Function.
4375 LoadGlobalReceiver(VirtualFrame::scratch0());
4376 // Call the function.
4377 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
4378 frame_->EmitPush(r0);
4379 } else {
4380 // Swap the name of the function and the receiver on the stack to follow
4381 // the calling convention for call ICs.
4382 Register key = frame_->PopToRegister();
4383 Register receiver = frame_->PopToRegister(key);
4384 frame_->EmitPush(key);
4385 frame_->EmitPush(receiver);
4386
4387 // Load the arguments.
4388 int arg_count = args->length();
4389 for (int i = 0; i < arg_count; i++) {
4390 Load(args->at(i));
4391 }
4392
4393 // Load the key into r2 and call the IC initialization code.
4394 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4395 Handle<Code> stub =
4396 ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count,
4397 in_loop);
4398 CodeForSourcePosition(node->position());
4399 frame_->SpillAll();
4400 __ ldr(r2, frame_->ElementAt(arg_count + 1));
4401 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4402 frame_->Drop(); // Drop the key still on the stack.
4403 __ ldr(cp, frame_->Context());
4404 frame_->EmitPush(r0);
4405 }
4406 }
4407
4408 } else {
4409 // ----------------------------------
4410 // JavaScript example: 'foo(1, 2, 3)' // foo is not global
4411 // ----------------------------------
4412
4413 // Load the function.
4414 Load(function);
4415
4416 // Pass the global proxy as the receiver.
4417 LoadGlobalReceiver(VirtualFrame::scratch0());
4418
4419 // Call the function.
4420 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4421 frame_->EmitPush(r0);
4422 }
4423 ASSERT_EQ(original_height + 1, frame_->height());
4424 }
4425
4426
4427 void CodeGenerator::VisitCallNew(CallNew* node) {
4428 #ifdef DEBUG
4429 int original_height = frame_->height();
4430 #endif
4431 Comment cmnt(masm_, "[ CallNew");
4432
4433 // According to ECMA-262, section 11.2.2, page 44, the function
4434 // expression in new calls must be evaluated before the
4435 // arguments. This is different from ordinary calls, where the
4436 // actual function to call is resolved after the arguments have been
4437 // evaluated.
4438
4439 // Push constructor on the stack. If it's not a function it's used as
4440 // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
4441 // ignored.
4442 Load(node->expression());
4443
4444 // Push the arguments ("left-to-right") on the stack.
4445 ZoneList<Expression*>* args = node->arguments();
4446 int arg_count = args->length();
4447 for (int i = 0; i < arg_count; i++) {
4448 Load(args->at(i));
4449 }
4450
4451 // Spill everything from here to simplify the implementation.
4452 VirtualFrame::SpilledScope spilled_scope(frame_);
4453
4454 // Load the argument count into r0 and the function into r1 as per
4455 // calling convention.
4456 __ mov(r0, Operand(arg_count));
4457 __ ldr(r1, frame_->ElementAt(arg_count));
4458
4459 // Call the construct call builtin that handles allocation and
4460 // constructor invocation.
4461 CodeForSourcePosition(node->position());
4462 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
4463 Builtins::kJSConstructCall));
4464 frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
4465 frame_->EmitPush(r0);
4466
4467 ASSERT_EQ(original_height + 1, frame_->height());
4468 }
4469
4470
4471 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4472 Register scratch = VirtualFrame::scratch0();
4473 JumpTarget null, function, leave, non_function_constructor;
4474
4475 // Load the object into register.
4476 ASSERT(args->length() == 1);
4477 Load(args->at(0));
4478 Register tos = frame_->PopToRegister();
4479
4480 // If the object is a smi, we return null.
4481 __ tst(tos, Operand(kSmiTagMask));
4482 null.Branch(eq);
4483
4484 // Check that the object is a JS object but take special care of JS
4485 // functions to make sure they have 'Function' as their class.
4486 __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
4487 null.Branch(lt);
4488
4489 // As long as JS_FUNCTION_TYPE is the last instance type and it is
4490 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4491 // LAST_JS_OBJECT_TYPE.
4492 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4493 STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4494 __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
4495 function.Branch(eq);
4496
4497 // Check if the constructor in the map is a function.
4498 __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
4499 __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
4500 non_function_constructor.Branch(ne);
4501
4502 // The tos register now contains the constructor function. Grab the
4503 // instance class name from there.
4504 __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
4505 __ ldr(tos,
4506 FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
4507 frame_->EmitPush(tos);
4508 leave.Jump();
4509
4510 // Functions have class 'Function'.
4511 function.Bind();
4512 __ mov(tos, Operand(FACTORY->function_class_symbol()));
4513 frame_->EmitPush(tos);
4514 leave.Jump();
4515
4516 // Objects with a non-function constructor have class 'Object'.
4517 non_function_constructor.Bind();
4518 __ mov(tos, Operand(FACTORY->Object_symbol()));
4519 frame_->EmitPush(tos);
4520 leave.Jump();
4521
4522 // Non-JS objects have class null.
4523 null.Bind();
4524 __ LoadRoot(tos, Heap::kNullValueRootIndex);
4525 frame_->EmitPush(tos);
4526
4527 // All done.
4528 leave.Bind();
4529 }
4530
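The jump-target network above is a small decision tree: smis and non-JS objects report null, functions report "Function", objects whose map's constructor is not a function report "Object", and everything else reports the constructor's instance class name. The same tree in plain form, over a hypothetical descriptor of the checks performed:

  #include <string>

  // Illustrative summary of the map and type checks performed above.
  struct ClassOfInputSketch {
    bool is_smi;
    bool is_js_object;
    bool is_function;
    bool constructor_is_function;
    std::string instance_class_name;
  };

  std::string ClassOfSketch(const ClassOfInputSketch& value) {
    if (value.is_smi || !value.is_js_object) return "null";
    if (value.is_function) return "Function";
    if (!value.constructor_is_function) return "Object";
    return value.instance_class_name;
  }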
4531
4532 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4533 Register scratch = VirtualFrame::scratch0();
4534 JumpTarget leave;
4535
4536 ASSERT(args->length() == 1);
4537 Load(args->at(0));
4538 Register tos = frame_->PopToRegister(); // tos contains object.
4539 // if (object->IsSmi()) return the object.
4540 __ tst(tos, Operand(kSmiTagMask));
4541 leave.Branch(eq);
4542 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4543 __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
4544 leave.Branch(ne);
4545 // Load the value.
4546 __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
4547 leave.Bind();
4548 frame_->EmitPush(tos);
4549 }
4550
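GenerateValueOf leaves primitives and ordinary heap objects untouched and only unwraps JSValue wrappers. Written as a plain function over a hypothetical tagged value:

  // Illustrative tagged value: either a primitive/ordinary object or a
  // JSValue-style wrapper around another value.
  struct TaggedSketch {
    bool is_smi;
    bool is_js_value_wrapper;
    const TaggedSketch* boxed_value;  // meaningful only for wrappers
  };

  // Smis and non-wrapper heap objects are returned unchanged; a wrapper
  // yields the value it boxes, exactly as the inline checks above do.
  const TaggedSketch* ValueOfSketch(const TaggedSketch* object) {
    if (object->is_smi || !object->is_js_value_wrapper) return object;
    return object->boxed_value;
  }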
4551
4552 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4553 Register scratch1 = VirtualFrame::scratch0();
4554 Register scratch2 = VirtualFrame::scratch1();
4555 JumpTarget leave;
4556
4557 ASSERT(args->length() == 2);
4558 Load(args->at(0)); // Load the object.
4559 Load(args->at(1)); // Load the value.
4560 Register value = frame_->PopToRegister();
4561 Register object = frame_->PopToRegister(value);
4562 // if (object->IsSmi()) return object.
4563 __ tst(object, Operand(kSmiTagMask));
4564 leave.Branch(eq);
4565 // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4566 __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
4567 leave.Branch(ne);
4568 // Store the value.
4569 __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
4570 // Update the write barrier.
4571 __ RecordWrite(object,
4572 Operand(JSValue::kValueOffset - kHeapObjectTag),
4573 scratch1,
4574 scratch2);
4575 // Leave.
4576 leave.Bind();
4577 frame_->EmitPush(value);
4578 }
4579
4580
4581 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
4582 ASSERT(args->length() == 1);
4583 Load(args->at(0));
4584 Register reg = frame_->PopToRegister();
4585 __ tst(reg, Operand(kSmiTagMask));
4586 cc_reg_ = eq;
4587 }
4588
4589
4590 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
4591 // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
4592 ASSERT_EQ(args->length(), 3);
4593 #ifdef ENABLE_LOGGING_AND_PROFILING
4594 if (ShouldGenerateLog(args->at(0))) {
4595 Load(args->at(1));
4596 Load(args->at(2));
4597 frame_->CallRuntime(Runtime::kLog, 2);
4598 }
4599 #endif
4600 frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
4601 }
4602
4603
4604 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
4605 ASSERT(args->length() == 1);
4606 Load(args->at(0));
4607 Register reg = frame_->PopToRegister();
4608 __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
4609 cc_reg_ = eq;
4610 }
4611
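GenerateIsNonNegativeSmi folds two conditions into one masked test: with a one-bit smi tag of 0, a 32-bit word is a non-negative smi exactly when its tag bit and its sign bit are both clear. The same test written out (constant names are illustrative):

  #include <stdint.h>

  bool IsNonNegativeSmiSketch(uint32_t tagged_value) {
    const uint32_t kSmiTagMaskSketch = 1u;        // low bit: 0 means smi
    const uint32_t kSignBitSketch = 0x80000000u;  // high bit: 1 means negative
    return (tagged_value & (kSmiTagMaskSketch | kSignBitSketch)) == 0;
  }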
4612
4613 // Generates the Math.pow method.
4614 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
4615 ASSERT(args->length() == 2);
4616 Load(args->at(0));
4617 Load(args->at(1));
4618
4619 if (!CpuFeatures::IsSupported(VFP3)) {
4620 frame_->CallRuntime(Runtime::kMath_pow, 2);
4621 frame_->EmitPush(r0);
4622 } else {
4623 CpuFeatures::Scope scope(VFP3);
4624 JumpTarget runtime, done;
4625 Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
4626
4627 Register scratch1 = VirtualFrame::scratch0();
4628 Register scratch2 = VirtualFrame::scratch1();
4629
4630 // Get base and exponent to registers.
4631 Register exponent = frame_->PopToRegister();
4632 Register base = frame_->PopToRegister(exponent);
4633 Register heap_number_map = no_reg;
4634
4635 // Set the frame for the runtime jump target. The code below jumps to the
4636 // jump target label so the frame needs to be established before that.
4637 ASSERT(runtime.entry_frame() == NULL);
4638 runtime.set_entry_frame(frame_);
4639
4640 __ JumpIfNotSmi(exponent, &exponent_nonsmi);
4641 __ JumpIfNotSmi(base, &base_nonsmi);
4642
4643 heap_number_map = r6;
4644 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4645
4646 // Exponent is a smi and base is a smi. Get the smi value into vfp register
4647 // d1.
4648 __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
4649 __ b(&powi);
4650
4651 __ bind(&base_nonsmi);
4652 // Exponent is a smi and base is a non-smi. Get the double value from the
4653 // base into vfp register d1.
4654 __ ObjectToDoubleVFPRegister(base, d1,
4655 scratch1, scratch2, heap_number_map, s0,
4656 runtime.entry_label());
4657
4658 __ bind(&powi);
4659
4660 // Load 1.0 into d0.
4661 __ vmov(d0, 1.0);
4662
4663 // Get the absolute untagged value of the exponent and use that for the
4664 // calculation.
4665 __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
4666 // Negate if negative.
4667 __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
4668 __ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative.
4669
4670 // Run through all the bits in the exponent. The result is accumulated in d0
4671 // and d1 holds base^(2^i) for the bit currently being examined.
4672 Label more_bits;
4673 __ bind(&more_bits);
4674 __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
4675 __ vmul(d0, d0, d1, cs); // Multiply with base^(bit^2) if bit is set.
4676 __ vmul(d1, d1, d1, ne); // Don't bother calculating next d1 if done.
4677 __ b(ne, &more_bits);
4678
4679 // If exponent is positive we are done.
4680 __ cmp(exponent, Operand(0, RelocInfo::NONE));
4681 __ b(ge, &allocate_return);
4682
4683 // If exponent is negative result is 1/result (d2 already holds 1.0 in that
4684 // case). However if d0 has reached infinity this will not provide the
4685 // correct result, so call runtime if that is the case.
4686 __ mov(scratch2, Operand(0x7FF00000));
4687 __ mov(scratch1, Operand(0, RelocInfo::NONE));
4688 __ vmov(d1, scratch1, scratch2); // Load infinity into d1.
4689 __ VFPCompareAndSetFlags(d0, d1);
4690 runtime.Branch(eq); // d0 reached infinity.
4691 __ vdiv(d0, d2, d0);
4692 __ b(&allocate_return);
4693
4694 __ bind(&exponent_nonsmi);
4695 // Special handling of raising to the power of -0.5 and 0.5. First check
4696 // that the value is a heap number and that its lower mantissa bits (which
4697 // are zero for both values) are indeed zero.
4698 heap_number_map = r6;
4699 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4700 __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
4701 __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
4702 __ cmp(scratch1, heap_number_map);
4703 runtime.Branch(ne);
4704 __ tst(scratch2, scratch2);
4705 runtime.Branch(ne);
4706
4707 // Load the higher bits (which contains the floating point exponent).
4708 __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
4709
4710 // Compare exponent with -0.5.
4711 __ cmp(scratch1, Operand(0xbfe00000));
4712 __ b(ne, &not_minus_half);
4713
4714 // Get the double value from the base into vfp register d0.
4715 __ ObjectToDoubleVFPRegister(base, d0,
4716 scratch1, scratch2, heap_number_map, s0,
4717 runtime.entry_label(),
4718 AVOID_NANS_AND_INFINITIES);
4719
4720 // Convert -0 into +0 by adding +0.
4721 __ vmov(d2, 0.0);
4722 __ vadd(d0, d2, d0);
4723 // Load 1.0 into d2.
4724 __ vmov(d2, 1.0);
4725
4726 // Calculate the reciprocal of the square root.
4727 __ vsqrt(d0, d0);
4728 __ vdiv(d0, d2, d0);
4729
4730 __ b(&allocate_return);
4731
4732 __ bind(&not_minus_half);
4733 // Compare exponent with 0.5.
4734 __ cmp(scratch1, Operand(0x3fe00000));
4735 runtime.Branch(ne);
4736
4737 // Get the double value from the base into vfp register d0.
4738 __ ObjectToDoubleVFPRegister(base, d0,
4739 scratch1, scratch2, heap_number_map, s0,
4740 runtime.entry_label(),
4741 AVOID_NANS_AND_INFINITIES);
4742 // Convert -0 into +0 by adding +0.
4743 __ vmov(d2, 0.0);
4744 __ vadd(d0, d2, d0);
4745 __ vsqrt(d0, d0);
4746
4747 __ bind(&allocate_return);
4748 Register scratch3 = r5;
4749 __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
4750 heap_number_map, runtime.entry_label());
4751 __ mov(base, scratch3);
4752 done.Jump();
4753
4754 runtime.Bind();
4755
4756 // Push back the arguments again for the runtime call.
4757 frame_->EmitPush(base);
4758 frame_->EmitPush(exponent);
4759 frame_->CallRuntime(Runtime::kMath_pow, 2);
4760 __ Move(base, r0);
4761
4762 done.Bind();
4763 frame_->EmitPush(base);
4764 }
4765 }
4766
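The bit-scanning loop in the smi-exponent path is square-and-multiply exponentiation: one VFP register accumulates the result while the other holds the base raised to successive powers of two. A standalone sketch over plain doubles (no VFP registers, no heap numbers), with the final division handling negative exponents just like the vdiv above:

  #include <stdint.h>

  double PowiSketch(double base, int32_t exponent) {
    // Smi exponents fit in 31 bits, so this negation cannot overflow here.
    uint32_t bits = static_cast<uint32_t>(exponent < 0 ? -exponent : exponent);
    double result = 1.0;   // plays the role of d0
    double power = base;   // plays the role of d1: base^(2^i)
    while (bits != 0) {
      if (bits & 1u) result *= power;  // multiply in set bits of the exponent
      power *= power;                  // square for the next bit
      bits >>= 1;
    }
    return exponent < 0 ? 1.0 / result : result;
  }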
4767
4768 // Generates the Math.sqrt method.
4769 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
4770 ASSERT(args->length() == 1);
4771 Load(args->at(0));
4772
4773 if (!CpuFeatures::IsSupported(VFP3)) {
4774 frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4775 frame_->EmitPush(r0);
4776 } else {
4777 CpuFeatures::Scope scope(VFP3);
4778 JumpTarget runtime, done;
4779
4780 Register scratch1 = VirtualFrame::scratch0();
4781 Register scratch2 = VirtualFrame::scratch1();
4782
4783 // Get the value from the frame.
4784 Register tos = frame_->PopToRegister();
4785
4786 // Set the frame for the runtime jump target. The code below jumps to the
4787 // jump target label so the frame needs to be established before that.
4788 ASSERT(runtime.entry_frame() == NULL);
4789 runtime.set_entry_frame(frame_);
4790
4791 Register heap_number_map = r6;
4792 Register new_heap_number = r5;
4793 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4794
4795 // Get the double value from the heap number into vfp register d0.
4796 __ ObjectToDoubleVFPRegister(tos, d0,
4797 scratch1, scratch2, heap_number_map, s0,
4798 runtime.entry_label());
4799
4800 // Calculate the square root of d0 and place result in a heap number object.
4801 __ vsqrt(d0, d0);
4802 __ AllocateHeapNumberWithValue(new_heap_number,
4803 d0,
4804 scratch1, scratch2,
4805 heap_number_map,
4806 runtime.entry_label());
4807 __ mov(tos, Operand(new_heap_number));
4808 done.Jump();
4809
4810 runtime.Bind();
4811 // Push back the argument again for the runtime call.
4812 frame_->EmitPush(tos);
4813 frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4814 __ Move(tos, r0);
4815
4816 done.Bind();
4817 frame_->EmitPush(tos);
4818 }
4819 }
4820
4821
4822 class DeferredStringCharCodeAt : public DeferredCode {
4823 public:
4824 DeferredStringCharCodeAt(Register object,
4825 Register index,
4826 Register scratch,
4827 Register result)
4828 : result_(result),
4829 char_code_at_generator_(object,
4830 index,
4831 scratch,
4832 result,
4833 &need_conversion_,
4834 &need_conversion_,
4835 &index_out_of_range_,
4836 STRING_INDEX_IS_NUMBER) {}
4837
4838 StringCharCodeAtGenerator* fast_case_generator() {
4839 return &char_code_at_generator_;
4840 }
4841
4842 virtual void Generate() {
4843 VirtualFrameRuntimeCallHelper call_helper(frame_state());
4844 char_code_at_generator_.GenerateSlow(masm(), call_helper);
4845
4846 __ bind(&need_conversion_);
4847 // Move the undefined value into the result register, which will
4848 // trigger conversion.
4849 __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
4850 __ jmp(exit_label());
4851
4852 __ bind(&index_out_of_range_);
4853 // When the index is out of range, the spec requires us to return
4854 // NaN.
4855 __ LoadRoot(result_, Heap::kNanValueRootIndex);
4856 __ jmp(exit_label());
4857 }
4858
4859 private:
4860 Register result_;
4861
4862 Label need_conversion_;
4863 Label index_out_of_range_;
4864
4865 StringCharCodeAtGenerator char_code_at_generator_;
4866 };
4867
4868
4869 // This generates code that performs a String.prototype.charCodeAt() call
4870 // or returns a smi in order to trigger conversion.
4871 void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
4872 Comment(masm_, "[ GenerateStringCharCodeAt");
4873 ASSERT(args->length() == 2);
4874
4875 Load(args->at(0));
4876 Load(args->at(1));
4877
4878 Register index = frame_->PopToRegister();
4879 Register object = frame_->PopToRegister(index);
4880
4881 // We need two extra registers.
4882 Register scratch = VirtualFrame::scratch0();
4883 Register result = VirtualFrame::scratch1();
4884
4885 DeferredStringCharCodeAt* deferred =
4886 new DeferredStringCharCodeAt(object,
4887 index,
4888 scratch,
4889 result);
4890 deferred->fast_case_generator()->GenerateFast(masm_);
4891 deferred->BindExit();
4892 frame_->EmitPush(result);
4893 }
4894
4895
4896 class DeferredStringCharFromCode : public DeferredCode {
4897 public:
4898 DeferredStringCharFromCode(Register code,
4899 Register result)
4900 : char_from_code_generator_(code, result) {}
4901
4902 StringCharFromCodeGenerator* fast_case_generator() {
4903 return &char_from_code_generator_;
4904 }
4905
4906 virtual void Generate() {
4907 VirtualFrameRuntimeCallHelper call_helper(frame_state());
4908 char_from_code_generator_.GenerateSlow(masm(), call_helper);
4909 }
4910
4911 private:
4912 StringCharFromCodeGenerator char_from_code_generator_;
4913 };
4914
4915
4916 // Generates code for creating a one-char string from a char code.
4917 void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
4918 Comment(masm_, "[ GenerateStringCharFromCode");
4919 ASSERT(args->length() == 1);
4920
4921 Load(args->at(0));
4922
4923 Register result = frame_->GetTOSRegister();
4924 Register code = frame_->PopToRegister(result);
4925
4926 DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
4927 code, result);
4928 deferred->fast_case_generator()->GenerateFast(masm_);
4929 deferred->BindExit();
4930 frame_->EmitPush(result);
4931 }
4932
4933
4934 class DeferredStringCharAt : public DeferredCode {
4935 public:
4936 DeferredStringCharAt(Register object,
4937 Register index,
4938 Register scratch1,
4939 Register scratch2,
4940 Register result)
4941 : result_(result),
4942 char_at_generator_(object,
4943 index,
4944 scratch1,
4945 scratch2,
4946 result,
4947 &need_conversion_,
4948 &need_conversion_,
4949 &index_out_of_range_,
4950 STRING_INDEX_IS_NUMBER) {}
4951
4952 StringCharAtGenerator* fast_case_generator() {
4953 return &char_at_generator_;
4954 }
4955
4956 virtual void Generate() {
4957 VirtualFrameRuntimeCallHelper call_helper(frame_state());
4958 char_at_generator_.GenerateSlow(masm(), call_helper);
4959
4960 __ bind(&need_conversion_);
4961 // Move smi zero into the result register, which will trigger
4962 // conversion.
4963 __ mov(result_, Operand(Smi::FromInt(0)));
4964 __ jmp(exit_label());
4965
4966 __ bind(&index_out_of_range_);
4967 // When the index is out of range, the spec requires us to return
4968 // the empty string.
4969 __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
4970 __ jmp(exit_label());
4971 }
4972
4973 private:
4974 Register result_;
4975
4976 Label need_conversion_;
4977 Label index_out_of_range_;
4978
4979 StringCharAtGenerator char_at_generator_;
4980 };
4981
4982
4983 // This generates code that performs a String.prototype.charAt() call
4984 // or returns a smi in order to trigger conversion.
4985 void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
4986 Comment(masm_, "[ GenerateStringCharAt");
4987 ASSERT(args->length() == 2);
4988
4989 Load(args->at(0));
4990 Load(args->at(1));
4991
4992 Register index = frame_->PopToRegister();
4993 Register object = frame_->PopToRegister(index);
4994
4995 // We need three extra registers.
4996 Register scratch1 = VirtualFrame::scratch0();
4997 Register scratch2 = VirtualFrame::scratch1();
4998 // Use r6 without notifying the virtual frame.
4999 Register result = r6;
5000
5001 DeferredStringCharAt* deferred =
5002 new DeferredStringCharAt(object,
5003 index,
5004 scratch1,
5005 scratch2,
5006 result);
5007 deferred->fast_case_generator()->GenerateFast(masm_);
5008 deferred->BindExit();
5009 frame_->EmitPush(result);
5010 }
5011
5012
5013 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
5014 ASSERT(args->length() == 1);
5015 Load(args->at(0));
5016 JumpTarget answer;
5017 // We need the CC bits to come out as not_equal in the case where the
5018 // object is a smi. This can't be done with the usual test opcode so
5019 // we use XOR to get the right CC bits.
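 // A smi has its tag bit (kSmiTagMask) clear, so AND-ing with the mask and
 // then EOR-ing with the mask leaves zero (eq) only for heap objects; smis
 // take the branch below with ne set, which reads as "not an array" at the
 // answer label.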
5020 Register possible_array = frame_->PopToRegister();
5021 Register scratch = VirtualFrame::scratch0();
5022 __ and_(scratch, possible_array, Operand(kSmiTagMask));
5023 __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
5024 answer.Branch(ne);
5025 // It is a heap object - get the map. Check if the object is a JS array.
5026 __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
5027 answer.Bind();
5028 cc_reg_ = eq;
5029 }
5030
5031
5032 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
5033 ASSERT(args->length() == 1);
5034 Load(args->at(0));
5035 JumpTarget answer;
5036 // We need the CC bits to come out as not_equal in the case where the
5037 // object is a smi. This can't be done with the usual test opcode so
5038 // we use XOR to get the right CC bits.
5039 Register possible_regexp = frame_->PopToRegister();
5040 Register scratch = VirtualFrame::scratch0();
5041 __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
5042 __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
5043 answer.Branch(ne);
5044 // It is a heap object - get the map. Check if the object is a regexp.
5045 __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
5046 answer.Bind();
5047 cc_reg_ = eq;
5048 }
5049
5050
5051 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
5052 // This generates a fast version of:
5053 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
5054 ASSERT(args->length() == 1);
5055 Load(args->at(0));
5056 Register possible_object = frame_->PopToRegister();
5057 __ tst(possible_object, Operand(kSmiTagMask));
5058 false_target()->Branch(eq);
5059
5060 __ LoadRoot(ip, Heap::kNullValueRootIndex);
5061 __ cmp(possible_object, ip);
5062 true_target()->Branch(eq);
5063
5064 Register map_reg = VirtualFrame::scratch0();
5065 __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
5066 // Undetectable objects behave like undefined when tested with typeof.
5067 __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
5068 __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
5069 false_target()->Branch(ne);
5070
5071 __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
5072 __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
5073 false_target()->Branch(lt);
5074 __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
5075 cc_reg_ = le;
5076 }
5077
5078
5079 void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
5080 // This generates a fast version of:
5081 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
5082 // typeof(arg) == 'function').
5083 // It includes undetectable objects (as opposed to IsObject).
5084 ASSERT(args->length() == 1);
5085 Load(args->at(0));
5086 Register value = frame_->PopToRegister();
5087 __ tst(value, Operand(kSmiTagMask));
5088 false_target()->Branch(eq);
5089 // Check that this is an object.
5090 __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
5091 __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
5092 __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
5093 cc_reg_ = ge;
5094 }
5095
5096
5097 // Deferred code to check whether a String wrapper object is safe to use with
5098 // the default valueOf. It runs after the bit caching this information in the
5099 // map has been checked, with the object's map in the map_result_
5100 // register. On return the register map_result_ contains 1 for true and 0 for
5101 // false.
5102 class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
5103 public:
5104 DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
5105 Register map_result,
5106 Register scratch1,
5107 Register scratch2)
5108 : object_(object),
5109 map_result_(map_result),
5110 scratch1_(scratch1),
5111 scratch2_(scratch2) { }
5112
5113 virtual void Generate() {
5114 Label false_result;
5115
5116 // Check that map is loaded as expected.
5117 if (FLAG_debug_code) {
5118 __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
5119 __ cmp(map_result_, ip);
5120 __ Assert(eq, "Map not in expected register");
5121 }
5122
5123 // Check for fast case object. Generate false result for slow case object.
5124 __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
5125 __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
5126 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
5127 __ cmp(scratch1_, ip);
5128 __ b(eq, &false_result);
5129
5130 // Look for valueOf symbol in the descriptor array, and indicate false if
5131 // found. The type is not checked, so if it is a transition it is a false
5132 // negative.
5133 __ ldr(map_result_,
5134 FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
5135 __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
5136 // map_result_: descriptor array
5137 // scratch2_: length of descriptor array
5138 // Calculate the end of the descriptor array.
5139 STATIC_ASSERT(kSmiTag == 0);
5140 STATIC_ASSERT(kSmiTagSize == 1);
5141 STATIC_ASSERT(kPointerSize == 4);
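 // The array length is a smi (value << kSmiTagSize); shifting it left by
 // kPointerSizeLog2 - kSmiTagSize converts it directly into the byte size of
 // the elements, length * kPointerSize.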
5142 __ add(scratch1_,
5143 map_result_,
5144 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5145 __ add(scratch1_,
5146 scratch1_,
5147 Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
5148
5149 // Calculate location of the first key name.
5150 __ add(map_result_,
5151 map_result_,
5152 Operand(FixedArray::kHeaderSize - kHeapObjectTag +
5153 DescriptorArray::kFirstIndex * kPointerSize));
5154 // Loop through all the keys in the descriptor array. If one of these is the
5155 // symbol valueOf, the result is false.
5156 Label entry, loop;
5157 // The use of ip to store the valueOf symbol assumes that it is not otherwise
5158 // used in the loop below.
5159 __ mov(ip, Operand(FACTORY->value_of_symbol()));
5160 __ jmp(&entry);
5161 __ bind(&loop);
5162 __ ldr(scratch2_, MemOperand(map_result_, 0));
5163 __ cmp(scratch2_, ip);
5164 __ b(eq, &false_result);
5165 __ add(map_result_, map_result_, Operand(kPointerSize));
5166 __ bind(&entry);
5167 __ cmp(map_result_, Operand(scratch1_));
5168 __ b(ne, &loop);
5169
5170 // Reload map as register map_result_ was used as temporary above.
5171 __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5172
5173 // If a valueOf property is not found on the object, check that its
5174 // prototype is the unmodified String prototype. If not, the result is false.
5175 __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
5176 __ tst(scratch1_, Operand(kSmiTagMask));
5177 __ b(eq, &false_result);
5178 __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
5179 __ ldr(scratch2_,
5180 ContextOperand(cp, Context::GLOBAL_INDEX));
5181 __ ldr(scratch2_,
5182 FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
5183 __ ldr(scratch2_,
5184 ContextOperand(
5185 scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
5186 __ cmp(scratch1_, scratch2_);
5187 __ b(ne, &false_result);
5188
5189 // Set the bit in the map to indicate that it has been checked safe for
5190 // default valueOf and set true result.
5191 __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
5192 __ orr(scratch1_,
5193 scratch1_,
5194 Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
5195 __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
5196 __ mov(map_result_, Operand(1));
5197 __ jmp(exit_label());
5198 __ bind(&false_result);
5199 // Set false result.
5200 __ mov(map_result_, Operand(0, RelocInfo::NONE));
5201 }
5202
5203 private:
5204 Register object_;
5205 Register map_result_;
5206 Register scratch1_;
5207 Register scratch2_;
5208 };
5209
5210
5211 void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
5212 ZoneList<Expression*>* args) {
5213 ASSERT(args->length() == 1);
5214 Load(args->at(0));
5215 Register obj = frame_->PopToRegister(); // Pop the string wrapper.
5216 if (FLAG_debug_code) {
5217 __ AbortIfSmi(obj);
5218 }
5219
5220 // Check whether this map has already been checked to be safe for default
5221 // valueOf.
5222 Register map_result = VirtualFrame::scratch0();
5223 __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
5224 __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
5225 __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
5226 true_target()->Branch(ne);
5227
5228 // We need an additional two scratch registers for the deferred code.
5229 Register scratch1 = VirtualFrame::scratch1();
5230 // Use r6 without notifying the virtual frame.
5231 Register scratch2 = r6;
5232
5233 DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
5234 new DeferredIsStringWrapperSafeForDefaultValueOf(
5235 obj, map_result, scratch1, scratch2);
5236 deferred->Branch(eq);
5237 deferred->BindExit();
5238 __ tst(map_result, Operand(map_result));
5239 cc_reg_ = ne;
5240 }
5241
5242
5243 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
5244 // This generates a fast version of:
5245 // (%_ClassOf(arg) === 'Function')
5246 ASSERT(args->length() == 1);
5247 Load(args->at(0));
5248 Register possible_function = frame_->PopToRegister();
5249 __ tst(possible_function, Operand(kSmiTagMask));
5250 false_target()->Branch(eq);
5251 Register map_reg = VirtualFrame::scratch0();
5252 Register scratch = VirtualFrame::scratch1();
5253 __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
5254 cc_reg_ = eq;
5255 }
5256
5257
5258 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
5259 ASSERT(args->length() == 1);
5260 Load(args->at(0));
5261 Register possible_undetectable = frame_->PopToRegister();
5262 __ tst(possible_undetectable, Operand(kSmiTagMask));
5263 false_target()->Branch(eq);
5264 Register scratch = VirtualFrame::scratch0();
5265 __ ldr(scratch,
5266 FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
5267 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5268 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5269 cc_reg_ = ne;
5270 }
5271
5272
5273 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
5274 ASSERT(args->length() == 0);
5275
5276 Register scratch0 = VirtualFrame::scratch0();
5277 Register scratch1 = VirtualFrame::scratch1();
5278 // Get the frame pointer for the calling frame.
5279 __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5280
5281 // Skip the arguments adaptor frame if it exists.
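 // The second ldr is conditional on eq: only when the caller frame's context
 // slot holds the ARGUMENTS_ADAPTOR marker do we step up one more frame;
 // otherwise scratch0 keeps the original caller frame pointer.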
5282 __ ldr(scratch1,
5283 MemOperand(scratch0, StandardFrameConstants::kContextOffset));
5284 __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5285 __ ldr(scratch0,
5286 MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
5287
5288 // Check the marker in the calling frame.
5289 __ ldr(scratch1,
5290 MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
5291 __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5292 cc_reg_ = eq;
5293 }
5294
5295
5296 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
5297 ASSERT(args->length() == 0);
5298
5299 Register tos = frame_->GetTOSRegister();
5300 Register scratch0 = VirtualFrame::scratch0();
5301 Register scratch1 = VirtualFrame::scratch1();
5302
5303 // Check if the calling frame is an arguments adaptor frame.
5304 __ ldr(scratch0,
5305 MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5306 __ ldr(scratch1,
5307 MemOperand(scratch0, StandardFrameConstants::kContextOffset));
5308 __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5309
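 // The mov below runs only on 'ne' (no adaptor frame) and the ldr only on
 // 'eq' (adaptor frame present), so the two sources are selected without a
 // branch.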
5310 // Get the number of formal parameters.
5311 __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
5312
5313 // Arguments adaptor case: Read the arguments length from the
5314 // adaptor frame.
5315 __ ldr(tos,
5316 MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
5317 eq);
5318
5319 frame_->EmitPush(tos);
5320 }
5321
5322
5323 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
5324 ASSERT(args->length() == 1);
5325
5326 // Satisfy contract with ArgumentsAccessStub:
5327 // Load the key into r1 and the formal parameters count into r0.
5328 Load(args->at(0));
5329 frame_->PopToR1();
5330 frame_->SpillAll();
5331 __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
5332
5333 // Call the shared stub to get to arguments[key].
5334 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
5335 frame_->CallStub(&stub, 0);
5336 frame_->EmitPush(r0);
5337 }
5338
5339
5340 void CodeGenerator::GenerateRandomHeapNumber(
5341 ZoneList<Expression*>* args) {
5342 VirtualFrame::SpilledScope spilled_scope(frame_);
5343 ASSERT(args->length() == 0);
5344
5345 Label slow_allocate_heapnumber;
5346 Label heapnumber_allocated;
5347
5348 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
5349 __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
5350 __ jmp(&heapnumber_allocated);
5351
5352 __ bind(&slow_allocate_heapnumber);
5353 // Allocate a heap number.
5354 __ CallRuntime(Runtime::kNumberAlloc, 0);
5355 __ mov(r4, Operand(r0));
5356
5357 __ bind(&heapnumber_allocated);
5358
5359 // Convert 32 random bits in r0 to 0.(32 random bits) in a double
5360 // by computing:
5361 // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
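 // 0x41300000 encodes sign 0, biased exponent 0x413 (1043 - 1023 = 20) and a
 // zero upper mantissa, so pairing it with 32 random low mantissa bits gives
 // 2^20 + random * 2^-32; subtracting 1.0 x 2^20 leaves random * 2^-32,
 // i.e. 0.(32 random bits).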
5362 if (CpuFeatures::IsSupported(VFP3)) {
5363 __ PrepareCallCFunction(1, r0);
5364 __ mov(r0, Operand(ExternalReference::isolate_address()));
5365 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
5366
5367 CpuFeatures::Scope scope(VFP3);
5368 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
5369 // Create this constant using mov/orr to avoid PC relative load.
5370 __ mov(r1, Operand(0x41000000));
5371 __ orr(r1, r1, Operand(0x300000));
5372 // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
5373 __ vmov(d7, r0, r1);
5374 // Move 0x4130000000000000 to VFP.
5375 __ mov(r0, Operand(0, RelocInfo::NONE));
5376 __ vmov(d8, r0, r1);
5377 // Subtract and store the result in the heap number.
5378 __ vsub(d7, d7, d8);
5379 __ sub(r0, r4, Operand(kHeapObjectTag));
5380 __ vstr(d7, r0, HeapNumber::kValueOffset);
5381 frame_->EmitPush(r4);
5382 } else {
5383 __ PrepareCallCFunction(2, r0);
5384 __ mov(r0, Operand(r4));
5385 __ mov(r1, Operand(ExternalReference::isolate_address()));
5386 __ CallCFunction(
5387 ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
5388 frame_->EmitPush(r0);
5389 }
5390 }
5391
5392
5393 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
5394 ASSERT_EQ(2, args->length());
5395
5396 Load(args->at(0));
5397 Load(args->at(1));
5398
5399 StringAddStub stub(NO_STRING_ADD_FLAGS);
5400 frame_->SpillAll();
5401 frame_->CallStub(&stub, 2);
5402 frame_->EmitPush(r0);
5403 }
5404
5405
5406 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
5407 ASSERT_EQ(3, args->length());
5408
5409 Load(args->at(0));
5410 Load(args->at(1));
5411 Load(args->at(2));
5412
5413 SubStringStub stub;
5414 frame_->SpillAll();
5415 frame_->CallStub(&stub, 3);
5416 frame_->EmitPush(r0);
5417 }
5418
5419
5420 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
5421 ASSERT_EQ(2, args->length());
5422
5423 Load(args->at(0));
5424 Load(args->at(1));
5425
5426 StringCompareStub stub;
5427 frame_->SpillAll();
5428 frame_->CallStub(&stub, 2);
5429 frame_->EmitPush(r0);
5430 }
5431
5432
5433 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
5434 ASSERT_EQ(4, args->length());
5435
5436 Load(args->at(0));
5437 Load(args->at(1));
5438 Load(args->at(2));
5439 Load(args->at(3));
5440 RegExpExecStub stub;
5441 frame_->SpillAll();
5442 frame_->CallStub(&stub, 4);
5443 frame_->EmitPush(r0);
5444 }
5445
5446
5447 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
5448 ASSERT_EQ(3, args->length());
5449
5450 Load(args->at(0)); // Size of array, smi.
5451 Load(args->at(1)); // "index" property value.
5452 Load(args->at(2)); // "input" property value.
5453 RegExpConstructResultStub stub;
5454 frame_->SpillAll();
5455 frame_->CallStub(&stub, 3);
5456 frame_->EmitPush(r0);
5457 }
5458
5459
5460 class DeferredSearchCache: public DeferredCode {
5461 public:
5462 DeferredSearchCache(Register dst, Register cache, Register key)
5463 : dst_(dst), cache_(cache), key_(key) {
5464 set_comment("[ DeferredSearchCache");
5465 }
5466
5467 virtual void Generate();
5468
5469 private:
5470 Register dst_, cache_, key_;
5471 };
5472
5473
5474 void DeferredSearchCache::Generate() {
5475 __ Push(cache_, key_);
5476 __ CallRuntime(Runtime::kGetFromCache, 2);
5477 __ Move(dst_, r0);
5478 }
5479
5480
5481 void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
5482 ASSERT_EQ(2, args->length());
5483
5484 ASSERT_NE(NULL, args->at(0)->AsLiteral());
5485 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
5486
5487 Handle<FixedArray> jsfunction_result_caches(
5488 Isolate::Current()->global_context()->jsfunction_result_caches());
5489 if (jsfunction_result_caches->length() <= cache_id) {
5490 __ Abort("Attempt to use undefined cache.");
5491 frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
5492 return;
5493 }
5494
5495 Load(args->at(1));
5496
5497 frame_->PopToR1();
5498 frame_->SpillAll();
5499 Register key = r1; // Just popped to r1.
5500 Register result = r0; // Free, as frame has just been spilled.
5501 Register scratch1 = VirtualFrame::scratch0();
5502 Register scratch2 = VirtualFrame::scratch1();
5503
5504 __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
5505 __ ldr(scratch1,
5506 FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
5507 __ ldr(scratch1,
5508 ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
5509 __ ldr(scratch1,
5510 FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
5511
5512 DeferredSearchCache* deferred =
5513 new DeferredSearchCache(result, scratch1, key);
5514
5515 const int kFingerOffset =
5516 FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
5517 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5518 __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
5519 // result now holds finger offset as a smi.
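 // The cache stores (key, value) pairs in the fixed array and the finger is
 // the smi index of the most recently hit key, so a repeated lookup of the
 // same key is answered here without calling the runtime.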
5520 __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5521 // scratch2 now points to the start of fixed array elements.
5522 __ ldr(result,
5523 MemOperand(
5524 scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
5525 // Note side effect of PreIndex: scratch2 now points to the key of the pair.
5526 __ cmp(key, result);
5527 deferred->Branch(ne);
5528
5529 __ ldr(result, MemOperand(scratch2, kPointerSize));
5530
5531 deferred->BindExit();
5532 frame_->EmitPush(result);
5533 }
5534
5535
5536 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
5537 ASSERT_EQ(args->length(), 1);
5538
5539 // Load the argument on the stack and jump to the runtime.
5540 Load(args->at(0));
5541
5542 NumberToStringStub stub;
5543 frame_->SpillAll();
5544 frame_->CallStub(&stub, 1);
5545 frame_->EmitPush(r0);
5546 }
5547
5548
5549 class DeferredSwapElements: public DeferredCode {
5550 public:
5551 DeferredSwapElements(Register object, Register index1, Register index2)
5552 : object_(object), index1_(index1), index2_(index2) {
5553 set_comment("[ DeferredSwapElements");
5554 }
5555
5556 virtual void Generate();
5557
5558 private:
5559 Register object_, index1_, index2_;
5560 };
5561
5562
5563 void DeferredSwapElements::Generate() {
5564 __ push(object_);
5565 __ push(index1_);
5566 __ push(index2_);
5567 __ CallRuntime(Runtime::kSwapElements, 3);
5568 }
5569
5570
5571 void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
5572 Comment cmnt(masm_, "[ GenerateSwapElements");
5573
5574 ASSERT_EQ(3, args->length());
5575
5576 Load(args->at(0));
5577 Load(args->at(1));
5578 Load(args->at(2));
5579
5580 VirtualFrame::SpilledScope spilled_scope(frame_);
5581
5582 Register index2 = r2;
5583 Register index1 = r1;
5584 Register object = r0;
5585 Register tmp1 = r3;
5586 Register tmp2 = r4;
5587
5588 frame_->EmitPop(index2);
5589 frame_->EmitPop(index1);
5590 frame_->EmitPop(object);
5591
5592 DeferredSwapElements* deferred =
5593 new DeferredSwapElements(object, index1, index2);
5594
5595 // Fetch the map and check if array is in fast case.
5596 // Check that object doesn't require security checks and
5597 // has no indexed interceptor.
5598 __ CompareObjectType(object, tmp1, tmp2, JS_ARRAY_TYPE);
5599 deferred->Branch(ne);
5600 __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
5601 __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
5602 deferred->Branch(ne);
5603
5604 // Check the object's elements are in fast case and writable.
5605 __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
5606 __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
5607 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
5608 __ cmp(tmp2, ip);
5609 deferred->Branch(ne);
5610
5611 // Smi-tagging is equivalent to multiplying by 2.
5612 STATIC_ASSERT(kSmiTag == 0);
5613 STATIC_ASSERT(kSmiTagSize == 1);
5614
5615 // Check that both indices are smis.
5616 __ mov(tmp2, index1);
5617 __ orr(tmp2, tmp2, index2);
5618 __ tst(tmp2, Operand(kSmiTagMask));
5619 deferred->Branch(ne);
5620
5621 // Check that both indices are valid.
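 // The second cmp only executes when the first one left 'hi' (length above
 // index1), so the 'ls' branch below fires if either index is >= the length.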
5622 __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
5623 __ cmp(tmp2, index1);
5624 __ cmp(tmp2, index2, hi);
5625 deferred->Branch(ls);
5626
5627 // Bring the offsets into the fixed array in tmp1 into index1 and
5628 // index2.
5629 __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5630 __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
5631 __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
5632
5633 // Swap elements.
5634 Register tmp3 = object;
5635 object = no_reg;
5636 __ ldr(tmp3, MemOperand(tmp1, index1));
5637 __ ldr(tmp2, MemOperand(tmp1, index2));
5638 __ str(tmp3, MemOperand(tmp1, index2));
5639 __ str(tmp2, MemOperand(tmp1, index1));
5640
5641 Label done;
5642 __ InNewSpace(tmp1, tmp2, eq, &done);
5643 // Possible optimization: do a check that both values are Smis
5644 // (OR them together and test against the smi mask).
5645
5646 __ mov(tmp2, tmp1);
5647 __ add(index1, index1, tmp1);
5648 __ add(index2, index2, tmp1);
5649 __ RecordWriteHelper(tmp1, index1, tmp3);
5650 __ RecordWriteHelper(tmp2, index2, tmp3);
5651 __ bind(&done);
5652
5653 deferred->BindExit();
5654 __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
5655 frame_->EmitPush(tmp1);
5656 }
5657
5658
5659 void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
5660 Comment cmnt(masm_, "[ GenerateCallFunction");
5661
5662 ASSERT(args->length() >= 2);
5663
5664 int n_args = args->length() - 2; // for receiver and function.
5665 Load(args->at(0)); // receiver
5666 for (int i = 0; i < n_args; i++) {
5667 Load(args->at(i + 1));
5668 }
5669 Load(args->at(n_args + 1)); // function
5670 frame_->CallJSFunction(n_args);
5671 frame_->EmitPush(r0);
5672 }
5673
5674
5675 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
5676 ASSERT_EQ(args->length(), 1);
5677 Load(args->at(0));
5678 if (CpuFeatures::IsSupported(VFP3)) {
5679 TranscendentalCacheStub stub(TranscendentalCache::SIN,
5680 TranscendentalCacheStub::TAGGED);
5681 frame_->SpillAllButCopyTOSToR0();
5682 frame_->CallStub(&stub, 1);
5683 } else {
5684 frame_->CallRuntime(Runtime::kMath_sin, 1);
5685 }
5686 frame_->EmitPush(r0);
5687 }
5688
5689
5690 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
5691 ASSERT_EQ(args->length(), 1);
5692 Load(args->at(0));
5693 if (CpuFeatures::IsSupported(VFP3)) {
5694 TranscendentalCacheStub stub(TranscendentalCache::COS,
5695 TranscendentalCacheStub::TAGGED);
5696 frame_->SpillAllButCopyTOSToR0();
5697 frame_->CallStub(&stub, 1);
5698 } else {
5699 frame_->CallRuntime(Runtime::kMath_cos, 1);
5700 }
5701 frame_->EmitPush(r0);
5702 }
5703
5704
5705 void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
5706 ASSERT_EQ(args->length(), 1);
5707 Load(args->at(0));
5708 if (CpuFeatures::IsSupported(VFP3)) {
5709 TranscendentalCacheStub stub(TranscendentalCache::LOG,
5710 TranscendentalCacheStub::TAGGED);
5711 frame_->SpillAllButCopyTOSToR0();
5712 frame_->CallStub(&stub, 1);
5713 } else {
5714 frame_->CallRuntime(Runtime::kMath_log, 1);
5715 }
5716 frame_->EmitPush(r0);
5717 }
5718
5719
5720 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
5721 ASSERT(args->length() == 2);
5722
5723 // Load the two objects into registers and perform the comparison.
5724 Load(args->at(0));
5725 Load(args->at(1));
5726 Register lhs = frame_->PopToRegister();
5727 Register rhs = frame_->PopToRegister(lhs);
5728 __ cmp(lhs, rhs);
5729 cc_reg_ = eq;
5730 }
5731
5732
5733 void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
5734 ASSERT(args->length() == 2);
5735
5736 // Load the two objects into registers and perform the comparison.
5737 Load(args->at(0));
5738 Load(args->at(1));
5739 Register right = frame_->PopToRegister();
5740 Register left = frame_->PopToRegister(right);
5741 Register tmp = frame_->scratch0();
5742 Register tmp2 = frame_->scratch1();
5743
5744 // Jumps to done must have the eq flag set if the test is successful
5745 // and clear if the test has failed.
5746 Label done;
5747
5748 // Fail if either is a non-HeapObject.
5749 __ cmp(left, Operand(right));
5750 __ b(eq, &done);
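 // left & right has the tag bit set only when both operands are heap
 // objects; flipping that bit and testing it therefore yields 'ne' as soon
 // as either operand is a smi.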
5751 __ and_(tmp, left, Operand(right));
5752 __ eor(tmp, tmp, Operand(kSmiTagMask));
5753 __ tst(tmp, Operand(kSmiTagMask));
5754 __ b(ne, &done);
5755 __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
5756 __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
5757 __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
5758 __ b(ne, &done);
5759 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
5760 __ cmp(tmp, Operand(tmp2));
5761 __ b(ne, &done);
5762 __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
5763 __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
5764 __ cmp(tmp, tmp2);
5765 __ bind(&done);
5766 cc_reg_ = eq;
5767 }
5768
5769
5770 void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
5771 ASSERT(args->length() == 1);
5772 Load(args->at(0));
5773 Register value = frame_->PopToRegister();
5774 Register tmp = frame_->scratch0();
5775 __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
5776 __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
5777 cc_reg_ = eq;
5778 }
5779
5780
5781 void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
5782 ASSERT(args->length() == 1);
5783 Load(args->at(0));
5784 Register value = frame_->PopToRegister();
5785
5786 __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
5787 __ IndexFromHash(value, value);
5788 frame_->EmitPush(value);
5789 }
5790
5791
5792 void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
5793 ASSERT(args->length() == 2);
5794 Load(args->at(0));
5795 Register value = frame_->PopToRegister();
5796 __ LoadRoot(value, Heap::kUndefinedValueRootIndex);
5797 frame_->EmitPush(value);
5798 }
5799
5800
5801 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
5802 #ifdef DEBUG
5803 int original_height = frame_->height();
5804 #endif
5805 if (CheckForInlineRuntimeCall(node)) {
5806 ASSERT((has_cc() && frame_->height() == original_height) ||
5807 (!has_cc() && frame_->height() == original_height + 1));
5808 return;
5809 }
5810
5811 ZoneList<Expression*>* args = node->arguments();
5812 Comment cmnt(masm_, "[ CallRuntime");
5813 const Runtime::Function* function = node->function();
5814
5815 if (function == NULL) {
5816 // Prepare stack for calling JS runtime function.
5817 // Push the builtins object found in the current global object.
5818 Register scratch = VirtualFrame::scratch0();
5819 __ ldr(scratch, GlobalObjectOperand());
5820 Register builtins = frame_->GetTOSRegister();
5821 __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
5822 frame_->EmitPush(builtins);
5823 }
5824
5825 // Push the arguments ("left-to-right").
5826 int arg_count = args->length();
5827 for (int i = 0; i < arg_count; i++) {
5828 Load(args->at(i));
5829 }
5830
5831 VirtualFrame::SpilledScope spilled_scope(frame_);
5832
5833 if (function == NULL) {
5834 // Call the JS runtime function.
5835 __ mov(r2, Operand(node->name()));
5836 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
5837 Handle<Code> stub =
5838 ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
5839 frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
5840 __ ldr(cp, frame_->Context());
5841 frame_->EmitPush(r0);
5842 } else {
5843 // Call the C runtime function.
5844 frame_->CallRuntime(function, arg_count);
5845 frame_->EmitPush(r0);
5846 }
5847 ASSERT_EQ(original_height + 1, frame_->height());
5848 }
5849
5850
5851 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
5852 #ifdef DEBUG
5853 int original_height = frame_->height();
5854 #endif
5855 Comment cmnt(masm_, "[ UnaryOperation");
5856
5857 Token::Value op = node->op();
5858
5859 if (op == Token::NOT) {
5860 LoadCondition(node->expression(), false_target(), true_target(), true);
5861 // LoadCondition may (and usually does) leave a test and branch to
5862 // be emitted by the caller. In that case, negate the condition.
5863 if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
5864
5865 } else if (op == Token::DELETE) {
5866 Property* property = node->expression()->AsProperty();
5867 Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
5868 if (property != NULL) {
5869 Load(property->obj());
5870 Load(property->key());
5871 frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
5872 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
5873 frame_->EmitPush(r0);
5874
5875 } else if (variable != NULL) {
5876 // Delete of an unqualified identifier is disallowed in strict mode
5877 // but "delete this" is.
5878 ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
5879 Slot* slot = variable->AsSlot();
5880 if (variable->is_global()) {
5881 LoadGlobal();
5882 frame_->EmitPush(Operand(variable->name()));
5883 frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
5884 frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
5885 frame_->EmitPush(r0);
5886
5887 } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
5888 // Delete from the context holding the named variable.
5889 frame_->EmitPush(cp);
5890 frame_->EmitPush(Operand(variable->name()));
5891 frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
5892 frame_->EmitPush(r0);
5893
5894 } else {
5895 // Default: The result of deleting non-global, non-dynamically
5896 // introduced variables is false.
5897 frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
5898 }
5899
5900 } else {
5901 // Default: Result of deleting expressions is true.
5902 Load(node->expression()); // may have side-effects
5903 frame_->Drop();
5904 frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
5905 }
5906
5907 } else if (op == Token::TYPEOF) {
5908 // Special case for loading the typeof expression; see comment on
5909 // LoadTypeofExpression().
5910 LoadTypeofExpression(node->expression());
5911 frame_->CallRuntime(Runtime::kTypeof, 1);
5912 frame_->EmitPush(r0); // r0 has result
5913
5914 } else {
5915 bool can_overwrite = node->expression()->ResultOverwriteAllowed();
5916 UnaryOverwriteMode overwrite =
5917 can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
5918
5919 bool no_negative_zero = node->expression()->no_negative_zero();
5920 Load(node->expression());
5921 switch (op) {
5922 case Token::NOT:
5923 case Token::DELETE:
5924 case Token::TYPEOF:
5925 UNREACHABLE(); // handled above
5926 break;
5927
5928 case Token::SUB: {
5929 frame_->PopToR0();
5930 GenericUnaryOpStub stub(
5931 Token::SUB,
5932 overwrite,
5933 NO_UNARY_FLAGS,
5934 no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
5935 frame_->CallStub(&stub, 0);
5936 frame_->EmitPush(r0); // r0 has result
5937 break;
5938 }
5939
5940 case Token::BIT_NOT: {
5941 Register tos = frame_->PopToRegister();
5942 JumpTarget not_smi_label;
5943 JumpTarget continue_label;
5944 // Smi check.
5945 __ tst(tos, Operand(kSmiTagMask));
5946 not_smi_label.Branch(ne);
5947
5948 __ mvn(tos, Operand(tos));
5949 __ bic(tos, tos, Operand(kSmiTagMask)); // Bit-clear inverted smi-tag.
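 // With kSmiTag == 0 a smi n is stored as 2 * n; mvn yields -2n - 1, whose
 // tag bit is set, and clearing it gives 2 * (-n - 1), which is exactly the
 // smi encoding of ~n.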
5950 frame_->EmitPush(tos);
5951 // The fast case is the first to jump to the continue label, so it gets
5952 // to decide the virtual frame layout.
5953 continue_label.Jump();
5954
5955 not_smi_label.Bind();
5956 frame_->SpillAll();
5957 __ Move(r0, tos);
5958 GenericUnaryOpStub stub(Token::BIT_NOT,
5959 overwrite,
5960 NO_UNARY_SMI_CODE_IN_STUB);
5961 frame_->CallStub(&stub, 0);
5962 frame_->EmitPush(r0);
5963
5964 continue_label.Bind();
5965 break;
5966 }
5967
5968 case Token::VOID:
5969 frame_->Drop();
5970 frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
5971 break;
5972
5973 case Token::ADD: {
5974 Register tos = frame_->Peek();
5975 // Smi check.
5976 JumpTarget continue_label;
5977 __ tst(tos, Operand(kSmiTagMask));
5978 continue_label.Branch(eq);
5979
5980 frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
5981 frame_->EmitPush(r0);
5982
5983 continue_label.Bind();
5984 break;
5985 }
5986 default:
5987 UNREACHABLE();
5988 }
5989 }
5990 ASSERT(!has_valid_frame() ||
5991 (has_cc() && frame_->height() == original_height) ||
5992 (!has_cc() && frame_->height() == original_height + 1));
5993 }
5994
5995
5996 class DeferredCountOperation: public DeferredCode {
5997 public:
5998 DeferredCountOperation(Register value,
5999 bool is_increment,
6000 bool is_postfix,
6001 int target_size)
6002 : value_(value),
6003 is_increment_(is_increment),
6004 is_postfix_(is_postfix),
6005 target_size_(target_size) {}
6006
6007 virtual void Generate() {
6008 VirtualFrame copied_frame(*frame_state()->frame());
6009
6010 Label slow;
6011 // Check for smi operand.
6012 __ tst(value_, Operand(kSmiTagMask));
6013 __ b(ne, &slow);
6014
6015 // Revert optimistic increment/decrement.
6016 if (is_increment_) {
6017 __ sub(value_, value_, Operand(Smi::FromInt(1)));
6018 } else {
6019 __ add(value_, value_, Operand(Smi::FromInt(1)));
6020 }
6021
6022 // Slow case: Convert to number. At this point the
6023 // value to be incremented is in the value register.
6024 __ bind(&slow);
6025
6026 // Convert the operand to a number.
6027 copied_frame.EmitPush(value_);
6028
6029 copied_frame.InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
6030
6031 if (is_postfix_) {
6032 // Postfix: store to result (on the stack).
6033 __ str(r0, MemOperand(sp, target_size_ * kPointerSize));
6034 }
6035
6036 copied_frame.EmitPush(r0);
6037 copied_frame.EmitPush(Operand(Smi::FromInt(1)));
6038
6039 if (is_increment_) {
6040 copied_frame.CallRuntime(Runtime::kNumberAdd, 2);
6041 } else {
6042 copied_frame.CallRuntime(Runtime::kNumberSub, 2);
6043 }
6044
6045 __ Move(value_, r0);
6046
6047 copied_frame.MergeTo(frame_state()->frame());
6048 }
6049
6050 private:
6051 Register value_;
6052 bool is_increment_;
6053 bool is_postfix_;
6054 int target_size_;
6055 };
6056
6057
6058 void CodeGenerator::VisitCountOperation(CountOperation* node) {
6059 #ifdef DEBUG
6060 int original_height = frame_->height();
6061 #endif
6062 Comment cmnt(masm_, "[ CountOperation");
6063 VirtualFrame::RegisterAllocationScope scope(this);
6064
6065 bool is_postfix = node->is_postfix();
6066 bool is_increment = node->op() == Token::INC;
6067
6068 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
6069 bool is_const = (var != NULL && var->mode() == Variable::CONST);
6070 bool is_slot = (var != NULL && var->mode() == Variable::VAR);
6071
6072 if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
6073 // The type info declares that this variable is always a Smi. That
6074 // means it is a Smi both before and after the increment/decrement.
6075 // Let's make use of that to make a very minimal count operation.
6076 Reference target(this, node->expression(), !is_const);
6077 ASSERT(!target.is_illegal());
6078 target.GetValue(); // Pushes the value.
6079 Register value = frame_->PopToRegister();
6080 if (is_postfix) frame_->EmitPush(value);
6081 if (is_increment) {
6082 __ add(value, value, Operand(Smi::FromInt(1)));
6083 } else {
6084 __ sub(value, value, Operand(Smi::FromInt(1)));
6085 }
6086 frame_->EmitPush(value);
6087 target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
6088 if (is_postfix) frame_->Pop();
6089 ASSERT_EQ(original_height + 1, frame_->height());
6090 return;
6091 }
6092
6093 // If it's a postfix expression and its result is not ignored and the
6094 // reference is non-trivial, then push a placeholder on the stack now
6095 // to hold the result of the expression.
6096 bool placeholder_pushed = false;
6097 if (!is_slot && is_postfix) {
6098 frame_->EmitPush(Operand(Smi::FromInt(0)));
6099 placeholder_pushed = true;
6100 }
6101
6102 // A constant reference is not saved to, so a constant reference is not a
6103 // compound assignment reference.
6104 { Reference target(this, node->expression(), !is_const);
6105 if (target.is_illegal()) {
6106 // Spoof the virtual frame to have the expected height (one higher
6107 // than on entry).
6108 if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
6109 ASSERT_EQ(original_height + 1, frame_->height());
6110 return;
6111 }
6112
6113 // This pushes 0, 1 or 2 words on the stack to be used later when updating
6114 // the target. It also pushes the current value of the target.
6115 target.GetValue();
6116
6117 bool value_is_known_smi = frame_->KnownSmiAt(0);
6118 Register value = frame_->PopToRegister();
6119
6120 // Postfix: Store the old value as the result.
6121 if (placeholder_pushed) {
6122 frame_->SetElementAt(value, target.size());
6123 } else if (is_postfix) {
6124 frame_->EmitPush(value);
6125 __ mov(VirtualFrame::scratch0(), value);
6126 value = VirtualFrame::scratch0();
6127 }
6128
6129 // We can't use any type information here since the virtual frame from the
6130 // deferred code may have lost information and we can't merge a virtual
6131 // frame with less specific type knowledge to a virtual frame with more
6132 // specific knowledge that has already used that specific knowledge to
6133 // generate code.
6134 frame_->ForgetTypeInfo();
6135
6136 // The constructor here will capture the current virtual frame and use it to
6137 // merge to after the deferred code has run. No virtual frame changes are
6138 // allowed from here until the 'BindExit' below.
6139 DeferredCode* deferred =
6140 new DeferredCountOperation(value,
6141 is_increment,
6142 is_postfix,
6143 target.size());
6144 if (!value_is_known_smi) {
6145 // Check for smi operand.
6146 __ tst(value, Operand(kSmiTagMask));
6147
6148 deferred->Branch(ne);
6149 }
6150
6151 // Perform optimistic increment/decrement.
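 // SetCC makes the add/sub update the condition flags; since smis occupy the
 // upper 31 bits, signed overflow (the 'vs' branch below) is exactly the case
 // where the result no longer fits in a smi and must go to the deferred code.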
6152 if (is_increment) {
6153 __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
6154 } else {
6155 __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
6156 }
6157
6158 // If increment/decrement overflows, go to deferred code.
6159 deferred->Branch(vs);
6160
6161 deferred->BindExit();
6162
6163 // Store the new value in the target if not const.
6164 // At this point the answer is in the value register.
6165 frame_->EmitPush(value);
6166 // Set the target with the result, leaving the result on
6167 // top of the stack. Removes the target from the stack if
6168 // it has a non-zero size.
6169 if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
6170 }
6171
6172 // Postfix: Discard the new value and use the old.
6173 if (is_postfix) frame_->Pop();
6174 ASSERT_EQ(original_height + 1, frame_->height());
6175 }
6176
6177
6178 void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
6179 // According to ECMA-262 section 11.11, page 58, the binary logical
6180 // operators must yield the result of one of the two expressions
6181 // before any ToBoolean() conversions. This means that the value
6182 // produced by a && or || operator is not necessarily a boolean.
6183
6184 // NOTE: If the left hand side produces a materialized value (not in
6185 // the CC register), we force the right hand side to do the
6186 // same. This is necessary because we may have to branch to the exit
6187 // after evaluating the left hand side (due to the shortcut
6188 // semantics), but the compiler must (statically) know if the result
6189 // of compiling the binary operation is materialized or not.
6190 if (node->op() == Token::AND) {
6191 JumpTarget is_true;
6192 LoadCondition(node->left(), &is_true, false_target(), false);
6193 if (has_valid_frame() && !has_cc()) {
6194 // The left-hand side result is on top of the virtual frame.
6195 JumpTarget pop_and_continue;
6196 JumpTarget exit;
6197
6198 frame_->Dup();
6199 // Avoid popping the result if it converts to 'false' using the
6200 // standard ToBoolean() conversion as described in ECMA-262,
6201 // section 9.2, page 30.
6202 ToBoolean(&pop_and_continue, &exit);
6203 Branch(false, &exit);
6204
6205 // Pop the result of evaluating the first part.
6206 pop_and_continue.Bind();
6207 frame_->Pop();
6208
6209 // Evaluate right side expression.
6210 is_true.Bind();
6211 Load(node->right());
6212
6213 // Exit (always with a materialized value).
6214 exit.Bind();
6215 } else if (has_cc() || is_true.is_linked()) {
6216 // The left-hand side is either (a) partially compiled to
6217 // control flow with a final branch left to emit or (b) fully
6218 // compiled to control flow and possibly true.
6219 if (has_cc()) {
6220 Branch(false, false_target());
6221 }
6222 is_true.Bind();
6223 LoadCondition(node->right(), true_target(), false_target(), false);
6224 } else {
6225 // Nothing to do.
6226 ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
6227 }
6228
6229 } else {
6230 ASSERT(node->op() == Token::OR);
6231 JumpTarget is_false;
6232 LoadCondition(node->left(), true_target(), &is_false, false);
6233 if (has_valid_frame() && !has_cc()) {
6234 // The left-hand side result is on top of the virtual frame.
6235 JumpTarget pop_and_continue;
6236 JumpTarget exit;
6237
6238 frame_->Dup();
6239 // Avoid popping the result if it converts to 'true' using the
6240 // standard ToBoolean() conversion as described in ECMA-262,
6241 // section 9.2, page 30.
6242 ToBoolean(&exit, &pop_and_continue);
6243 Branch(true, &exit);
6244
6245 // Pop the result of evaluating the first part.
6246 pop_and_continue.Bind();
6247 frame_->Pop();
6248
6249 // Evaluate right side expression.
6250 is_false.Bind();
6251 Load(node->right());
6252
6253 // Exit (always with a materialized value).
6254 exit.Bind();
6255 } else if (has_cc() || is_false.is_linked()) {
6256 // The left-hand side is either (a) partially compiled to
6257 // control flow with a final branch left to emit or (b) fully
6258 // compiled to control flow and possibly false.
6259 if (has_cc()) {
6260 Branch(true, true_target());
6261 }
6262 is_false.Bind();
6263 LoadCondition(node->right(), true_target(), false_target(), false);
6264 } else {
6265 // Nothing to do.
6266 ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
6267 }
6268 }
6269 }
6270
6271
6272 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
6273 #ifdef DEBUG
6274 int original_height = frame_->height();
6275 #endif
6276 Comment cmnt(masm_, "[ BinaryOperation");
6277
6278 if (node->op() == Token::AND || node->op() == Token::OR) {
6279 GenerateLogicalBooleanOperation(node);
6280 } else {
6281 // Optimize for the case where (at least) one of the expressions
6282 // is a literal small integer.
6283 Literal* lliteral = node->left()->AsLiteral();
6284 Literal* rliteral = node->right()->AsLiteral();
6285 // NOTE: The code below assumes that the slow cases (calls to runtime)
6286 // never return a constant/immutable object.
6287 bool overwrite_left = node->left()->ResultOverwriteAllowed();
6288 bool overwrite_right = node->right()->ResultOverwriteAllowed();
6289
6290 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
6291 VirtualFrame::RegisterAllocationScope scope(this);
6292 Load(node->left());
6293 if (frame_->KnownSmiAt(0)) overwrite_left = false;
6294 SmiOperation(node->op(),
6295 rliteral->handle(),
6296 false,
6297 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
6298 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
6299 VirtualFrame::RegisterAllocationScope scope(this);
6300 Load(node->right());
6301 if (frame_->KnownSmiAt(0)) overwrite_right = false;
6302 SmiOperation(node->op(),
6303 lliteral->handle(),
6304 true,
6305 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
6306 } else {
6307 GenerateInlineSmi inline_smi =
6308 loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
6309 if (lliteral != NULL) {
6310 ASSERT(!lliteral->handle()->IsSmi());
6311 inline_smi = DONT_GENERATE_INLINE_SMI;
6312 }
6313 if (rliteral != NULL) {
6314 ASSERT(!rliteral->handle()->IsSmi());
6315 inline_smi = DONT_GENERATE_INLINE_SMI;
6316 }
6317 VirtualFrame::RegisterAllocationScope scope(this);
6318 OverwriteMode overwrite_mode = NO_OVERWRITE;
6319 if (overwrite_left) {
6320 overwrite_mode = OVERWRITE_LEFT;
6321 } else if (overwrite_right) {
6322 overwrite_mode = OVERWRITE_RIGHT;
6323 }
6324 Load(node->left());
6325 Load(node->right());
6326 GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
6327 }
6328 }
6329 ASSERT(!has_valid_frame() ||
6330 (has_cc() && frame_->height() == original_height) ||
6331 (!has_cc() && frame_->height() == original_height + 1));
6332 }
6333
6334
6335 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
6336 #ifdef DEBUG
6337 int original_height = frame_->height();
6338 #endif
6339 frame_->EmitPush(MemOperand(frame_->Function()));
6340 ASSERT_EQ(original_height + 1, frame_->height());
6341 }
6342
6343
6344 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
6345 #ifdef DEBUG
6346 int original_height = frame_->height();
6347 #endif
6348 Comment cmnt(masm_, "[ CompareOperation");
6349
6350 VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
6351
6352 // Get the expressions from the node.
6353 Expression* left = node->left();
6354 Expression* right = node->right();
6355 Token::Value op = node->op();
6356
6357 // To make typeof testing for natives implemented in JavaScript really
6358 // efficient, we generate special code for expressions of the form:
6359 // 'typeof <expression> == <string>'.
6360 UnaryOperation* operation = left->AsUnaryOperation();
6361 if ((op == Token::EQ || op == Token::EQ_STRICT) &&
6362 (operation != NULL && operation->op() == Token::TYPEOF) &&
6363 (right->AsLiteral() != NULL &&
6364 right->AsLiteral()->handle()->IsString())) {
6365 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
6366
6367 // Load the operand, move it to a register.
6368 LoadTypeofExpression(operation->expression());
6369 Register tos = frame_->PopToRegister();
6370
6371 Register scratch = VirtualFrame::scratch0();
6372
6373 if (check->Equals(HEAP->number_symbol())) {
6374 __ tst(tos, Operand(kSmiTagMask));
6375 true_target()->Branch(eq);
6376 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
6377 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
6378 __ cmp(tos, ip);
6379 cc_reg_ = eq;
6380
6381 } else if (check->Equals(HEAP->string_symbol())) {
6382 __ tst(tos, Operand(kSmiTagMask));
6383 false_target()->Branch(eq);
6384
6385 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
6386
6387 // It can be an undetectable string object.
6388 __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
6389 __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
6390 __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
6391 false_target()->Branch(eq);
6392
6393 __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
6394 __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
6395 cc_reg_ = lt;
6396
6397 } else if (check->Equals(HEAP->boolean_symbol())) {
6398 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
6399 __ cmp(tos, ip);
6400 true_target()->Branch(eq);
6401 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
6402 __ cmp(tos, ip);
6403 cc_reg_ = eq;
6404
6405 } else if (check->Equals(HEAP->undefined_symbol())) {
6406 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6407 __ cmp(tos, ip);
6408 true_target()->Branch(eq);
6409
6410 __ tst(tos, Operand(kSmiTagMask));
6411 false_target()->Branch(eq);
6412
6413 // It can be an undetectable object.
6414 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
6415 __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
6416 __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
6417 __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
6418
6419 cc_reg_ = eq;
6420
6421 } else if (check->Equals(HEAP->function_symbol())) {
6422 __ tst(tos, Operand(kSmiTagMask));
6423 false_target()->Branch(eq);
6424 Register map_reg = scratch;
6425 __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
6426 true_target()->Branch(eq);
6427 // Regular expressions are callable so typeof == 'function'.
6428 __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
6429 cc_reg_ = eq;
6430
6431 } else if (check->Equals(HEAP->object_symbol())) {
6432 __ tst(tos, Operand(kSmiTagMask));
6433 false_target()->Branch(eq);
6434
6435 __ LoadRoot(ip, Heap::kNullValueRootIndex);
6436 __ cmp(tos, ip);
6437 true_target()->Branch(eq);
6438
6439 Register map_reg = scratch;
6440 __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
6441 false_target()->Branch(eq);
6442
6443 // It can be an undetectable object.
6444 __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
6445 __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
6446 __ cmp(tos, Operand(1 << Map::kIsUndetectable));
6447 false_target()->Branch(eq);
6448
6449 __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
6450 __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
6451 false_target()->Branch(lt);
6452 __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
6453 cc_reg_ = le;
6454
6455 } else {
6456 // Uncommon case: typeof testing against a string literal that is
6457 // never returned from the typeof operator.
6458 false_target()->Jump();
6459 }
6460 ASSERT(!has_valid_frame() ||
6461 (has_cc() && frame_->height() == original_height));
6462 return;
6463 }
6464
6465 switch (op) {
6466 case Token::EQ:
6467 Comparison(eq, left, right, false);
6468 break;
6469
6470 case Token::LT:
6471 Comparison(lt, left, right);
6472 break;
6473
6474 case Token::GT:
6475 Comparison(gt, left, right);
6476 break;
6477
6478 case Token::LTE:
6479 Comparison(le, left, right);
6480 break;
6481
6482 case Token::GTE:
6483 Comparison(ge, left, right);
6484 break;
6485
6486 case Token::EQ_STRICT:
6487 Comparison(eq, left, right, true);
6488 break;
6489
6490 case Token::IN: {
6491 Load(left);
6492 Load(right);
6493 frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
6494 frame_->EmitPush(r0);
6495 break;
6496 }
6497
6498 case Token::INSTANCEOF: {
6499 Load(left);
6500 Load(right);
6501 InstanceofStub stub(InstanceofStub::kNoFlags);
6502 frame_->CallStub(&stub, 2);
6503 // At this point if instanceof succeeded then r0 == 0.
6504 __ tst(r0, Operand(r0));
6505 cc_reg_ = eq;
6506 break;
6507 }
6508
6509 default:
6510 UNREACHABLE();
6511 }
6512 ASSERT((has_cc() && frame_->height() == original_height) ||
6513 (!has_cc() && frame_->height() == original_height + 1));
6514 }
6515
6516
6517 void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
6518 #ifdef DEBUG
6519 int original_height = frame_->height();
6520 #endif
6521 Comment cmnt(masm_, "[ CompareToNull");
6522
6523 Load(node->expression());
6524 Register tos = frame_->PopToRegister();
6525 __ LoadRoot(ip, Heap::kNullValueRootIndex);
6526 __ cmp(tos, ip);
6527
6528 // The 'null' value is only equal to 'undefined' if using non-strict
6529 // comparisons.
6530 if (!node->is_strict()) {
6531 true_target()->Branch(eq);
6532 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6533 __ cmp(tos, Operand(ip));
6534 true_target()->Branch(eq);
6535
6536 __ tst(tos, Operand(kSmiTagMask));
6537 false_target()->Branch(eq);
6538
6539 // It can be an undetectable object.
6540 __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
6541 __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
6542 __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
6543 __ cmp(tos, Operand(1 << Map::kIsUndetectable));
6544 }
6545
6546 cc_reg_ = eq;
6547 ASSERT(has_cc() && frame_->height() == original_height);
6548 }
6549
6550
6551 class DeferredReferenceGetNamedValue: public DeferredCode {
6552 public:
6553 explicit DeferredReferenceGetNamedValue(Register receiver,
6554 Handle<String> name,
6555 bool is_contextual)
6556 : receiver_(receiver),
6557 name_(name),
6558 is_contextual_(is_contextual),
6559 is_dont_delete_(false) {
6560 set_comment(is_contextual
6561 ? "[ DeferredReferenceGetNamedValue (contextual)"
6562 : "[ DeferredReferenceGetNamedValue");
6563 }
6564
6565 virtual void Generate();
6566
6567 void set_is_dont_delete(bool value) {
6568 ASSERT(is_contextual_);
6569 is_dont_delete_ = value;
6570 }
6571
6572 private:
6573 Register receiver_;
6574 Handle<String> name_;
6575 bool is_contextual_;
6576 bool is_dont_delete_;
6577 };
6578
6579
6580 // The convention here is that on entry the receiver is in a register that
6581 // is not used by the virtual frame. On exit the result is in that same
6582 // register and the stack has the same height.
6583 void DeferredReferenceGetNamedValue::Generate() {
6584 #ifdef DEBUG
6585 int expected_height = frame_state()->frame()->height();
6586 #endif
6587 VirtualFrame copied_frame(*frame_state()->frame());
6588 copied_frame.SpillAll();
6589
6590 Register scratch1 = VirtualFrame::scratch0();
6591 Register scratch2 = VirtualFrame::scratch1();
6592 ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
6593 __ DecrementCounter(masm_->isolate()->counters()->named_load_inline(),
6594 1, scratch1, scratch2);
6595 __ IncrementCounter(masm_->isolate()->counters()->named_load_inline_miss(),
6596 1, scratch1, scratch2);
6597
6598 // Ensure receiver in r0 and name in r2 to match the load IC calling convention.
6599 __ Move(r0, receiver_);
6600 __ mov(r2, Operand(name_));
6601
6602 // The rest of the instructions in the deferred code must be together.
6603 { Assembler::BlockConstPoolScope block_const_pool(masm_);
6604 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
6605 Builtins::kLoadIC_Initialize));
6606 RelocInfo::Mode mode = is_contextual_
6607 ? RelocInfo::CODE_TARGET_CONTEXT
6608 : RelocInfo::CODE_TARGET;
6609 __ Call(ic, mode);
6610 // We must mark the code just after the call with the correct marker.
6611 MacroAssembler::NopMarkerTypes code_marker;
6612 if (is_contextual_) {
6613 code_marker = is_dont_delete_
6614 ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
6615 : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
6616 } else {
6617 code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
6618 }
6619 __ MarkCode(code_marker);
6620
6621 // At this point the answer is in r0. We move it to the expected register
6622 // if necessary.
6623 __ Move(receiver_, r0);
6624
6625 // Now go back to the frame that we entered with. This will not overwrite
6626 // the receiver register since that register was not in use when we came
6627 // in. The instructions emitted by this merge are skipped over by the
6628 // inline load patching mechanism when looking for the branch instruction
6629 // that tells it where the code to patch is.
6630 copied_frame.MergeTo(frame_state()->frame());
6631
6632 // Block the constant pool for one more instruction after leaving this
6633 // constant pool block scope to include the branch instruction ending the
6634 // deferred code.
6635 __ BlockConstPoolFor(1);
6636 }
6637 ASSERT_EQ(expected_height, frame_state()->frame()->height());
6638 }
6639
6640
6641 class DeferredReferenceGetKeyedValue: public DeferredCode {
6642 public:
6643 DeferredReferenceGetKeyedValue(Register key, Register receiver)
6644 : key_(key), receiver_(receiver) {
6645 set_comment("[ DeferredReferenceGetKeyedValue");
6646 }
6647
6648 virtual void Generate();
6649
6650 private:
6651 Register key_;
6652 Register receiver_;
6653 };
6654
6655
6656 // Takes the key and receiver in r0 and r1 or vice versa. Returns the
6657 // result in r0.
6658 void DeferredReferenceGetKeyedValue::Generate() {
6659 ASSERT((key_.is(r0) && receiver_.is(r1)) ||
6660 (key_.is(r1) && receiver_.is(r0)));
6661
6662 VirtualFrame copied_frame(*frame_state()->frame());
6663 copied_frame.SpillAll();
6664
6665 Register scratch1 = VirtualFrame::scratch0();
6666 Register scratch2 = VirtualFrame::scratch1();
6667 __ DecrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
6668 1, scratch1, scratch2);
6669 __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline_miss(),
6670 1, scratch1, scratch2);
6671
6672 // Ensure key in r0 and receiver in r1 to match the keyed load IC calling
6673 // convention.
6674 if (key_.is(r1)) {
6675 __ Swap(r0, r1, ip);
6676 }
6677
6678 // The rest of the instructions in the deferred code must be together.
6679 { Assembler::BlockConstPoolScope block_const_pool(masm_);
6680 // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
6681 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
6682 Builtins::kKeyedLoadIC_Initialize));
6683 __ Call(ic, RelocInfo::CODE_TARGET);
6684 // The call must be followed by a nop instruction to indicate that the
6685 // keyed load has been inlined.
6686 __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
6687
6688 // Now go back to the frame that we entered with. This will not overwrite
6689 // the receiver or key registers since they were not in use when we came
6690 // in. The instructions emitted by this merge are skipped over by the
6691 // inline load patching mechanism when looking for the branch instruction
6692 // that tells it where the code to patch is.
6693 copied_frame.MergeTo(frame_state()->frame());
6694
6695 // Block the constant pool for one more instruction after leaving this
6696 // constant pool block scope to include the branch instruction ending the
6697 // deferred code.
6698 __ BlockConstPoolFor(1);
6699 }
6700 }
6701
6702
6703 class DeferredReferenceSetKeyedValue: public DeferredCode {
6704 public:
6705 DeferredReferenceSetKeyedValue(Register value,
6706 Register key,
6707 Register receiver,
6708 StrictModeFlag strict_mode)
6709 : value_(value),
6710 key_(key),
6711 receiver_(receiver),
6712 strict_mode_(strict_mode) {
6713 set_comment("[ DeferredReferenceSetKeyedValue");
6714 }
6715
6716 virtual void Generate();
6717
6718 private:
6719 Register value_;
6720 Register key_;
6721 Register receiver_;
6722 StrictModeFlag strict_mode_;
6723 };
6724
6725
6726 void DeferredReferenceSetKeyedValue::Generate() {
6727 Register scratch1 = VirtualFrame::scratch0();
6728 Register scratch2 = VirtualFrame::scratch1();
6729 __ DecrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
6730 1, scratch1, scratch2);
6731 __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline_miss(),
6732 1, scratch1, scratch2);
6733
6734 // Ensure value in r0, key in r1 and receiver in r2 to match the keyed store IC
6735 // calling convention.
6736 if (value_.is(r1)) {
6737 __ Swap(r0, r1, ip);
6738 }
6739 ASSERT(receiver_.is(r2));
6740
6741 // The rest of the instructions in the deferred code must be together.
6742 { Assembler::BlockConstPoolScope block_const_pool(masm_);
6743 // Call keyed store IC. It has the arguments value, key and receiver in r0,
6744 // r1 and r2.
6745 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
6746 (strict_mode_ == kStrictMode)
6747 ? Builtins::kKeyedStoreIC_Initialize_Strict
6748 : Builtins::kKeyedStoreIC_Initialize));
6749 __ Call(ic, RelocInfo::CODE_TARGET);
6750 // The call must be followed by a nop instruction to indicate that the
6751 // keyed store has been inlined.
6752 __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
6753
6754 // Block the constant pool for one more instruction after leaving this
6755 // constant pool block scope to include the branch instruction ending the
6756 // deferred code.
6757 __ BlockConstPoolFor(1);
6758 }
6759 }
6760
6761
6762 class DeferredReferenceSetNamedValue: public DeferredCode {
6763 public:
6764 DeferredReferenceSetNamedValue(Register value,
6765 Register receiver,
6766 Handle<String> name,
6767 StrictModeFlag strict_mode)
6768 : value_(value),
6769 receiver_(receiver),
6770 name_(name),
6771 strict_mode_(strict_mode) {
6772 set_comment("[ DeferredReferenceSetNamedValue");
6773 }
6774
6775 virtual void Generate();
6776
6777 private:
6778 Register value_;
6779 Register receiver_;
6780 Handle<String> name_;
6781 StrictModeFlag strict_mode_;
6782 };
6783
6784
6785 // Takes value in r0, receiver in r1 and returns the result (the
6786 // value) in r0.
6787 void DeferredReferenceSetNamedValue::Generate() {
6788 // Record the entry frame and spill.
6789 VirtualFrame copied_frame(*frame_state()->frame());
6790 copied_frame.SpillAll();
6791
6792 // Ensure value in r0, receiver in r1 to match the store IC calling
6793 // convention.
6794 ASSERT(value_.is(r0) && receiver_.is(r1));
6795 __ mov(r2, Operand(name_));
6796
6797 // The rest of the instructions in the deferred code must be together.
6798 { Assembler::BlockConstPoolScope block_const_pool(masm_);
6799 // Call the named store IC. It has the arguments value, receiver and name
6800 // in r0, r1 and r2.
6801 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
6802 (strict_mode_ == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
6803 : Builtins::kStoreIC_Initialize));
6804 __ Call(ic, RelocInfo::CODE_TARGET);
6805 // The call must be followed by a nop instruction to indicate that the
6806 // named store has been inlined.
6807 __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
6808
6809 // Go back to the frame we entered with. The instructions
6810 // generated by this merge are skipped over by the inline store
6811 // patching mechanism when looking for the branch instruction that
6812 // tells it where the code to patch is.
6813 copied_frame.MergeTo(frame_state()->frame());
6814
6815 // Block the constant pool for one more instruction after leaving this
6816 // constant pool block scope to include the branch instruction ending the
6817 // deferred code.
6818 __ BlockConstPoolFor(1);
6819 }
6820 }
6821
6822
6823 // Consumes the top of stack (the receiver) and pushes the result instead.
6824 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
6825 bool contextual_load_in_builtin =
6826 is_contextual &&
6827 (ISOLATE->bootstrapper()->IsActive() ||
6828 (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
6829
6830 if (scope()->is_global_scope() ||
6831 loop_nesting() == 0 ||
6832 contextual_load_in_builtin) {
6833 Comment cmnt(masm(), "[ Load from named Property");
6834 // Set up the name register and call the load IC.
6835 frame_->CallLoadIC(name,
6836 is_contextual
6837 ? RelocInfo::CODE_TARGET_CONTEXT
6838 : RelocInfo::CODE_TARGET);
6839 frame_->EmitPush(r0); // Push answer.
6840 } else {
6841 // Inline the in-object property case.
6842 Comment cmnt(masm(), is_contextual
6843 ? "[ Inlined contextual property load"
6844 : "[ Inlined named property load");
6845
6846 // Counter will be decremented in the deferred code. Placed here to avoid
6847 // having it in the instruction stream below where patching will occur.
6848 if (is_contextual) {
6849 __ IncrementCounter(
6850 masm_->isolate()->counters()->named_load_global_inline(),
6851 1, frame_->scratch0(), frame_->scratch1());
6852 } else {
6853 __ IncrementCounter(masm_->isolate()->counters()->named_load_inline(),
6854 1, frame_->scratch0(), frame_->scratch1());
6855 }
6856
6857 // The following instructions are the inlined load of an in-object property.
6858 // Parts of this code are patched, so the exact instructions generated need
6859 // to be fixed. Therefore the constant pool is blocked while generating
6860 // this code.
6861
6862 // Load the receiver from the stack.
6863 Register receiver = frame_->PopToRegister();
6864
6865 DeferredReferenceGetNamedValue* deferred =
6866 new DeferredReferenceGetNamedValue(receiver, name, is_contextual);
6867
6868 bool is_dont_delete = false;
6869 if (is_contextual) {
6870 if (!info_->closure().is_null()) {
6871 // When doing lazy compilation we can check if the global cell
6872 // already exists and use its "don't delete" status as a hint.
6873 AssertNoAllocation no_gc;
6874 v8::internal::GlobalObject* global_object =
6875 info_->closure()->context()->global();
6876 LookupResult lookup;
6877 global_object->LocalLookupRealNamedProperty(*name, &lookup);
6878 if (lookup.IsProperty() && lookup.type() == NORMAL) {
6879 ASSERT(lookup.holder() == global_object);
6880 ASSERT(global_object->property_dictionary()->ValueAt(
6881 lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
6882 is_dont_delete = lookup.IsDontDelete();
6883 }
6884 }
6885 if (is_dont_delete) {
6886 __ IncrementCounter(
6887 masm_->isolate()->counters()->dont_delete_hint_hit(),
6888 1, frame_->scratch0(), frame_->scratch1());
6889 }
6890 }
6891
6892 { Assembler::BlockConstPoolScope block_const_pool(masm_);
6893 if (!is_contextual) {
6894 // Check that the receiver is a heap object.
6895 __ tst(receiver, Operand(kSmiTagMask));
6896 deferred->Branch(eq);
6897 }
6898
6899 // Check for the_hole_value if necessary.
6900 // Below we rely on the number of instructions generated, and we can't
6901 // cope with the Check macro which does not generate a fixed number of
6902 // instructions.
6903 Label skip, check_the_hole, cont;
6904 if (FLAG_debug_code && is_contextual && is_dont_delete) {
6905 __ b(&skip);
6906 __ bind(&check_the_hole);
6907 __ Check(ne, "DontDelete cells can't contain the hole");
6908 __ b(&cont);
6909 __ bind(&skip);
6910 }
6911
6912 #ifdef DEBUG
6913 int InlinedNamedLoadInstructions = 5;
6914 Label check_inlined_codesize;
6915 masm_->bind(&check_inlined_codesize);
6916 #endif
6917
6918 Register scratch = VirtualFrame::scratch0();
6919 Register scratch2 = VirtualFrame::scratch1();
6920
6921 // Check the map. The null map used below is patched by the inline cache
6922 // code. Therefore we can't use a LoadRoot call.
6923 __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
6924 __ mov(scratch2, Operand(FACTORY->null_value()));
6925 __ cmp(scratch, scratch2);
6926 deferred->Branch(ne);
6927
6928 if (is_contextual) {
6929 #ifdef DEBUG
6930 InlinedNamedLoadInstructions += 1;
6931 #endif
6932 // Load the (initially invalid) cell and get its value.
6933 masm()->mov(receiver, Operand(FACTORY->null_value()));
6934 __ ldr(receiver,
6935 FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
6936
6937 deferred->set_is_dont_delete(is_dont_delete);
6938
6939 if (!is_dont_delete) {
6940 #ifdef DEBUG
6941 InlinedNamedLoadInstructions += 3;
6942 #endif
6943 __ cmp(receiver, Operand(FACTORY->the_hole_value()));
6944 deferred->Branch(eq);
6945 } else if (FLAG_debug_code) {
6946 #ifdef DEBUG
6947 InlinedNamedLoadInstructions += 3;
6948 #endif
6949 __ cmp(receiver, Operand(FACTORY->the_hole_value()));
6950 __ b(&check_the_hole, eq);
6951 __ bind(&cont);
6952 }
6953 } else {
6954 // Initially use an invalid index. The index will be patched by the
6955 // inline cache code.
6956 __ ldr(receiver, MemOperand(receiver, 0));
6957 }
6958
6959 // Make sure that the expected number of instructions is generated.
6960 // If the code before is updated, the offsets in ic-arm.cc
6961 // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
6962 // to be updated.
6963 ASSERT_EQ(InlinedNamedLoadInstructions,
6964 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6965 }
6966
6967 deferred->BindExit();
6968 // At this point the receiver register has the result, either from the
6969 // deferred code or from the inlined code.
6970 frame_->EmitPush(receiver);
6971 }
6972 }
6973
6974
6975 void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
6976 #ifdef DEBUG
6977 int expected_height = frame()->height() - (is_contextual ? 1 : 2);
6978 #endif
6979
6980 Result result;
6981 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
6982 frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
6983 } else {
6984 // Inline the in-object property case.
6985 JumpTarget slow, done;
6986
6987 // Get the value and receiver from the stack.
6988 frame()->PopToR0();
6989 Register value = r0;
6990 frame()->PopToR1();
6991 Register receiver = r1;
6992
6993 DeferredReferenceSetNamedValue* deferred =
6994 new DeferredReferenceSetNamedValue(
6995 value, receiver, name, strict_mode_flag());
6996
6997 // Check that the receiver is a heap object.
6998 __ tst(receiver, Operand(kSmiTagMask));
6999 deferred->Branch(eq);
7000
7001 // The following instructions are part of the inlined
7002 // in-object property store code which can be patched. Therefore
7003 // the exact number of instructions generated must be fixed, so
7004 // the constant pool is blocked while generating this code.
7005 { Assembler::BlockConstPoolScope block_const_pool(masm_);
7006 Register scratch0 = VirtualFrame::scratch0();
7007 Register scratch1 = VirtualFrame::scratch1();
7008
7009 // Check the map. Initially use an invalid map to force a
7010 // failure. The map check will be patched in the runtime system.
7011 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
7012
7013 #ifdef DEBUG
7014 Label check_inlined_codesize;
7015 masm_->bind(&check_inlined_codesize);
7016 #endif
7017 __ mov(scratch0, Operand(FACTORY->null_value()));
7018 __ cmp(scratch0, scratch1);
7019 deferred->Branch(ne);
7020
7021 int offset = 0;
7022 __ str(value, MemOperand(receiver, offset));
7023
7024 // Update the write barrier and record its size. We do not use
7025 // the RecordWrite macro here because we want the offset
7026 // addition instruction first to make it easy to patch.
7027 Label record_write_start, record_write_done;
7028 __ bind(&record_write_start);
7029 // Add offset into the object.
7030 __ add(scratch0, receiver, Operand(offset));
7031 // Test that the object is not in the new space. We cannot set
7032 // region marks for new space pages.
7033 __ InNewSpace(receiver, scratch1, eq, &record_write_done);
7034 // Record the actual write.
7035 __ RecordWriteHelper(receiver, scratch0, scratch1);
7036 __ bind(&record_write_done);
7037 // Clobber all input registers when running with the debug-code flag
7038 // turned on to provoke errors.
7039 if (FLAG_debug_code) {
7040 __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
7041 __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
7042 __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
7043 }
7044 // Check that this is the first inlined write barrier or that
7045 // this inlined write barrier has the same size as all the other
7046 // inlined write barriers.
7047 ASSERT((Isolate::Current()->inlined_write_barrier_size() == -1) ||
7048 (Isolate::Current()->inlined_write_barrier_size() ==
7049 masm()->InstructionsGeneratedSince(&record_write_start)));
7050 Isolate::Current()->set_inlined_write_barrier_size(
7051 masm()->InstructionsGeneratedSince(&record_write_start));
7052
7053 // Make sure that the expected number of instructions is generated.
7054 ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
7055 masm()->InstructionsGeneratedSince(&check_inlined_codesize));
7056 }
7057 deferred->BindExit();
7058 }
7059 ASSERT_EQ(expected_height, frame()->height());
7060 }
7061
7062
7063 void CodeGenerator::EmitKeyedLoad() {
7064 if (loop_nesting() == 0) {
7065 Comment cmnt(masm_, "[ Load from keyed property");
7066 frame_->CallKeyedLoadIC();
7067 } else {
7068 // Inline the keyed load.
7069 Comment cmnt(masm_, "[ Inlined load from keyed property");
7070
7071 // Counter will be decremented in the deferred code. Placed here to avoid
7072 // having it in the instruction stream below where patching will occur.
7073 __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
7074 1, frame_->scratch0(), frame_->scratch1());
7075
7076 // Load the key and receiver from the stack.
7077 bool key_is_known_smi = frame_->KnownSmiAt(0);
7078 Register key = frame_->PopToRegister();
7079 Register receiver = frame_->PopToRegister(key);
7080
7081 // The deferred code expects key and receiver in registers.
7082 DeferredReferenceGetKeyedValue* deferred =
7083 new DeferredReferenceGetKeyedValue(key, receiver);
7084
7085 // Check that the receiver is a heap object.
7086 __ tst(receiver, Operand(kSmiTagMask));
7087 deferred->Branch(eq);
7088
7089 // The following instructions are part of the inlined keyed property
7090 // load code which can be patched. Therefore the exact number of
7091 // instructions generated needs to be fixed, so the constant pool is blocked
7092 // while generating this code.
7093 { Assembler::BlockConstPoolScope block_const_pool(masm_);
7094 Register scratch1 = VirtualFrame::scratch0();
7095 Register scratch2 = VirtualFrame::scratch1();
7096 // Check the map. The null map used below is patched by the inline cache
7097 // code.
7098 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
7099
7100 // Check that the key is a smi.
7101 if (!key_is_known_smi) {
7102 __ tst(key, Operand(kSmiTagMask));
7103 deferred->Branch(ne);
7104 }
7105
7106 #ifdef DEBUG
7107 Label check_inlined_codesize;
7108 masm_->bind(&check_inlined_codesize);
7109 #endif
7110 __ mov(scratch2, Operand(FACTORY->null_value()));
7111 __ cmp(scratch1, scratch2);
7112 deferred->Branch(ne);
7113
7114 // Get the elements array from the receiver.
7115 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
7116 __ AssertFastElements(scratch1);
7117
7118 // Check that key is within bounds. Use unsigned comparison to handle
7119 // negative keys.
7120 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
7121 __ cmp(scratch2, key);
7122 deferred->Branch(ls); // Unsigned less equal.
7123
7124 // Load and check that the result is not the hole (key is a smi).
7125 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
7126 __ add(scratch1,
7127 scratch1,
7128 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
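// The key is a smi, so the LSL by
// kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize) in the addressing mode
// below turns the smi-encoded index directly into a byte offset within
// the elements array.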
7129 __ ldr(scratch1,
7130 MemOperand(scratch1, key, LSL,
7131 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
7132 __ cmp(scratch1, scratch2);
7133 deferred->Branch(eq);
7134
7135 __ mov(r0, scratch1);
7136 // Make sure that the expected number of instructions is generated.
7137 ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
7138 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
7139 }
7140
7141 deferred->BindExit();
7142 }
7143 }
7144
7145
7146 void CodeGenerator::EmitKeyedStore(StaticType* key_type,
7147 WriteBarrierCharacter wb_info) {
7148 // Generate inlined version of the keyed store if the code is in a loop
7149 // and the key is likely to be a smi.
7150 if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
7151 // Inline the keyed store.
7152 Comment cmnt(masm_, "[ Inlined store to keyed property");
7153
7154 Register scratch1 = VirtualFrame::scratch0();
7155 Register scratch2 = VirtualFrame::scratch1();
7156 Register scratch3 = r3;
7157
7158 // Counter will be decremented in the deferred code. Placed here to avoid
7159 // having it in the instruction stream below where patching will occur.
7160 __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
7161 1, scratch1, scratch2);
7162
7163
7164 // Load the value, key and receiver from the stack.
7165 bool value_is_harmless = frame_->KnownSmiAt(0);
7166 if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
7167 bool key_is_smi = frame_->KnownSmiAt(1);
7168 Register value = frame_->PopToRegister();
7169 Register key = frame_->PopToRegister(value);
7170 VirtualFrame::SpilledScope spilled(frame_);
7171 Register receiver = r2;
7172 frame_->EmitPop(receiver);
7173
7174 #ifdef DEBUG
7175 bool we_remembered_the_write_barrier = value_is_harmless;
7176 #endif
7177
7178 // The deferred code expects value, key and receiver in registers.
7179 DeferredReferenceSetKeyedValue* deferred =
7180 new DeferredReferenceSetKeyedValue(
7181 value, key, receiver, strict_mode_flag());
7182
7183 // Check that the value is a smi. As this inlined code does not set the
7184 // write barrier it is only possible to store smi values.
7185 if (!value_is_harmless) {
7186 // If the value is not likely to be a Smi then let's test the fixed array
7187 // for new space instead. See below.
7188 if (wb_info == LIKELY_SMI) {
7189 __ tst(value, Operand(kSmiTagMask));
7190 deferred->Branch(ne);
7191 #ifdef DEBUG
7192 we_remembered_the_write_barrier = true;
7193 #endif
7194 }
7195 }
7196
7197 if (!key_is_smi) {
7198 // Check that the key is a smi.
7199 __ tst(key, Operand(kSmiTagMask));
7200 deferred->Branch(ne);
7201 }
7202
7203 // Check that the receiver is a heap object.
7204 __ tst(receiver, Operand(kSmiTagMask));
7205 deferred->Branch(eq);
7206
7207 // Check that the receiver is a JSArray.
7208 __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
7209 deferred->Branch(ne);
7210
7211 // Get the elements array from the receiver.
7212 __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
7213 if (!value_is_harmless && wb_info != LIKELY_SMI) {
7214 Label ok;
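// The value only needs the smi check when the elements array is outside
// new space: the masked compare below leaves ne set exactly when the
// array is not in new space, and the conditional tst then sends non-smi
// values to the deferred IC path, since this inlined store emits no
// write barrier.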
7215 __ and_(scratch2,
7216 scratch1,
7217 Operand(ExternalReference::new_space_mask(isolate())));
7218 __ cmp(scratch2, Operand(ExternalReference::new_space_start(isolate())));
7219 __ tst(value, Operand(kSmiTagMask), ne);
7220 deferred->Branch(ne);
7221 #ifdef DEBUG
7222 we_remembered_the_write_barrier = true;
7223 #endif
7224 }
7225 // Check that the elements array is not a dictionary.
7226 __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
7227
7228 // The following instructions are part of the inlined keyed property
7229 // store code which can be patched. Therefore the exact number of
7230 // instructions generated needs to be fixed, so the constant pool is blocked
7231 // while generating this code.
7232 { Assembler::BlockConstPoolScope block_const_pool(masm_);
7233 #ifdef DEBUG
7234 Label check_inlined_codesize;
7235 masm_->bind(&check_inlined_codesize);
7236 #endif
7237
7238 // Read the fixed array map from the constant pool (not from the root
7239 // array) so that the value can be patched. When debugging, we patch this
7240 // comparison to always fail so that we will hit the IC call in the
7241 // deferred code which will allow the debugger to break for fast case
7242 // stores.
7243 __ mov(scratch3, Operand(FACTORY->fixed_array_map()));
7244 __ cmp(scratch2, scratch3);
7245 deferred->Branch(ne);
7246
7247 // Check that the key is within bounds. Both the key and the length of
7248 // the JSArray are smis (because the fixed array check above ensures the
7249 // elements are in fast case). Use unsigned comparison to handle negative
7250 // keys.
7251 __ ldr(scratch3, FieldMemOperand(receiver, JSArray::kLengthOffset));
7252 __ cmp(scratch3, key);
7253 deferred->Branch(ls); // Unsigned less equal.
7254
7255 // Store the value.
7256 __ add(scratch1, scratch1,
7257 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7258 __ str(value,
7259 MemOperand(scratch1, key, LSL,
7260 kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
7261
7262 // Make sure that the expected number of instructions is generated.
7263 ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
7264 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
7265 }
7266
7267 ASSERT(we_remembered_the_write_barrier);
7268
7269 deferred->BindExit();
7270 } else {
7271 frame()->CallKeyedStoreIC(strict_mode_flag());
7272 }
7273 }
7274
7275
7276 #ifdef DEBUG
7277 bool CodeGenerator::HasValidEntryRegisters() { return true; }
7278 #endif
7279
7280
7281 #undef __
7282 #define __ ACCESS_MASM(masm)
7283
7284 Handle<String> Reference::GetName() {
7285 ASSERT(type_ == NAMED);
7286 Property* property = expression_->AsProperty();
7287 if (property == NULL) {
7288 // Global variable reference treated as a named property reference.
7289 VariableProxy* proxy = expression_->AsVariableProxy();
7290 ASSERT(proxy->AsVariable() != NULL);
7291 ASSERT(proxy->AsVariable()->is_global());
7292 return proxy->name();
7293 } else {
7294 Literal* raw_name = property->key()->AsLiteral();
7295 ASSERT(raw_name != NULL);
7296 return Handle<String>(String::cast(*raw_name->handle()));
7297 }
7298 }
7299
7300
7301 void Reference::DupIfPersist() {
7302 if (persist_after_get_) {
7303 switch (type_) {
7304 case KEYED:
7305 cgen_->frame()->Dup2();
7306 break;
7307 case NAMED:
7308 cgen_->frame()->Dup();
7309 // Fall through.
7310 case UNLOADED:
7311 case ILLEGAL:
7312 case SLOT:
7313 // Do nothing.
7314 ;
7315 }
7316 } else {
7317 set_unloaded();
7318 }
7319 }
7320
7321
7322 void Reference::GetValue() {
7323 ASSERT(cgen_->HasValidEntryRegisters());
7324 ASSERT(!is_illegal());
7325 ASSERT(!cgen_->has_cc());
7326 MacroAssembler* masm = cgen_->masm();
7327 Property* property = expression_->AsProperty();
7328 if (property != NULL) {
7329 cgen_->CodeForSourcePosition(property->position());
7330 }
7331
7332 switch (type_) {
7333 case SLOT: {
7334 Comment cmnt(masm, "[ Load from Slot");
7335 Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
7336 ASSERT(slot != NULL);
7337 DupIfPersist();
7338 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
7339 break;
7340 }
7341
7342 case NAMED: {
7343 Variable* var = expression_->AsVariableProxy()->AsVariable();
7344 bool is_global = var != NULL;
7345 ASSERT(!is_global || var->is_global());
7346 Handle<String> name = GetName();
7347 DupIfPersist();
7348 cgen_->EmitNamedLoad(name, is_global);
7349 break;
7350 }
7351
7352 case KEYED: {
7353 ASSERT(property != NULL);
7354 DupIfPersist();
7355 cgen_->EmitKeyedLoad();
7356 cgen_->frame()->EmitPush(r0);
7357 break;
7358 }
7359
7360 default:
7361 UNREACHABLE();
7362 }
7363 }
7364
7365
7366 void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
7367 ASSERT(!is_illegal());
7368 ASSERT(!cgen_->has_cc());
7369 MacroAssembler* masm = cgen_->masm();
7370 VirtualFrame* frame = cgen_->frame();
7371 Property* property = expression_->AsProperty();
7372 if (property != NULL) {
7373 cgen_->CodeForSourcePosition(property->position());
7374 }
7375
7376 switch (type_) {
7377 case SLOT: {
7378 Comment cmnt(masm, "[ Store to Slot");
7379 Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
7380 cgen_->StoreToSlot(slot, init_state);
7381 set_unloaded();
7382 break;
7383 }
7384
7385 case NAMED: {
7386 Comment cmnt(masm, "[ Store to named Property");
7387 cgen_->EmitNamedStore(GetName(), false);
7388 frame->EmitPush(r0);
7389 set_unloaded();
7390 break;
7391 }
7392
7393 case KEYED: {
7394 Comment cmnt(masm, "[ Store to keyed Property");
7395 Property* property = expression_->AsProperty();
7396 ASSERT(property != NULL);
7397 cgen_->CodeForSourcePosition(property->position());
7398 cgen_->EmitKeyedStore(property->key()->type(), wb_info);
7399 frame->EmitPush(r0);
7400 set_unloaded();
7401 break;
7402 }
7403
7404 default:
7405 UNREACHABLE();
7406 }
7407 }
7408
7409
7410 const char* GenericBinaryOpStub::GetName() {
7411 if (name_ != NULL) return name_;
7412 const int len = 100;
7413 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
7414 if (name_ == NULL) return "OOM";
7415 const char* op_name = Token::Name(op_);
7416 const char* overwrite_name;
7417 switch (mode_) {
7418 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
7419 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
7420 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
7421 default: overwrite_name = "UnknownOverwrite"; break;
7422 }
7423
7424 OS::SNPrintF(Vector<char>(name_, len),
7425 "GenericBinaryOpStub_%s_%s%s_%s",
7426 op_name,
7427 overwrite_name,
7428 specialized_on_rhs_ ? "_ConstantRhs" : "",
7429 BinaryOpIC::GetName(runtime_operands_type_));
7430 return name_;
7431 }
7432
7433 #undef __
7434
7435 } } // namespace v8::internal 50 } } // namespace v8::internal
7436 51
7437 #endif // V8_TARGET_ARCH_ARM 52 #endif // V8_TARGET_ARCH_ARM