OLD | NEW |
| (Empty) |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | |
2 // Redistribution and use in source and binary forms, with or without | |
3 // modification, are permitted provided that the following conditions are | |
4 // met: | |
5 // | |
6 // * Redistributions of source code must retain the above copyright | |
7 // notice, this list of conditions and the following disclaimer. | |
8 // * Redistributions in binary form must reproduce the above | |
9 // copyright notice, this list of conditions and the following | |
10 // disclaimer in the documentation and/or other materials provided | |
11 // with the distribution. | |
12 // * Neither the name of Google Inc. nor the names of its | |
13 // contributors may be used to endorse or promote products derived | |
14 // from this software without specific prior written permission. | |
15 // | |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
27 | |
28 #include "v8.h" | |
29 | |
30 #include "bootstrapper.h" | |
31 #include "codegen-inl.h" | |
32 #include "debug.h" | |
33 #include "parser.h" | |
34 #include "register-allocator-inl.h" | |
35 #include "runtime.h" | |
36 #include "scopes.h" | |
37 | |
38 namespace v8 { namespace internal { | |
39 | |
40 #define __ ACCESS_MASM(masm_) | |
41 | |
42 // ------------------------------------------------------------------------- | |
43 // CodeGenState implementation. | |
44 | |
45 CodeGenState::CodeGenState(CodeGenerator* owner) | |
46 : owner_(owner), | |
47 typeof_state_(NOT_INSIDE_TYPEOF), | |
48 destination_(NULL), | |
49 previous_(NULL) { | |
50 owner_->set_state(this); | |
51 } | |
52 | |
53 | |
54 CodeGenState::CodeGenState(CodeGenerator* owner, | |
55 TypeofState typeof_state, | |
56 ControlDestination* destination) | |
57 : owner_(owner), | |
58 typeof_state_(typeof_state), | |
59 destination_(destination), | |
60 previous_(owner->state()) { | |
61 owner_->set_state(this); | |
62 } | |
63 | |
64 | |
65 CodeGenState::~CodeGenState() { | |
66 ASSERT(owner_->state() == this); | |
67 owner_->set_state(previous_); | |
68 } | |
69 | |
70 | |
71 // ------------------------------------------------------------------------- | |
72 // CodeGenerator implementation | |
73 | |
// Construct a code generator for a script function.  The macro
// assembler is heap-allocated with the given buffer size; scope,
// frame and register allocator stay NULL here and are set up by
// GenCode when compilation actually starts.
CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
                             bool is_eval)
    : is_eval_(is_eval),
      script_(script),
      deferred_(8),  // Initial capacity of the deferred-code list.
      masm_(new MacroAssembler(NULL, buffer_size)),
      scope_(NULL),
      frame_(NULL),
      allocator_(NULL),
      state_(NULL),
      loop_nesting_(0),
      function_return_is_shadowed_(false),
      in_spilled_code_(false) {
}
88 | |
89 | |
// Calling conventions:
// ebp: caller's frame pointer
// esp: stack pointer
// edi: called JS function
// esi: callee's context

// Generate ia32 code for the body of |fun|.  Sets up the virtual
// frame and register allocator, emits the prologue (arguments
// object, local context, context-allocated parameters), compiles
// the declarations and statements, and emits the return sequence.
// If a stack overflow is detected during compilation, deferred code
// is discarded and generation is abandoned.
void CodeGenerator::GenCode(FunctionLiteral* fun) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(fun);

  ZoneList<Statement*>* body = fun->body();

  // Initialize state.  A CodeGenerator compiles exactly one function,
  // so all of these are expected to be unset on entry.
  ASSERT(scope_ == NULL);
  scope_ = fun->scope();
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame(this);
  set_in_spilled_code(false);

  // Adjust for function-level loop nesting.
  loop_nesting_ += fun->loop_nesting();

  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments, return address.
    // ebp: caller's frame pointer
    // esp: stack pointer
    // edi: called JS function
    // esi: callee's context
    allocator_->Initialize();
    frame_->Enter();

#ifdef DEBUG
    // Support for --stop-at: break into the debugger at the start of
    // the named function.
    if (strlen(FLAG_stop_at) > 0 &&
        fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ int3();
    }
#endif

    // Allocate space for locals and initialize them.
    frame_->AllocateStackSlots(scope_->num_stack_slots());
    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    // Allocate the arguments object and copy the parameters into it.
    if (scope_->arguments() != NULL) {
      ASSERT(scope_->arguments_shadow() != NULL);
      Comment cmnt(masm_, "[ Allocate arguments object");
      ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
      frame_->PushFunction();
      frame_->PushReceiverSlotAddress();
      frame_->Push(Smi::FromInt(scope_->num_parameters()));
      Result answer = frame_->CallStub(&stub, 3);
      // Leave the arguments object on the frame; it is consumed by
      // the "store arguments object" section below.
      frame_->Push(&answer);
    }

    if (scope_->num_heap_slots() > 0) {
      Comment cmnt(masm_, "[ allocate local context");
      // Allocate local context.
      // Get outer context and create a new context based on it.
      frame_->PushFunction();
      Result context = frame_->CallRuntime(Runtime::kNewContext, 1);

      // Update context local.
      frame_->SaveContextRegister();

      // Verify that the runtime call result and esi agree.
      if (FLAG_debug_code) {
        __ cmp(context.reg(), Operand(esi));
        __ Assert(equal, "Runtime::NewContext should end up in esi");
      }
    }

    // TODO(1241774): Improve this code:
    // 1) only needed if we have a context
    // 2) no need to recompute context ptr every single time
    // 3) don't copy parameter operand code from SlotOperand!
    {
      Comment cmnt2(masm_, "[ copy context parameters into .context");

      // Note that iteration order is relevant here! If we have the same
      // parameter twice (e.g., function (x, y, x)), and that parameter
      // needs to be copied into the context, it must be the last argument
      // passed to the parameter that needs to be copied. This is a rare
      // case so we don't check for it, instead we rely on the copying
      // order: such a parameter is copied repeatedly into the same
      // context location and thus the last value is what is seen inside
      // the function.
      for (int i = 0; i < scope_->num_parameters(); i++) {
        Variable* par = scope_->parameter(i);
        Slot* slot = par->slot();
        if (slot != NULL && slot->type() == Slot::CONTEXT) {
          // The use of SlotOperand below is safe in unspilled code
          // because the slot is guaranteed to be a context slot.
          //
          // There are no parameters in the global scope.
          ASSERT(!scope_->is_global_scope());
          frame_->PushParameterAt(i);
          Result value = frame_->Pop();
          value.ToRegister();

          // SlotOperand loads context.reg() with the context object
          // stored to, used below in RecordWrite.
          Result context = allocator_->Allocate();
          ASSERT(context.is_valid());
          __ mov(SlotOperand(slot, context.reg()), value.reg());
          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
          Result scratch = allocator_->Allocate();
          ASSERT(scratch.is_valid());
          // Spill before RecordWrite because it clobbers its register
          // arguments.
          frame_->Spill(context.reg());
          frame_->Spill(value.reg());
          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
        }
      }
    }

    // Store the arguments object.  This must happen after context
    // initialization because the arguments object may be stored in
    // the context.
    //
    // NOTE(review): an earlier comment here claimed ecx is pushed when
    // the address was not saved to TOS; that does not match the code
    // below — confirm against the Reference machinery before relying
    // on it.
    if (scope_->arguments() != NULL) {
      Comment cmnt(masm_, "[ store arguments object");
      { Reference shadow_ref(this, scope_->arguments_shadow());
        ASSERT(shadow_ref.is_slot());
        { Reference arguments_ref(this, scope_->arguments());
          ASSERT(arguments_ref.is_slot());
          // Here we rely on the convenient property that references to slot
          // take up zero space in the frame (ie, it doesn't matter that the
          // stored value is actually below the reference on the frame).
          arguments_ref.SetValue(NOT_CONST_INIT);
        }
        shadow_ref.SetValue(NOT_CONST_INIT);
      }
      frame_->Drop();  // Value is no longer needed.
    }

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope_->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope_->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.  (Note this early return skips the state teardown
      // at the bottom of this function; the generator is abandoned.)
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }
    CheckStack();

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(body);

      // Handle the return from the function.
      if (has_valid_frame()) {
        // If there is a valid frame, control flow can fall off the end of
        // the body. In that case there is an implicit return statement.
        ASSERT(!function_return_is_shadowed_);
        CodeForReturnPosition(fun);
        frame_->PrepareForReturn();
        Result undefined(Factory::undefined_value(), this);
        if (function_return_.is_bound()) {
          function_return_.Jump(&undefined);
        } else {
          // Though this is a (possibly) backward block, the frames
          // can only differ on their top element.
          function_return_.Bind(&undefined, 1);
          GenerateReturnSequence(&undefined);
        }
      } else if (function_return_.is_linked()) {
        // If the return target has dangling jumps to it, then we have not
        // yet generated the return sequence. This can happen when (a)
        // control does not flow off the end of the body so we did not
        // compile an artificial return statement just above, and (b) there
        // are return statements in the body but (c) they are all shadowed.
        Result return_value(this);
        // Though this is a (possibly) backward block, the frames can
        // only differ on their top element.
        function_return_.Bind(&return_value, 1);
        GenerateReturnSequence(&return_value);
      }
    }
  }

  // Adjust for function-level loop nesting.
  loop_nesting_ -= fun->loop_nesting();

  // Code generation state must be reset.
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (HasStackOverflow()) {
    ClearDeferred();
  } else {
    ProcessDeferred();
  }

  // There is no need to delete the register allocator, it is a
  // stack-allocated local.
  allocator_ = NULL;
  scope_ = NULL;
}
326 | |
327 | |
// Return a memory operand addressing the given slot.  For parameter
// and local slots this is a frame operand; for context slots the
// context chain is walked through |tmp|, which is clobbered and must
// not be esi (the context register).
Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(esi));  // do not overwrite context register
      Register context = esi;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be the needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context. However it is safe to dereference be-
      // cause the function context of a function context is itself. Before
      // deleting this mov we should try to create a counter-example first,
      // though...)
      __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      // Unknown slot type; the returned operand is never used.
      UNREACHABLE();
      return Operand(eax);
  }
}
377 | |
378 | |
// Like SlotOperand for context slots, but additionally verifies that
// no context on the chain has an extension object installed (which
// would happen for eval-introduced bindings).  If an extension is
// found, control branches to |slow|.  |tmp| must hold an allocated
// register; it is clobbered.
Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
                                                         Result tmp,
                                                         JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  ASSERT(tmp.is_register());
  // Start the walk at the current context (esi).
  Result context(esi, this);

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
               Immediate(0));
        slow->Branch(not_equal, not_taken);
      }
      // Hop to the outer context via the closure's context field.
      __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
         Immediate(0));
  slow->Branch(not_equal, not_taken);
  __ mov(tmp.reg(), ContextOperand(context.reg(), Context::FCONTEXT_INDEX));
  return ContextOperand(tmp.reg(), slot->index());
}
406 | |
407 | |
// Emit code to load the value of an expression to the top of the
// frame. If the expression is boolean-valued it may be compiled (or
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
//
// Postcondition: either the destination was used (control flow), or
// exactly one value was pushed on the frame (see final ASSERT).
void CodeGenerator::LoadCondition(Expression* x,
                                  TypeofState typeof_state,
                                  ControlDestination* dest,
                                  bool force_control) {
  ASSERT(!in_spilled_code());
  int original_height = frame_->height();

  { CodeGenState new_state(this, typeof_state, dest);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression. In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (eg, a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        !dest->is_used() &&
        frame_->height() == original_height) {
      dest->Goto(true);
    }
  }

  if (force_control && !dest->is_used()) {
    // Convert the TOS value into flow to the control destination.
    ToBoolean(dest);
  }

  ASSERT(!(force_control && !dest->is_used()));
  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
}
446 | |
447 | |
// Load an expression while the generator is in spilled-code mode.
// The mode flag is temporarily cleared so Load can use the register
// allocator, then the frame is fully spilled before restoring it.
void CodeGenerator::LoadAndSpill(Expression* expression,
                                 TypeofState typeof_state) {
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  Load(expression, typeof_state);
  frame_->SpillAll();
  set_in_spilled_code(true);
}
456 | |
457 | |
// Load an expression and leave its value (exactly one element) on
// top of the frame.  Boolean-valued subexpressions may have been
// compiled to control flow; any dangling jumps to the true/false
// targets are materialized here as pushes of true/false values.
void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  ASSERT(!in_spilled_code());
  JumpTarget true_target(this);
  JumpTarget false_target(this);
  ControlDestination dest(&true_target, &false_target, true);
  LoadCondition(x, typeof_state, &dest, false);

  if (dest.false_was_fall_through()) {
    // The false target was just bound.
    JumpTarget loaded(this);
    frame_->Push(Factory::false_value());
    // There may be dangling jumps to the true target.
    if (true_target.is_linked()) {
      loaded.Jump();
      true_target.Bind();
      frame_->Push(Factory::true_value());
      loaded.Bind();
    }

  } else if (dest.is_used()) {
    // There is true, and possibly false, control flow (with true as
    // the fall through).
    JumpTarget loaded(this);
    frame_->Push(Factory::true_value());
    if (false_target.is_linked()) {
      loaded.Jump();
      false_target.Bind();
      frame_->Push(Factory::false_value());
      loaded.Bind();
    }

  } else {
    // We have a valid value on top of the frame, but we still may
    // have dangling jumps to the true and false targets from nested
    // subexpressions (eg, the left subexpressions of the
    // short-circuited boolean operators).
    ASSERT(has_valid_frame());
    if (true_target.is_linked() || false_target.is_linked()) {
      JumpTarget loaded(this);
      loaded.Jump();  // Don't lose the current TOS.
      if (true_target.is_linked()) {
        true_target.Bind();
        frame_->Push(Factory::true_value());
        if (false_target.is_linked()) {
          loaded.Jump();
        }
      }
      if (false_target.is_linked()) {
        false_target.Bind();
        frame_->Push(Factory::false_value());
      }
      loaded.Bind();
    }
  }

  // Exactly one value was pushed, regardless of which path ran.
  ASSERT(has_valid_frame());
  ASSERT(frame_->height() == original_height + 1);
}
519 | |
520 | |
521 void CodeGenerator::LoadGlobal() { | |
522 if (in_spilled_code()) { | |
523 frame_->EmitPush(GlobalObject()); | |
524 } else { | |
525 Result temp = allocator_->Allocate(); | |
526 __ mov(temp.reg(), GlobalObject()); | |
527 frame_->Push(&temp); | |
528 } | |
529 } | |
530 | |
531 | |
532 void CodeGenerator::LoadGlobalReceiver() { | |
533 Result temp = allocator_->Allocate(); | |
534 Register reg = temp.reg(); | |
535 __ mov(reg, GlobalObject()); | |
536 __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset)); | |
537 frame_->Push(&temp); | |
538 } | |
539 | |
540 | |
541 // TODO(1241834): Get rid of this function in favor of just using Load, now | |
542 // that we have the INSIDE_TYPEOF typeof state. => Need to handle global | |
543 // variables w/o reference errors elsewhere. | |
544 void CodeGenerator::LoadTypeofExpression(Expression* x) { | |
545 Variable* variable = x->AsVariableProxy()->AsVariable(); | |
546 if (variable != NULL && !variable->is_this() && variable->is_global()) { | |
547 // NOTE: This is somewhat nasty. We force the compiler to load | |
548 // the variable as if through '<global>.<variable>' to make sure we | |
549 // do not get reference errors. | |
550 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); | |
551 Literal key(variable->name()); | |
552 // TODO(1241834): Fetch the position from the variable instead of using | |
553 // no position. | |
554 Property property(&global, &key, RelocInfo::kNoPosition); | |
555 Load(&property); | |
556 } else { | |
557 Load(x, INSIDE_TYPEOF); | |
558 } | |
559 } | |
560 | |
561 | |
// Constructing a Reference immediately loads it: LoadReference
// classifies the expression (slot/named/keyed) and leaves any
// reference components on the frame.
Reference::Reference(CodeGenerator* cgen, Expression* expression)
    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
  cgen->LoadReference(this);
}
566 | |
567 | |
// Destroying a Reference pops its components from the frame while
// preserving the value on top (see UnloadReference).
Reference::~Reference() {
  cgen_->UnloadReference(this);
}
571 | |
572 | |
// Classify |ref|'s expression as a SLOT, NAMED, or KEYED reference
// and load any components it needs (receiver object, and for keyed
// references the key) onto the frame.  Non-references fall through
// to a runtime reference error.
void CodeGenerator::LoadReference(Reference* ref) {
  // References are loaded from both spilled and unspilled code. Set the
  // state to unspilled to allow that (and explicitly spill after
  // construction at the construction sites).
  bool was_in_spilled_code = in_spilled_code_;
  in_spilled_code_ = false;

  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    Load(property->obj());
    // We use a named reference if the key is a literal symbol, unless it is
    // a string that can be legally parsed as an integer. This is because
    // otherwise we will not get into the slow case code that handles [] on
    // String objects.
    Literal* literal = property->key()->AsLiteral();
    uint32_t dummy;
    if (literal != NULL &&
        literal->handle()->IsSymbol() &&
        !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property. Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    Load(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }

  // Restore the caller's spilled-code state.
  in_spilled_code_ = was_in_spilled_code;
}
621 | |
622 | |
623 void CodeGenerator::UnloadReference(Reference* ref) { | |
624 // Pop a reference from the stack while preserving TOS. | |
625 Comment cmnt(masm_, "[ UnloadReference"); | |
626 frame_->Nip(ref->size()); | |
627 } | |
628 | |
629 | |
// Code stub that converts the value on top of the stack to a
// boolean.  The caller tests the stub's result for non-zero (see
// CodeGenerator::ToBoolean).  MinorKey is 0: there is only one
// variant of this stub.
class ToBooleanStub: public CodeStub {
 public:
  ToBooleanStub() { }

  void Generate(MacroAssembler* masm);

 private:
  Major MajorKey() { return ToBoolean; }
  int MinorKey() { return 0; }
};
640 | |
641 | |
// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate.
void CodeGenerator::ToBoolean(ControlDestination* dest) {
  Comment cmnt(masm_, "[ ToBoolean");

  // The value to convert should be popped from the frame.
  Result value = frame_->Pop();
  value.ToRegister();
  // Fast case checks.

  // 'false' => false.
  __ cmp(value.reg(), Factory::false_value());
  dest->false_target()->Branch(equal);

  // 'true' => true.
  __ cmp(value.reg(), Factory::true_value());
  dest->true_target()->Branch(equal);

  // 'undefined' => false.
  __ cmp(value.reg(), Factory::undefined_value());
  dest->false_target()->Branch(zero);

  // Smi => false iff zero.  The zero test is done before the smi tag
  // check; a tagged heap-object pointer is presumably never zero, so
  // this ordering is safe — confirm against the heap layout.
  ASSERT(kSmiTag == 0);
  __ test(value.reg(), Operand(value.reg()));
  dest->false_target()->Branch(zero);
  __ test(value.reg(), Immediate(kSmiTagMask));
  dest->true_target()->Branch(zero);

  // Call the stub for all other cases.
  frame_->Push(&value);  // Undo the Pop() from above.
  ToBooleanStub stub;
  Result temp = frame_->CallStub(&stub, 1);
  // Convert the result to a condition code.
  __ test(temp.reg(), Operand(temp.reg()));
  temp.Unuse();
  dest->Split(not_equal);
}
681 | |
682 | |
// Static helper routines for generating ia32 floating-point code.
// Never instantiated; all members are static.
class FloatingPointHelper : public AllStatic {
 public:
  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
  // floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,
                                 Label* non_float,
                                 Register scratch);
  // Allocate a heap number in new space with undefined value.
  // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
  static void AllocateHeapNumber(MacroAssembler* masm,
                                 Label* need_gc,
                                 Register scratch1,
                                 Register scratch2);
};
703 | |
704 | |
// Flag that indicates whether or not the code that handles smi arguments
// should be placed in the stub, inlined, or omitted entirely.
enum GenericBinaryFlags {
  SMI_CODE_IN_STUB,  // Smi fast path is generated inside the stub itself.
  SMI_CODE_INLINED   // Smi fast path is emitted inline at the call site.
};
711 | |
712 | |
// Code stub implementing the generic cases of the binary operators
// (+, -, *, /, bitwise ops and shifts).  Operator, overwrite mode
// and smi-handling flags are packed into the 16-bit minor key so
// generated stubs can be cached and reused.
class GenericBinaryOpStub: public CodeStub {
 public:
  GenericBinaryOpStub(Token::Value op,
                      OverwriteMode mode,
                      GenericBinaryFlags flags)
      : op_(op), mode_(mode), flags_(flags) {
    // All token values must fit in OpBits' 13 bits.
    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
  }

  // Emit the smi fast path; jumps to |slow| for non-smi operands.
  void GenerateSmiCode(MacroAssembler* masm, Label* slow);

 private:
  Token::Value op_;
  OverwriteMode mode_;
  GenericBinaryFlags flags_;

  const char* GetName();

#ifdef DEBUG
  void Print() {
    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
           Token::String(op_),
           static_cast<int>(mode_),
           static_cast<int>(flags_));
  }
#endif

  // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 13> {};
  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};

  Major MajorKey() { return GenericBinaryOp; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return OpBits::encode(op_)
           | ModeBits::encode(mode_)
           | FlagBits::encode(flags_);
  }
  void Generate(MacroAssembler* masm);
};
754 | |
755 | |
756 const char* GenericBinaryOpStub::GetName() { | |
757 switch (op_) { | |
758 case Token::ADD: return "GenericBinaryOpStub_ADD"; | |
759 case Token::SUB: return "GenericBinaryOpStub_SUB"; | |
760 case Token::MUL: return "GenericBinaryOpStub_MUL"; | |
761 case Token::DIV: return "GenericBinaryOpStub_DIV"; | |
762 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; | |
763 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; | |
764 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; | |
765 case Token::SAR: return "GenericBinaryOpStub_SAR"; | |
766 case Token::SHL: return "GenericBinaryOpStub_SHL"; | |
767 case Token::SHR: return "GenericBinaryOpStub_SHR"; | |
768 default: return "GenericBinaryOpStub"; | |
769 } | |
770 } | |
771 | |
772 | |
// A deferred code class implementing binary operations on likely smis.
// This class generates both inline code and deferred code.
// The fastest path is implemented inline. Deferred code calls
// the GenericBinaryOpStub stub for slow cases.
class DeferredInlineBinaryOperation: public DeferredCode {
 public:
  DeferredInlineBinaryOperation(CodeGenerator* generator,
                                Token::Value op,
                                OverwriteMode mode,
                                GenericBinaryFlags flags)
      : DeferredCode(generator), stub_(op, mode, flags), op_(op) {
    set_comment("[ DeferredInlineBinaryOperation");
  }

  // Consumes its arguments, left and right, leaving them invalid.
  Result GenerateInlineCode(Result* left, Result* right);

  virtual void Generate();

 private:
  // Slow-path stub shared by all operands this deferred code handles.
  GenericBinaryOpStub stub_;
  Token::Value op_;
};
796 | |
797 | |
// Slow path for the inline binary operation: rebind the two operands
// from the entry target, push them on the frame, and call the
// generic stub.  The stub's answer is passed back through exit_.
void DeferredInlineBinaryOperation::Generate() {
  Result left(generator());
  Result right(generator());
  enter()->Bind(&left, &right);
  generator()->frame()->Push(&left);
  generator()->frame()->Push(&right);
  Result answer = generator()->frame()->CallStub(&stub_, 2);
  exit_.Jump(&answer);
}
807 | |
808 | |
// Emit code for the binary operation 'op' on the two values on top of
// the virtual frame, leaving the result on top of the frame.  'type'
// carries the likely-smi analysis for the operands; 'overwrite_mode'
// is forwarded to the stubs and deferred code.
void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           SmiAnalysis* type,
                                           OverwriteMode overwrite_mode) {
  Comment cmnt(masm_, "[ BinaryOperation");
  Comment cmnt_token(masm_, Token::String(op));

  if (op == Token::COMMA) {
    // Simply discard left value.
    frame_->Nip(1);
    return;
  }

  // Set the flags based on the operation, type and loop nesting level.
  GenericBinaryFlags flags;
  switch (op) {
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR:
      // Bit operations always assume they likely operate on Smis. Still only
      // generate the inline Smi check code if this operation is part of a loop.
      flags = (loop_nesting() > 0)
              ? SMI_CODE_INLINED
              : SMI_CODE_IN_STUB;
      break;

    default:
      // By default only inline the Smi check code for likely smis if this
      // operation is part of a loop.
      flags = ((loop_nesting() > 0) && type->IsLikelySmi())
              ? SMI_CODE_INLINED
              : SMI_CODE_IN_STUB;
      break;
  }

  Result right = frame_->Pop();
  Result left = frame_->Pop();

  if (op == Token::ADD) {
    // If either operand is statically known to be a JS string, emit the
    // specialized string-concatenation calls instead of the generic stub.
    bool left_is_string = left.static_type().is_jsstring();
    bool right_is_string = right.static_type().is_jsstring();
    if (left_is_string || right_is_string) {
      frame_->Push(&left);
      frame_->Push(&right);
      Result answer(this);
      if (left_is_string) {
        if (right_is_string) {
          // TODO(lrn): if (left.is_constant() && right.is_constant())
          // -- do a compile time cons, if allocation during codegen is allowed.
          answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
        } else {
          answer =
            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
        }
      } else if (right_is_string) {
        answer =
          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
      }
      // The result of any of the above calls is a string.
      answer.set_static_type(StaticType::jsstring());
      frame_->Push(&answer);
      return;
    }
    // Neither operand is known to be a string.
  }

  // Classify the operands: constant smis can be folded or specialized,
  // constant non-smis let us skip the smi fast path entirely.
  bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
  bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
  bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
  bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
  bool generate_no_smi_code = false;  // No smi code at all, inline or in stub.

  if (left_is_smi && right_is_smi) {
    // Compute the constant result at compile time, and leave it on the frame.
    int left_int = Smi::cast(*left.handle())->value();
    int right_int = Smi::cast(*right.handle())->value();
    if (FoldConstantSmis(op, left_int, right_int)) return;
  }

  if (left_is_non_smi || right_is_non_smi) {
    // Set flag so that we go straight to the slow case, with no smi code.
    generate_no_smi_code = true;
  } else if (right_is_smi) {
    ConstantSmiBinaryOperation(op, &left, right.handle(),
                               type, false, overwrite_mode);
    return;
  } else if (left_is_smi) {
    // Reversed: the constant is the left operand.
    ConstantSmiBinaryOperation(op, &right, left.handle(),
                               type, true, overwrite_mode);
    return;
  }

  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
    LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
  } else {
    frame_->Push(&left);
    frame_->Push(&right);
    // If we know the arguments aren't smis, use the binary operation stub
    // that does not check for the fast smi case.
    // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
    if (generate_no_smi_code) {
      flags = SMI_CODE_INLINED;
    }
    GenericBinaryOpStub stub(op, overwrite_mode, flags);
    Result answer = frame_->CallStub(&stub, 2);
    frame_->Push(&answer);
  }
}
918 | |
919 | |
920 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { | |
921 Object* answer_object = Heap::undefined_value(); | |
922 switch (op) { | |
923 case Token::ADD: | |
924 if (Smi::IsValid(left + right)) { | |
925 answer_object = Smi::FromInt(left + right); | |
926 } | |
927 break; | |
928 case Token::SUB: | |
929 if (Smi::IsValid(left - right)) { | |
930 answer_object = Smi::FromInt(left - right); | |
931 } | |
932 break; | |
933 case Token::MUL: { | |
934 double answer = static_cast<double>(left) * right; | |
935 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { | |
936 // If the product is zero and the non-zero factor is negative, | |
937 // the spec requires us to return floating point negative zero. | |
938 if (answer != 0 || (left >= 0 && right >= 0)) { | |
939 answer_object = Smi::FromInt(static_cast<int>(answer)); | |
940 } | |
941 } | |
942 } | |
943 break; | |
944 case Token::DIV: | |
945 case Token::MOD: | |
946 break; | |
947 case Token::BIT_OR: | |
948 answer_object = Smi::FromInt(left | right); | |
949 break; | |
950 case Token::BIT_AND: | |
951 answer_object = Smi::FromInt(left & right); | |
952 break; | |
953 case Token::BIT_XOR: | |
954 answer_object = Smi::FromInt(left ^ right); | |
955 break; | |
956 | |
957 case Token::SHL: { | |
958 int shift_amount = right & 0x1F; | |
959 if (Smi::IsValid(left << shift_amount)) { | |
960 answer_object = Smi::FromInt(left << shift_amount); | |
961 } | |
962 break; | |
963 } | |
964 case Token::SHR: { | |
965 int shift_amount = right & 0x1F; | |
966 unsigned int unsigned_left = left; | |
967 unsigned_left >>= shift_amount; | |
968 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) { | |
969 answer_object = Smi::FromInt(unsigned_left); | |
970 } | |
971 break; | |
972 } | |
973 case Token::SAR: { | |
974 int shift_amount = right & 0x1F; | |
975 unsigned int unsigned_left = left; | |
976 if (left < 0) { | |
977 // Perform arithmetic shift of a negative number by | |
978 // complementing number, logical shifting, complementing again. | |
979 unsigned_left = ~unsigned_left; | |
980 unsigned_left >>= shift_amount; | |
981 unsigned_left = ~unsigned_left; | |
982 } else { | |
983 unsigned_left >>= shift_amount; | |
984 } | |
985 ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed. | |
986 answer_object = Smi::FromInt(unsigned_left); // Converted to signed. | |
987 break; | |
988 } | |
989 default: | |
990 UNREACHABLE(); | |
991 break; | |
992 } | |
993 if (answer_object == Heap::undefined_value()) { | |
994 return false; | |
995 } | |
996 frame_->Push(Handle<Object>(answer_object)); | |
997 return true; | |
998 } | |
999 | |
1000 | |
1001 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, | |
1002 Result* left, | |
1003 Result* right, | |
1004 OverwriteMode overwrite_mode) { | |
1005 // Implements a binary operation using a deferred code object | |
1006 // and some inline code to operate on smis quickly. | |
1007 DeferredInlineBinaryOperation* deferred = | |
1008 new DeferredInlineBinaryOperation(this, op, overwrite_mode, | |
1009 SMI_CODE_INLINED); | |
1010 // Generate the inline code that handles some smi operations, | |
1011 // and jumps to the deferred code for everything else. | |
1012 Result answer = deferred->GenerateInlineCode(left, right); | |
1013 deferred->BindExit(&answer); | |
1014 frame_->Push(&answer); | |
1015 } | |
1016 | |
1017 | |
// Deferred code for <operand> op <constant smi> when the inline fast
// path fails: calls the generic stub with both values.
class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(CodeGenerator* generator,
                             Token::Value op,
                             Smi* value,
                             OverwriteMode overwrite_mode)
      : DeferredCode(generator),
        op_(op),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  Smi* value_;  // The constant right operand.
  OverwriteMode overwrite_mode_;
};
1038 | |
1039 | |
1040 void DeferredInlineSmiOperation::Generate() { | |
1041 Result left(generator()); | |
1042 enter()->Bind(&left); | |
1043 generator()->frame()->Push(&left); | |
1044 generator()->frame()->Push(value_); | |
1045 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); | |
1046 Result answer = generator()->frame()->CallStub(&igostub, 2); | |
1047 exit_.Jump(&answer); | |
1048 } | |
1049 | |
1050 | |
// Deferred code for <constant smi> op <operand> (the constant is the
// left operand) when the inline fast path fails.
class DeferredInlineSmiOperationReversed: public DeferredCode {
 public:
  DeferredInlineSmiOperationReversed(CodeGenerator* generator,
                                     Token::Value op,
                                     Smi* value,
                                     OverwriteMode overwrite_mode)
      : DeferredCode(generator),
        op_(op),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiOperationReversed");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  Smi* value_;  // The constant left operand.
  OverwriteMode overwrite_mode_;
};
1071 | |
1072 | |
1073 void DeferredInlineSmiOperationReversed::Generate() { | |
1074 Result right(generator()); | |
1075 enter()->Bind(&right); | |
1076 generator()->frame()->Push(value_); | |
1077 generator()->frame()->Push(&right); | |
1078 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); | |
1079 Result answer = generator()->frame()->CallStub(&igostub, 2); | |
1080 exit_.Jump(&answer); | |
1081 } | |
1082 | |
1083 | |
// Deferred code for <operand> + <constant smi> where the inline code
// performed the addition optimistically; the slow path must undo it.
class DeferredInlineSmiAdd: public DeferredCode {
 public:
  DeferredInlineSmiAdd(CodeGenerator* generator,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : DeferredCode(generator),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAdd");
  }

  virtual void Generate();

 private:
  Smi* value_;  // The constant right operand.
  OverwriteMode overwrite_mode_;
};
1101 | |
1102 | |
void DeferredInlineSmiAdd::Generate() {
  // Undo the optimistic add operation and call the shared stub.
  // The inline code added value_ into the register before branching
  // here (on overflow or a failed smi check), so subtract it again to
  // recover the original left operand.
  Result left(generator());  // Initially left + value_.
  enter()->Bind(&left);
  left.ToRegister();
  generator()->frame()->Spill(left.reg());
  __ sub(Operand(left.reg()), Immediate(value_));
  generator()->frame()->Push(&left);
  generator()->frame()->Push(value_);
  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
  Result answer = generator()->frame()->CallStub(&igostub, 2);
  exit_.Jump(&answer);
}
1116 | |
1117 | |
// Deferred code for <constant smi> + <operand> (reversed operand
// order); the slow path undoes the optimistic inline addition.
class DeferredInlineSmiAddReversed: public DeferredCode {
 public:
  DeferredInlineSmiAddReversed(CodeGenerator* generator,
                               Smi* value,
                               OverwriteMode overwrite_mode)
      : DeferredCode(generator),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAddReversed");
  }

  virtual void Generate();

 private:
  Smi* value_;  // The constant left operand.
  OverwriteMode overwrite_mode_;
};
1135 | |
1136 | |
void DeferredInlineSmiAddReversed::Generate() {
  // Undo the optimistic add operation and call the shared stub.
  // The inline code added value_ into the register; subtract it to
  // recover the original right operand, then push in left-to-right
  // (constant first) order for the stub.
  Result right(generator());  // Initially value_ + right.
  enter()->Bind(&right);
  right.ToRegister();
  generator()->frame()->Spill(right.reg());
  __ sub(Operand(right.reg()), Immediate(value_));
  generator()->frame()->Push(value_);
  generator()->frame()->Push(&right);
  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
  Result answer = generator()->frame()->CallStub(&igostub, 2);
  exit_.Jump(&answer);
}
1150 | |
1151 | |
// Deferred code for <operand> - <constant smi> where the inline code
// performed the subtraction optimistically; the slow path must undo it.
class DeferredInlineSmiSub: public DeferredCode {
 public:
  DeferredInlineSmiSub(CodeGenerator* generator,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : DeferredCode(generator),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiSub");
  }

  virtual void Generate();

 private:
  Smi* value_;  // The constant right operand.
  OverwriteMode overwrite_mode_;
};
1169 | |
1170 | |
void DeferredInlineSmiSub::Generate() {
  // Undo the optimistic sub operation and call the shared stub.
  // The inline code subtracted value_ from the register; add it back to
  // recover the original left operand.
  Result left(generator());  // Initially left - value_.
  enter()->Bind(&left);
  left.ToRegister();
  generator()->frame()->Spill(left.reg());
  __ add(Operand(left.reg()), Immediate(value_));
  generator()->frame()->Push(&left);
  generator()->frame()->Push(value_);
  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
  Result answer = generator()->frame()->CallStub(&igostub, 2);
  exit_.Jump(&answer);
}
1184 | |
1185 | |
// Deferred code for <constant smi> - <operand>.  The inline code keeps
// the result in a fresh register, so no undo is needed in the slow path.
class DeferredInlineSmiSubReversed: public DeferredCode {
 public:
  DeferredInlineSmiSubReversed(CodeGenerator* generator,
                               Smi* value,
                               OverwriteMode overwrite_mode)
      : DeferredCode(generator),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiSubReversed");
  }

  virtual void Generate();

 private:
  Smi* value_;  // The constant left operand.
  OverwriteMode overwrite_mode_;
};
1203 | |
1204 | |
void DeferredInlineSmiSubReversed::Generate() {
  // Call the shared stub.  No undo is necessary: the operand register
  // passed here still holds the original (unmodified) right operand.
  Result right(generator());
  enter()->Bind(&right);
  generator()->frame()->Push(value_);
  generator()->frame()->Push(&right);
  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
  Result answer = generator()->frame()->CallStub(&igostub, 2);
  exit_.Jump(&answer);
}
1215 | |
1216 | |
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
                                               Result* operand,
                                               Handle<Object> value,
                                               SmiAnalysis* type,
                                               bool reversed,
                                               OverwriteMode overwrite_mode) {
  // NOTE: This is an attempt to inline (a bit) more of the code for
  // some possible smi operations (like + and -) when (at least) one
  // of the operands is a constant smi.
  // Consumes the argument "operand".  When 'reversed' is true the
  // constant is the left operand, i.e. the operation is value op operand.
  // The result is left on top of the virtual frame.

  // TODO(199): Optimize some special cases of operations involving a
  // smi literal (multiply by 2, shift by 0, etc.).
  if (IsUnsafeSmi(value)) {
    // The smi cannot be used as an immediate; fall back on the generic
    // likely-smi path with the constant in a register.
    Result unsafe_operand(value, this);
    if (reversed) {
      LikelySmiBinaryOperation(op, &unsafe_operand, operand,
                               overwrite_mode);
    } else {
      LikelySmiBinaryOperation(op, operand, &unsafe_operand,
                               overwrite_mode);
    }
    ASSERT(!operand->is_valid());
    return;
  }

  // Get the literal value.
  Smi* smi_value = Smi::cast(*value);
  int int_value = smi_value->value();

  switch (op) {
    case Token::ADD: {
      // Optimistically add the immediate first; the deferred code
      // undoes the addition if the overflow or smi check fails.
      DeferredCode* deferred = NULL;
      if (reversed) {
        deferred = new DeferredInlineSmiAddReversed(this, smi_value,
                                                    overwrite_mode);
      } else {
        deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode);
      }
      operand->ToRegister();
      frame_->Spill(operand->reg());
      __ add(Operand(operand->reg()), Immediate(value));
      deferred->enter()->Branch(overflow, operand, not_taken);
      __ test(operand->reg(), Immediate(kSmiTagMask));
      deferred->enter()->Branch(not_zero, operand, not_taken);
      deferred->BindExit(operand);
      frame_->Push(operand);
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred = NULL;
      Result answer(this);  // Only allocate a new register if reversed.
      if (reversed) {
        // value - operand: compute into a fresh register so the operand
        // is preserved for the deferred code (no undo needed there).
        answer = allocator()->Allocate();
        ASSERT(answer.is_valid());
        deferred = new DeferredInlineSmiSubReversed(this,
                                                    smi_value,
                                                    overwrite_mode);
        __ Set(answer.reg(), Immediate(value));
        // We are in the reversed case so they can't both be Smi constants.
        ASSERT(operand->is_register());
        __ sub(answer.reg(), Operand(operand->reg()));
      } else {
        // operand - value: optimistic in-place subtract; the deferred
        // code undoes it.
        operand->ToRegister();
        frame_->Spill(operand->reg());
        deferred = new DeferredInlineSmiSub(this,
                                            smi_value,
                                            overwrite_mode);
        __ sub(Operand(operand->reg()), Immediate(value));
        answer = *operand;
      }
      deferred->enter()->Branch(overflow, operand, not_taken);
      __ test(answer.reg(), Immediate(kSmiTagMask));
      deferred->enter()->Branch(not_zero, operand, not_taken);
      operand->Unuse();
      deferred->BindExit(&answer);
      frame_->Push(&answer);
      break;
    }

    case Token::SAR: {
      if (reversed) {
        // Constant >> variable: no specialized inline code.
        Result constant_operand(value, this);
        LikelySmiBinaryOperation(op, &constant_operand, operand,
                                 overwrite_mode);
      } else {
        // Only the least significant 5 bits of the shift value are used.
        // In the slow case, this masking is done inside the runtime call.
        int shift_value = int_value & 0x1f;
        DeferredCode* deferred =
            new DeferredInlineSmiOperation(this, Token::SAR, smi_value,
                                           overwrite_mode);
        operand->ToRegister();
        __ test(operand->reg(), Immediate(kSmiTagMask));
        deferred->enter()->Branch(not_zero, operand, not_taken);
        if (shift_value > 0) {
          frame_->Spill(operand->reg());
          __ sar(operand->reg(), shift_value);
          // Clear the tag bit that the arithmetic shift may have
          // shifted into the low end.
          __ and_(operand->reg(), ~kSmiTagMask);
        }
        deferred->BindExit(operand);
        frame_->Push(operand);
      }
      break;
    }

    case Token::SHR: {
      if (reversed) {
        Result constant_operand(value, this);
        LikelySmiBinaryOperation(op, &constant_operand, operand,
                                 overwrite_mode);
      } else {
        // Only the least significant 5 bits of the shift value are used.
        // In the slow case, this masking is done inside the runtime call.
        int shift_value = int_value & 0x1f;
        DeferredCode* deferred =
            new DeferredInlineSmiOperation(this, Token::SHR, smi_value,
                                           overwrite_mode);
        operand->ToRegister();
        __ test(operand->reg(), Immediate(kSmiTagMask));
        deferred->enter()->Branch(not_zero, operand, not_taken);
        // Untag, shift logically, then check the result fits in the
        // positive smi range before retagging.
        Result answer = allocator()->Allocate();
        ASSERT(answer.is_valid());
        __ mov(answer.reg(), operand->reg());
        __ sar(answer.reg(), kSmiTagSize);
        __ shr(answer.reg(), shift_value);
        // A negative Smi shifted right two is in the positive Smi range.
        if (shift_value < 2) {
          __ test(answer.reg(), Immediate(0xc0000000));
          deferred->enter()->Branch(not_zero, operand, not_taken);
        }
        operand->Unuse();
        ASSERT(kSmiTagSize == times_2);  // Adjust the code if not true.
        // Retag by doubling (reg + reg) via lea.
        __ lea(answer.reg(),
               Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
        deferred->BindExit(&answer);
        frame_->Push(&answer);
      }
      break;
    }

    case Token::SHL: {
      if (reversed) {
        Result constant_operand(value, this);
        LikelySmiBinaryOperation(op, &constant_operand, operand,
                                 overwrite_mode);
      } else {
        // Only the least significant 5 bits of the shift value are used.
        // In the slow case, this masking is done inside the runtime call.
        int shift_value = int_value & 0x1f;
        DeferredCode* deferred =
            new DeferredInlineSmiOperation(this, Token::SHL, smi_value,
                                           overwrite_mode);
        operand->ToRegister();
        __ test(operand->reg(), Immediate(kSmiTagMask));
        deferred->enter()->Branch(not_zero, operand, not_taken);
        if (shift_value != 0) {
          Result answer = allocator()->Allocate();
          ASSERT(answer.is_valid());
          __ mov(answer.reg(), operand->reg());
          ASSERT(kSmiTag == 0);  // adjust code if not the case
          // We do no shifts, only the Smi conversion, if shift_value is 1.
          if (shift_value > 1) {
            __ shl(answer.reg(), shift_value - 1);
          }
          // Convert int result to Smi, checking that it is in int range.
          ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
          __ add(answer.reg(), Operand(answer.reg()));
          deferred->enter()->Branch(overflow, operand, not_taken);
          operand->Unuse();
          deferred->BindExit(&answer);
          frame_->Push(&answer);
        } else {
          // Shift by zero: the (smi) operand is the result.
          deferred->BindExit(operand);
          frame_->Push(operand);
        }
      }
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      // Bitwise operations on a smi and a smi immediate need no
      // untagging: the tag bits combine correctly (for AND the
      // immediate is itself a tagged smi).
      DeferredCode* deferred = NULL;
      if (reversed) {
        deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value,
                                                          overwrite_mode);
      } else {
        deferred = new DeferredInlineSmiOperation(this, op, smi_value,
                                                  overwrite_mode);
      }
      operand->ToRegister();
      __ test(operand->reg(), Immediate(kSmiTagMask));
      deferred->enter()->Branch(not_zero, operand, not_taken);
      frame_->Spill(operand->reg());
      if (op == Token::BIT_AND) {
        __ and_(Operand(operand->reg()), Immediate(value));
      } else if (op == Token::BIT_XOR) {
        if (int_value != 0) {
          __ xor_(Operand(operand->reg()), Immediate(value));
        }
      } else {
        ASSERT(op == Token::BIT_OR);
        if (int_value != 0) {
          __ or_(Operand(operand->reg()), Immediate(value));
        }
      }
      deferred->BindExit(operand);
      frame_->Push(operand);
      break;
    }

    default: {
      // No specialized inline code for this operation; use the generic
      // likely-smi path with the constant in a register.
      Result constant_operand(value, this);
      if (reversed) {
        LikelySmiBinaryOperation(op, &constant_operand, operand,
                                 overwrite_mode);
      } else {
        LikelySmiBinaryOperation(op, operand, &constant_operand,
                                 overwrite_mode);
      }
      break;
    }
  }
  ASSERT(!operand->is_valid());
}
1444 | |
1445 | |
// Stub performing a (possibly strict) comparison with condition cc_,
// used when the inline smi comparison code does not apply.
class CompareStub: public CodeStub {
 public:
  CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }

  void Generate(MacroAssembler* masm);

 private:
  Condition cc_;
  bool strict_;  // Strict (===) vs. non-strict (==) equality.

  Major MajorKey() { return Compare; }

  int MinorKey() {
    // Encode the two parameters in a unique 16 bit value.
    ASSERT(static_cast<int>(cc_) < (1 << 15));
    return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
  }

#ifdef DEBUG
  void Print() {
    PrintF("CompareStub (cc %d), (strict %s)\n",
           static_cast<int>(cc_),
           strict_ ? "true" : "false");
  }
#endif
};
1472 | |
1473 | |
// Emit a comparison of the two values on top of the virtual frame with
// condition cc, splitting control flow to 'dest'.  Constant-smi and
// constant-null operands are special-cased.
void CodeGenerator::Comparison(Condition cc,
                               bool strict,
                               ControlDestination* dest) {
  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == equal);

  Result left_side(this);
  Result right_side(this);
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == greater || cc == less_equal) {
    cc = ReverseCondition(cc);
    left_side = frame_->Pop();
    right_side = frame_->Pop();
  } else {
    right_side = frame_->Pop();
    left_side = frame_->Pop();
  }
  ASSERT(cc == less || cc == equal || cc == greater_equal);

  // If either side is a constant smi, optimize the comparison.
  bool left_side_constant_smi =
      left_side.is_constant() && left_side.handle()->IsSmi();
  bool right_side_constant_smi =
      right_side.is_constant() && right_side.handle()->IsSmi();
  bool left_side_constant_null =
      left_side.is_constant() && left_side.handle()->IsNull();
  bool right_side_constant_null =
      right_side.is_constant() && right_side.handle()->IsNull();

  if (left_side_constant_smi || right_side_constant_smi) {
    if (left_side_constant_smi && right_side_constant_smi) {
      // Trivial case, comparing two constants.  Decide the comparison
      // at compile time and emit an unconditional jump.
      int left_value = Smi::cast(*left_side.handle())->value();
      int right_value = Smi::cast(*right_side.handle())->value();
      switch (cc) {
        case less:
          dest->Goto(left_value < right_value);
          break;
        case equal:
          dest->Goto(left_value == right_value);
          break;
        case greater_equal:
          dest->Goto(left_value >= right_value);
          break;
        default:
          UNREACHABLE();
      }
    } else {  // Only one side is a constant Smi.
      // If left side is a constant Smi, reverse the operands.
      // Since one side is a constant Smi, conversion order does not matter.
      if (left_side_constant_smi) {
        Result temp = left_side;
        left_side = right_side;
        right_side = temp;
        cc = ReverseCondition(cc);
        // This may reintroduce greater or less_equal as the value of cc.
        // CompareStub and the inline code both support all values of cc.
      }
      // Implement comparison against a constant Smi, inlining the case
      // where both sides are Smis.
      left_side.ToRegister();
      ASSERT(left_side.is_valid());
      JumpTarget is_smi(this);
      __ test(left_side.reg(), Immediate(kSmiTagMask));
      is_smi.Branch(zero, &left_side, &right_side, taken);

      // Setup and call the compare stub, which expects its arguments
      // in registers.
      CompareStub stub(cc, strict);
      Result result = frame_->CallStub(&stub, &left_side, &right_side);
      result.ToRegister();
      __ cmp(result.reg(), 0);
      result.Unuse();
      dest->true_target()->Branch(cc);
      dest->false_target()->Jump();

      is_smi.Bind(&left_side, &right_side);
      left_side.ToRegister();
      // Test smi equality and comparison by signed int comparison.
      if (IsUnsafeSmi(right_side.handle())) {
        // The smi cannot be used as an immediate; load it first.
        right_side.ToRegister();
        ASSERT(right_side.is_valid());
        __ cmp(left_side.reg(), Operand(right_side.reg()));
      } else {
        __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
      }
      left_side.Unuse();
      right_side.Unuse();
      dest->Split(cc);
    }
  } else if (cc == equal &&
             (left_side_constant_null || right_side_constant_null)) {
    // To make null checks efficient, we check if either the left side or
    // the right side is the constant 'null'.
    // If so, we optimize the code by inlining a null check instead of
    // calling the (very) general runtime routine for checking equality.
    Result operand = left_side_constant_null ? right_side : left_side;
    right_side.Unuse();
    left_side.Unuse();
    operand.ToRegister();
    __ cmp(operand.reg(), Factory::null_value());
    if (strict) {
      // 'null === x' is true only for x == null.
      operand.Unuse();
      dest->Split(equal);
    } else {
      // The 'null' value is only equal to 'undefined' if using non-strict
      // comparisons.
      dest->true_target()->Branch(equal);
      __ cmp(operand.reg(), Factory::undefined_value());
      dest->true_target()->Branch(equal);
      // Smis are never equal to null.
      __ test(operand.reg(), Immediate(kSmiTagMask));
      dest->false_target()->Branch(equal);

      // It can be an undetectable object.
      // Use a scratch register in preference to spilling operand.reg().
      Result temp = allocator()->Allocate();
      ASSERT(temp.is_valid());
      __ mov(temp.reg(),
             FieldOperand(operand.reg(), HeapObject::kMapOffset));
      __ movzx_b(temp.reg(),
                 FieldOperand(temp.reg(), Map::kBitFieldOffset));
      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
      temp.Unuse();
      operand.Unuse();
      dest->Split(not_zero);
    }
  } else {  // Neither side is a constant Smi or null.
    // If either side is a non-smi constant, skip the smi check.
    bool known_non_smi =
        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
        (right_side.is_constant() && !right_side.handle()->IsSmi());
    left_side.ToRegister();
    right_side.ToRegister();
    JumpTarget is_smi(this);
    if (!known_non_smi) {
      // Check for the smi case.  Both operands are smis iff the OR of
      // their bits has a clear tag bit.
      Result temp = allocator_->Allocate();
      ASSERT(temp.is_valid());
      __ mov(temp.reg(), left_side.reg());
      __ or_(temp.reg(), Operand(right_side.reg()));
      __ test(temp.reg(), Immediate(kSmiTagMask));
      temp.Unuse();
      is_smi.Branch(zero, &left_side, &right_side, taken);
    }
    // When non-smi, call out to the compare stub, which expects its
    // arguments in registers.
    CompareStub stub(cc, strict);
    Result answer = frame_->CallStub(&stub, &left_side, &right_side);
    if (cc == equal) {
      __ test(answer.reg(), Operand(answer.reg()));
    } else {
      __ cmp(answer.reg(), 0);
    }
    answer.Unuse();
    if (known_non_smi) {
      dest->Split(cc);
    } else {
      dest->true_target()->Branch(cc);
      dest->false_target()->Jump();
      // Fast path: direct signed comparison of the two smis.
      is_smi.Bind(&left_side, &right_side);
      left_side.ToRegister();
      right_side.ToRegister();
      __ cmp(left_side.reg(), Operand(right_side.reg()));
      right_side.Unuse();
      left_side.Unuse();
      dest->Split(cc);
    }
  }
}
1643 | |
1644 | |
// Stub that calls a function with argc_ arguments already on the stack
// (see CallWithArguments, which pushes the arguments and uses this stub).
class CallFunctionStub: public CodeStub {
 public:
  explicit CallFunctionStub(int argc) : argc_(argc) { }

  void Generate(MacroAssembler* masm);

 private:
  int argc_;  // Number of arguments (excluding the receiver).

#ifdef DEBUG
  void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
#endif

  Major MajorKey() { return CallFunction; }
  int MinorKey() { return argc_; }
};
1661 | |
1662 | |
1663 // Call the function just below TOS on the stack with the given | |
1664 // arguments. The receiver is the TOS. | |
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      int position) {
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.  The stub consumes
  // arg_count arguments plus the receiver.
  CallFunctionStub call_function(arg_count);
  Result answer = frame_->CallStub(&call_function, arg_count + 1);
  // Restore context and replace function on the stack with the
  // result of the stub invocation.
  frame_->RestoreContextRegister();
  frame_->SetElementAt(0, &answer);
}
1684 | |
1685 | |
// Deferred code that calls the stack check stub; taken from the inline
// stack-limit comparison in CodeGenerator::CheckStack.
class DeferredStackCheck: public DeferredCode {
 public:
  explicit DeferredStackCheck(CodeGenerator* generator)
      : DeferredCode(generator) {
    set_comment("[ DeferredStackCheck");
  }

  virtual void Generate();
};
1695 | |
1696 | |
1697 void DeferredStackCheck::Generate() { | |
1698 enter()->Bind(); | |
1699 StackCheckStub stub; | |
1700 Result ignored = generator()->frame()->CallStub(&stub, 0); | |
1701 ignored.Unuse(); | |
1702 exit_.Jump(); | |
1703 } | |
1704 | |
1705 | |
void CodeGenerator::CheckStack() {
  // Emit an inline comparison of esp against the stack guard limit,
  // branching to deferred code (which calls StackCheckStub) only when
  // the limit has been reached.
  if (FLAG_check_stack) {
    DeferredStackCheck* deferred = new DeferredStackCheck(this);
    ExternalReference stack_guard_limit =
        ExternalReference::address_of_stack_guard_limit();
    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
    deferred->enter()->Branch(below, not_taken);
    deferred->BindExit();
  }
}
1716 | |
1717 | |
// Visit a single statement with register allocation enabled, then
// spill the frame and restore spilled-code mode for the caller.
void CodeGenerator::VisitAndSpill(Statement* statement) {
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  Visit(statement);
  // The statement may have ended in a jump, leaving no current frame.
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}
1727 | |
1728 | |
// Visit a statement list with register allocation enabled, then
// spill the frame and restore spilled-code mode for the caller.
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  VisitStatements(statements);
  // The statements may have ended in a jump, leaving no current frame.
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}
1738 | |
1739 | |
1740 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) { | |
1741 ASSERT(!in_spilled_code()); | |
1742 for (int i = 0; has_valid_frame() && i < statements->length(); i++) { | |
1743 Visit(statements->at(i)); | |
1744 } | |
1745 } | |
1746 | |
1747 | |
// Compile a block statement.  The block's break target is initialized
// so breaks inside the block (e.g. from labeled breaks) have a
// destination, and bound afterwards only if actually jumped to.
void CodeGenerator::VisitBlock(Block* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->Initialize(this);
  VisitStatements(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}
1759 | |
1760 | |
// Declare global variables and functions by calling the runtime with
// the (name, value) pairs array, the current context, and an is-eval
// flag.
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  frame_->Push(pairs);

  // Duplicate the context register.
  Result context(esi, this);
  frame_->Push(&context);

  frame_->Push(Smi::FromInt(is_eval() ? 1 : 0));
  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // Return value is ignored (declarations are statements).
}
1772 | |
1773 | |
// Compile a variable, constant, or function declaration.  Lookup
// (dynamic) slots are declared through the runtime; all other slots
// are initialized in place if they have an initial value.
void CodeGenerator::VisitDeclaration(Declaration* node) {
  Comment cmnt(masm_, "[ Declaration");
  CodeForStatementPosition(node);
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call.  Duplicate the context register.
    Result context(esi, this);
    frame_->Push(&context);
    frame_->Push(var->name());
    // Declaration nodes are always introduced in one of two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    // Constants are declared read-only; plain vars get no attributes.
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    frame_->Push(Smi::FromInt(attr));
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      frame_->Push(Factory::the_hole_value());
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
      frame_->Push(Smi::FromInt(0));  // no initial value!
    }
    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    // Constants start out holding the hole until assigned.
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    {
      // Set the initial value.
      Reference target(this, node->proxy());
      Load(val);
      target.SetValue(NOT_CONST_INIT);
      // The reference is removed from the stack (preserving TOS) when
      // it goes out of scope.
    }
    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
}
1835 | |
1836 | |
1837 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) { | |
1838 ASSERT(!in_spilled_code()); | |
1839 Comment cmnt(masm_, "[ ExpressionStatement"); | |
1840 CodeForStatementPosition(node); | |
1841 Expression* expression = node->expression(); | |
1842 expression->MarkAsStatement(); | |
1843 Load(expression); | |
1844 // Remove the lingering expression result from the top of stack. | |
1845 frame_->Drop(); | |
1846 } | |
1847 | |
1848 | |
// Compile an empty statement: only record its source position for
// debugging; no code is generated.
void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
}
1855 | |
1856 | |
// Compile an if statement.  The condition is loaded with a
// ControlDestination so the comparison can branch directly; which arm
// is compiled first depends on which destination was the fall-through.
void CodeGenerator::VisitIfStatement(IfStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);
  JumpTarget exit(this);
  if (has_then_stm && has_else_stm) {
    JumpTarget then(this);
    JumpTarget else_(this);
    // true_after_bind: prefer falling through to the then part.
    ControlDestination dest(&then, &else_, true);
    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);

    if (dest.false_was_fall_through()) {
      // The else target was bound, so we compile the else part first.
      Visit(node->else_statement());

      // We may have dangling jumps to the then part.
      if (then.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then target was bound, so we compile the then part first.
      Visit(node->then_statement());

      if (else_.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    }

  } else if (has_then_stm) {
    ASSERT(!has_else_stm);
    JumpTarget then(this);
    // A false condition goes straight to the exit.
    ControlDestination dest(&then, &exit, true);
    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);

    if (dest.false_was_fall_through()) {
      // The exit label was bound.  We may have dangling jumps to the
      // then part.
      if (then.is_linked()) {
        exit.Unuse();
        exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then label was bound.
      Visit(node->then_statement());
    }

  } else if (has_else_stm) {
    ASSERT(!has_then_stm);
    JumpTarget else_(this);
    // A true condition goes straight to the exit.
    ControlDestination dest(&exit, &else_, false);
    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);

    if (dest.true_was_fall_through()) {
      // The exit label was bound.  We may have dangling jumps to the
      // else part.
      if (else_.is_linked()) {
        exit.Unuse();
        exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    } else {
      // The else label was bound.
      Visit(node->else_statement());
    }

  } else {
    ASSERT(!has_then_stm && !has_else_stm);
    // We only care about the condition's side effects (not its value
    // or control flow effect).  LoadCondition is called without
    // forcing control flow.
    ControlDestination dest(&exit, &exit, true);
    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
    if (!dest.is_used()) {
      // We got a value on the frame rather than (or in addition to)
      // control flow.
      frame_->Drop();
    }
  }

  // Bind the common exit only if some path actually jumps to it.
  if (exit.is_linked()) {
    exit.Bind();
  }
}
1952 | |
1953 | |
// Compile a continue statement: an unconditional jump to the
// continue target of the enclosing (possibly labeled) loop.
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}
1960 | |
1961 | |
// Compile a break statement: an unconditional jump to the break
// target of the enclosing (possibly labeled) statement.
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}
1968 | |
1969 | |
// Compile a return statement.  The return value is evaluated, then
// control transfers to the (possibly shadowed or shared) function
// return target, emitting the return sequence only once.
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  Load(node->expression());
  Result return_value = frame_->Pop();
  if (function_return_is_shadowed_) {
    // The return target is shadowed; jump to the shadowing target
    // instead of generating the return sequence here.
    function_return_.Jump(&return_value);
  } else {
    frame_->PrepareForReturn();
    if (function_return_.is_bound()) {
      // If the function return label is already bound we reuse the
      // code by jumping to the return site.
      function_return_.Jump(&return_value);
    } else {
      // Though this is a (possibly) backward block, the frames can
      // only differ on their top element.
      function_return_.Bind(&return_value, 1);
      GenerateReturnSequence(&return_value);
    }
  }
}
1993 | |
1994 | |
// Emit the function return sequence: optional exit tracing, moving
// the return value into eax, tearing down the frame, and the ret
// instruction.  The sequence size is asserted to match the constant
// the debugger uses when patching return sites.
void CodeGenerator::GenerateReturnSequence(Result* return_value) {
  // The return value is a live (but not currently reference counted)
  // reference to eax.  This is safe because the current frame does not
  // contain a reference to eax (it is prepared for the return by spilling
  // all registers).
  if (FLAG_trace) {
    frame_->Push(return_value);
    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
  }
  return_value->ToRegister(eax);

  // Add a label for checking the size of the code used for returning.
  Label check_exit_codesize;
  masm_->bind(&check_exit_codesize);

  // Leave the frame and return popping the arguments and the
  // receiver.
  frame_->Exit();
  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
  DeleteFrame();

  // Check that the size of the code used for returning matches what is
  // expected by the debugger.
  ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
}
2021 | |
2022 | |
// Compile entry into a 'with' statement (or the context entry of a
// catch block): push a new context object via the runtime and record
// it in the frame's context slot.
void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  Load(node->expression());
  Result context(this);
  // Catch blocks use a dedicated runtime entry for their context.
  if (node->is_catch_block()) {
    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    context = frame_->CallRuntime(Runtime::kPushContext, 1);
  }

  // Update context local.
  frame_->SaveContextRegister();

  // Verify that the runtime call result and esi agree.
  if (FLAG_debug_code) {
    __ cmp(context.reg(), Operand(esi));
    __ Assert(equal, "Runtime::NewContext should end up in esi");
  }
}
2044 | |
2045 | |
// Compile exit from a 'with' statement: restore the previous context
// from the current one and record it in the frame's context slot.
void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
  // Update context local.
  frame_->SaveContextRegister();
}
2055 | |
2056 | |
// Maximum acceptable ratio of jump-table size to case count for the
// fast (jump-table) switch; see kFastSwitchMaxOverheadFactor.
int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
  return kFastSwitchMaxOverheadFactor;
}
2060 | |
2061 | |
// Minimum number of cases required before the fast (jump-table)
// switch is attempted; see kFastSwitchMinCaseCount.
int CodeGenerator::FastCaseSwitchMinCaseCount() {
  return kFastSwitchMinCaseCount;
}
2065 | |
2066 | |
// Generate a computed jump to a switch case: the switch value is
// normalized to a smi, range-checked, and used to index a jump table
// of internal references that is patched with the real case
// addresses after all case bodies have been emitted.
void CodeGenerator::GenerateFastCaseSwitchJumpTable(
    SwitchStatement* node,
    int min_index,
    int range,
    Label* default_label,
    Vector<Label*> case_targets,
    Vector<Label> case_labels) {
  // Notice: Internal references, used by both the jmp instruction and
  // the table entries, need to be relocated if the buffer grows. This
  // prevents the forward use of Labels, since a displacement cannot
  // survive relocation, and it also cannot safely be distinguished
  // from a real address. Instead we put in zero-values as
  // placeholders, and fill in the addresses after the labels have been
  // bound.

  JumpTarget setup_default(this);
  JumpTarget is_smi(this);

  // A non-null default label pointer indicates a default case among
  // the case labels.  Otherwise we use the break target as a
  // "default".
  JumpTarget* default_target =
      (default_label == NULL) ? node->break_target() : &setup_default;

  // Test whether input is a smi.
  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  Result switch_value = frame_->Pop();
  switch_value.ToRegister();
  __ test(switch_value.reg(), Immediate(kSmiTagMask));
  is_smi.Branch(equal, &switch_value, taken);

  // It's a heap object, not a smi or a failure.  Check if it is a
  // heap number.
  Result temp = allocator()->Allocate();
  ASSERT(temp.is_valid());
  __ CmpObjectType(switch_value.reg(), HEAP_NUMBER_TYPE, temp.reg());
  temp.Unuse();
  // Anything that is neither a smi nor a heap number goes to the
  // default case.
  default_target->Branch(not_equal);

  // The switch value is a heap number.  Convert it to a smi.
  frame_->Push(&switch_value);
  Result smi_value = frame_->CallRuntime(Runtime::kNumberToSmi, 1);

  is_smi.Bind(&smi_value);
  smi_value.ToRegister();
  // Convert the switch value to a 0-based table index.
  if (min_index != 0) {
    frame_->Spill(smi_value.reg());
    __ sub(Operand(smi_value.reg()), Immediate(min_index << kSmiTagSize));
  }
  // Go to the default case if the table index is negative or not a smi.
  __ test(smi_value.reg(), Immediate(0x80000000 | kSmiTagMask));
  default_target->Branch(not_equal, not_taken);
  __ cmp(smi_value.reg(), range << kSmiTagSize);
  default_target->Branch(greater_equal, not_taken);

  // The expected frame at all the case labels is a version of the
  // current one (the bidirectional entry frame, which an arbitrary
  // frame of the correct height can be merged to).  Keep a copy to
  // restore at the start of every label.  Create a jump target and
  // bind it to set its entry frame properly.
  JumpTarget entry_target(this, JumpTarget::BIDIRECTIONAL);
  entry_target.Bind(&smi_value);
  VirtualFrame* start_frame = new VirtualFrame(frame_);

  // 0 is placeholder.
  // Jump to the address at table_address + 2 * smi_value.reg().
  // The target of the jump is read from table_address + 4 * switch_value.
  // The Smi encoding of smi_value.reg() is 2 * switch_value.
  smi_value.ToRegister();
  __ jmp(Operand(smi_value.reg(), smi_value.reg(),
                 times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
  smi_value.Unuse();
  // Calculate address to overwrite later with actual address of table.
  int32_t jump_table_ref = masm_->pc_offset() - sizeof(int32_t);
  __ Align(4);
  Label table_start;
  __ bind(&table_start);
  __ WriteInternalReference(jump_table_ref, table_start);

  for (int i = 0; i < range; i++) {
    // These are the table entries. 0x0 is the placeholder for case address.
    __ dd(0x0, RelocInfo::INTERNAL_REFERENCE);
  }

  GenerateFastCaseSwitchCases(node, case_labels, start_frame);

  // If there was a default case, we need to emit the code to match it.
  if (default_label != NULL) {
    if (has_valid_frame()) {
      node->break_target()->Jump();
    }
    setup_default.Bind();
    frame_->MergeTo(start_frame);
    __ jmp(default_label);
    DeleteFrame();
  }
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }

  // All case labels are bound now; patch the placeholder table
  // entries with the real case addresses.
  for (int i = 0, entry_pos = table_start.pos();
       i < range;
       i++, entry_pos += sizeof(uint32_t)) {
    if (case_targets[i] == NULL) {
      // Holes in the case range jump to the break target.
      __ WriteInternalReference(entry_pos,
                                *node->break_target()->entry_label());
    } else {
      __ WriteInternalReference(entry_pos, *case_targets[i]);
    }
  }

  delete start_frame;
}
2182 | |
2183 | |
// Compile a switch statement.  Tries the jump-table fast case first;
// otherwise compiles a chain of comparisons against the switch value
// followed by the case bodies.  The duplicated switch value stays on
// the frame during the tests and is dropped on entry to a body.
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->Initialize(this);

  // Compile the switch value.
  Load(node->tag());

  // The fast case emits its own code and bindings if applicable.
  if (TryGenerateFastCaseSwitchStatement(node)) {
    return;
  }

  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;

  JumpTarget next_test(this);
  // Compile the case label expressions and comparisons.  Exit early
  // if a comparison is unconditionally true.  The target next_test is
  // bound before the loop in order to indicate control flow to the
  // first comparison.
  next_test.Bind();
  for (int i = 0; i < length && !next_test.is_unused(); i++) {
    CaseClause* clause = cases->at(i);
    clause->body_target()->Initialize(this);
    // The default is not a test, but remember it for later.
    if (clause->is_default()) {
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case comparison");
    // We recycle the same target next_test for each test.  Bind it if
    // the previous test has not done so and then unuse it for the
    // loop.
    if (next_test.is_linked()) {
      next_test.Bind();
    }
    next_test.Unuse();

    // Duplicate the switch value.
    frame_->Dup();

    // Compile the label expression.
    Load(clause->label());

    // Compare and branch to the body if true or the next test if
    // false.  Prefer the next test as a fall through.
    ControlDestination dest(clause->body_target(), &next_test, false);
    Comparison(equal, true, &dest);

    // If the comparison fell through to the true target, jump to the
    // actual body.
    if (dest.true_was_fall_through()) {
      clause->body_target()->Unuse();
      clause->body_target()->Jump();
    }
  }

  // If there was control flow to a next test from the last one
  // compiled, compile a jump to the default or break target.
  if (!next_test.is_unused()) {
    if (next_test.is_linked()) {
      next_test.Bind();
    }
    // Drop the switch value.
    frame_->Drop();
    if (default_clause != NULL) {
      default_clause->body_target()->Jump();
    } else {
      node->break_target()->Jump();
    }
  }


  // The last instruction emitted was a jump, either to the default
  // clause or the break target, or else to a case body from the loop
  // that compiles the tests.
  ASSERT(!has_valid_frame());
  // Compile case bodies as needed.
  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);

    // There are two ways to reach the body: from the corresponding
    // test or as the fall through of the previous body.
    if (clause->body_target()->is_linked() || has_valid_frame()) {
      if (clause->body_target()->is_linked()) {
        if (has_valid_frame()) {
          // If we have both a jump to the test and a fall through, put
          // a jump on the fall through path to avoid the dropping of
          // the switch value on the test path.  The exception is the
          // default which has already had the switch value dropped.
          if (clause->is_default()) {
            clause->body_target()->Bind();
          } else {
            JumpTarget body(this);
            body.Jump();
            clause->body_target()->Bind();
            frame_->Drop();
            body.Bind();
          }
        } else {
          // No fall through to worry about.
          clause->body_target()->Bind();
          if (!clause->is_default()) {
            frame_->Drop();
          }
        }
      } else {
        // Otherwise, we have only fall through.
        ASSERT(has_valid_frame());
      }

      // We are now prepared to compile the body.
      Comment cmnt(masm_, "[ Case body");
      VisitStatements(clause->statements());
    }
    clause->body_target()->Unuse();
  }

  // We may not have a valid frame here so bind the break target only
  // if needed.
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}
2312 | |
2313 | |
2314 void CodeGenerator::VisitLoopStatement(LoopStatement* node) { | |
2315 ASSERT(!in_spilled_code()); | |
2316 Comment cmnt(masm_, "[ LoopStatement"); | |
2317 CodeForStatementPosition(node); | |
2318 node->break_target()->Initialize(this); | |
2319 | |
2320 // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a | |
2321 // known result for the test expression, with no side effects. | |
2322 enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW; | |
2323 if (node->cond() == NULL) { | |
2324 ASSERT(node->type() == LoopStatement::FOR_LOOP); | |
2325 info = ALWAYS_TRUE; | |
2326 } else { | |
2327 Literal* lit = node->cond()->AsLiteral(); | |
2328 if (lit != NULL) { | |
2329 if (lit->IsTrue()) { | |
2330 info = ALWAYS_TRUE; | |
2331 } else if (lit->IsFalse()) { | |
2332 info = ALWAYS_FALSE; | |
2333 } | |
2334 } | |
2335 } | |
2336 | |
2337 switch (node->type()) { | |
2338 case LoopStatement::DO_LOOP: { | |
2339 JumpTarget body(this, JumpTarget::BIDIRECTIONAL); | |
2340 IncrementLoopNesting(); | |
2341 | |
2342 // Label the top of the loop for the backward jump if necessary. | |
2343 if (info == ALWAYS_TRUE) { | |
2344 // Use the continue target. | |
2345 node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL); | |
2346 node->continue_target()->Bind(); | |
2347 } else if (info == ALWAYS_FALSE) { | |
2348 // No need to label it. | |
2349 node->continue_target()->Initialize(this); | |
2350 } else { | |
2351 // Continue is the test, so use the backward body target. | |
2352 ASSERT(info == DONT_KNOW); | |
2353 node->continue_target()->Initialize(this); | |
2354 body.Bind(); | |
2355 } | |
2356 | |
2357 CheckStack(); // TODO(1222600): ignore if body contains calls. | |
2358 Visit(node->body()); | |
2359 | |
2360 // Compile the test. | |
2361 if (info == ALWAYS_TRUE) { | |
2362 // If control flow can fall off the end of the body, jump back | |
2363 // to the top and bind the break target at the exit. | |
2364 if (has_valid_frame()) { | |
2365 node->continue_target()->Jump(); | |
2366 } | |
2367 if (node->break_target()->is_linked()) { | |
2368 node->break_target()->Bind(); | |
2369 } | |
2370 | |
2371 } else if (info == ALWAYS_FALSE) { | |
2372 // We may have had continues or breaks in the body. | |
2373 if (node->continue_target()->is_linked()) { | |
2374 node->continue_target()->Bind(); | |
2375 } | |
2376 if (node->break_target()->is_linked()) { | |
2377 node->break_target()->Bind(); | |
2378 } | |
2379 | |
2380 } else { | |
2381 ASSERT(info == DONT_KNOW); | |
2382 // We have to compile the test expression if it can be reached by | |
2383 // control flow falling out of the body or via continue. | |
2384 if (node->continue_target()->is_linked()) { | |
2385 node->continue_target()->Bind(); | |
2386 } | |
2387 if (has_valid_frame()) { | |
2388 ControlDestination dest(&body, node->break_target(), false); | |
2389 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); | |
2390 } | |
2391 if (node->break_target()->is_linked()) { | |
2392 node->break_target()->Bind(); | |
2393 } | |
2394 } | |
2395 break; | |
2396 } | |
2397 | |
2398 case LoopStatement::WHILE_LOOP: { | |
2399 // Do not duplicate conditions that may have function literal | |
2400 // subexpressions. This can cause us to compile the function | |
2401 // literal twice. | |
2402 bool test_at_bottom = !node->may_have_function_literal(); | |
2403 | |
2404 IncrementLoopNesting(); | |
2405 | |
2406 // If the condition is always false and has no side effects, we | |
2407 // do not need to compile anything. | |
2408 if (info == ALWAYS_FALSE) break; | |
2409 | |
2410 JumpTarget body; | |
2411 if (test_at_bottom) { | |
2412 body.Initialize(this, JumpTarget::BIDIRECTIONAL); | |
2413 } else { | |
2414 body.Initialize(this); | |
2415 } | |
2416 | |
2417 // Based on the condition analysis, compile the test as necessary. | |
2418 if (info == ALWAYS_TRUE) { | |
2419 // We will not compile the test expression. Label the top of | |
2420 // the loop with the continue target. | |
2421 node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL); | |
2422 node->continue_target()->Bind(); | |
2423 } else { | |
2424 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here. | |
2425 if (test_at_bottom) { | |
2426 // Continue is the test at the bottom, no need to label the | |
2427 // test at the top. The body is a backward target. | |
2428 node->continue_target()->Initialize(this); | |
2429 } else { | |
2430 // Label the test at the top as the continue target. The | |
2431 // body is a forward-only target. | |
2432 node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL); | |
2433 node->continue_target()->Bind(); | |
2434 } | |
2435 // Compile the test with the body as the true target and | |
2436 // preferred fall-through and with the break target as the | |
2437 // false target. | |
2438 ControlDestination dest(&body, node->break_target(), true); | |
2439 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); | |
2440 | |
2441 if (dest.false_was_fall_through()) { | |
2442 // If we got the break target as fall-through, the test may | |
2443 // have been unconditionally false (if there are no jumps to | |
2444 // the body). | |
2445 if (!body.is_linked()) break; | |
2446 | |
2447 // Otherwise, jump around the body on the fall through and | |
2448 // then bind the body target. | |
2449 node->break_target()->Unuse(); | |
2450 node->break_target()->Jump(); | |
2451 body.Bind(); | |
2452 } | |
2453 } | |
2454 | |
2455 CheckStack(); // TODO(1222600): ignore if body contains calls. | |
2456 Visit(node->body()); | |
2457 | |
2458 // Based on the condition analysis, compile the backward jump as | |
2459 // necessary. | |
2460 if (info == ALWAYS_TRUE) { | |
2461 // The loop body has been labeled with the continue target. | |
2462 if (has_valid_frame()) { | |
2463 node->continue_target()->Jump(); | |
2464 } | |
2465 } else { | |
2466 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here. | |
2467 if (test_at_bottom) { | |
2468 // If we have chosen to recompile the test at the bottom, | |
2469 // then it is the continue target. | |
2470 if (node->continue_target()->is_linked()) { | |
2471 node->continue_target()->Bind(); | |
2472 } | |
2473 if (has_valid_frame()) { | |
2474 // The break target is the fall-through (body is a backward | |
2475 // jump from here and thus an invalid fall-through). | |
2476 ControlDestination dest(&body, node->break_target(), false); | |
2477 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); | |
2478 } | |
2479 } else { | |
2480 // If we have chosen not to recompile the test at the | |
2481 // bottom, jump back to the one at the top. | |
2482 if (has_valid_frame()) { | |
2483 node->continue_target()->Jump(); | |
2484 } | |
2485 } | |
2486 } | |
2487 | |
2488 // The break target may be already bound (by the condition), or | |
2489 // there may not be a valid frame. Bind it only if needed. | |
2490 if (node->break_target()->is_linked()) { | |
2491 node->break_target()->Bind(); | |
2492 } | |
2493 break; | |
2494 } | |
2495 | |
2496 case LoopStatement::FOR_LOOP: { | |
2497 // Do not duplicate conditions that may have function literal | |
2498 // subexpressions. This can cause us to compile the function | |
2499 // literal twice. | |
2500 bool test_at_bottom = !node->may_have_function_literal(); | |
2501 | |
2502 // Compile the init expression if present. | |
2503 if (node->init() != NULL) { | |
2504 Visit(node->init()); | |
2505 } | |
2506 | |
2507 IncrementLoopNesting(); | |
2508 | |
2509 // If the condition is always false and has no side effects, we | |
2510 // do not need to compile anything else. | |
2511 if (info == ALWAYS_FALSE) break; | |
2512 | |
2513 // Target for backward edge if no test at the bottom, otherwise | |
2514 // unused. | |
2515 JumpTarget loop(this, JumpTarget::BIDIRECTIONAL); | |
2516 | |
2517 // Target for backward edge if there is a test at the bottom, | |
2518 // otherwise used as target for test at the top. | |
2519 JumpTarget body; | |
2520 if (test_at_bottom) { | |
2521 body.Initialize(this, JumpTarget::BIDIRECTIONAL); | |
2522 } else { | |
2523 body.Initialize(this); | |
2524 } | |
2525 | |
2526 // Based on the condition analysis, compile the test as necessary. | |
2527 if (info == ALWAYS_TRUE) { | |
2528 // We will not compile the test expression. Label the top of | |
2529 // the loop. | |
2530 if (node->next() == NULL) { | |
2531 // Use the continue target if there is no update expression. | |
2532 node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL); | |
2533 node->continue_target()->Bind(); | |
2534 } else { | |
2535 // Otherwise use the backward loop target. | |
2536 node->continue_target()->Initialize(this); | |
2537 loop.Bind(); | |
2538 } | |
2539 } else { | |
2540 ASSERT(info == DONT_KNOW); | |
2541 if (test_at_bottom) { | |
2542 // Continue is either the update expression or the test at | |
2543 // the bottom, no need to label the test at the top. | |
2544 node->continue_target()->Initialize(this); | |
2545 } else if (node->next() == NULL) { | |
2546 // We are not recompiling the test at the bottom and there | |
2547 // is no update expression. | |
2548 node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL); | |
2549 node->continue_target()->Bind(); | |
2550 } else { | |
2551 // We are not recompiling the test at the bottom and there | |
2552 // is an update expression. | |
2553 node->continue_target()->Initialize(this); | |
2554 loop.Bind(); | |
2555 } | |
2556 | |
2557 // Compile the test with the body as the true target and | |
2558 // preferred fall-through and with the break target as the | |
2559 // false target. | |
2560 ControlDestination dest(&body, node->break_target(), true); | |
2561 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); | |
2562 | |
2563 if (dest.false_was_fall_through()) { | |
2564 // If we got the break target as fall-through, the test may | |
2565 // have been unconditionally false (if there are no jumps to | |
2566 // the body). | |
2567 if (!body.is_linked()) break; | |
2568 | |
2569 // Otherwise, jump around the body on the fall through and | |
2570 // then bind the body target. | |
2571 node->break_target()->Unuse(); | |
2572 node->break_target()->Jump(); | |
2573 body.Bind(); | |
2574 } | |
2575 } | |
2576 | |
2577 CheckStack(); // TODO(1222600): ignore if body contains calls. | |
2578 Visit(node->body()); | |
2579 | |
2580 // If there is an update expression, compile it if necessary. | |
2581 if (node->next() != NULL) { | |
2582 if (node->continue_target()->is_linked()) { | |
2583 node->continue_target()->Bind(); | |
2584 } | |
2585 | |
2586 // Control can reach the update by falling out of the body or | |
2587 // by a continue. | |
2588 if (has_valid_frame()) { | |
2589 // Record the source position of the statement as this code | |
2590 // which is after the code for the body actually belongs to | |
2591 // the loop statement and not the body. | |
2592 CodeForStatementPosition(node); | |
2593 Visit(node->next()); | |
2594 } | |
2595 } | |
2596 | |
2597 // Based on the condition analysis, compile the backward jump as | |
2598 // necessary. | |
2599 if (info == ALWAYS_TRUE) { | |
2600 if (has_valid_frame()) { | |
2601 if (node->next() == NULL) { | |
2602 node->continue_target()->Jump(); | |
2603 } else { | |
2604 loop.Jump(); | |
2605 } | |
2606 } | |
2607 } else { | |
2608 ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here. | |
2609 if (test_at_bottom) { | |
2610 if (node->continue_target()->is_linked()) { | |
2611 // We can have dangling jumps to the continue target if | |
2612 // there was no update expression. | |
2613 node->continue_target()->Bind(); | |
2614 } | |
2615 // Control can reach the test at the bottom by falling out | |
2616 // of the body, by a continue in the body, or from the | |
2617 // update expression. | |
2618 if (has_valid_frame()) { | |
2619 // The break target is the fall-through (body is a | |
2620 // backward jump from here). | |
2621 ControlDestination dest(&body, node->break_target(), false); | |
2622 LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); | |
2623 } | |
2624 } else { | |
2625 // Otherwise, jump back to the test at the top. | |
2626 if (has_valid_frame()) { | |
2627 if (node->next() == NULL) { | |
2628 node->continue_target()->Jump(); | |
2629 } else { | |
2630 loop.Jump(); | |
2631 } | |
2632 } | |
2633 } | |
2634 } | |
2635 | |
2636 // The break target may be already bound (by the condition), or | |
2637 // there may not be a valid frame. Bind it only if needed. | |
2638 if (node->break_target()->is_linked()) { | |
2639 node->break_target()->Bind(); | |
2640 } | |
2641 break; | |
2642 } | |
2643 } | |
2644 | |
2645 DecrementLoopNesting(); | |
2646 node->continue_target()->Unuse(); | |
2647 node->break_target()->Unuse(); | |
2648 } | |
2649 | |
2650 | |
// Emit code for a for-in statement.  The enumerable is converted to a
// JSObject (null and undefined skip the loop entirely), its property
// names are obtained either as a Map (fast case, backed by an enum
// cache) or as a FixedArray (slow case), and the body runs once per
// name with the current name stored into the 'each' expression.  Loop
// state lives in five stack slots (see layout comment below).
void CodeGenerator::VisitForInStatement(ForInStatement* node) {
  ASSERT(!in_spilled_code());
  VirtualFrame::SpilledScope spilled_scope(this);
  Comment cmnt(masm_, "[ ForInStatement");
  CodeForStatementPosition(node);

  JumpTarget primitive(this);
  JumpTarget jsobject(this);
  JumpTarget fixed_array(this);
  // The loop entry is a backward-jump target: reached from the
  // fall-through above and from the bottom of the loop body.
  JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
  JumpTarget end_del_check(this);
  JumpTarget exit(this);

  // Get the object to enumerate over (converted to JSObject).
  LoadAndSpill(node->enumerable());

  // Both SpiderMonkey and kjs ignore null and undefined in contrast
  // to the specification.  12.6.4 mandates a call to ToObject.
  frame_->EmitPop(eax);

  // eax: value to be iterated over
  __ cmp(eax, Factory::undefined_value());
  exit.Branch(equal);
  __ cmp(eax, Factory::null_value());
  exit.Branch(equal);

  // Stack layout in body:
  // [iteration counter (smi)] <- slot 0
  // [length of array] <- slot 1
  // [FixedArray] <- slot 2
  // [Map or 0] <- slot 3
  // [Object] <- slot 4

  // Check if enumerable is already a JSObject.  Smis are primitives,
  // so the smi check must come before the map/instance-type load.
  // eax: value to be iterated over
  __ test(eax, Immediate(kSmiTagMask));
  primitive.Branch(zero);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
  jsobject.Branch(above_equal);

  primitive.Bind();
  frame_->EmitPush(eax);
  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
  // function call returns the value in eax, which is where we want it below

  jsobject.Bind();
  // Get the set of properties (as a FixedArray or Map).
  // eax: value to be iterated over
  frame_->EmitPush(eax);  // push the object being iterated over (slot 4)

  frame_->EmitPush(eax);  // push the Object (slot 4) for the runtime call
  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);

  // If we got a Map, we can do a fast modification check.
  // Otherwise, we got a FixedArray, and we have to do a slow check.
  // eax: map or fixed array (result from call to
  // Runtime::kGetPropertyNamesFast)
  __ mov(edx, Operand(eax));
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ cmp(ecx, Factory::meta_map());
  fixed_array.Branch(not_equal);

  // Get enum cache
  // eax: map (result from call to Runtime::kGetPropertyNamesFast)
  __ mov(ecx, Operand(eax));
  __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
  // Get the bridge array held in the enumeration index field.
  __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
  // Get the cache from the bridge array.
  __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));

  frame_->EmitPush(eax);  // <- slot 3
  frame_->EmitPush(edx);  // <- slot 2
  __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
  __ shl(eax, kSmiTagSize);  // tag the length as a smi
  frame_->EmitPush(eax);  // <- slot 1
  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
  entry.Jump();

  fixed_array.Bind();
  // Slow case: slot 3 holds 0 instead of a map, which forces the
  // FILTER_KEY check on every iteration.
  // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
  frame_->EmitPush(eax);  // <- slot 2

  // Push the length of the array and the initial index onto the stack.
  __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
  __ shl(eax, kSmiTagSize);  // tag the length as a smi
  frame_->EmitPush(eax);  // <- slot 1
  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0

  // Condition.
  entry.Bind();
  // Grab the current frame's height for the break and continue
  // targets only after all the state is pushed on the frame.
  node->break_target()->Initialize(this);
  node->continue_target()->Initialize(this);

  __ mov(eax, frame_->ElementAt(0));  // load the current count
  __ cmp(eax, frame_->ElementAt(1));  // compare to the array length
  node->break_target()->Branch(above_equal);

  // Get the i'th entry of the array.  The index in eax is a smi, so
  // times_2 scaling yields a byte offset of index * kPointerSize.
  __ mov(edx, frame_->ElementAt(2));
  __ mov(ebx, Operand(edx, eax, times_2,
                      FixedArray::kHeaderSize - kHeapObjectTag));

  // Get the expected map from the stack or a zero map in the
  // permanent slow case eax: current iteration count ebx: i'th entry
  // of the enum cache
  __ mov(edx, frame_->ElementAt(3));
  // Check if the expected map still matches that of the enumerable.
  // If not, we have to filter the key.
  // eax: current iteration count
  // ebx: i'th entry of the enum cache
  // edx: expected map value
  __ mov(ecx, frame_->ElementAt(4));
  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
  __ cmp(ecx, Operand(edx));
  end_del_check.Branch(equal);

  // Convert the entry to a string (or null if it isn't a property anymore).
  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
  frame_->EmitPush(ebx);  // push entry
  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
  __ mov(ebx, Operand(eax));

  // If the property has been removed while iterating, we just skip it.
  __ cmp(ebx, Factory::null_value());
  node->continue_target()->Branch(equal);

  end_del_check.Bind();
  // Store the entry in the 'each' expression and take another spin in the
  // loop.  edx: i'th entry of the enum cache (or string there of)
  frame_->EmitPush(ebx);
  { Reference each(this, node->each());
    // Loading a reference may leave the frame in an unspilled state.
    frame_->SpillAll();
    if (!each.is_illegal()) {
      if (each.size() > 0) {
        frame_->EmitPush(frame_->ElementAt(each.size()));
      }
      // If the reference was to a slot we rely on the convenient property
      // that it doesn't matter whether a value (eg, ebx pushed above) is
      // right on top of or right underneath a zero-sized reference.
      each.SetValue(NOT_CONST_INIT);
      if (each.size() > 0) {
        // It's safe to pop the value lying on top of the reference before
        // unloading the reference itself (which preserves the top of stack,
        // ie, now the topmost value of the non-zero sized reference), since
        // we will discard the top of stack after unloading the reference
        // anyway.
        frame_->Drop();
      }
    }
  }
  // Unloading a reference may leave the frame in an unspilled state.
  frame_->SpillAll();

  // Discard the i'th entry pushed above or else the remainder of the
  // reference, whichever is currently on top of the stack.
  frame_->Drop();

  // Body.
  CheckStack();  // TODO(1222600): ignore if body contains calls.
  VisitAndSpill(node->body());

  // Next.  Reestablish a spilled frame in case we are coming here via
  // a continue in the body.
  node->continue_target()->Bind();
  frame_->SpillAll();
  // Increment the (smi) iteration counter in slot 0.
  frame_->EmitPop(eax);
  __ add(Operand(eax), Immediate(Smi::FromInt(1)));
  frame_->EmitPush(eax);
  entry.Jump();

  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
  // any frame.
  node->break_target()->Bind();
  frame_->Drop(5);

  // Exit.
  exit.Bind();

  node->continue_target()->Unuse();
  node->break_target()->Unuse();
}
2839 | |
2840 | |
// Emit code for a try-catch statement.  The catch block is emitted
// first: it is the point the runtime resumes at after unwinding to
// this handler, with the exception in eax.  The try block follows,
// guarded by a TRY_CATCH handler pushed on the frame.  All escaping
// jumps out of the try block (break/continue/return) are shadowed so
// the handler can be unlinked before the escape is taken.
void CodeGenerator::VisitTryCatch(TryCatch* node) {
  ASSERT(!in_spilled_code());
  VirtualFrame::SpilledScope spilled_scope(this);
  Comment cmnt(masm_, "[ TryCatch");
  CodeForStatementPosition(node);

  JumpTarget try_block(this);
  JumpTarget exit(this);

  try_block.Call();
  // --- Catch block ---
  // The exception arrives in eax; push it so it can be bound to the
  // catch variable.
  frame_->EmitPush(eax);

  // Store the caught exception in the catch variable.
  { Reference ref(this, node->catch_var());
    ASSERT(ref.is_slot());
    // Load the exception to the top of the stack.  Here we make use of the
    // convenient property that it doesn't matter whether a value is
    // immediately on top of or underneath a zero-sized reference.
    ref.SetValue(NOT_CONST_INIT);
  }

  // Remove the exception from the stack.
  frame_->Drop();

  VisitStatementsAndSpill(node->catch_block()->statements());
  if (has_valid_frame()) {
    exit.Jump();
  }


  // --- Try block ---
  try_block.Bind();

  frame_->PushTryHandler(TRY_CATCH_HANDLER);
  // Remember the frame height with the handler on it, so escape paths
  // can later compute how much to forget when unwinding.
  int handler_height = frame_->height();

  // Shadow the jump targets for all escapes from the try block, including
  // returns.  During shadowing, the original target is hidden as the
  // ShadowTarget and operations on the original actually affect the
  // shadowing target.
  //
  // We should probably try to unify the escaping targets and the return
  // target.
  int nof_escapes = node->escaping_targets()->length();
  List<ShadowTarget*> shadows(1 + nof_escapes);

  // Add the shadow target for the function return.
  static const int kReturnShadowIndex = 0;
  shadows.Add(new ShadowTarget(&function_return_));
  bool function_return_was_shadowed = function_return_is_shadowed_;
  function_return_is_shadowed_ = true;
  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);

  // Add the remaining shadow targets.
  for (int i = 0; i < nof_escapes; i++) {
    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
  }

  // Generate code for the statements in the try block.
  VisitStatementsAndSpill(node->try_block()->statements());

  // Stop the introduced shadowing and count the number of required unlinks.
  // After shadowing stops, the original targets are unshadowed and the
  // ShadowTargets represent the formerly shadowing targets.
  bool has_unlinks = false;
  for (int i = 0; i < shadows.length(); i++) {
    shadows[i]->StopShadowing();
    has_unlinks = has_unlinks || shadows[i]->is_linked();
  }
  function_return_is_shadowed_ = function_return_was_shadowed;

  // Get an external reference to the handler address.
  ExternalReference handler_address(Top::k_handler_address);

  // Make sure that there's nothing left on the stack above the
  // handler structure.
  if (FLAG_debug_code) {
    __ mov(eax, Operand::StaticVariable(handler_address));
    __ lea(eax, Operand(eax, StackHandlerConstants::kAddressDisplacement));
    __ cmp(esp, Operand(eax));
    __ Assert(equal, "stack pointer should point to top handler");
  }

  // If we can fall off the end of the try block, unlink from try chain.
  if (has_valid_frame()) {
    // The next handler address is on top of the frame.  Unlink from
    // the handler list and drop the rest of this handler from the
    // frame.
    frame_->EmitPop(Operand::StaticVariable(handler_address));
    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
    if (has_unlinks) {
      exit.Jump();
    }
  }

  // Generate unlink code for the (formerly) shadowing targets that
  // have been jumped to.  Deallocate each shadow target.
  Result return_value(this);
  for (int i = 0; i < shadows.length(); i++) {
    if (shadows[i]->is_linked()) {
      // Unlink from try chain; be careful not to destroy the TOS if
      // there is one.
      if (i == kReturnShadowIndex) {
        // A shadowed return carries the return value; keep it in eax.
        shadows[i]->Bind(&return_value);
        return_value.ToRegister(eax);
      } else {
        shadows[i]->Bind();
      }
      // Because we can be jumping here (to spilled code) from
      // unspilled code, we need to reestablish a spilled frame at
      // this block.
      frame_->SpillAll();

      // Reload sp from the top handler, because some statements that we
      // break from (eg, for...in) may have left stuff on the stack.
      __ mov(edx, Operand::StaticVariable(handler_address));
      const int kNextOffset = StackHandlerConstants::kNextOffset +
          StackHandlerConstants::kAddressDisplacement;
      __ lea(esp, Operand(edx, kNextOffset));
      frame_->Forget(frame_->height() - handler_height);

      frame_->EmitPop(Operand::StaticVariable(handler_address));
      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
      // next_sp popped.

      if (i == kReturnShadowIndex) {
        // If the return target is still shadowed by an outer try, jump
        // to its shadow; otherwise prepare the frame for the actual
        // return before jumping.
        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
        shadows[i]->other_target()->Jump(&return_value);
      } else {
        shadows[i]->other_target()->Jump();
      }
    }
    delete shadows[i];
  }

  exit.Bind();
}
2979 | |
2980 | |
// Emit code for a try-finally statement.  The finally block is entered
// with a (possibly faked) value on the stack plus a smi state in ecx
// recording WHY it was entered: FALLING (fell off the end of the try
// block), THROWING (an exception unwound to the handler), or
// JUMPING + i (escape through shadowed target i, where i == 0 is the
// function return).  After the finally block runs, the state is used
// to dispatch to the right continuation or to rethrow.
void CodeGenerator::VisitTryFinally(TryFinally* node) {
  ASSERT(!in_spilled_code());
  VirtualFrame::SpilledScope spilled_scope(this);
  Comment cmnt(masm_, "[ TryFinally");
  CodeForStatementPosition(node);

  // State: Used to keep track of reason for entering the finally
  // block.  Should probably be extended to hold information for
  // break/continue from within the try block.
  enum { FALLING, THROWING, JUMPING };

  JumpTarget try_block(this);
  JumpTarget finally_block(this);

  try_block.Call();

  // --- Throw continuation ---
  // The runtime resumes here after unwinding, with the exception in
  // eax; push it and enter the finally block in the THROWING state.
  frame_->EmitPush(eax);
  // In case of thrown exceptions, this is where we continue.
  __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
  finally_block.Jump();

  // --- Try block ---
  try_block.Bind();

  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
  // Remember the frame height with the handler on it, so escape paths
  // can later compute how much to forget when unwinding.
  int handler_height = frame_->height();

  // Shadow the jump targets for all escapes from the try block, including
  // returns.  During shadowing, the original target is hidden as the
  // ShadowTarget and operations on the original actually affect the
  // shadowing target.
  //
  // We should probably try to unify the escaping targets and the return
  // target.
  int nof_escapes = node->escaping_targets()->length();
  List<ShadowTarget*> shadows(1 + nof_escapes);

  // Add the shadow target for the function return.
  static const int kReturnShadowIndex = 0;
  shadows.Add(new ShadowTarget(&function_return_));
  bool function_return_was_shadowed = function_return_is_shadowed_;
  function_return_is_shadowed_ = true;
  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);

  // Add the remaining shadow targets.
  for (int i = 0; i < nof_escapes; i++) {
    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
  }

  // Generate code for the statements in the try block.
  VisitStatementsAndSpill(node->try_block()->statements());

  // Stop the introduced shadowing and count the number of required unlinks.
  // After shadowing stops, the original targets are unshadowed and the
  // ShadowTargets represent the formerly shadowing targets.
  int nof_unlinks = 0;
  for (int i = 0; i < shadows.length(); i++) {
    shadows[i]->StopShadowing();
    if (shadows[i]->is_linked()) nof_unlinks++;
  }
  function_return_is_shadowed_ = function_return_was_shadowed;

  // Get an external reference to the handler address.
  ExternalReference handler_address(Top::k_handler_address);

  // If we can fall off the end of the try block, unlink from the try
  // chain and set the state on the frame to FALLING.
  if (has_valid_frame()) {
    // The next handler address is on top of the frame.
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    frame_->EmitPop(eax);
    __ mov(Operand::StaticVariable(handler_address), eax);
    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

    // Fake a top of stack value (unneeded when FALLING) and set the
    // state in ecx, then jump around the unlink blocks if any.
    frame_->EmitPush(Immediate(Factory::undefined_value()));
    __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
    if (nof_unlinks > 0) {
      finally_block.Jump();
    }
  }

  // Generate code to unlink and set the state for the (formerly)
  // shadowing targets that have been jumped to.
  for (int i = 0; i < shadows.length(); i++) {
    if (shadows[i]->is_linked()) {
      // If we have come from the shadowed return, the return value is
      // on the virtual frame.  We must preserve it until it is
      // pushed.
      if (i == kReturnShadowIndex) {
        Result return_value(this);
        shadows[i]->Bind(&return_value);
        return_value.ToRegister(eax);
      } else {
        shadows[i]->Bind();
      }
      // Because we can be jumping here (to spilled code) from
      // unspilled code, we need to reestablish a spilled frame at
      // this block.
      frame_->SpillAll();

      // Reload sp from the top handler, because some statements that
      // we break from (eg, for...in) may have left stuff on the
      // stack.
      __ mov(edx, Operand::StaticVariable(handler_address));
      const int kNextOffset = StackHandlerConstants::kNextOffset +
          StackHandlerConstants::kAddressDisplacement;
      __ lea(esp, Operand(edx, kNextOffset));
      frame_->Forget(frame_->height() - handler_height);

      // Unlink this handler and drop it from the frame.
      frame_->EmitPop(Operand::StaticVariable(handler_address));
      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

      if (i == kReturnShadowIndex) {
        // If this target shadowed the function return, materialize
        // the return value on the stack.
        frame_->EmitPush(eax);
      } else {
        // Fake TOS for targets that shadowed breaks and continues.
        frame_->EmitPush(Immediate(Factory::undefined_value()));
      }
      // Encode which escape we came from in the state value.
      __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
      if (--nof_unlinks > 0) {
        // If this is not the last unlink block, jump around the next.
        finally_block.Jump();
      }
    }
  }

  // --- Finally block ---
  finally_block.Bind();

  // Push the state on the stack.
  frame_->EmitPush(ecx);

  // We keep two elements on the stack - the (possibly faked) result
  // and the state - while evaluating the finally block.
  //
  // Generate code for the statements in the finally block.
  VisitStatementsAndSpill(node->finally_block()->statements());

  if (has_valid_frame()) {
    // Restore state and return value or faked TOS.
    frame_->EmitPop(ecx);
    frame_->EmitPop(eax);
  }

  // Generate code to jump to the right destination for all used
  // formerly shadowing targets.  Deallocate each shadow target.
  for (int i = 0; i < shadows.length(); i++) {
    if (has_valid_frame() && shadows[i]->is_bound()) {
      BreakTarget* original = shadows[i]->other_target();
      __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
      if (i == kReturnShadowIndex) {
        // The return value is (already) in eax.
        Result return_value = allocator_->Allocate(eax);
        ASSERT(return_value.is_valid());
        if (function_return_is_shadowed_) {
          original->Branch(equal, &return_value);
        } else {
          // Branch around the preparation for return which may emit
          // code.
          JumpTarget skip(this);
          skip.Branch(not_equal);
          frame_->PrepareForReturn();
          original->Jump(&return_value);
          skip.Bind();
        }
      } else {
        original->Branch(equal);
      }
    }
    delete shadows[i];
  }

  if (has_valid_frame()) {
    // Check if we need to rethrow the exception.
    JumpTarget exit(this);
    __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
    exit.Branch(not_equal);

    // Rethrow exception.
    frame_->EmitPush(eax);  // undo pop from above
    frame_->CallRuntime(Runtime::kReThrow, 1);

    // Done.
    exit.Bind();
  }
}
3172 | |
3173 | |
// Emit code for a 'debugger' statement: a call into the runtime debug
// break entry.  Without debugger support this emits nothing beyond
// recording the statement position.
void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ DebuggerStatement");
  CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Spill everything, even constants, to the frame.
  frame_->SpillAll();
  frame_->CallRuntime(Runtime::kDebugBreak, 0);
  // Ignore the return value.
#endif
}
3185 | |
3186 | |
3187 void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { | |
3188 ASSERT(boilerplate->IsBoilerplate()); | |
3189 | |
3190 // Push the boilerplate on the stack. | |
3191 frame_->Push(boilerplate); | |
3192 | |
3193 // Create a new closure. | |
3194 frame_->Push(esi); | |
3195 Result result = frame_->CallRuntime(Runtime::kNewClosure, 2); | |
3196 frame_->Push(&result); | |
3197 } | |
3198 | |
3199 | |
3200 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { | |
3201 Comment cmnt(masm_, "[ FunctionLiteral"); | |
3202 | |
3203 // Build the function boilerplate and instantiate it. | |
3204 Handle<JSFunction> boilerplate = BuildBoilerplate(node); | |
3205 // Check for stack-overflow exception. | |
3206 if (HasStackOverflow()) return; | |
3207 InstantiateBoilerplate(boilerplate); | |
3208 } | |
3209 | |
3210 | |
3211 void CodeGenerator::VisitFunctionBoilerplateLiteral( | |
3212 FunctionBoilerplateLiteral* node) { | |
3213 Comment cmnt(masm_, "[ FunctionBoilerplateLiteral"); | |
3214 InstantiateBoilerplate(node->boilerplate()); | |
3215 } | |
3216 | |
3217 | |
3218 void CodeGenerator::VisitConditional(Conditional* node) { | |
3219 Comment cmnt(masm_, "[ Conditional"); | |
3220 JumpTarget then(this); | |
3221 JumpTarget else_(this); | |
3222 JumpTarget exit(this); | |
3223 ControlDestination dest(&then, &else_, true); | |
3224 LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); | |
3225 | |
3226 if (dest.false_was_fall_through()) { | |
3227 // The else target was bound, so we compile the else part first. | |
3228 Load(node->else_expression(), typeof_state()); | |
3229 | |
3230 if (then.is_linked()) { | |
3231 exit.Jump(); | |
3232 then.Bind(); | |
3233 Load(node->then_expression(), typeof_state()); | |
3234 } | |
3235 } else { | |
3236 // The then target was bound, so we compile the then part first. | |
3237 Load(node->then_expression(), typeof_state()); | |
3238 | |
3239 if (else_.is_linked()) { | |
3240 exit.Jump(); | |
3241 else_.Bind(); | |
3242 Load(node->else_expression(), typeof_state()); | |
3243 } | |
3244 } | |
3245 | |
3246 exit.Bind(); | |
3247 } | |
3248 | |
3249 | |
// Push the value of a variable slot onto the virtual frame.
// LOOKUP slots (dynamically scoped variables) may take a fast path
// that checks eval-introduced context extensions inline before
// falling back to a runtime call; CONST slots convert 'the hole'
// (uninitialized) to undefined; PARAMETER, LOCAL, and CONTEXT slots
// are plain frame or context loads.
void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
  if (slot->type() == Slot::LOOKUP) {
    ASSERT(slot->var()->is_dynamic());

    JumpTarget slow(this);
    JumpTarget done(this);
    Result value(this);

    // Generate fast-case code for variables that might be shadowed by
    // eval-introduced variables.  Eval is used a lot without
    // introducing variables.  In those cases, we do not want to
    // perform a runtime call for all variables in the scope
    // containing the eval.
    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
      // If there was no control flow to slow, we can exit early.
      if (!slow.is_linked()) {
        frame_->Push(&value);
        return;
      }

      done.Jump(&value);

    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
      // Only generate the fast case for locals that rewrite to slots.
      // This rules out argument loads.
      if (potential_slot != NULL) {
        // Allocate a fresh register to use as a temp in
        // ContextSlotOperandCheckExtensions and to hold the result
        // value.
        value = allocator_->Allocate();
        ASSERT(value.is_valid());
        __ mov(value.reg(),
               ContextSlotOperandCheckExtensions(potential_slot,
                                                 value,
                                                 &slow));
        // A const local may still be uninitialized ('the hole'); map
        // that to undefined, matching the CONST case below.
        if (potential_slot->var()->mode() == Variable::CONST) {
          __ cmp(value.reg(), Factory::the_hole_value());
          done.Branch(not_equal, &value);
          __ mov(value.reg(), Factory::undefined_value());
        }
        // There is always control flow to slow from
        // ContextSlotOperandCheckExtensions so we have to jump around
        // it.
        done.Jump(&value);
      }
    }

    // Slow case: do a runtime lookup of the context slot.  Inside a
    // typeof, use the variant that does not throw a reference error
    // for undefined variables.
    slow.Bind();
    frame_->Push(esi);
    frame_->Push(slot->var()->name());
    if (typeof_state == INSIDE_TYPEOF) {
      value =
          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
    } else {
      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
    }

    done.Bind(&value);
    frame_->Push(&value);

  } else if (slot->var()->mode() == Variable::CONST) {
    // Const slots may contain 'the hole' value (the constant hasn't been
    // initialized yet) which needs to be converted into the 'undefined'
    // value.
    //
    // We currently spill the virtual frame because constants use the
    // potentially unsafe direct-frame access of SlotOperand.
    VirtualFrame::SpilledScope spilled_scope(this);
    Comment cmnt(masm_, "[ Load const");
    JumpTarget exit(this);
    __ mov(ecx, SlotOperand(slot, ecx));
    __ cmp(ecx, Factory::the_hole_value());
    exit.Branch(not_equal);
    __ mov(ecx, Factory::undefined_value());
    exit.Bind();
    frame_->EmitPush(ecx);

  } else if (slot->type() == Slot::PARAMETER) {
    frame_->PushParameterAt(slot->index());

  } else if (slot->type() == Slot::LOCAL) {
    frame_->PushLocalAt(slot->index());

  } else {
    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
    // here.
    //
    // The use of SlotOperand below is safe for an unspilled frame
    // because it will always be a context slot.
    ASSERT(slot->type() == Slot::CONTEXT);
    Result temp = allocator_->Allocate();
    ASSERT(temp.is_valid());
    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
    frame_->Push(&temp);
  }
}
3348 | |
3349 | |
// Load a dynamically looked-up variable through the global object,
// after verifying at run time that no context between the current
// scope and the global context has been extended by a call to eval.
// If an extension object is found, control transfers to |slow| (the
// fully dynamic lookup path).  Returns the loaded value; the global
// object pushed for the IC call is dropped before returning.
Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
    Slot* slot,
    TypeofState typeof_state,
    JumpTarget* slow) {
  // Check that no extension objects have been created by calls to
  // eval from the current scope to the global scope.
  Result context(esi, this);
  Result tmp = allocator_->Allocate();
  ASSERT(tmp.is_valid());  // All non-reserved registers were available.

  // Walk the static scope chain.  For every scope with heap slots that
  // calls eval, emit a check that its context has no extension object.
  Scope* s = scope();
  while (s != NULL) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
               Immediate(0));
        slow->Branch(not_equal, not_taken);
      }
      // Load next context in chain.
      __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
      context = tmp;
    }
    // If no outer scope calls eval, we do not need to check more
    // context extensions.  If we have reached an eval scope, we check
    // all extensions from this point.
    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
    s = s->outer_scope();
  }

  if (s->is_eval_scope()) {
    // Inside an eval scope the remaining chain is not known statically,
    // so loop up the context chain at run time.  There is no frame
    // effect so it is safe to use raw labels here.
    Label next, fast;
    if (!context.reg().is(tmp.reg())) {
      __ mov(tmp.reg(), context.reg());
    }
    __ bind(&next);
    // Terminate at global context.
    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
           Immediate(Factory::global_context_map()));
    __ j(equal, &fast);
    // Check that extension is NULL.
    __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
    slow->Branch(not_equal, not_taken);
    // Load next context in chain.
    __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
    __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
    __ jmp(&next);
    __ bind(&fast);
  }
  context.Unuse();
  tmp.Unuse();

  // All extension objects were empty and it is safe to use a global
  // load IC call.
  LoadGlobal();
  frame_->Push(slot->var()->name());
  // Inside typeof, use a non-contextual load so an absent global does
  // not raise a reference error (cf. the NoReferenceError runtime
  // function used on the slow path).
  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
                       ? RelocInfo::CODE_TARGET
                       : RelocInfo::CODE_TARGET_CONTEXT;
  Result answer = frame_->CallLoadIC(mode);
  // Discard the global object. The result is in answer.
  frame_->Drop();
  return answer;
}
3418 | |
3419 | |
// Store the value on top of the virtual frame into the given slot,
// leaving the value on the frame (chained assignments read it back).
// |init_state| distinguishes const initialization (CONST_INIT), which
// must write only while the slot still holds 'the hole' value.
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
  if (slot->type() == Slot::LOOKUP) {
    ASSERT(slot->var()->is_dynamic());

    // For now, just do a runtime call.  Push the context and the
    // variable name; the value to store is already on the frame.
    frame_->Push(esi);
    frame_->Push(slot->var()->name());

    Result value(this);
    if (init_state == CONST_INIT) {
      // Same as the case for a normal store, but ignores attribute
      // (e.g. READ_ONLY) of context slot so that we can initialize const
      // properties (introduced via eval("const foo = (some expr);")). Also,
      // uses the current function context instead of the top context.
      //
      // Note that we must declare the foo upon entry of eval(), via a
      // context slot declaration, but we cannot initialize it at the same
      // time, because the const declaration may be at the end of the eval
      // code (sigh...) and the const variable may have been used before
      // (where its value is 'undefined'). Thus, we can only do the
      // initialization when we actually encounter the expression and when
      // the expression operands are defined and valid, and thus we need the
      // split into 2 operations: declaration of the context slot followed
      // by initialization.
      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
    } else {
      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
    }
    // Storing a variable must keep the (new) value on the expression
    // stack. This is necessary for compiling chained assignment
    // expressions.
    frame_->Push(&value);

  } else {
    ASSERT(!slot->var()->is_dynamic());

    JumpTarget exit(this);
    if (init_state == CONST_INIT) {
      ASSERT(slot->var()->mode() == Variable::CONST);
      // Only the first const initialization must be executed (the slot
      // still contains 'the hole' value). When the assignment is executed,
      // the code is identical to a normal store (see below).
      //
      // We spill the frame in the code below because the direct-frame
      // access of SlotOperand is potentially unsafe with an unspilled
      // frame.
      VirtualFrame::SpilledScope spilled_scope(this);
      Comment cmnt(masm_, "[ Init const");
      __ mov(ecx, SlotOperand(slot, ecx));
      __ cmp(ecx, Factory::the_hole_value());
      exit.Branch(not_equal);
    }

    // We must execute the store. Storing a variable must keep the (new)
    // value on the stack. This is necessary for compiling assignment
    // expressions.
    //
    // Note: We will reach here even with slot->var()->mode() ==
    // Variable::CONST because of const declarations which will initialize
    // consts to 'the hole' value and by doing so, end up calling this code.
    if (slot->type() == Slot::PARAMETER) {
      frame_->StoreToParameterAt(slot->index());
    } else if (slot->type() == Slot::LOCAL) {
      frame_->StoreToLocalAt(slot->index());
    } else {
      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
      //
      // The use of SlotOperand below is safe for an unspilled frame
      // because the slot is a context slot.
      ASSERT(slot->type() == Slot::CONTEXT);
      // Keep a copy of the value on the frame; work on a register copy.
      frame_->Dup();
      Result value = frame_->Pop();
      value.ToRegister();
      Result start = allocator_->Allocate();
      ASSERT(start.is_valid());
      __ mov(SlotOperand(slot, start.reg()), value.reg());
      // RecordWrite may destroy the value registers.
      //
      // TODO(204): Avoid actually spilling when the value is not
      // needed (probably the common case).
      frame_->Spill(value.reg());
      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
      Result temp = allocator_->Allocate();
      ASSERT(temp.is_valid());
      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
      // The results start, value, and temp are unused by going out of
      // scope.
    }

    exit.Bind();
  }
}
3512 | |
3513 | |
// Load the value of a slot-allocated variable onto the virtual frame,
// honoring the current typeof state (inside typeof, missing dynamic
// variables must not raise reference errors).
void CodeGenerator::VisitSlot(Slot* node) {
  Comment cmnt(masm_, "[ Slot");
  LoadFromSlot(node, typeof_state());
}
3518 | |
3519 | |
3520 void CodeGenerator::VisitVariableProxy(VariableProxy* node) { | |
3521 Comment cmnt(masm_, "[ VariableProxy"); | |
3522 Variable* var = node->var(); | |
3523 Expression* expr = var->rewrite(); | |
3524 if (expr != NULL) { | |
3525 Visit(expr); | |
3526 } else { | |
3527 ASSERT(var->is_global()); | |
3528 Reference ref(this, node); | |
3529 ref.GetValue(typeof_state()); | |
3530 } | |
3531 } | |
3532 | |
3533 | |
// Push the literal's constant value on the virtual frame.  No machine
// code beyond the (possibly elided) frame push is generated.
void CodeGenerator::VisitLiteral(Literal* node) {
  Comment cmnt(masm_, "[ Literal");
  frame_->Push(node->handle());
}
3538 | |
3539 | |
// Materialize an "unsafe" smi constant in |target| in two steps: load
// the low 16 bits of the raw tagged value, then xor in the high 16
// bits.  The full 32-bit smi bit pattern thus never appears as a
// single immediate in the instruction stream (presumably so that
// attacker-chosen constants cannot be embedded verbatim in generated
// code — see IsUnsafeSmi/kMaxSmiInlinedBits).
void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
  ASSERT(target.is_valid());
  ASSERT(value->IsSmi());
  // Raw tagged smi bits; 32-bit pointer-sized cast is ia32-specific.
  int bits = reinterpret_cast<int>(*value);
  __ Set(target, Immediate(bits & 0x0000FFFF));
  __ xor_(target, bits & 0xFFFF0000);
}
3547 | |
3548 | |
3549 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { | |
3550 if (!value->IsSmi()) return false; | |
3551 int int_value = Smi::cast(*value)->value(); | |
3552 return !is_intn(int_value, kMaxSmiInlinedBits); | |
3553 } | |
3554 | |
3555 | |
// Deferred code to materialize a RegExp literal's boilerplate object
// the first time the literal is reached (i.e. when the literals-array
// entry is still undefined).  Entered with the literals array; exits
// with the boilerplate object.
class DeferredRegExpLiteral: public DeferredCode {
 public:
  DeferredRegExpLiteral(CodeGenerator* generator, RegExpLiteral* node)
      : DeferredCode(generator), node_(node) {
    set_comment("[ DeferredRegExpLiteral");
  }

  virtual void Generate();

 private:
  RegExpLiteral* node_;  // AST node supplying index, pattern, and flags.
};
3568 | |
3569 | |
3570 void DeferredRegExpLiteral::Generate() { | |
3571 Result literals(generator()); | |
3572 enter()->Bind(&literals); | |
3573 // Since the entry is undefined we call the runtime system to | |
3574 // compute the literal. | |
3575 | |
3576 VirtualFrame* frame = generator()->frame(); | |
3577 // Literal array (0). | |
3578 frame->Push(&literals); | |
3579 // Literal index (1). | |
3580 frame->Push(Smi::FromInt(node_->literal_index())); | |
3581 // RegExp pattern (2). | |
3582 frame->Push(node_->pattern()); | |
3583 // RegExp flags (3). | |
3584 frame->Push(node_->flags()); | |
3585 Result boilerplate = | |
3586 frame->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); | |
3587 exit_.Jump(&boilerplate); | |
3588 } | |
3589 | |
3590 | |
// Load a RegExp literal.  The boilerplate object is cached in the
// function's literals array; if the cached entry is still undefined,
// deferred code materializes it via the runtime.  The boilerplate
// itself is pushed as the result (no clone is made here).
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
  Comment cmnt(masm_, "[ RegExp Literal");
  DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(this, node);

  // Retrieve the literals array and check the allocated entry.  Begin
  // with a writable copy of the function of this activation in a
  // register.
  frame_->PushFunction();
  Result literals = frame_->Pop();
  literals.ToRegister();
  frame_->Spill(literals.reg());

  // Load the literals array of the function.
  __ mov(literals.reg(),
         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));

  // Load the literal at the ast saved index.
  int literal_offset =
      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
  Result boilerplate = allocator_->Allocate();
  ASSERT(boilerplate.is_valid());
  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));

  // Check whether we need to materialize the RegExp object.  If so,
  // jump to the deferred code passing the literals array.
  __ cmp(boilerplate.reg(), Factory::undefined_value());
  deferred->enter()->Branch(equal, &literals, not_taken);

  literals.Unuse();
  // The deferred code returns the boilerplate object.
  deferred->BindExit(&boilerplate);

  // Push the boilerplate object.
  frame_->Push(&boilerplate);
}
3626 | |
3627 | |
// This deferred code stub will be used for creating the boilerplate
// by calling Runtime_CreateObjectLiteral.
// Each created boilerplate is stored in the JSFunction and they are
// therefore context dependent.
// Entered with the literals array; exits with the boilerplate object.
class DeferredObjectLiteral: public DeferredCode {
 public:
  DeferredObjectLiteral(CodeGenerator* generator,
                        ObjectLiteral* node)
      : DeferredCode(generator), node_(node) {
    set_comment("[ DeferredObjectLiteral");
  }

  virtual void Generate();

 private:
  ObjectLiteral* node_;  // AST node supplying index and constant properties.
};
3645 | |
3646 | |
3647 void DeferredObjectLiteral::Generate() { | |
3648 Result literals(generator()); | |
3649 enter()->Bind(&literals); | |
3650 // Since the entry is undefined we call the runtime system to | |
3651 // compute the literal. | |
3652 | |
3653 VirtualFrame* frame = generator()->frame(); | |
3654 // Literal array (0). | |
3655 frame->Push(&literals); | |
3656 // Literal index (1). | |
3657 frame->Push(Smi::FromInt(node_->literal_index())); | |
3658 // Constant properties (2). | |
3659 frame->Push(node_->constant_properties()); | |
3660 Result boilerplate = | |
3661 frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3); | |
3662 exit_.Jump(&boilerplate); | |
3663 } | |
3664 | |
3665 | |
// Load an object literal: fetch (or lazily materialize via deferred
// code) the cached boilerplate, clone it (shallow clone when depth is
// 1), and then evaluate and set each non-constant property on the
// clone.  The clone is left on the frame as the result.
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
  Comment cmnt(masm_, "[ ObjectLiteral");
  DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);

  // Retrieve the literals array and check the allocated entry.  Begin
  // with a writable copy of the function of this activation in a
  // register.
  frame_->PushFunction();
  Result literals = frame_->Pop();
  literals.ToRegister();
  frame_->Spill(literals.reg());

  // Load the literals array of the function.
  __ mov(literals.reg(),
         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));

  // Load the literal at the ast saved index.
  int literal_offset =
      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
  Result boilerplate = allocator_->Allocate();
  ASSERT(boilerplate.is_valid());
  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));

  // Check whether we need to materialize the object literal boilerplate.
  // If so, jump to the deferred code passing the literals array.
  __ cmp(boilerplate.reg(), Factory::undefined_value());
  deferred->enter()->Branch(equal, &literals, not_taken);

  literals.Unuse();
  // The deferred code returns the boilerplate object.
  deferred->BindExit(&boilerplate);

  // Push the boilerplate object.
  frame_->Push(&boilerplate);
  // Clone the boilerplate object.
  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
  if (node->depth() == 1) {
    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
  }
  Result clone = frame_->CallRuntime(clone_function_id, 1);
  // Push the newly cloned literal object as the result.
  frame_->Push(&clone);

  // Set the properties that are not already present in the boilerplate.
  for (int i = 0; i < node->properties()->length(); i++) {
    ObjectLiteral::Property* property = node->properties()->at(i);
    switch (property->kind()) {
      case ObjectLiteral::Property::CONSTANT:
        // Already in the boilerplate; nothing to do.
        break;
      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
        // else fall through.
      case ObjectLiteral::Property::COMPUTED: {
        Handle<Object> key(property->key()->handle());
        if (key->IsSymbol()) {
          // Duplicate the object as the IC receiver.
          frame_->Dup();
          Load(property->value());
          frame_->Push(key);
          Result ignored = frame_->CallStoreIC();
          // Drop the duplicated receiver and ignore the result.
          frame_->Drop();
          break;
        }
        // Fall through: non-symbol keys use the generic runtime store.
      }
      case ObjectLiteral::Property::PROTOTYPE: {
        // Duplicate the object as an argument to the runtime call.
        frame_->Dup();
        Load(property->key());
        Load(property->value());
        Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
        // Ignore the result.
        break;
      }
      case ObjectLiteral::Property::SETTER: {
        // Duplicate the object as an argument to the runtime call.
        frame_->Dup();
        Load(property->key());
        // Accessor-kind flag: 1 selects the setter.
        frame_->Push(Smi::FromInt(1));
        Load(property->value());
        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
        // Ignore the result.
        break;
      }
      case ObjectLiteral::Property::GETTER: {
        // Duplicate the object as an argument to the runtime call.
        frame_->Dup();
        Load(property->key());
        // Accessor-kind flag: 0 selects the getter.
        frame_->Push(Smi::FromInt(0));
        Load(property->value());
        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
        // Ignore the result.
        break;
      }
      default: UNREACHABLE();
    }
  }
}
3764 | |
3765 | |
// This deferred code stub will be used for creating the boilerplate
// by calling Runtime_CreateArrayLiteralBoilerplate.
// Each created boilerplate is stored in the JSFunction and they are
// therefore context dependent.
// Entered with the literals array; exits with the boilerplate object.
class DeferredArrayLiteral: public DeferredCode {
 public:
  DeferredArrayLiteral(CodeGenerator* generator,
                       ArrayLiteral* node)
      : DeferredCode(generator), node_(node) {
    set_comment("[ DeferredArrayLiteral");
  }

  virtual void Generate();

 private:
  ArrayLiteral* node_;  // AST node supplying index and constant elements.
};
3783 | |
3784 | |
3785 void DeferredArrayLiteral::Generate() { | |
3786 Result literals(generator()); | |
3787 enter()->Bind(&literals); | |
3788 // Since the entry is undefined we call the runtime system to | |
3789 // compute the literal. | |
3790 | |
3791 VirtualFrame* frame = generator()->frame(); | |
3792 // Literal array (0). | |
3793 frame->Push(&literals); | |
3794 // Literal index (1). | |
3795 frame->Push(Smi::FromInt(node_->literal_index())); | |
3796 // Constant properties (2). | |
3797 frame->Push(node_->literals()); | |
3798 Result boilerplate = | |
3799 frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3); | |
3800 exit_.Jump(&boilerplate); | |
3801 } | |
3802 | |
3803 | |
// Load an array literal: fetch (or lazily materialize via deferred
// code) the cached boilerplate, clone it (shallow clone when depth is
// 1), then evaluate and store each element that is not already baked
// into the boilerplate.  The clone is left on the frame as the result.
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
  Comment cmnt(masm_, "[ ArrayLiteral");
  DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);

  // Retrieve the literals array and check the allocated entry.  Begin
  // with a writable copy of the function of this activation in a
  // register.
  frame_->PushFunction();
  Result literals = frame_->Pop();
  literals.ToRegister();
  frame_->Spill(literals.reg());

  // Load the literals array of the function.
  __ mov(literals.reg(),
         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));

  // Load the literal at the ast saved index.
  int literal_offset =
      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
  Result boilerplate = allocator_->Allocate();
  ASSERT(boilerplate.is_valid());
  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));

  // Check whether we need to materialize the object literal boilerplate.
  // If so, jump to the deferred code passing the literals array.
  __ cmp(boilerplate.reg(), Factory::undefined_value());
  deferred->enter()->Branch(equal, &literals, not_taken);

  literals.Unuse();
  // The deferred code returns the boilerplate object.
  deferred->BindExit(&boilerplate);

  // Push the resulting array literal on the stack.
  frame_->Push(&boilerplate);

  // Clone the boilerplate object.
  Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
  if (node->depth() == 1) {
    clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
  }
  Result clone = frame_->CallRuntime(clone_function_id, 1);
  // Push the newly cloned literal object as the result.
  frame_->Push(&clone);

  // Generate code to set the elements in the array that are not
  // literals.
  for (int i = 0; i < node->values()->length(); i++) {
    Expression* value = node->values()->at(i);

    // If value is a literal the property value is already set in the
    // boilerplate object.
    if (value->AsLiteral() != NULL) continue;
    // If value is a materialized literal the property value is already set
    // in the boilerplate object if it is simple.
    if (CompileTimeValue::IsCompileTimeValue(value)) continue;

    // The property must be set by generated code.
    Load(value);

    // Get the property value off the stack.
    Result prop_value = frame_->Pop();
    prop_value.ToRegister();

    // Fetch the array literal while leaving a copy on the stack and
    // use it to get the elements array.
    frame_->Dup();
    Result elements = frame_->Pop();
    elements.ToRegister();
    frame_->Spill(elements.reg());
    // Get the elements array.
    __ mov(elements.reg(),
           FieldOperand(elements.reg(), JSObject::kElementsOffset));

    // Write to the indexed properties array.
    int offset = i * kPointerSize + Array::kHeaderSize;
    __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());

    // Update the write barrier for the array address.
    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
    Result scratch = allocator_->Allocate();
    ASSERT(scratch.is_valid());
    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
  }
}
3888 | |
3889 | |
3890 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { | |
3891 ASSERT(!in_spilled_code()); | |
3892 // Call runtime routine to allocate the catch extension object and | |
3893 // assign the exception value to the catch variable. | |
3894 Comment cmnt(masm_, "[ CatchExtensionObject"); | |
3895 Load(node->key()); | |
3896 Load(node->value()); | |
3897 Result result = | |
3898 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); | |
3899 frame_->Push(&result); | |
3900 } | |
3901 | |
3902 | |
// Compile an assignment expression.  Handles plain and compound
// assignments, const semantics (ignored re-assignment, hole-checked
// initialization), and the slow-properties bracketing used for object
// initialization blocks.  The assigned value is left on the frame.
void CodeGenerator::VisitAssignment(Assignment* node) {
  Comment cmnt(masm_, "[ Assignment");
  CodeForStatementPosition(node);

  { Reference target(this, node->target());
    if (target.is_illegal()) {
      // Fool the virtual frame into thinking that we left the assignment's
      // value on the frame.
      frame_->Push(Smi::FromInt(0));
      return;
    }
    Variable* var = node->target()->AsVariableProxy()->AsVariable();

    if (node->starts_initialization_block()) {
      ASSERT(target.type() == Reference::NAMED ||
             target.type() == Reference::KEYED);
      // Change to slow case in the beginning of an initialization
      // block to avoid the quadratic behavior of repeatedly adding
      // fast properties.

      // The receiver is the argument to the runtime call.  It is the
      // first value pushed when the reference was loaded to the
      // frame.
      frame_->PushElementAt(target.size() - 1);
      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
    }
    if (node->op() == Token::ASSIGN ||
        node->op() == Token::INIT_VAR ||
        node->op() == Token::INIT_CONST) {
      // Plain assignment: only the right-hand side is evaluated.
      Load(node->value());

    } else {
      // Compound assignment (e.g. +=): load the target, load the value,
      // and combine them with the binary operation.
      Literal* literal = node->value()->AsLiteral();
      bool overwrite_value =
          (node->value()->AsBinaryOperation() != NULL &&
           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
      // There are two cases where the target is not read in the right hand
      // side, that are easy to test for: the right hand side is a literal,
      // or the right hand side is a different variable.  TakeValue invalidates
      // the target, with an implicit promise that it will be written to again
      // before it is read.
      if (literal != NULL || (right_var != NULL && right_var != var)) {
        target.TakeValue(NOT_INSIDE_TYPEOF);
      } else {
        target.GetValue(NOT_INSIDE_TYPEOF);
      }
      Load(node->value());
      GenericBinaryOperation(node->binary_op(),
                             node->type(),
                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
    }

    if (var != NULL &&
        var->mode() == Variable::CONST &&
        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
      // Assignment ignored - leave the value on the stack.
    } else {
      CodeForSourcePosition(node->position());
      if (node->op() == Token::INIT_CONST) {
        // Dynamic constant initializations must use the function context
        // and initialize the actual constant declared.  Dynamic variable
        // initializations are simply assignments and use SetValue.
        target.SetValue(CONST_INIT);
      } else {
        target.SetValue(NOT_CONST_INIT);
      }
      if (node->ends_initialization_block()) {
        ASSERT(target.type() == Reference::NAMED ||
               target.type() == Reference::KEYED);
        // End of initialization block.  Revert to fast case.  The
        // argument to the runtime call is the receiver, which is the
        // first value pushed as part of the reference, which is below
        // the lhs value.
        frame_->PushElementAt(target.size());
        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
      }
    }
  }
}
3983 | |
3984 | |
3985 void CodeGenerator::VisitThrow(Throw* node) { | |
3986 Comment cmnt(masm_, "[ Throw"); | |
3987 CodeForStatementPosition(node); | |
3988 | |
3989 Load(node->exception()); | |
3990 Result result = frame_->CallRuntime(Runtime::kThrow, 1); | |
3991 frame_->Push(&result); | |
3992 } | |
3993 | |
3994 | |
// Load a property access expression (obj.name or obj[key]) through a
// reference, leaving the property value on the frame.
void CodeGenerator::VisitProperty(Property* node) {
  Comment cmnt(masm_, "[ Property");
  Reference property(this, node);
  property.GetValue(typeof_state());
}
4000 | |
4001 | |
// Compile a function call.  Dispatches on the shape of the callee:
// global variable (call IC), dynamically looked-up variable (runtime
// lookup), named or keyed property (call IC / reference load), or any
// other expression (load function, use global receiver).  The call
// result replaces the function on the frame.
void CodeGenerator::VisitCall(Call* node) {
  Comment cmnt(masm_, "[ Call");

  ZoneList<Expression*>* args = node->arguments();

  CodeForStatementPosition(node);

  // Check if the function is a variable or a property.
  Expression* function = node->expression();
  Variable* var = function->AsVariableProxy()->AsVariable();
  Property* property = function->AsProperty();

  // ------------------------------------------------------------------------
  // Fast-case: Use inline caching.
  // ---
  // According to ECMA-262, section 11.2.3, page 44, the function to call
  // must be resolved after the arguments have been evaluated. The IC code
  // automatically handles this by loading the arguments before the function
  // is resolved in cache misses (this also holds for megamorphic calls).
  // ------------------------------------------------------------------------

  if (var != NULL && !var->is_this() && var->is_global()) {
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
    // ----------------------------------

    // Push the name of the function and the receiver onto the stack.
    frame_->Push(var->name());

    // Pass the global object as the receiver and let the IC stub
    // patch the stack to use the global proxy as 'this' in the
    // invoked function.
    LoadGlobal();

    // Load the arguments.
    int arg_count = args->length();
    for (int i = 0; i < arg_count; i++) {
      Load(args->at(i));
    }

    // Call the IC initialization code.
    CodeForSourcePosition(node->position());
    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
                                       arg_count,
                                       loop_nesting());
    frame_->RestoreContextRegister();
    // Replace the function on the stack with the result.
    frame_->SetElementAt(0, &result);

  } else if (var != NULL && var->slot() != NULL &&
             var->slot()->type() == Slot::LOOKUP) {
    // ----------------------------------
    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
    // ----------------------------------

    // Load the function through a runtime lookup of the variable name.
    frame_->Push(esi);
    frame_->Push(var->name());
    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
    // eax: slot value; edx: receiver

    // Load the receiver.
    frame_->Push(eax);
    frame_->Push(edx);

    // Call the function.
    CallWithArguments(args, node->position());

  } else if (property != NULL) {
    // Check if the key is a literal string.
    Literal* literal = property->key()->AsLiteral();

    if (literal != NULL && literal->handle()->IsSymbol()) {
      // ------------------------------------------------------------------
      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
      // ------------------------------------------------------------------

      // Push the name of the function and the receiver onto the stack.
      frame_->Push(literal->handle());
      Load(property->obj());

      // Load the arguments.
      int arg_count = args->length();
      for (int i = 0; i < arg_count; i++) {
        Load(args->at(i));
      }

      // Call the IC initialization code.
      CodeForSourcePosition(node->position());
      Result result =
          frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting());
      frame_->RestoreContextRegister();
      // Replace the function on the stack with the result.
      frame_->SetElementAt(0, &result);

    } else {
      // -------------------------------------------
      // JavaScript example: 'array[index](1, 2, 3)'
      // -------------------------------------------

      // Load the function to call from the property through a reference.
      Reference ref(this, property);
      ref.GetValue(NOT_INSIDE_TYPEOF);

      // Pass receiver to called function.
      if (property->is_synthetic()) {
        // Use global object as receiver.
        LoadGlobalReceiver();
      } else {
        // The reference's size is non-negative.
        frame_->PushElementAt(ref.size());
      }

      // Call the function.
      CallWithArguments(args, node->position());
    }

  } else {
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
    // ----------------------------------

    // Load the function.
    Load(function);

    // Pass the global proxy as the receiver.
    LoadGlobalReceiver();

    // Call the function.
    CallWithArguments(args, node->position());
  }
}
4134 | |
4135 | |
4136 void CodeGenerator::VisitCallNew(CallNew* node) { | |
4137 Comment cmnt(masm_, "[ CallNew"); | |
4138 CodeForStatementPosition(node); | |
4139 | |
4140 // According to ECMA-262, section 11.2.2, page 44, the function | |
4141 // expression in new calls must be evaluated before the | |
4142 // arguments. This is different from ordinary calls, where the | |
4143 // actual function to call is resolved after the arguments have been | |
4144 // evaluated. | |
4145 | |
4146 // Compute function to call and use the global object as the | |
4147 // receiver. There is no need to use the global proxy here because | |
4148 // it will always be replaced with a newly allocated object. | |
4149 Load(node->expression()); | |
4150 LoadGlobal(); | |
4151 | |
4152 // Push the arguments ("left-to-right") on the stack. | |
4153 ZoneList<Expression*>* args = node->arguments(); | |
4154 int arg_count = args->length(); | |
4155 for (int i = 0; i < arg_count; i++) { | |
4156 Load(args->at(i)); | |
4157 } | |
4158 | |
4159 // Call the construct call builtin that handles allocation and | |
4160 // constructor invocation. | |
4161 CodeForSourcePosition(node->position()); | |
4162 Result result = frame_->CallConstructor(arg_count); | |
4163 // Replace the function on the stack with the result. | |
4164 frame_->SetElementAt(0, &result); | |
4165 } | |
4166 | |
4167 | |
void CodeGenerator::VisitCallEval(CallEval* node) {
  Comment cmnt(masm_, "[ CallEval");

  // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
  // the function we need to call and the receiver of the call.
  // Then we call the resolved function using the given arguments.

  ZoneList<Expression*>* args = node->arguments();
  Expression* function = node->expression();

  CodeForStatementPosition(node);

  // Prepare the stack for the call to the resolved function.
  Load(function);

  // Allocate a frame slot for the receiver.
  frame_->Push(Factory::undefined_value());
  // Evaluate and push the arguments left-to-right.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  // Prepare the stack for the call to ResolvePossiblyDirectEval: push a
  // copy of the function (the resolver's first argument) and a copy of
  // the first call argument, or undefined if there are no arguments.
  frame_->PushElementAt(arg_count + 1);
  if (arg_count > 0) {
    frame_->PushElementAt(arg_count);
  } else {
    frame_->Push(Factory::undefined_value());
  }

  // Resolve the call.  The runtime call consumes its two arguments and
  // returns a FixedArray whose first element is the function to call
  // and whose second element is the receiver to use.
  Result result =
      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);

  // Touch up the stack with the right values for the function and the
  // receiver.  Use a scratch register to avoid destroying the result.
  Result scratch = allocator_->Allocate();
  ASSERT(scratch.is_valid());
  // Element 0 of the result array is the resolved function; store it
  // into the function slot (arg_count + 1 elements down the stack).
  __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
  frame_->SetElementAt(arg_count + 1, &scratch);

  // We can reuse the result register now.
  frame_->Spill(result.reg());
  // Element 1 of the result array is the receiver; store it into the
  // receiver slot (arg_count elements down the stack).
  __ mov(result.reg(),
         FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
  frame_->SetElementAt(arg_count, &result);

  // Call the function.
  CodeForSourcePosition(node->position());
  CallFunctionStub call_function(arg_count);
  result = frame_->CallStub(&call_function, arg_count + 1);

  // Restore the context and overwrite the function on the stack with
  // the result.
  frame_->RestoreContextRegister();
  frame_->SetElementAt(0, &result);
}
4225 | |
4226 | |
4227 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { | |
4228 ASSERT(args->length() == 1); | |
4229 Load(args->at(0)); | |
4230 Result value = frame_->Pop(); | |
4231 value.ToRegister(); | |
4232 ASSERT(value.is_valid()); | |
4233 __ test(value.reg(), Immediate(kSmiTagMask)); | |
4234 value.Unuse(); | |
4235 destination()->Split(zero); | |
4236 } | |
4237 | |
4238 | |
void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
  // Conditionally generate a log call.
  // Args:
  //   0 (literal string): The type of logging (corresponds to the flags).
  //     This is used to determine whether or not to generate the log call.
  //   1 (string): Format string.  Access the string at argument index 2
  //     with '%2s' (see Logger::LogRuntime for all the formats).
  //   2 (array): Arguments to the format string.
  //
  // NOTE(review): when logging/profiling is compiled out, arguments 1
  // and 2 are never evaluated, so callers should not rely on their side
  // effects.
  ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (ShouldGenerateLog(args->at(0))) {
    Load(args->at(1));
    Load(args->at(2));
    frame_->CallRuntime(Runtime::kLog, 2);
  }
#endif
  // Finally, we're expected to leave a value on the top of the stack.
  frame_->Push(Factory::undefined_value());
}
4258 | |
4259 | |
4260 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { | |
4261 ASSERT(args->length() == 1); | |
4262 Load(args->at(0)); | |
4263 Result value = frame_->Pop(); | |
4264 value.ToRegister(); | |
4265 ASSERT(value.is_valid()); | |
4266 __ test(value.reg(), Immediate(kSmiTagMask | 0x80000000)); | |
4267 value.Unuse(); | |
4268 destination()->Split(zero); | |
4269 } | |
4270 | |
4271 | |
// This generates code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It can handle flat and sliced strings, 8 and 16 bit characters and
// cons strings where the answer is found in the left hand branch of the
// cons.  The slow case will flatten the string, which will ensure that
// the answer is in the left hand side the next time around.
//
// Args: 0 is the receiver (string), 1 is the index.  On all slow paths
// undefined is pushed; otherwise the character code is pushed as a smi.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 2);

  JumpTarget slow_case(this);
  JumpTarget end(this);
  JumpTarget not_a_flat_string(this);
  JumpTarget a_cons_string(this);
  // Bidirectional: jumped to from below after unwrapping cons/sliced
  // strings, so the frame must merge from both directions.
  JumpTarget try_again_with_new_string(this, JumpTarget::BIDIRECTIONAL);
  JumpTarget ascii_string(this);
  JumpTarget got_char_code(this);

  Load(args->at(0));
  Load(args->at(1));
  // Reserve register ecx, to use as shift amount later.  ecx is required
  // because the variable-count shr below uses cl as its implicit operand.
  Result shift_amount = allocator()->Allocate(ecx);
  ASSERT(shift_amount.is_valid());
  Result index = frame_->Pop();
  index.ToRegister();
  Result object = frame_->Pop();
  object.ToRegister();
  // If the receiver is a smi return undefined.
  ASSERT(kSmiTag == 0);
  __ test(object.reg(), Immediate(kSmiTagMask));
  slow_case.Branch(zero, not_taken);

  // Check for negative or non-smi index: testing the tag bit and the
  // sign bit together catches both in one instruction.
  ASSERT(kSmiTag == 0);
  __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
  slow_case.Branch(not_zero, not_taken);
  // Get rid of the smi tag on the index.
  frame_->Spill(index.reg());
  __ sar(index.reg(), kSmiTagSize);

  try_again_with_new_string.Bind(&object, &index, &shift_amount);
  // Get the type of the heap object.
  Result object_type = allocator()->Allocate();
  ASSERT(object_type.is_valid());
  __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
  __ movzx_b(object_type.reg(),
             FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
  // We don't handle non-strings.
  __ test(object_type.reg(), Immediate(kIsNotStringMask));
  slow_case.Branch(not_zero, not_taken);

  // Here we make assumptions about the tag values and the shifts needed.
  // See the comment in objects.h.
  ASSERT(kLongStringTag == 0);
  ASSERT(kMediumStringTag + String::kLongLengthShift ==
         String::kMediumLengthShift);
  ASSERT(kShortStringTag + String::kLongLengthShift ==
         String::kShortLengthShift);
  // Compute the length-field shift from the string-size bits of the
  // instance type: size tag + kLongLengthShift yields the right shift
  // for short, medium, and long strings (asserted above).
  __ mov(shift_amount.reg(), Operand(object_type.reg()));
  __ and_(shift_amount.reg(), kStringSizeMask);
  __ add(Operand(shift_amount.reg()), Immediate(String::kLongLengthShift));
  // Get the length field.  Temporary register now used for length.
  Result length = object_type;
  __ mov(length.reg(), FieldOperand(object.reg(), String::kLengthOffset));
  __ shr(length.reg());  // shift_amount, in ecx, is implicit operand.
  // Check for index out of range.
  __ cmp(index.reg(), Operand(length.reg()));
  slow_case.Branch(greater_equal, not_taken);
  length.Unuse();
  // Load the object type into object_type again.
  // These two instructions are duplicated from above, to save a register.
  __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
  __ movzx_b(object_type.reg(),
             FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));

  // We need special handling for non-flat strings.
  ASSERT(kSeqStringTag == 0);
  __ test(object_type.reg(), Immediate(kStringRepresentationMask));
  not_a_flat_string.Branch(not_zero, &object, &index, &object_type,
                           &shift_amount, not_taken);
  shift_amount.Unuse();
  // Check for 1-byte or 2-byte string.
  __ test(object_type.reg(), Immediate(kStringEncodingMask));
  ascii_string.Branch(not_zero, &object, &index, &object_type, taken);

  // 2-byte string.
  // Load the 2-byte character code.
  __ movzx_w(object_type.reg(), FieldOperand(object.reg(),
                                             index.reg(),
                                             times_2,
                                             SeqTwoByteString::kHeaderSize));
  object.Unuse();
  index.Unuse();
  got_char_code.Jump(&object_type);

  // ASCII string.
  ascii_string.Bind(&object, &index, &object_type);
  // Load the byte.
  __ movzx_b(object_type.reg(), FieldOperand(object.reg(),
                                             index.reg(),
                                             times_1,
                                             SeqAsciiString::kHeaderSize));
  object.Unuse();
  index.Unuse();
  got_char_code.Bind(&object_type);
  // Tag the character code as a smi before pushing it as the result.
  ASSERT(kSmiTag == 0);
  __ shl(object_type.reg(), kSmiTagSize);
  frame_->Push(&object_type);
  end.Jump();

  // Handle non-flat strings.
  not_a_flat_string.Bind(&object, &index, &object_type, &shift_amount);
  __ and_(object_type.reg(), kStringRepresentationMask);
  __ cmp(object_type.reg(), kConsStringTag);
  a_cons_string.Branch(equal, &object, &index, &shift_amount, taken);
  __ cmp(object_type.reg(), kSlicedStringTag);
  slow_case.Branch(not_equal, not_taken);
  object_type.Unuse();

  // SlicedString.
  // Add the offset to the index.
  __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
  slow_case.Branch(overflow);
  // Getting the underlying string is done by running the cons string code.

  // ConsString.
  a_cons_string.Bind(&object, &index, &shift_amount);
  // Get the first of the two strings.  Only the left-hand branch is
  // followed; the slow case flattens the string for the next attempt.
  frame_->Spill(object.reg());
  // Both sliced and cons strings store their source string at the same place.
  ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
  __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
  try_again_with_new_string.Jump(&object, &index, &shift_amount);

  // No results live at this point.  Push undefined to trigger the
  // runtime slow case (Runtime_StringCharCodeAt) in the caller.
  slow_case.Bind();
  frame_->Push(Factory::undefined_value());
  end.Bind();
}
4410 | |
4411 | |
4412 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { | |
4413 ASSERT(args->length() == 1); | |
4414 Load(args->at(0)); | |
4415 Result value = frame_->Pop(); | |
4416 value.ToRegister(); | |
4417 ASSERT(value.is_valid()); | |
4418 __ test(value.reg(), Immediate(kSmiTagMask)); | |
4419 destination()->false_target()->Branch(equal); | |
4420 // It is a heap object - get map. | |
4421 Result temp = allocator()->Allocate(); | |
4422 ASSERT(temp.is_valid()); | |
4423 // Check if the object is a JS array or not. | |
4424 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg()); | |
4425 value.Unuse(); | |
4426 temp.Unuse(); | |
4427 destination()->Split(equal); | |
4428 } | |
4429 | |
4430 | |
4431 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { | |
4432 ASSERT(args->length() == 0); | |
4433 // ArgumentsAccessStub takes the parameter count as an input argument | |
4434 // in register eax. Create a constant result for it. | |
4435 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())), this); | |
4436 // Call the shared stub to get to the arguments.length. | |
4437 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH); | |
4438 Result result = frame_->CallStub(&stub, &count); | |
4439 frame_->Push(&result); | |
4440 } | |
4441 | |
4442 | |
4443 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { | |
4444 ASSERT(args->length() == 1); | |
4445 JumpTarget leave(this); | |
4446 Load(args->at(0)); // Load the object. | |
4447 frame_->Dup(); | |
4448 Result object = frame_->Pop(); | |
4449 object.ToRegister(); | |
4450 ASSERT(object.is_valid()); | |
4451 // if (object->IsSmi()) return object. | |
4452 __ test(object.reg(), Immediate(kSmiTagMask)); | |
4453 leave.Branch(zero, taken); | |
4454 // It is a heap object - get map. | |
4455 Result temp = allocator()->Allocate(); | |
4456 ASSERT(temp.is_valid()); | |
4457 // if (!object->IsJSValue()) return object. | |
4458 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg()); | |
4459 leave.Branch(not_equal, not_taken); | |
4460 __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset)); | |
4461 object.Unuse(); | |
4462 frame_->SetElementAt(0, &temp); | |
4463 leave.Bind(); | |
4464 } | |
4465 | |
4466 | |
// Inlined runtime call: if arg 0 is a JSValue wrapper, stores arg 1 as
// its wrapped value (with a write barrier).  In all cases the value
// (arg 1) is left on top of the stack as the result.
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 2);
  JumpTarget leave(this);
  Load(args->at(0));  // Load the object.
  Load(args->at(1));  // Load the value.
  Result value = frame_->Pop();
  Result object = frame_->Pop();
  value.ToRegister();
  object.ToRegister();

  // if (object->IsSmi()) return value.
  __ test(object.reg(), Immediate(kSmiTagMask));
  leave.Branch(zero, &value, taken);

  // It is a heap object - get its map.
  Result scratch = allocator_->Allocate();
  ASSERT(scratch.is_valid());
  // if (!object->IsJSValue()) return value.
  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
  leave.Branch(not_equal, &value, not_taken);

  // Store the value.
  __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
  // Update the write barrier.  Save the value as it will be
  // overwritten by the write barrier code and is needed afterward.
  Result duplicate_value = allocator_->Allocate();
  ASSERT(duplicate_value.is_valid());
  __ mov(duplicate_value.reg(), value.reg());
  // The object register is also overwritten by the write barrier and
  // possibly aliased in the frame.
  frame_->Spill(object.reg());
  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
                 scratch.reg());
  object.Unuse();
  scratch.Unuse();
  duplicate_value.Unuse();

  // Leave.  All paths merge here with the (original, unclobbered) value
  // in the value register, which becomes the result.
  leave.Bind(&value);
  frame_->Push(&value);
}
4508 | |
4509 | |
4510 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) { | |
4511 ASSERT(args->length() == 1); | |
4512 | |
4513 // ArgumentsAccessStub expects the key in edx and the formal | |
4514 // parameter count in eax. | |
4515 Load(args->at(0)); | |
4516 Result key = frame_->Pop(); | |
4517 // Explicitly create a constant result. | |
4518 Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())), this); | |
4519 // Call the shared stub to get to arguments[key]. | |
4520 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); | |
4521 Result result = frame_->CallStub(&stub, &key, &count); | |
4522 frame_->Push(&result); | |
4523 } | |
4524 | |
4525 | |
4526 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) { | |
4527 ASSERT(args->length() == 2); | |
4528 | |
4529 // Load the two objects into registers and perform the comparison. | |
4530 Load(args->at(0)); | |
4531 Load(args->at(1)); | |
4532 Result right = frame_->Pop(); | |
4533 Result left = frame_->Pop(); | |
4534 right.ToRegister(); | |
4535 left.ToRegister(); | |
4536 __ cmp(right.reg(), Operand(left.reg())); | |
4537 right.Unuse(); | |
4538 left.Unuse(); | |
4539 destination()->Split(equal); | |
4540 } | |
4541 | |
4542 | |
// Compiles a runtime call (%Name(...)).  Some calls are expanded inline
// by CheckForInlineRuntimeCall; otherwise a NULL function descriptor
// means the name refers to a JS builtin looked up on the builtins
// object, and a non-NULL one is a direct C runtime call.
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
  if (CheckForInlineRuntimeCall(node)) {
    return;
  }

  ZoneList<Expression*>* args = node->arguments();
  Comment cmnt(masm_, "[ CallRuntime");
  Runtime::Function* function = node->function();

  if (function == NULL) {
    // Prepare stack for calling JS runtime function.
    frame_->Push(node->name());
    // Push the builtins object found in the current global object.
    Result temp = allocator()->Allocate();
    ASSERT(temp.is_valid());
    __ mov(temp.reg(), GlobalObject());
    __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
    frame_->Push(&temp);
  }

  // Push the arguments ("left-to-right").
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  if (function == NULL) {
    // Call the JS runtime function.  Pass 0 as the loop nesting depth
    // because we do not handle runtime calls specially in loops.
    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, 0);
    frame_->RestoreContextRegister();
    // The IC call consumed the arguments; the result replaces the
    // pushed name on the stack.
    frame_->SetElementAt(0, &answer);
  } else {
    // Call the C runtime function.
    Result answer = frame_->CallRuntime(function, arg_count);
    frame_->Push(&answer);
  }
}
4581 | |
4582 | |
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
  // Note that because of NOT and an optimization in comparison of a typeof
  // expression to a literal string, this function can fail to leave a value
  // on top of the frame or in the cc register.
  Comment cmnt(masm_, "[ UnaryOperation");

  Token::Value op = node->op();

  if (op == Token::NOT) {
    // Swap the true and false targets but keep the same actual label
    // as the fall through.  Compiling the operand with inverted targets
    // implements logical negation without materializing a boolean.
    destination()->Invert();
    LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
    // Swap the labels back.
    destination()->Invert();

  } else if (op == Token::DELETE) {
    // delete obj[key] / delete obj.key: evaluate both parts and invoke
    // the DELETE builtin.
    Property* property = node->expression()->AsProperty();
    if (property != NULL) {
      Load(property->obj());
      Load(property->key());
      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
      frame_->Push(&answer);
      return;
    }

    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
    if (variable != NULL) {
      Slot* slot = variable->slot();
      if (variable->is_global()) {
        // delete of a global property: treated as a keyed delete on the
        // global object.
        LoadGlobal();
        frame_->Push(variable->name());
        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
                                              CALL_FUNCTION, 2);
        frame_->Push(&answer);
        return;

      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
        // lookup the context holding the named variable
        frame_->Push(esi);
        frame_->Push(variable->name());
        Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
        frame_->Push(&context);
        frame_->Push(variable->name());
        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
                                              CALL_FUNCTION, 2);
        frame_->Push(&answer);
        return;
      }

      // Default: Result of deleting non-global, not dynamically
      // introduced variables is false.
      frame_->Push(Factory::false_value());

    } else {
      // Default: Result of deleting expressions is true.
      Load(node->expression());  // may have side-effects
      frame_->SetElementAt(0, Factory::true_value());
    }

  } else if (op == Token::TYPEOF) {
    // Special case for loading the typeof expression; see comment on
    // LoadTypeofExpression().
    LoadTypeofExpression(node->expression());
    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
    frame_->Push(&answer);

  } else if (op == Token::VOID) {
    Expression* expression = node->expression();
    if (expression && expression->AsLiteral() && (
        expression->AsLiteral()->IsTrue() ||
        expression->AsLiteral()->IsFalse() ||
        expression->AsLiteral()->handle()->IsNumber() ||
        expression->AsLiteral()->handle()->IsString() ||
        expression->AsLiteral()->handle()->IsJSRegExp() ||
        expression->AsLiteral()->IsNull())) {
      // Omit evaluating the value of the primitive literal.
      // It will be discarded anyway, and can have no side effect.
      frame_->Push(Factory::undefined_value());
    } else {
      Load(node->expression());
      frame_->SetElementAt(0, Factory::undefined_value());
    }

  } else {
    Load(node->expression());
    switch (op) {
      case Token::NOT:
      case Token::DELETE:
      case Token::TYPEOF:
        UNREACHABLE();  // handled above
        break;

      case Token::SUB: {
        UnarySubStub stub;
        // TODO(1222589): remove dependency of TOS being cached inside stub
        Result operand = frame_->Pop();
        Result answer = frame_->CallStub(&stub, &operand);
        frame_->Push(&answer);
        break;
      }

      case Token::BIT_NOT: {
        // Smi check.  Smis are inverted inline; everything else goes
        // through the BIT_NOT builtin.
        JumpTarget smi_label(this);
        JumpTarget continue_label(this);
        Result operand = frame_->Pop();
        operand.ToRegister();
        __ test(operand.reg(), Immediate(kSmiTagMask));
        smi_label.Branch(zero, &operand, taken);

        frame_->Push(&operand);  // undo popping of TOS
        Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
                                              CALL_FUNCTION, 1);

        continue_label.Jump(&answer);
        smi_label.Bind(&answer);
        answer.ToRegister();
        frame_->Spill(answer.reg());
        // Inverting a smi also inverts the (zero) tag bit; mask it back
        // off so the result is a valid smi again.
        __ not_(answer.reg());
        __ and_(answer.reg(), ~kSmiTagMask);  // Remove inverted smi-tag.
        continue_label.Bind(&answer);
        frame_->Push(&answer);
        break;
      }

      case Token::ADD: {
        // Smi check.  Unary plus is a no-op for smis; other values are
        // converted via the TO_NUMBER builtin.
        JumpTarget continue_label(this);
        Result operand = frame_->Pop();
        operand.ToRegister();
        __ test(operand.reg(), Immediate(kSmiTagMask));
        continue_label.Branch(zero, &operand, taken);

        frame_->Push(&operand);
        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
                                              CALL_FUNCTION, 1);

        continue_label.Bind(&answer);
        frame_->Push(&answer);
        break;
      }

      default:
        UNREACHABLE();
    }
  }
}
4731 | |
4732 | |
// Deferred (out-of-line) slow case for a count operation (++/--): taken
// when the optimistic inline smi add/sub overflowed or the operand was
// not a smi.  See DeferredCountOperation::Generate below.
class DeferredCountOperation: public DeferredCode {
 public:
  DeferredCountOperation(CodeGenerator* generator,
                         bool is_postfix,
                         bool is_increment,
                         int target_size)
      : DeferredCode(generator),
        is_postfix_(is_postfix),
        is_increment_(is_increment),
        target_size_(target_size) {
    set_comment("[ DeferredCountOperation");
  }

  virtual void Generate();

 private:
  bool is_postfix_;    // True for x++/x--: the old value is the result.
  bool is_increment_;  // True for ++, false for --.
  int target_size_;    // Size of the target reference on the frame.
};
4753 | |
4754 | |
// Slow case for ++/--: undo the optimistic inline smi operation,
// convert the operand with ToNumber, and perform the count operation
// through the generic number runtime functions.
void DeferredCountOperation::Generate() {
  CodeGenerator* cgen = generator();
  Result value(cgen);
  enter()->Bind(&value);
  VirtualFrame* frame = cgen->frame();
  // Undo the optimistic smi operation.
  value.ToRegister();
  frame->Spill(value.reg());
  if (is_increment_) {
    __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
  } else {
    __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
  }
  // Convert the restored original operand to a number.
  frame->Push(&value);
  value = frame->InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION, 1);
  frame->Push(&value);
  if (is_postfix_) {  // Fix up copy of old value with ToNumber(value).
    // This is only safe because VisitCountOperation makes this frame slot
    // beneath the reference a register, which is spilled at the above call.
    // We cannot safely write to constants or copies below the water line.
    frame->StoreToElementAt(target_size_ + 1);
  }
  // Perform the actual increment/decrement via the runtime.
  frame->Push(Smi::FromInt(1));
  if (is_increment_) {
    value = frame->CallRuntime(Runtime::kNumberAdd, 2);
  } else {
    value = frame->CallRuntime(Runtime::kNumberSub, 2);
  }
  exit_.Jump(&value);
}
4785 | |
4786 | |
// Compiles prefix/postfix ++ and --.  The fast path performs an
// optimistic inline smi add/sub; overflow or a non-smi operand falls
// through to DeferredCountOperation.
void CodeGenerator::VisitCountOperation(CountOperation* node) {
  Comment cmnt(masm_, "[ CountOperation");

  bool is_postfix = node->is_postfix();
  bool is_increment = node->op() == Token::INC;

  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
  bool is_const = (var != NULL && var->mode() == Variable::CONST);

  // Postfix operators need a stack slot under the reference to hold
  // the old value while the new one is being stored.
  if (is_postfix) {
    frame_->Push(Smi::FromInt(0));
  }

  { Reference target(this, node->expression());
    if (target.is_illegal()) {
      // Spoof the virtual frame to have the expected height (one higher
      // than on entry).
      if (!is_postfix) {
        frame_->Push(Smi::FromInt(0));
      }
      return;
    }
    target.TakeValue(NOT_INSIDE_TYPEOF);

    DeferredCountOperation* deferred =
        new DeferredCountOperation(this, is_postfix,
                                   is_increment, target.size());

    Result value = frame_->Pop();
    value.ToRegister();

    // Postfix: Store the old value as the result.
    if (is_postfix) {
      // Explicitly back the slot for the old value with a new register.
      // This improves performance in some cases.
      Result old_value = allocator_->Allocate();
      ASSERT(old_value.is_valid());
      __ mov(old_value.reg(), value.reg());
      // SetElement must not create a constant element or a copy in this slot,
      // since we will write to it, below the waterline, in deferred code.
      frame_->SetElementAt(target.size(), &old_value);
    }

    // Perform optimistic increment/decrement.  Ensure the value is
    // writable.
    frame_->Spill(value.reg());
    ASSERT(allocator_->count(value.reg()) == 1);

    // In order to combine the overflow and the smi check, we need to
    // be able to allocate a byte register.  We attempt to do so
    // without spilling.  If we fail, we will generate separate
    // overflow and smi checks.
    //
    // We need to allocate and clear the temporary byte register
    // before performing the count operation since clearing the
    // register using xor will clear the overflow flag.
    Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
    if (tmp.is_valid()) {
      __ Set(tmp.reg(), Immediate(0));
    }

    if (is_increment) {
      __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
    } else {
      __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
    }

    // If the count operation didn't overflow and the result is a
    // valid smi, we're done.  Otherwise, we jump to the deferred
    // slow-case code.
    //
    // We combine the overflow and the smi check if we could
    // successfully allocate a temporary byte register.
    if (tmp.is_valid()) {
      // setcc records the overflow flag in tmp; or-ing it into the
      // value makes a single test catch both overflow and non-smi.
      __ setcc(overflow, tmp.reg());
      __ or_(Operand(value.reg()), tmp.reg());
      tmp.Unuse();
      __ test(value.reg(), Immediate(kSmiTagMask));
      deferred->enter()->Branch(not_zero, &value, not_taken);
    } else {  // Otherwise we test separately for overflow and smi check.
      deferred->enter()->Branch(overflow, &value, not_taken);
      __ test(value.reg(), Immediate(kSmiTagMask));
      deferred->enter()->Branch(not_zero, &value, not_taken);
    }

    // Store the new value in the target if not const.
    deferred->BindExit(&value);
    frame_->Push(&value);
    if (!is_const) {
      target.SetValue(NOT_CONST_INIT);
    }
  }

  // Postfix: Discard the new value and use the old.
  if (is_postfix) {
    frame_->Drop();
  }
}
4887 | |
4888 | |
4889 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { | |
4890 // Note that due to an optimization in comparison operations (typeof | |
4891 // compared to a string literal), we can evaluate a binary expression such | |
4892 // as AND or OR and not leave a value on the frame or in the cc register. | |
4893 Comment cmnt(masm_, "[ BinaryOperation"); | |
4894 Token::Value op = node->op(); | |
4895 | |
4896 // According to ECMA-262 section 11.11, page 58, the binary logical | |
4897 // operators must yield the result of one of the two expressions | |
4898 // before any ToBoolean() conversions. This means that the value | |
4899 // produced by a && or || operator is not necessarily a boolean. | |
4900 | |
4901 // NOTE: If the left hand side produces a materialized value (not | |
4902 // control flow), we force the right hand side to do the same. This | |
4903 // is necessary because we assume that if we get control flow on the | |
4904 // last path out of an expression we got it on all paths. | |
4905 if (op == Token::AND) { | |
4906 JumpTarget is_true(this); | |
4907 ControlDestination dest(&is_true, destination()->false_target(), true); | |
4908 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false); | |
4909 | |
4910 if (dest.false_was_fall_through()) { | |
4911 // The current false target was used as the fall-through. If | |
4912 // there are no dangling jumps to is_true then the left | |
4913 // subexpression was unconditionally false. Otherwise we have | |
4914 // paths where we do have to evaluate the right subexpression. | |
4915 if (is_true.is_linked()) { | |
4916 // We need to compile the right subexpression. If the jump to | |
4917 // the current false target was a forward jump then we have a | |
4918 // valid frame, we have just bound the false target, and we | |
4919 // have to jump around the code for the right subexpression. | |
4920 if (has_valid_frame()) { | |
4921 destination()->false_target()->Unuse(); | |
4922 destination()->false_target()->Jump(); | |
4923 } | |
4924 is_true.Bind(); | |
4925 // The left subexpression compiled to control flow, so the | |
4926 // right one is free to do so as well. | |
4927 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); | |
4928 } else { | |
4929 // We have actually just jumped to or bound the current false | |
4930 // target but the current control destination is not marked as | |
4931 // used. | |
4932 destination()->Use(false); | |
4933 } | |
4934 | |
4935 } else if (dest.is_used()) { | |
4936 // The left subexpression compiled to control flow (and is_true | |
4937 // was just bound), so the right is free to do so as well. | |
4938 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); | |
4939 | |
4940 } else { | |
4941 // We have a materialized value on the frame, so we exit with | |
4942 // one on all paths. There are possibly also jumps to is_true | |
4943 // from nested subexpressions. | |
4944 JumpTarget pop_and_continue(this); | |
4945 JumpTarget exit(this); | |
4946 | |
4947 // Avoid popping the result if it converts to 'false' using the | |
4948 // standard ToBoolean() conversion as described in ECMA-262, | |
4949 // section 9.2, page 30. | |
4950 // | |
4951 // Duplicate the TOS value. The duplicate will be popped by | |
4952 // ToBoolean. | |
4953 frame_->Dup(); | |
4954 ControlDestination dest(&pop_and_continue, &exit, true); | |
4955 ToBoolean(&dest); | |
4956 | |
4957 // Pop the result of evaluating the first part. | |
4958 frame_->Drop(); | |
4959 | |
4960 // Compile right side expression. | |
4961 is_true.Bind(); | |
4962 Load(node->right()); | |
4963 | |
4964 // Exit (always with a materialized value). | |
4965 exit.Bind(); | |
4966 } | |
4967 | |
4968 } else if (op == Token::OR) { | |
4969 JumpTarget is_false(this); | |
4970 ControlDestination dest(destination()->true_target(), &is_false, false); | |
4971 LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false); | |
4972 | |
4973 if (dest.true_was_fall_through()) { | |
4974 // The current true target was used as the fall-through. If | |
4975 // there are no dangling jumps to is_false then the left | |
4976 // subexpression was unconditionally true. Otherwise we have | |
4977 // paths where we do have to evaluate the right subexpression. | |
4978 if (is_false.is_linked()) { | |
4979 // We need to compile the right subexpression. If the jump to | |
4980 // the current true target was a forward jump then we have a | |
4981 // valid frame, we have just bound the true target, and we | |
4982 // have to jump around the code for the right subexpression. | |
4983 if (has_valid_frame()) { | |
4984 destination()->true_target()->Unuse(); | |
4985 destination()->true_target()->Jump(); | |
4986 } | |
4987 is_false.Bind(); | |
4988 // The left subexpression compiled to control flow, so the | |
4989 // right one is free to do so as well. | |
4990 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); | |
4991 } else { | |
4992 // We have just jumped to or bound the current true target but | |
4993 // the current control destination is not marked as used. | |
4994 destination()->Use(true); | |
4995 } | |
4996 | |
4997 } else if (dest.is_used()) { | |
4998 // The left subexpression compiled to control flow (and is_false | |
4999 // was just bound), so the right is free to do so as well. | |
5000 LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); | |
5001 | |
5002 } else { | |
5003 // We have a materialized value on the frame, so we exit with | |
5004 // one on all paths. There are possibly also jumps to is_false | |
5005 // from nested subexpressions. | |
5006 JumpTarget pop_and_continue(this); | |
5007 JumpTarget exit(this); | |
5008 | |
5009 // Avoid popping the result if it converts to 'true' using the | |
5010 // standard ToBoolean() conversion as described in ECMA-262, | |
5011 // section 9.2, page 30. | |
5012 // | |
5013 // Duplicate the TOS value. The duplicate will be popped by | |
5014 // ToBoolean. | |
5015 frame_->Dup(); | |
5016 ControlDestination dest(&exit, &pop_and_continue, false); | |
5017 ToBoolean(&dest); | |
5018 | |
5019 // Pop the result of evaluating the first part. | |
5020 frame_->Drop(); | |
5021 | |
5022 // Compile right side expression. | |
5023 is_false.Bind(); | |
5024 Load(node->right()); | |
5025 | |
5026 // Exit (always with a materialized value). | |
5027 exit.Bind(); | |
5028 } | |
5029 | |
5030 } else { | |
5031 // NOTE: The code below assumes that the slow cases (calls to runtime) | |
5032 // never return a constant/immutable object. | |
5033 OverwriteMode overwrite_mode = NO_OVERWRITE; | |
5034 if (node->left()->AsBinaryOperation() != NULL && | |
5035 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { | |
5036 overwrite_mode = OVERWRITE_LEFT; | |
5037 } else if (node->right()->AsBinaryOperation() != NULL && | |
5038 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { | |
5039 overwrite_mode = OVERWRITE_RIGHT; | |
5040 } | |
5041 | |
5042 Load(node->left()); | |
5043 Load(node->right()); | |
5044 GenericBinaryOperation(node->op(), node->type(), overwrite_mode); | |
5045 } | |
5046 } | |
5047 | |
5048 | |
// Compile a ThisFunction AST node: push the closure currently being
// executed onto the virtual frame as the expression's value.
void CodeGenerator::VisitThisFunction(ThisFunction* node) {
  frame_->PushFunction();
}
5052 | |
5053 | |
// Code stub backing the 'instanceof' operator (used by
// VisitCompareOperation below).  The stub body is emitted by
// Generate(), which is defined elsewhere in this file.
class InstanceofStub: public CodeStub {
 public:
  InstanceofStub() { }

  void Generate(MacroAssembler* masm);

 private:
  // There is only a single variant of this stub: the major key names
  // it and the minor key is always zero.
  Major MajorKey() { return Instanceof; }
  int MinorKey() { return 0; }
};
5064 | |
5065 | |
// Compile a comparison, giving the result to the current control
// destination.  Comparisons of the form 'typeof <expr> == <string
// literal>' are compiled to direct map/type checks on the operand
// instead of a generic comparison.
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
  Comment cmnt(masm_, "[ CompareOperation");

  // Get the expressions from the node.
  Expression* left = node->left();
  Expression* right = node->right();
  Token::Value op = node->op();
  // To make typeof testing for natives implemented in JavaScript really
  // efficient, we generate special code for expressions of the form:
  // 'typeof <expression> == <string>'.
  UnaryOperation* operation = left->AsUnaryOperation();
  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
      (operation != NULL && operation->op() == Token::TYPEOF) &&
      (right->AsLiteral() != NULL &&
       right->AsLiteral()->handle()->IsString())) {
    Handle<String> check(String::cast(*right->AsLiteral()->handle()));

    // Load the operand and move it to a register.
    LoadTypeofExpression(operation->expression());
    Result answer = frame_->Pop();
    answer.ToRegister();

    if (check->Equals(Heap::number_symbol())) {
      // 'number': true for smis and for objects with the heap number map.
      __ test(answer.reg(), Immediate(kSmiTagMask));
      destination()->true_target()->Branch(zero);
      frame_->Spill(answer.reg());
      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
      __ cmp(answer.reg(), Factory::heap_number_map());
      answer.Unuse();
      destination()->Split(equal);

    } else if (check->Equals(Heap::string_symbol())) {
      // 'string': a non-smi with instance type below
      // FIRST_NONSTRING_TYPE that is not undetectable.
      __ test(answer.reg(), Immediate(kSmiTagMask));
      destination()->false_target()->Branch(zero);

      // It can be an undetectable string object.
      Result temp = allocator()->Allocate();
      ASSERT(temp.is_valid());
      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
      __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
      destination()->false_target()->Branch(not_zero);
      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
      __ movzx_b(temp.reg(),
                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
      __ cmp(temp.reg(), FIRST_NONSTRING_TYPE);
      temp.Unuse();
      answer.Unuse();
      destination()->Split(less);

    } else if (check->Equals(Heap::boolean_symbol())) {
      // 'boolean': exactly the true and false values.
      __ cmp(answer.reg(), Factory::true_value());
      destination()->true_target()->Branch(equal);
      __ cmp(answer.reg(), Factory::false_value());
      answer.Unuse();
      destination()->Split(equal);

    } else if (check->Equals(Heap::undefined_symbol())) {
      // 'undefined': the undefined value or an undetectable object.
      __ cmp(answer.reg(), Factory::undefined_value());
      destination()->true_target()->Branch(equal);

      __ test(answer.reg(), Immediate(kSmiTagMask));
      destination()->false_target()->Branch(zero);

      // It can be an undetectable object.
      frame_->Spill(answer.reg());
      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
      __ movzx_b(answer.reg(),
                 FieldOperand(answer.reg(), Map::kBitFieldOffset));
      __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
      answer.Unuse();
      destination()->Split(not_zero);

    } else if (check->Equals(Heap::function_symbol())) {
      // 'function': a non-smi with instance type JS_FUNCTION_TYPE.
      __ test(answer.reg(), Immediate(kSmiTagMask));
      destination()->false_target()->Branch(zero);
      frame_->Spill(answer.reg());
      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
      answer.Unuse();
      destination()->Split(equal);

    } else if (check->Equals(Heap::object_symbol())) {
      // 'object': null, or a non-undetectable object whose instance
      // type is in the [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE] range.
      __ test(answer.reg(), Immediate(kSmiTagMask));
      destination()->false_target()->Branch(zero);
      __ cmp(answer.reg(), Factory::null_value());
      destination()->true_target()->Branch(equal);

      // It can be an undetectable object.
      Result map = allocator()->Allocate();
      ASSERT(map.is_valid());
      __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
      __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
      destination()->false_target()->Branch(not_zero);
      __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
      __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
      destination()->false_target()->Branch(less);
      __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
      answer.Unuse();
      map.Unuse();
      destination()->Split(less_equal);
    } else {
      // Uncommon case: typeof testing against a string literal that is
      // never returned from the typeof operator.
      answer.Unuse();
      destination()->Goto(false);
    }
    return;
  }

  Condition cc = no_condition;
  bool strict = false;
  switch (op) {
    case Token::EQ_STRICT:
      strict = true;
      // Fall through
    case Token::EQ:
      cc = equal;
      break;
    case Token::LT:
      cc = less;
      break;
    case Token::GT:
      cc = greater;
      break;
    case Token::LTE:
      cc = less_equal;
      break;
    case Token::GTE:
      cc = greater_equal;
      break;
    case Token::IN: {
      // 'in' is handled by the IN builtin, which leaves its result as
      // a value on the frame rather than in the condition codes.
      Load(left);
      Load(right);
      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
      frame_->Push(&answer);  // push the result
      return;
    }
    case Token::INSTANCEOF: {
      // 'instanceof' is handled by a stub call; a zero result signals
      // a successful instance check, so the destination is split on
      // the zero condition.
      Load(left);
      Load(right);
      InstanceofStub stub;
      Result answer = frame_->CallStub(&stub, 2);
      answer.ToRegister();
      __ test(answer.reg(), Operand(answer.reg()));
      answer.Unuse();
      destination()->Split(zero);
      return;
    }
    default:
      UNREACHABLE();
  }
  // Generic relational or equality comparison on the two loaded values.
  Load(left);
  Load(right);
  Comparison(cc, strict, destination());
}
5223 | |
5224 | |
#ifdef DEBUG
// Debug-mode sanity check: each allocatable register must be counted
// by the register allocator exactly once if the virtual frame uses it
// and zero times otherwise.
bool CodeGenerator::HasValidEntryRegisters() {
  const Register registers[] = { eax, ebx, ecx, edx, edi };
  for (int i = 0; i < 5; i++) {
    int expected_count = frame()->is_used(registers[i]) ? 1 : 0;
    if (allocator()->count(registers[i]) != expected_count) return false;
  }
  return true;
}
#endif
5234 | |
5235 | |
// Deferred (out-of-line) slow case for an inlined keyed property
// load.  Generate() calls the KeyedLoadIC; is_global selects the
// relocation mode used for that IC call.
class DeferredReferenceGetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceGetKeyedValue(CodeGenerator* generator, bool is_global)
      : DeferredCode(generator), is_global_(is_global) {
    set_comment("[ DeferredReferenceGetKeyedValue");
  }

  virtual void Generate();

  // Label bound at the inlined map-check cmp instruction.  Generate()
  // records the distance from the IC call back to this site so the IC
  // initialization code can find and patch the map check.
  Label* patch_site() { return &patch_site_; }

 private:
  Label patch_site_;
  bool is_global_;
};
5251 | |
5252 | |
// Slow case for an inlined keyed load: push the receiver and key as
// IC arguments, call the KeyedLoadIC, and emit the marker test
// instruction that records the offset back to the patch site.
void DeferredReferenceGetKeyedValue::Generate() {
  CodeGenerator* cgen = generator();
  Result receiver(cgen);
  Result key(cgen);
  enter()->Bind(&receiver, &key);
  cgen->frame()->Push(&receiver);  // First IC argument.
  cgen->frame()->Push(&key);       // Second IC argument.

  // Calculate the delta from the IC call instruction to the map check
  // cmp instruction in the inlined version.  This delta is stored in
  // a test(eax, delta) instruction after the call so that we can find
  // it in the IC initialization code and patch the cmp instruction.
  // This means that we cannot allow test instructions after calls to
  // KeyedLoadIC stubs in other places.
  RelocInfo::Mode mode = is_global_
      ? RelocInfo::CODE_TARGET_CONTEXT
      : RelocInfo::CODE_TARGET;
  Result value = cgen->frame()->CallKeyedLoadIC(mode);
  // The result needs to be specifically the eax register because the
  // offset to the patch site will be expected in a test eax
  // instruction.
  ASSERT(value.is_register() && value.reg().is(eax));
  // The delta from the start of the map-compare instruction to the
  // test eax instruction.  We use masm_ directly here instead of the
  // double underscore macro because the macro sometimes uses macro
  // expansion to turn into something that can't return a value.  This
  // is encountered when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // The delta is negated so the marker cannot be mistaken for a real
  // test instruction emitted for other reasons.
  __ test(value.reg(), Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);

  // The receiver and key were spilled by the call, so their state as
  // constants or copies has been changed.  Thus, they need to be
  // "mergable" in the block at the exit label and are therefore
  // passed as return results here.
  key = cgen->frame()->Pop();
  receiver = cgen->frame()->Pop();
  exit_.Jump(&receiver, &key, &value);
}
5292 | |
5293 | |
5294 #undef __ | |
5295 #define __ ACCESS_MASM(masm) | |
5296 | |
5297 Handle<String> Reference::GetName() { | |
5298 ASSERT(type_ == NAMED); | |
5299 Property* property = expression_->AsProperty(); | |
5300 if (property == NULL) { | |
5301 // Global variable reference treated as a named property reference. | |
5302 VariableProxy* proxy = expression_->AsVariableProxy(); | |
5303 ASSERT(proxy->AsVariable() != NULL); | |
5304 ASSERT(proxy->AsVariable()->is_global()); | |
5305 return proxy->name(); | |
5306 } else { | |
5307 Literal* raw_name = property->key()->AsLiteral(); | |
5308 ASSERT(raw_name != NULL); | |
5309 return Handle<String>(String::cast(*raw_name->handle())); | |
5310 } | |
5311 } | |
5312 | |
5313 | |
// Load the value this reference refers to and push it on the virtual
// frame.  SLOT references load from the frame or context; NAMED and
// KEYED references call the corresponding load IC, except that keyed
// loads inside loops get an inlined fast-path array load whose map
// check is patched later by the IC initialization code.
void Reference::GetValue(TypeofState typeof_state) {
  ASSERT(!cgen_->in_spilled_code());
  ASSERT(cgen_->HasValidEntryRegisters());
  ASSERT(!is_illegal());
  MacroAssembler* masm = cgen_->masm();
  switch (type_) {
    case SLOT: {
      Comment cmnt(masm, "[ Load from Slot");
      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
      ASSERT(slot != NULL);
      cgen_->LoadFromSlot(slot, typeof_state);
      break;
    }

    case NAMED: {
      // TODO(1241834): Make sure that it is safe to ignore the
      // distinction between expressions in a typeof and not in a
      // typeof. If there is a chance that reference errors can be
      // thrown below, we must distinguish between the two kinds of
      // loads (typeof expression loads must not throw a reference
      // error).
      Comment cmnt(masm, "[ Load from named Property");
      cgen_->frame()->Push(GetName());

      // A global variable load goes through the IC with the
      // context-aware relocation mode.
      Variable* var = expression_->AsVariableProxy()->AsVariable();
      ASSERT(var == NULL || var->is_global());
      RelocInfo::Mode mode = (var == NULL)
                             ? RelocInfo::CODE_TARGET
                             : RelocInfo::CODE_TARGET_CONTEXT;
      Result answer = cgen_->frame()->CallLoadIC(mode);
      cgen_->frame()->Push(&answer);
      break;
    }

    case KEYED: {
      // TODO(1241834): Make sure that it is safe to ignore the
      // distinction between expressions in a typeof and not in a typeof.
      Comment cmnt(masm, "[ Load from keyed Property");
      Variable* var = expression_->AsVariableProxy()->AsVariable();
      bool is_global = var != NULL;
      ASSERT(!is_global || var->is_global());
      // Inline array load code if inside of a loop.  We do not know
      // the receiver map yet, so we initially generate the code with
      // a check against an invalid map.  In the inline cache code, we
      // patch the map check if appropriate.
      if (cgen_->loop_nesting() > 0) {
        Comment cmnt(masm, "[ Inlined array index load");
        DeferredReferenceGetKeyedValue* deferred =
            new DeferredReferenceGetKeyedValue(cgen_, is_global);

        Result key = cgen_->frame()->Pop();
        Result receiver = cgen_->frame()->Pop();
        key.ToRegister();
        receiver.ToRegister();

        // Check that the receiver is not a smi (only needed if this
        // is not a load from the global context) and that it has the
        // expected map.
        if (!is_global) {
          __ test(receiver.reg(), Immediate(kSmiTagMask));
          deferred->enter()->Branch(zero, &receiver, &key, not_taken);
        }

        // Initially, use an invalid map. The map is patched in the IC
        // initialization code.
        __ bind(deferred->patch_site());
        // Use masm-> here instead of the double underscore macro since extra
        // coverage code can interfere with the patching.
        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
                  Immediate(Factory::null_value()));
        deferred->enter()->Branch(not_equal, &receiver, &key, not_taken);

        // Check that the key is a smi.
        __ test(key.reg(), Immediate(kSmiTagMask));
        deferred->enter()->Branch(not_zero, &receiver, &key, not_taken);

        // Get the elements array from the receiver and check that it
        // is not a dictionary.
        Result elements = cgen_->allocator()->Allocate();
        ASSERT(elements.is_valid());
        __ mov(elements.reg(),
               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
        __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
               Immediate(Factory::hash_table_map()));
        deferred->enter()->Branch(equal, &receiver, &key, not_taken);

        // Shift the key to get the actual index value and check that
        // it is within bounds.
        Result index = cgen_->allocator()->Allocate();
        ASSERT(index.is_valid());
        __ mov(index.reg(), key.reg());
        __ sar(index.reg(), kSmiTagSize);
        __ cmp(index.reg(),
               FieldOperand(elements.reg(), Array::kLengthOffset));
        deferred->enter()->Branch(above_equal, &receiver, &key, not_taken);

        // Load and check that the result is not the hole.  We could
        // reuse the index or elements register for the value.
        //
        // TODO(206): Consider whether it makes sense to try some
        // heuristic about which register to reuse.  For example, if
        // one is eax, the we can reuse that one because the value
        // coming from the deferred code will be in eax.
        Result value = index;
        __ mov(value.reg(), Operand(elements.reg(),
                                    index.reg(),
                                    times_4,
                                    Array::kHeaderSize - kHeapObjectTag));
        elements.Unuse();
        index.Unuse();
        __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
        deferred->enter()->Branch(equal, &receiver, &key, not_taken);
        __ IncrementCounter(&Counters::keyed_load_inline, 1);

        // Restore the receiver and key to the frame and push the
        // result on top of it.
        deferred->BindExit(&receiver, &key, &value);
        cgen_->frame()->Push(&receiver);
        cgen_->frame()->Push(&key);
        cgen_->frame()->Push(&value);

      } else {
        Comment cmnt(masm, "[ Load from keyed Property");
        RelocInfo::Mode mode = is_global
                               ? RelocInfo::CODE_TARGET_CONTEXT
                               : RelocInfo::CODE_TARGET;
        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
        // Make sure that we do not have a test instruction after the
        // call.  A test instruction after the call is used to
        // indicate that we have generated an inline version of the
        // keyed load.  The explicit nop instruction is here because
        // the push that follows might be peep-hole optimized away.
        __ nop();
        cgen_->frame()->Push(&answer);
      }
      break;
    }

    default:
      UNREACHABLE();
  }
}
5456 | |
5457 | |
5458 void Reference::TakeValue(TypeofState typeof_state) { | |
5459 // For non-constant frame-allocated slots, we invalidate the value in the | |
5460 // slot. For all others, we fall back on GetValue. | |
5461 ASSERT(!cgen_->in_spilled_code()); | |
5462 ASSERT(!is_illegal()); | |
5463 if (type_ != SLOT) { | |
5464 GetValue(typeof_state); | |
5465 return; | |
5466 } | |
5467 | |
5468 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); | |
5469 ASSERT(slot != NULL); | |
5470 if (slot->type() == Slot::LOOKUP || | |
5471 slot->type() == Slot::CONTEXT || | |
5472 slot->var()->mode() == Variable::CONST) { | |
5473 GetValue(typeof_state); | |
5474 return; | |
5475 } | |
5476 | |
5477 // Only non-constant, frame-allocated parameters and locals can reach | |
5478 // here. | |
5479 if (slot->type() == Slot::PARAMETER) { | |
5480 cgen_->frame()->TakeParameterAt(slot->index()); | |
5481 } else { | |
5482 ASSERT(slot->type() == Slot::LOCAL); | |
5483 cgen_->frame()->TakeLocalAt(slot->index()); | |
5484 } | |
5485 } | |
5486 | |
5487 | |
5488 void Reference::SetValue(InitState init_state) { | |
5489 ASSERT(cgen_->HasValidEntryRegisters()); | |
5490 ASSERT(!is_illegal()); | |
5491 switch (type_) { | |
5492 case SLOT: { | |
5493 Comment cmnt(cgen_->masm(), "[ Store to Slot"); | |
5494 Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); | |
5495 ASSERT(slot != NULL); | |
5496 cgen_->StoreToSlot(slot, init_state); | |
5497 break; | |
5498 } | |
5499 | |
5500 case NAMED: { | |
5501 Comment cmnt(cgen_->masm(), "[ Store to named Property"); | |
5502 cgen_->frame()->Push(GetName()); | |
5503 Result answer = cgen_->frame()->CallStoreIC(); | |
5504 cgen_->frame()->Push(&answer); | |
5505 break; | |
5506 } | |
5507 | |
5508 case KEYED: { | |
5509 Comment cmnt(cgen_->masm(), "[ Store to keyed Property"); | |
5510 Result answer = cgen_->frame()->CallKeyedStoreIC(); | |
5511 cgen_->frame()->Push(&answer); | |
5512 break; | |
5513 } | |
5514 | |
5515 default: | |
5516 UNREACHABLE(); | |
5517 } | |
5518 } | |
5519 | |
5520 | |
// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
// Applies the ECMA-262 section 9.2 ToBoolean conversion to the (only)
// stub argument on the stack and returns 1 for true and 0 for false
// in eax.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  Label false_result, true_result, not_string;
  // Load the argument into eax.
  __ mov(eax, Operand(esp, 1 * kPointerSize));

  // 'null' => false.
  __ cmp(eax, Factory::null_value());
  __ j(equal, &false_result);

  // Get the map and type of the heap object.
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));

  // Undetectable => false.
  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
  __ and_(ebx, 1 << Map::kIsUndetectable);
  __ j(not_zero, &false_result);

  // JavaScript object => true.
  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
  __ j(above_equal, &true_result);

  // String value => false iff empty.
  __ cmp(ecx, FIRST_NONSTRING_TYPE);
  __ j(above_equal, &not_string);
  __ and_(ecx, kStringSizeMask);
  __ cmp(ecx, kShortStringTag);
  __ j(not_equal, &true_result);  // Empty string is always short.
  __ mov(edx, FieldOperand(eax, String::kLengthOffset));
  __ shr(edx, String::kShortLengthShift);
  __ j(zero, &false_result);
  __ jmp(&true_result);

  __ bind(&not_string);
  // HeapNumber => false iff +0, -0, or NaN.
  // Compare the number against zero with the FPU.  After sahf below,
  // the zero flag (C3) is set both on equality and on an unordered
  // result, so +0, -0, and NaN all branch to false.
  __ cmp(edx, Factory::heap_number_map());
  __ j(not_equal, &true_result);
  __ fldz();
  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ fucompp();
  // fnstsw/sahf clobber (part of) eax, which still holds the input
  // value, so preserve it around the flag transfer.
  __ push(eax);
  __ fnstsw_ax();
  __ sahf();
  __ pop(eax);
  __ j(zero, &false_result);
  // Fall through to |true_result|.

  // Return 1/0 for true/false in eax.
  __ bind(&true_result);
  __ mov(eax, 1);
  __ ret(1 * kPointerSize);
  __ bind(&false_result);
  __ mov(eax, 0);
  __ ret(1 * kPointerSize);
}
5576 | |
5577 | |
5578 #undef __ | |
5579 #define __ ACCESS_MASM(masm_) | |
5580 | |
5581 Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left, | |
5582 Result* right) { | |
5583 // Perform fast-case smi code for the operation (left <op> right) and | |
5584 // returns the result in a Result. | |
5585 // If any fast-case tests fail, it jumps to the slow-case deferred code, | |
5586 // which calls the binary operation stub, with the arguments (in registers) | |
5587 // on top of the frame. | |
5588 // Consumes its arguments (sets left and right to invalid and frees their | |
5589 // registers). | |
5590 | |
5591 left->ToRegister(); | |
5592 right->ToRegister(); | |
5593 // A newly allocated register answer is used to hold the answer. | |
5594 // The registers containing left and right are not modified in | |
5595 // most cases, so they usually don't need to be spilled in the fast case. | |
5596 Result answer = generator()->allocator()->Allocate(); | |
5597 | |
5598 ASSERT(answer.is_valid()); | |
5599 // Perform the smi check. | |
5600 if (left->reg().is(right->reg())) { | |
5601 __ test(left->reg(), Immediate(kSmiTagMask)); | |
5602 } else { | |
5603 __ mov(answer.reg(), left->reg()); | |
5604 __ or_(answer.reg(), Operand(right->reg())); | |
5605 ASSERT(kSmiTag == 0); // adjust zero check if not the case | |
5606 __ test(answer.reg(), Immediate(kSmiTagMask)); | |
5607 } | |
5608 enter()->Branch(not_zero, left, right, not_taken); | |
5609 | |
5610 // All operations start by copying the left argument into answer. | |
5611 __ mov(answer.reg(), left->reg()); | |
5612 switch (op_) { | |
5613 case Token::ADD: | |
5614 __ add(answer.reg(), Operand(right->reg())); // add optimistically | |
5615 enter()->Branch(overflow, left, right, not_taken); | |
5616 break; | |
5617 | |
5618 case Token::SUB: | |
5619 __ sub(answer.reg(), Operand(right->reg())); // subtract optimistically | |
5620 enter()->Branch(overflow, left, right, not_taken); | |
5621 break; | |
5622 | |
5623 case Token::MUL: { | |
5624 // If the smi tag is 0 we can just leave the tag on one operand. | |
5625 ASSERT(kSmiTag == 0); // adjust code below if not the case | |
5626 // Remove tag from the left operand (but keep sign). | |
5627 // Left hand operand has been copied into answer. | |
5628 __ sar(answer.reg(), kSmiTagSize); | |
5629 // Do multiplication of smis, leaving result in answer. | |
5630 __ imul(answer.reg(), Operand(right->reg())); | |
5631 // Go slow on overflows. | |
5632 enter()->Branch(overflow, left, right, not_taken); | |
5633 // Check for negative zero result. If product is zero, | |
5634 // and one argument is negative, go to slow case. | |
5635 // The frame is unchanged in this block, so local control flow can | |
5636 // use a Label rather than a JumpTarget. | |
5637 Label non_zero_result; | |
5638 __ test(answer.reg(), Operand(answer.reg())); | |
5639 __ j(not_zero, &non_zero_result, taken); | |
5640 __ mov(answer.reg(), left->reg()); | |
5641 __ or_(answer.reg(), Operand(right->reg())); | |
5642 enter()->Branch(negative, left, right, not_taken); | |
5643 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct. | |
5644 __ bind(&non_zero_result); | |
5645 break; | |
5646 } | |
5647 | |
5648 case Token::DIV: // Fall through. | |
5649 case Token::MOD: { | |
5650 // Div and mod use the registers eax and edx. Left and right must | |
5651 // be preserved, because the original operands are needed if we switch | |
5652 // to the slow case. Move them if either is in eax or edx. | |
5653 // The Result answer should be changed into an alias for eax. | |
5654 // Precondition: | |
5655 // The Results left and right are valid. They may be the same register, | |
5656 // and may be unspilled. The Result answer is valid and is distinct | |
5657 // from left and right, and is spilled. | |
5658 // The value in left is copied to answer. | |
5659 | |
5660 Result reg_eax = generator()->allocator()->Allocate(eax); | |
5661 Result reg_edx = generator()->allocator()->Allocate(edx); | |
5662 // These allocations may have failed, if one of left, right, or answer | |
5663 // is in register eax or edx. | |
5664 bool left_copied_to_eax = false; // We will make sure this becomes true. | |
5665 | |
5666 // Part 1: Get eax | |
5667 if (answer.reg().is(eax)) { | |
5668 reg_eax = answer; | |
5669 left_copied_to_eax = true; | |
5670 } else if (right->reg().is(eax) || left->reg().is(eax)) { | |
5671 // We need a non-edx register to move one or both of left and right to. | |
5672 // We use answer if it is not edx, otherwise we allocate one. | |
5673 if (answer.reg().is(edx)) { | |
5674 reg_edx = answer; | |
5675 answer = generator()->allocator()->Allocate(); | |
5676 ASSERT(answer.is_valid()); | |
5677 } | |
5678 | |
5679 if (left->reg().is(eax)) { | |
5680 reg_eax = *left; | |
5681 left_copied_to_eax = true; | |
5682 *left = answer; | |
5683 } | |
5684 if (right->reg().is(eax)) { | |
5685 reg_eax = *right; | |
5686 *right = answer; | |
5687 } | |
5688 __ mov(answer.reg(), eax); | |
5689 } | |
5690 // End of Part 1. | |
5691 // reg_eax is valid, and neither left nor right is in eax. | |
5692 ASSERT(reg_eax.is_valid()); | |
5693 ASSERT(!left->reg().is(eax)); | |
5694 ASSERT(!right->reg().is(eax)); | |
5695 | |
5696 // Part 2: Get edx | |
5697 // reg_edx is invalid if and only if either left, right, | |
5698 // or answer is in edx. If edx is valid, then either edx | |
5699 // was free, or it was answer, but answer was reallocated. | |
5700 if (answer.reg().is(edx)) { | |
5701 reg_edx = answer; | |
5702 } else if (right->reg().is(edx) || left->reg().is(edx)) { | |
5703 // Is answer used? | |
5704 if (answer.reg().is(eax) || answer.reg().is(left->reg()) || | |
5705 answer.reg().is(right->reg())) { | |
5706 answer = generator()->allocator()->Allocate(); | |
5707 ASSERT(answer.is_valid()); // We cannot hit both Allocate() calls. | |
5708 } | |
5709 if (left->reg().is(edx)) { | |
5710 reg_edx = *left; | |
5711 *left = answer; | |
5712 } | |
5713 if (right->reg().is(edx)) { | |
5714 reg_edx = *right; | |
5715 *right = answer; | |
5716 } | |
5717 __ mov(answer.reg(), edx); | |
5718 } | |
5719 // End of Part 2 | |
5720 ASSERT(reg_edx.is_valid()); | |
5721 ASSERT(!left->reg().is(eax)); | |
5722 ASSERT(!right->reg().is(eax)); | |
5723 | |
5724 answer = reg_eax; // May free answer, if it was never used. | |
5725 generator()->frame()->Spill(eax); | |
5726 if (!left_copied_to_eax) { | |
5727 __ mov(eax, left->reg()); | |
5728 left_copied_to_eax = true; | |
5729 } | |
5730 generator()->frame()->Spill(edx); | |
5731 | |
5732 // Postcondition: | |
5733 // reg_eax, reg_edx are valid, correct, and spilled. | |
5734 // reg_eax contains the value originally in left | |
5735 // left and right are not eax or edx. They may or may not be | |
5736 // spilled or distinct. | |
5737 // answer is an alias for reg_eax. | |
5738 | |
5739 // Sign extend eax into edx:eax. | |
5740 __ cdq(); | |
5741 // Check for 0 divisor. | |
5742 __ test(right->reg(), Operand(right->reg())); | |
5743 enter()->Branch(zero, left, right, not_taken); | |
5744 // Divide edx:eax by the right operand. | |
5745 __ idiv(right->reg()); | |
5746 if (op_ == Token::DIV) { | |
5747 // Check for negative zero result. If result is zero, and divisor | |
5748 // is negative, return a floating point negative zero. | |
5749 // The frame is unchanged in this block, so local control flow can | |
5750 // use a Label rather than a JumpTarget. | |
5751 Label non_zero_result; | |
5752 __ test(left->reg(), Operand(left->reg())); | |
5753 __ j(not_zero, &non_zero_result, taken); | |
5754 __ test(right->reg(), Operand(right->reg())); | |
5755 enter()->Branch(negative, left, right, not_taken); | |
5756 __ bind(&non_zero_result); | |
5757 // Check for the corner case of dividing the most negative smi | |
5758 // by -1. We cannot use the overflow flag, since it is not set | |
5759 // by idiv instruction. | |
5760 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | |
5761 __ cmp(eax, 0x40000000); | |
5762 enter()->Branch(equal, left, right, not_taken); | |
5763 // Check that the remainder is zero. | |
5764 __ test(edx, Operand(edx)); | |
5765 enter()->Branch(not_zero, left, right, not_taken); | |
5766 // Tag the result and store it in register temp. | |
5767 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | |
5768 __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag)); | |
5769 } else { | |
5770 ASSERT(op_ == Token::MOD); | |
5771 // Check for a negative zero result. If the result is zero, and the | |
5772 // dividend is negative, return a floating point negative zero. | |
5773 // The frame is unchanged in this block, so local control flow can | |
5774 // use a Label rather than a JumpTarget. | |
5775 Label non_zero_result; | |
5776 __ test(edx, Operand(edx)); | |
5777 __ j(not_zero, &non_zero_result, taken); | |
5778 __ test(left->reg(), Operand(left->reg())); | |
5779 enter()->Branch(negative, left, right, not_taken); | |
5780 __ bind(&non_zero_result); | |
5781 // The answer is in edx. | |
5782 answer = reg_edx; | |
5783 } | |
5784 break; | |
5785 } | |
5786 case Token::BIT_OR: | |
5787 __ or_(answer.reg(), Operand(right->reg())); | |
5788 break; | |
5789 | |
5790 case Token::BIT_AND: | |
5791 __ and_(answer.reg(), Operand(right->reg())); | |
5792 break; | |
5793 | |
5794 case Token::BIT_XOR: | |
5795 __ xor_(answer.reg(), Operand(right->reg())); | |
5796 break; | |
5797 | |
5798 case Token::SHL: | |
5799 case Token::SHR: | |
5800 case Token::SAR: | |
5801 // Move right into ecx. | |
5802 // Left is in two registers already, so even if left or answer is ecx, | |
5803 // we can move right to it, and use the other one. | |
5804 // Right operand must be in register cl because x86 likes it that way. | |
5805 if (right->reg().is(ecx)) { | |
5806 // Right is already in the right place. Left may be in the | |
5807 // same register, which causes problems. Use answer instead. | |
5808 if (left->reg().is(ecx)) { | |
5809 *left = answer; | |
5810 } | |
5811 } else if (left->reg().is(ecx)) { | |
5812 generator()->frame()->Spill(left->reg()); | |
5813 __ mov(left->reg(), right->reg()); | |
5814 *right = *left; | |
5815 *left = answer; // Use copy of left in answer as left. | |
5816 } else if (answer.reg().is(ecx)) { | |
5817 __ mov(answer.reg(), right->reg()); | |
5818 *right = answer; | |
5819 } else { | |
5820 Result reg_ecx = generator()->allocator()->Allocate(ecx); | |
5821 ASSERT(reg_ecx.is_valid()); | |
5822 __ mov(ecx, right->reg()); | |
5823 *right = reg_ecx; | |
5824 } | |
5825 ASSERT(left->reg().is_valid()); | |
5826 ASSERT(!left->reg().is(ecx)); | |
5827 ASSERT(right->reg().is(ecx)); | |
5828 answer.Unuse(); // Answer may now be being used for left or right. | |
5829 // We will modify left and right, which we do not do in any other | |
5830 // binary operation. The exits to slow code need to restore the | |
5831 // original values of left and right, or at least values that give | |
5832 // the same answer. | |
5833 | |
5834 // We are modifying left and right. They must be spilled! | |
5835 generator()->frame()->Spill(left->reg()); | |
5836 generator()->frame()->Spill(right->reg()); | |
5837 | |
5838 // Remove tags from operands (but keep sign). | |
5839 __ sar(left->reg(), kSmiTagSize); | |
5840 __ sar(ecx, kSmiTagSize); | |
5841 // Perform the operation. | |
5842 switch (op_) { | |
5843 case Token::SAR: | |
5844 __ sar(left->reg()); | |
5845 // No checks of result necessary | |
5846 break; | |
5847 case Token::SHR: { | |
5848 __ shr(left->reg()); | |
5849 // Check that the *unsigned* result fits in a smi. | |
5850 // Neither of the two high-order bits can be set: | |
5851 // - 0x80000000: high bit would be lost when smi tagging. | |
5852 // - 0x40000000: this number would convert to negative when | |
5853 // Smi tagging these two cases can only happen with shifts | |
5854 // by 0 or 1 when handed a valid smi. | |
5855 // If the answer cannot be represented by a SMI, restore | |
5856 // the left and right arguments, and jump to slow case. | |
5857 // The low bit of the left argument may be lost, but only | |
5858 // in a case where it is dropped anyway. | |
5859 JumpTarget result_ok(generator()); | |
5860 __ test(left->reg(), Immediate(0xc0000000)); | |
5861 result_ok.Branch(zero, left, taken); | |
5862 __ shl(left->reg()); | |
5863 ASSERT(kSmiTag == 0); | |
5864 __ shl(left->reg(), kSmiTagSize); | |
5865 __ shl(right->reg(), kSmiTagSize); | |
5866 enter()->Jump(left, right); | |
5867 result_ok.Bind(left); | |
5868 break; | |
5869 } | |
5870 case Token::SHL: { | |
5871 __ shl(left->reg()); | |
5872 // Check that the *signed* result fits in a smi. | |
5873 // | |
5874 // TODO(207): Can reduce registers from 4 to 3 by | |
5875 // preallocating ecx. | |
5876 JumpTarget result_ok(generator()); | |
5877 Result smi_test_reg = generator()->allocator()->Allocate(); | |
5878 ASSERT(smi_test_reg.is_valid()); | |
5879 __ lea(smi_test_reg.reg(), Operand(left->reg(), 0x40000000)); | |
5880 __ test(smi_test_reg.reg(), Immediate(0x80000000)); | |
5881 smi_test_reg.Unuse(); | |
5882 result_ok.Branch(zero, left, taken); | |
5883 __ shr(left->reg()); | |
5884 ASSERT(kSmiTag == 0); | |
5885 __ shl(left->reg(), kSmiTagSize); | |
5886 __ shl(right->reg(), kSmiTagSize); | |
5887 enter()->Jump(left, right); | |
5888 result_ok.Bind(left); | |
5889 break; | |
5890 } | |
5891 default: | |
5892 UNREACHABLE(); | |
5893 } | |
5894 // Smi-tag the result, in left, and make answer an alias for left-> | |
5895 answer = *left; | |
5896 answer.ToRegister(); | |
5897 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | |
5898 __ lea(answer.reg(), | |
5899 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); | |
5900 break; | |
5901 | |
5902 default: | |
5903 UNREACHABLE(); | |
5904 break; | |
5905 } | |
5906 left->Unuse(); | |
5907 right->Unuse(); | |
5908 return answer; | |
5909 } | |
5910 | |
5911 | |
5912 #undef __ | |
5913 #define __ ACCESS_MASM(masm) | |
5914 | |
// Emits the fast-path code for op_ applied to two smi operands.
// Register contract: left operand in eax, right operand in ebx; the
// smi result is left in eax.  ecx is clobbered (it holds the or'ed
// operands for the smi check), and edx is clobbered for DIV/MOD (cdq
// sign-extends eax into edx:eax).  Jumps to |slow| when either operand
// is not a smi or when the result does not fit in a smi.  Note that
// ADD and SUB are performed optimistically BEFORE the smi check, so on
// the slow path eax may already be clobbered; the caller's slow path
// is expected to reload the original operands (e.g. from the stack).
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
  // Perform fast-case smi code for the operation (eax <op> ebx) and
  // leave result in register eax.

  // Prepare the smi check of both operands by or'ing them together
  // before checking against the smi mask.
  __ mov(ecx, Operand(ebx));
  __ or_(ecx, Operand(eax));

  switch (op_) {
    case Token::ADD:
      // Optimistic: performed before the smi check below; the overflow
      // check must come first because the flags are set by the add.
      __ add(eax, Operand(ebx));  // add optimistically
      __ j(overflow, slow, not_taken);
      break;

    case Token::SUB:
      __ sub(eax, Operand(ebx));  // subtract optimistically
      __ j(overflow, slow, not_taken);
      break;

    case Token::DIV:
    case Token::MOD:
      // Sign extend eax into edx:eax.
      __ cdq();
      // Check for 0 divisor.
      __ test(ebx, Operand(ebx));
      __ j(zero, slow, not_taken);
      break;

    default:
      // Fall-through to smi check.
      break;
  }

  // Perform the actual smi check.
  ASSERT(kSmiTag == 0);  // adjust zero check if not the case
  __ test(ecx, Immediate(kSmiTagMask));
  __ j(not_zero, slow, not_taken);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
      // Do nothing here: the operation was already performed
      // optimistically above and the result is a valid smi.
      break;

    case Token::MUL:
      // If the smi tag is 0 we can just leave the tag on one operand.
      ASSERT(kSmiTag == 0);  // adjust code below if not the case
      // Remove tag from one of the operands (but keep sign).
      __ sar(eax, kSmiTagSize);
      // Do multiplication.
      __ imul(eax, Operand(ebx));  // multiplication of smis; result in eax
      // Go slow on overflows.
      __ j(overflow, slow, not_taken);
      // Check for negative zero result.
      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
      break;

    case Token::DIV:
      // Divide edx:eax by ebx.
      __ idiv(ebx);
      // Check for the corner case of dividing the most negative smi
      // by -1. We cannot use the overflow flag, since it is not set
      // by idiv instruction.
      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      // 0x40000000 is the untagged most negative smi negated; its
      // tagged form would overflow into the sign bit.
      __ cmp(eax, 0x40000000);
      __ j(equal, slow);
      // Check for negative zero result.
      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
      // Check that the remainder is zero; a nonzero remainder means
      // the quotient is not an integer, so go to the slow case.
      __ test(edx, Operand(edx));
      __ j(not_zero, slow);
      // Tag the result and store it in register eax.
      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
      // lea eax, [eax + eax] doubles the value, i.e. smi-tags it.
      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
      break;

    case Token::MOD:
      // Divide edx:eax by ebx.
      __ idiv(ebx);
      // Check for negative zero result.
      __ NegativeZeroTest(edx, ecx, slow);  // use ecx = x | y
      // Move remainder to register eax.  The remainder of two smis is
      // already a smi, so no re-tagging is needed.
      __ mov(eax, Operand(edx));
      break;

    case Token::BIT_OR:
      // Bitwise ops on smis preserve the tag, so no untag/retag.
      __ or_(eax, Operand(ebx));
      break;

    case Token::BIT_AND:
      __ and_(eax, Operand(ebx));
      break;

    case Token::BIT_XOR:
      __ xor_(eax, Operand(ebx));
      break;

    case Token::SHL:
    case Token::SHR:
    case Token::SAR:
      // Move the second operand into register ecx: x86 shifts take a
      // variable count only in cl.
      __ mov(ecx, Operand(ebx));
      // Remove tags from operands (but keep sign).
      __ sar(eax, kSmiTagSize);
      __ sar(ecx, kSmiTagSize);
      // Perform the operation (shift count is in cl).
      switch (op_) {
        case Token::SAR:
          __ sar(eax);
          // No checks of result necessary
          break;
        case Token::SHR:
          __ shr(eax);
          // Check that the *unsigned* result fits in a smi.
          // Neither of the two high-order bits can be set:
          // - 0x80000000: high bit would be lost when smi tagging.
          // - 0x40000000: this number would convert to negative when
          // Smi tagging these two cases can only happen with shifts
          // by 0 or 1 when handed a valid smi.
          __ test(eax, Immediate(0xc0000000));
          __ j(not_zero, slow, not_taken);
          break;
        case Token::SHL:
          __ shl(eax);
          // Check that the *signed* result fits in a smi.
          __ cmp(eax, 0xc0000000);
          __ j(sign, slow, not_taken);
          break;
        default:
          UNREACHABLE();
      }
      // Tag the result and store it in register eax.
      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
      break;

    default:
      UNREACHABLE();
      break;
  }
}
6057 | |
6058 | |
// Emits the full stub body for the generic binary operation op_.
// On entry the operands are on the stack below the return address:
// x at esp + 2 * kPointerSize and y at esp + 1 * kPointerSize.
// The result is returned in eax (both operands are popped by ret).
// Structure: optional inline smi fast case (when flags_ ==
// SMI_CODE_IN_STUB), then a floating-point / int32 fast case, and
// finally a tail call into the matching JS builtin for everything the
// fast paths cannot handle.
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
  Label call_runtime;

  if (flags_ == SMI_CODE_IN_STUB) {
    // The fast case smi code wasn't inlined in the stub caller
    // code. Generate it here to speed up common operations.
    Label slow;
    __ mov(ebx, Operand(esp, 1 * kPointerSize));  // get y
    __ mov(eax, Operand(esp, 2 * kPointerSize));  // get x
    GenerateSmiCode(masm, &slow);
    __ ret(2 * kPointerSize);  // remove both operands

    // Too bad. The fast case smi code didn't succeed.
    __ bind(&slow);
  }

  // Setup registers.  The smi fast case may have clobbered eax, so the
  // operands are reloaded from the stack here.
  __ mov(eax, Operand(esp, 1 * kPointerSize));  // get y
  __ mov(edx, Operand(esp, 2 * kPointerSize));  // get x

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      // eax: y
      // edx: x
      // Bail out to the runtime unless both operands are numbers
      // (smis or heap numbers).
      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
      // Fast-case: Both operands are numbers.
      // Allocate a heap number, if needed.  In the overwrite modes an
      // operand that is already a heap object is reused as the result.
      Label skip_allocation;
      switch (mode_) {
        case OVERWRITE_LEFT:
          __ mov(eax, Operand(edx));
          // Fall through!
        case OVERWRITE_RIGHT:
          // If the argument in eax is already an object, we skip the
          // allocation of a heap number.
          __ test(eax, Immediate(kSmiTagMask));
          __ j(not_zero, &skip_allocation, not_taken);
          // Fall through!
        case NO_OVERWRITE:
          FloatingPointHelper::AllocateHeapNumber(masm,
                                                  &call_runtime,
                                                  ecx,
                                                  edx);
          __ bind(&skip_allocation);
          break;
        default: UNREACHABLE();
      }
      // Push both operands onto the FPU stack (x below y).
      FloatingPointHelper::LoadFloatOperands(masm, ecx);

      switch (op_) {
        case Token::ADD: __ faddp(1); break;
        case Token::SUB: __ fsubp(1); break;
        case Token::MUL: __ fmulp(1); break;
        case Token::DIV: __ fdivp(1); break;
        default: UNREACHABLE();
      }
      // Store the double result in the result heap number and return.
      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
      __ ret(2 * kPointerSize);
      // NOTE(review): no C++ 'break' here, so this case falls through
      // into Token::MOD below.  That fallthrough is benign: the ret
      // emitted above already terminates this generated fast path, and
      // the MOD case only emits a 'break'.
    }
    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      // Bit ops operate on 32-bit integers, so both operands are
      // checked to be numbers and then truncated via the FPU.
      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
      FloatingPointHelper::LoadFloatOperands(masm, ecx);

      Label skip_allocation, non_smi_result, operand_conversion_failure;

      // Reserve space for converted numbers.
      __ sub(Operand(esp), Immediate(2 * kPointerSize));

      bool use_sse3 = CpuFeatures::IsSupported(CpuFeatures::SSE3);
      if (use_sse3) {
        // Truncate the operands to 32-bit integers and check for
        // exceptions in doing so.
        CpuFeatures::Scope scope(CpuFeatures::SSE3);
        __ fisttp_s(Operand(esp, 0 * kPointerSize));
        __ fisttp_s(Operand(esp, 1 * kPointerSize));
        // An invalid-operation exception (status word bit 0) means the
        // value did not fit in an int32.
        __ fnstsw_ax();
        __ test(eax, Immediate(1));
        __ j(not_zero, &operand_conversion_failure);
      } else {
        // Without SSE3 truncation, store with fist and verify the
        // round trip compares equal to the original (fucompp sets
        // C flags; sahf moves them into EFLAGS; parity_even = NaN).
        // Check if right operand is int32.
        __ fist_s(Operand(esp, 0 * kPointerSize));
        __ fild_s(Operand(esp, 0 * kPointerSize));
        __ fucompp();
        __ fnstsw_ax();
        __ sahf();
        __ j(not_zero, &operand_conversion_failure);
        __ j(parity_even, &operand_conversion_failure);

        // Check if left operand is int32.
        __ fist_s(Operand(esp, 1 * kPointerSize));
        __ fild_s(Operand(esp, 1 * kPointerSize));
        __ fucompp();
        __ fnstsw_ax();
        __ sahf();
        __ j(not_zero, &operand_conversion_failure);
        __ j(parity_even, &operand_conversion_failure);
      }

      // Get int32 operands and perform bitop.
      __ pop(ecx);
      __ pop(eax);
      switch (op_) {
        case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
        case Token::SAR: __ sar(eax); break;
        case Token::SHL: __ shl(eax); break;
        case Token::SHR: __ shr(eax); break;
        default: UNREACHABLE();
      }

      // Check if result is non-negative and fits in a smi.
      __ test(eax, Immediate(0xc0000000));
      __ j(not_zero, &non_smi_result);

      // Tag smi result and return.
      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
      __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
      __ ret(2 * kPointerSize);

      // All ops except SHR return a signed int32 that we load in a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, Operand(eax));  // ebx: result
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ test(eax, Immediate(kSmiTagMask));
            __ j(not_zero, &skip_allocation, not_taken);
            // Fall through!
          case NO_OVERWRITE:
            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
                                                    ecx, edx);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.  The int32
        // result is staged through the stack so fild_s can convert it.
        __ mov(Operand(esp, 1 * kPointerSize), ebx);
        __ fild_s(Operand(esp, 1 * kPointerSize));
        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ ret(2 * kPointerSize);
      }

      // Clear the FPU exception flag and reset the stack before calling
      // the runtime system.
      __ bind(&operand_conversion_failure);
      __ add(Operand(esp), Immediate(2 * kPointerSize));
      if (use_sse3) {
        // If we've used the SSE3 instructions for truncating the
        // floating point values to integers and it failed, we have a
        // pending #IA exception. Clear it.
        __ fnclex();
      } else {
        // The non-SSE3 variant does early bailout if the right
        // operand isn't a 32-bit integer, so we may have a single
        // value on the FPU stack we need to get rid of.
        __ ffree(0);
      }

      // SHR should return uint32 - go to runtime for non-smi/negative result.
      if (op_ == Token::SHR) {
        __ bind(&non_smi_result);
      }
      // Reload the original operands for the runtime call below.
      __ mov(eax, Operand(esp, 1 * kPointerSize));
      __ mov(edx, Operand(esp, 2 * kPointerSize));
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If all else fails, use the runtime system to get the correct
  // result.
  __ bind(&call_runtime);
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}
6289 | |
6290 | |
// Allocates a HeapNumber in new space with inline bump-pointer
// allocation.  The tagged pointer to the new object is left in eax;
// scratch1 and scratch2 are clobbered.  Jumps to |need_gc| when the
// allocation would exceed the new-space limit.  The map field is
// initialized, but the number's value field is NOT — callers must
// store a double into it.
void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
                                             Label* need_gc,
                                             Register scratch1,
                                             Register scratch2) {
  ExternalReference allocation_top =
      ExternalReference::new_space_allocation_top_address();
  ExternalReference allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  // scratch1: address of the allocation top variable.
  __ mov(Operand(scratch1), Immediate(allocation_top));
  // eax: current top, i.e. the untagged address of the new object.
  __ mov(eax, Operand(scratch1, 0));
  __ lea(scratch2, Operand(eax, HeapNumber::kSize));  // scratch2: new top
  __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
  __ j(above, need_gc, not_taken);  // out of new space

  __ mov(Operand(scratch1, 0), scratch2);  // store new top
  // Initialize the new object's map so the GC sees a valid object.
  __ mov(Operand(eax, HeapObject::kMapOffset),
         Immediate(Factory::heap_number_map()));
  // Tag old top and use as result.
  __ add(Operand(eax), Immediate(kHeapObjectTag));
}
6311 | |
6312 | |
// Pushes the two stack operands (x at esp + 2 * kPointerSize, then y
// at esp + 1 * kPointerSize) onto the FPU stack, x first.  A smi
// operand is untagged and converted with fild_s; a non-smi operand is
// loaded directly from its HeapNumber value field.  Clobbers
// |scratch|.  Callers are expected to have already verified that both
// operands are numbers (e.g. via CheckFloatOperands); a non-smi,
// non-HeapNumber operand would be misread here.
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                            Register scratch) {
  Label load_smi_1, load_smi_2, done_load_1, done;
  // First (x) operand.
  __ mov(scratch, Operand(esp, 2 * kPointerSize));
  __ test(scratch, Immediate(kSmiTagMask));
  __ j(zero, &load_smi_1, not_taken);
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  __ bind(&done_load_1);

  // Second (y) operand.
  __ mov(scratch, Operand(esp, 1 * kPointerSize));
  __ test(scratch, Immediate(kSmiTagMask));
  __ j(zero, &load_smi_2, not_taken);
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  __ jmp(&done);

  // Smi path: untag, then bounce the integer through memory so
  // fild_s can convert it to a double on the FPU stack.
  __ bind(&load_smi_1);
  __ sar(scratch, kSmiTagSize);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);
  __ jmp(&done_load_1);

  __ bind(&load_smi_2);
  __ sar(scratch, kSmiTagSize);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);

  __ bind(&done);
}
6343 | |
6344 | |
// Jumps to |non_float| unless both operands (x in edx, y in eax) are
// numbers, i.e. smis or heap numbers (objects with the heap number
// map).  Clobbers |scratch|; edx and eax themselves are not modified.
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                             Label* non_float,
                                             Register scratch) {
  Label test_other, done;
  // Test if both operands are floats or smi -> scratch=k_is_float;
  // Otherwise scratch = k_not_float.
  __ test(edx, Immediate(kSmiTagMask));
  __ j(zero, &test_other, not_taken);  // argument in edx is OK (smi)
  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
  __ cmp(scratch, Factory::heap_number_map());
  __ j(not_equal, non_float);  // argument in edx is not a number -> NaN

  __ bind(&test_other);
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &done);  // argument in eax is OK (smi)
  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(scratch, Factory::heap_number_map());
  __ j(not_equal, non_float);  // argument in eax is not a number -> NaN

  // Fall-through: Both operands are numbers.
  __ bind(&done);
}
6367 | |
6368 | |
// Emits the unary minus stub.  The operand arrives in eax and the
// result is returned in eax.  Smi case: negate via optimistic
// '0 - value'; zero goes to the runtime (to produce -0), and a
// negation whose result is not a smi is undone and sent to the
// runtime as well.  Heap number case: allocate a fresh HeapNumber and
// flip the sign with fchs.  Anything else tail-calls the UNARY_MINUS
// builtin.
void UnarySubStub::Generate(MacroAssembler* masm) {
  Label undo;
  Label slow;
  Label done;
  Label try_float;

  // Check whether the value is a smi.
  __ test(eax, Immediate(kSmiTagMask));
  __ j(not_zero, &try_float, not_taken);

  // Enter runtime system if the value of the expression is zero
  // to make sure that we switch between 0 and -0.
  __ test(eax, Operand(eax));
  __ j(zero, &slow, not_taken);

  // The value of the expression is a smi that is not zero. Try
  // optimistic subtraction '0 - value'.
  __ mov(edx, Operand(eax));  // save the operand so &undo can restore it
  __ Set(eax, Immediate(0));
  __ sub(eax, Operand(edx));
  __ j(overflow, &undo, not_taken);

  // If result is a smi we are done.
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &done, taken);

  // Restore eax and enter runtime system.
  __ bind(&undo);
  __ mov(eax, Operand(edx));

  // Enter runtime system.  The operand is passed as the single
  // argument, slipped under the return address.
  __ bind(&slow);
  __ pop(ecx);  // pop return address
  __ push(eax);
  __ push(ecx);  // push return address
  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);

  // Try floating point case.
  __ bind(&try_float);
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(edx, Factory::heap_number_map());
  __ j(not_equal, &slow);
  __ mov(edx, Operand(eax));
  // edx: operand
  // &undo doubles as the allocation-failure target: it restores eax
  // from edx and falls through to the runtime call above.
  FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
  // eax: allocated 'empty' number
  __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
  __ fchs();
  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));

  __ bind(&done);

  __ StubReturn(1);
}
6423 | |
6424 | |
// Emits code for reading arguments.length.  The formal parameter
// count arrives in eax from the calling function and is returned
// unchanged unless the caller's frame is an arguments adaptor frame,
// in which case the actual argument count is read from that frame.
// The result is returned in eax; ecx and edx are clobbered.
void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
  // Check if the calling frame is an arguments adaptor frame.  The
  // adaptor frame is recognized by the sentinel in its context slot.
  Label adaptor;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
  __ j(equal, &adaptor);

  // Nothing to do: The formal number of parameters has already been
  // passed in register eax by calling function. Just return it.
  __ ret(0);

  // Arguments adaptor case: Read the arguments length from the
  // adaptor frame and return it.
  __ bind(&adaptor);
  __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ ret(0);
}
6443 | |
6444 | |
// Emits code for reading arguments[key].  The (smi) key arrives in
// edx and the (smi) parameter count in eax; the argument read from
// the caller's frame is returned in eax.  Non-smi keys and
// out-of-bounds indices are handled by Runtime::kGetArgumentsProperty.
// Clobbers ebx and ecx.
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in edx and the parameter count is in eax.

  // The displacement is used for skipping the frame pointer on the
  // stack. It is the offset of the last parameter (if any) relative
  // to the frame pointer.
  static const int kDisplacement = 1 * kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ test(edx, Immediate(kSmiTagMask));
  __ j(not_zero, &slow, not_taken);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
  __ j(equal, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register eax. Use unsigned comparison to get negative
  // check for free.
  __ cmp(edx, Operand(eax));
  __ j(above_equal, &slow, not_taken);

  // Read the argument from the stack and return it.  Arguments sit
  // above the frame pointer; ebx is set to one past the last
  // parameter (smi count scaled by times_2 gives count *
  // kPointerSize), and the negated smi key indexes back from there.
  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
  __ lea(ebx, Operand(ebp, eax, times_2, 0));
  __ neg(edx);
  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
  __ ret(0);

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmp(edx, Operand(ecx));
  __ j(above_equal, &slow, not_taken);

  // Read the argument from the stack and return it.  Same addressing
  // scheme as above, but based at the adaptor frame with its length.
  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
  __ neg(edx);
  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
  __ ret(0);

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ pop(ebx);  // Return address.
  __ push(edx);  // The key becomes the runtime call's argument.
  __ push(ebx);
  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
}
6501 | |
6502 | |
// Emits code for allocating a new arguments object.  Three arguments
// are expected on the stack (set up by the caller); the actual
// allocation is always done by Runtime::kNewArgumentsFast.  If the
// calling frame is an arguments adaptor frame, the arguments.length
// and the parameters pointer on the stack are first patched to the
// adaptor frame's actual values.  Clobbers ecx and edx.
void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
  // The displacement is used for skipping the return address and the
  // frame pointer on the stack. It is the offset of the last
  // parameter (if any) relative to the frame pointer.
  static const int kDisplacement = 2 * kPointerSize;

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
  __ j(not_equal, &runtime);

  // Patch the arguments.length and the parameters pointer.  ecx holds
  // the actual argument count read from the adaptor frame.
  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(Operand(esp, 1 * kPointerSize), ecx);
  __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
  __ mov(Operand(esp, 2 * kPointerSize), edx);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
}
6526 | |
6527 | |
void CompareStub::Generate(MacroAssembler* masm) {
  // Generic comparison stub.  Operands arrive in eax and edx; the result
  // is returned in eax as 0 (equal), -1 (below) or 1 (above) for the
  // inlined floating-point path, or as whatever the invoked comparison
  // builtin returns for the slow path.
  Label call_builtin, done;

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  if (cc_ == equal) {  // Both strict and non-strict.
    Label slow;  // Fallthrough label.
    // Equality is almost reflexive (everything but NaN), so start by testing
    // for "identity and not NaN".
    {
      Label not_identical;
      __ cmp(eax, Operand(edx));
      __ j(not_equal, &not_identical);
      // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
      // so we do the second best thing - test it ourselves.

      Label return_equal;
      Label heap_number;
      // If it's not a heap number, then return equal.  Identical pointers
      // to anything but a heap number compare equal.
      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
             Immediate(Factory::heap_number_map()));
      __ j(equal, &heap_number);
      __ bind(&return_equal);
      __ Set(eax, Immediate(0));  // eax == 0 signals "equal".
      __ ret(0);

      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.
      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
      // Read top bits of double representation (second word of value).
      __ mov(eax, FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize));
      // Test that exponent bits are all set: complement eax so that a fully
      // set exponent becomes all zeros under the mask below.
      __ not_(eax);
      __ test(eax, Immediate(0x7ff00000));
      // Some exponent bit was clear -> not NaN/Infinity -> return equal.
      __ j(not_zero, &return_equal);
      __ not_(eax);  // Restore the original bits.

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ shl(eax, 12);
      // Or with all low-bits of mantissa.
      __ or_(eax, FieldOperand(edx, HeapNumber::kValueOffset));
      // Return zero (equal) if all mantissa bits are zero (the value is an
      // Infinity) and non-zero (not equal) if any is set (it's a NaN).
      __ ret(0);

      __ bind(&not_identical);
    }

    // If we're doing a strict equality comparison, we don't have to do
    // type conversion, so we generate code to do fast comparison for objects
    // and oddballs. Non-smi numbers and strings still go through the usual
    // slow-case code.
    if (strict_) {
      // If either is a Smi (we know that not both are), then they can only
      // be equal if the other is a HeapNumber. If so, use the slow case.
      {
        Label not_smis;
        ASSERT_EQ(0, kSmiTag);
        ASSERT_EQ(0, Smi::FromInt(0));
        // ecx = eax & kSmiTagMask; (ecx & edx) is non-zero only when both
        // operands carry the heap-object tag, i.e. neither is a smi.
        __ mov(ecx, Immediate(kSmiTagMask));
        __ and_(ecx, Operand(eax));
        __ test(ecx, Operand(edx));
        __ j(not_zero, &not_smis);
        // One operand is a smi.

        // Check whether the non-smi is a heap number.
        ASSERT_EQ(1, kSmiTagMask);
        // ecx still holds eax & kSmiTag, which is either zero or one.
        // After the subtraction ecx is all ones if eax was the smi, and
        // all zeros if eax was the heap object.
        __ sub(Operand(ecx), Immediate(0x01));
        // Branchless select: ebx = ecx ? edx : eax, computed via xor masking.
        __ mov(ebx, edx);
        __ xor_(ebx, Operand(eax));
        __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
        __ xor_(ebx, Operand(eax));
        // if eax was smi, ebx is now edx, else eax.

        // Check if the non-smi operand is a heap number.
        __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
               Immediate(Factory::heap_number_map()));
        // If heap number, handle it in the slow case.
        __ j(equal, &slow);
        // Return non-equal (ebx is not zero, it holds a tagged pointer).
        __ mov(eax, ebx);
        __ ret(0);

        __ bind(&not_smis);
      }

      // If either operand is a JSObject or an oddball value, then they are not
      // equal since their pointers are different
      // There is no test for undetectability in strict equality.

      // Get the type of the first operand.
      __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));

      // If the first object is a JS object, we have done pointer comparison.
      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
      Label first_non_object;
      __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
      __ j(less, &first_non_object);

      // Return non-zero (eax is not zero): eax still holds the tagged
      // pointer, which cannot be zero because of the heap-object tag.
      Label return_not_equal;
      ASSERT(kHeapObjectTag != 0);
      __ bind(&return_not_equal);
      __ ret(0);

      __ bind(&first_non_object);
      // Check for oddballs: true, false, null, undefined.
      __ cmp(ecx, ODDBALL_TYPE);
      __ j(equal, &return_not_equal);

      // Repeat the type check for the second operand.
      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));

      __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
      __ j(greater_equal, &return_not_equal);

      // Check for oddballs: true, false, null, undefined.
      __ cmp(ecx, ODDBALL_TYPE);
      __ j(equal, &return_not_equal);

      // Fall through to the general case.
    }
    __ bind(&slow);
  }

  // Save the return address (and get it off the stack).
  __ pop(ecx);

  // Push arguments.
  __ push(eax);
  __ push(edx);
  __ push(ecx);

  // Inlined floating point compare.
  // Call builtin if operands are not floating point or smi.
  FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
  FloatingPointHelper::LoadFloatOperands(masm, ecx);
  __ FCmp();

  // Jump to builtin for NaN: an unordered compare sets the parity flag.
  __ j(parity_even, &call_builtin, not_taken);

  // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
  Label below_lbl, above_lbl;
  // use edx, eax to convert unsigned to signed comparison
  __ j(below, &below_lbl, not_taken);
  __ j(above, &above_lbl, not_taken);

  __ xor_(eax, Operand(eax));  // equal
  __ ret(2 * kPointerSize);    // Pop the two pushed operands on return.

  __ bind(&below_lbl);
  __ mov(eax, -1);
  __ ret(2 * kPointerSize);

  __ bind(&above_lbl);
  __ mov(eax, 1);
  __ ret(2 * kPointerSize);  // eax, edx were pushed

  __ bind(&call_builtin);
  // must swap argument order: pop the return address and both operands,
  // then re-push the operands reversed for the builtin's calling convention.
  __ pop(ecx);
  __ pop(edx);
  __ pop(eax);
  __ push(edx);
  __ push(eax);

  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript builtin;
  if (cc_ == equal) {
    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    builtin = Builtins::COMPARE;
    // Extra argument for COMPARE: the result a NaN comparand should
    // produce, chosen so that NaN makes every ordering comparison false.
    int ncr;  // NaN compare result
    if (cc_ == less || cc_ == less_equal) {
      ncr = GREATER;
    } else {
      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
      ncr = LESS;
    }
    __ push(Immediate(Smi::FromInt(ncr)));
  }

  // Restore return address on the stack.
  __ push(ecx);

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
}
6723 | |
6724 | |
void StackCheckStub::Generate(MacroAssembler* masm) {
  // Tail-calls the stack-guard runtime routine (e.g. on interrupt or
  // stack-overflow checks emitted at function entries and loop backedges).
  //
  // Because builtins always remove the receiver from the stack, we
  // have to fake one to avoid underflowing the stack. The receiver
  // must be inserted below the return address on the stack so we
  // temporarily store that in a register.
  __ pop(eax);                           // Return address.
  __ push(Immediate(Smi::FromInt(0)));   // Fake receiver (smi zero).
  __ push(eax);                          // Re-push the return address.

  // Do tail-call to runtime routine.
  __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
}
6737 | |
6738 | |
void CallFunctionStub::Generate(MacroAssembler* masm) {
  // Calls the function sitting on the stack with argc_ arguments.  If the
  // callee is not actually a JS function, falls back to the
  // CALL_NON_FUNCTION builtin via the arguments adaptor trampoline.
  Label slow;

  // Get the function to call from the stack.
  // +2 ~ receiver, return address
  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));

  // Check that the function really is a JavaScript function.
  // A smi cannot be a function.
  __ test(edi, Immediate(kSmiTagMask));
  __ j(zero, &slow, not_taken);
  // Goto slow case if we do not have a function.
  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
  __ j(not_equal, &slow, not_taken);

  // Fast-case: Just invoke the function.
  ParameterCount actual(argc_);
  __ InvokeFunction(edi, actual, JUMP_FUNCTION);

  // Slow-case: Non-function called.  Set up eax (argument count),
  // ebx (expected count, zero) and edx (builtin entry), then jump to the
  // adaptor trampoline which performs the actual builtin call.
  __ bind(&slow);
  __ Set(eax, Immediate(argc_));
  __ Set(ebx, Immediate(0));
  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
  Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
  __ jmp(adaptor, RelocInfo::CODE_TARGET);
}
6765 | |
6766 | |
6767 | |
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
  // Throws the exception on top of stack: unlinks the topmost stack
  // handler from the chain, unwinds esp to it, restores the saved
  // registers and returns into the handler's code.
  ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize);  // adjust this code
  ExternalReference handler_address(Top::k_handler_address);
  __ mov(edx, Operand::StaticVariable(handler_address));
  __ mov(ecx, Operand(edx, -1 * kPointerSize));  // get next in chain
  __ mov(Operand::StaticVariable(handler_address), ecx);
  __ mov(esp, Operand(edx));  // Unwind the stack to the handler.
  __ pop(edi);
  __ pop(ebp);
  __ pop(edx);  // remove code pointer
  __ pop(edx);  // remove state

  // Before returning we restore the context from the frame pointer if not NULL.
  // The frame pointer is NULL in the exception handler of a JS entry frame.
  __ xor_(esi, Operand(esi));  // tentatively set context pointer to NULL
  Label skip;
  __ cmp(ebp, 0);
  __ j(equal, &skip, not_taken);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ bind(&skip);

  // Return into the handler; its address is the next word on the stack.
  __ ret(0);
}
6791 | |
6792 | |
// Performs one attempt at calling the C (runtime/builtin) function,
// optionally running a GC first and/or forcing allocations into new
// space.  On success the exit frame is left and control returns to the
// JS caller.  On a RETRY_AFTER_GC failure, execution falls through at
// the bottom so the caller of GenerateCore can emit the next attempt.
// Other failures branch to the supplied exception labels.
void CEntryStub::GenerateCore(MacroAssembler* masm,
                              Label* throw_normal_exception,
                              Label* throw_out_of_memory_exception,
                              StackFrame::Type frame_type,
                              bool do_gc,
                              bool always_allocate_scope) {
  // eax: result parameter for PerformGC, if any
  // ebx: pointer to C function (C callee-saved)
  // ebp: frame pointer (restored after C call)
  // esp: stack pointer (restored after C call)
  // edi: number of arguments including receiver (C callee-saved)
  // esi: pointer to the first argument (C callee-saved)

  if (do_gc) {
    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
    __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
  }

  // While the scope depth is non-zero, allocations go directly to old or
  // large-object space (last-resort retry after GC failed).
  ExternalReference scope_depth =
      ExternalReference::heap_always_allocate_scope_depth();
  if (always_allocate_scope) {
    __ inc(Operand::StaticVariable(scope_depth));
  }

  // Call C function.
  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
  __ call(Operand(ebx));
  // Result is in eax or edx:eax - do not destroy these registers!

  if (always_allocate_scope) {
    __ dec(Operand::StaticVariable(scope_depth));
  }

  // Check for failure result.
  Label failure_returned;
  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
  __ lea(ecx, Operand(eax, 1));
  // Lower 2 bits of ecx are 0 iff eax has failure tag.
  __ test(ecx, Immediate(kFailureTagMask));
  __ j(zero, &failure_returned, not_taken);

  // Exit the JavaScript to C++ exit frame.
  __ LeaveExitFrame(frame_type);
  __ ret(0);

  // Handling of failure.
  __ bind(&failure_returned);

  Label retry;
  // If the returned exception is RETRY_AFTER_GC continue at retry label
  ASSERT(Failure::RETRY_AFTER_GC == 0);
  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
  __ j(zero, &retry, taken);

  Label continue_exception;
  // If the returned failure is EXCEPTION then promote Top::pending_exception().
  __ cmp(eax, reinterpret_cast<int32_t>(Failure::Exception()));
  __ j(not_equal, &continue_exception);

  // Retrieve the pending exception and clear the variable (set it to
  // the hole value).
  ExternalReference pending_exception_address(Top::k_pending_exception_address);
  __ mov(eax, Operand::StaticVariable(pending_exception_address));
  __ mov(edx,
         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
  __ mov(Operand::StaticVariable(pending_exception_address), edx);

  __ bind(&continue_exception);
  // Special handling of out of memory exception.
  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
  __ j(equal, throw_out_of_memory_exception);

  // Handle normal exception.
  __ jmp(throw_normal_exception);

  // Retry: fall through to the code emitted after this call.
  __ bind(&retry);
}
6871 | |
6872 | |
void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
  // Propagates an out-of-memory condition: unwinds the handler chain all
  // the way to the ENTRY handler (a JS entry frame cannot catch), records
  // the OOM failure as the pending exception and returns into that handler.

  // Fetch top stack handler.
  ExternalReference handler_address(Top::k_handler_address);
  __ mov(edx, Operand::StaticVariable(handler_address));

  // Unwind the handlers until the ENTRY handler is found.
  Label loop, done;
  __ bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kAddressDisplacement +
                           StackHandlerConstants::kStateOffset;
  __ cmp(Operand(edx, kStateOffset), Immediate(StackHandler::ENTRY));
  __ j(equal, &done);
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kAddressDisplacement +
                          StackHandlerConstants::kNextOffset;
  __ mov(edx, Operand(edx, kNextOffset));
  __ jmp(&loop);
  __ bind(&done);

  // Set the top handler address to next handler past the current ENTRY handler.
  __ mov(eax, Operand(edx, kNextOffset));
  __ mov(Operand::StaticVariable(handler_address), eax);

  // Set external caught exception to false.
  __ mov(eax, false);
  ExternalReference external_caught(Top::k_external_caught_exception_address);
  __ mov(Operand::StaticVariable(external_caught), eax);

  // Set pending exception and eax to out of memory exception.
  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
  ExternalReference pending_exception(Top::k_pending_exception_address);
  __ mov(Operand::StaticVariable(pending_exception), eax);

  // Restore the stack to the address of the ENTRY handler
  __ mov(esp, Operand(edx));

  // Clear the context pointer;
  __ xor_(esi, Operand(esi));

  // Restore registers from handler.
  __ pop(edi);  // PP
  __ pop(ebp);  // FP
  __ pop(edx);  // Code
  __ pop(edx);  // State

  // Return into the ENTRY handler; its address is next on the stack.
  __ ret(0);
}
6921 | |
6922 | |
// Generates the C entry stub body: enters an exit frame, then emits up
// to three attempts at the C call (no GC / GC / full GC with forced
// old-space allocation), followed by the exception-throwing tails that
// the attempts branch to on non-retryable failures.
void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
  // eax: number of arguments including receiver
  // ebx: pointer to C function (C callee-saved)
  // ebp: frame pointer (restored after C call)
  // esp: stack pointer (restored after C call)
  // esi: current context (C callee-saved)
  // edi: caller's parameter pointer pp (C callee-saved)

  // NOTE: Invocations of builtins may return failure objects
  // instead of a proper result. The builtin entry handles
  // this by performing a garbage collection and retrying the
  // builtin once.

  StackFrame::Type frame_type = is_debug_break ?
      StackFrame::EXIT_DEBUG :
      StackFrame::EXIT;

  // Enter the exit frame that transitions from JavaScript to C++.
  __ EnterExitFrame(frame_type);

  // eax: result parameter for PerformGC, if any (setup below)
  // ebx: pointer to builtin function (C callee-saved)
  // ebp: frame pointer (restored after C call)
  // esp: stack pointer (restored after C call)
  // edi: number of arguments including receiver (C callee-saved)
  // esi: argv pointer (C callee-saved)

  Label throw_out_of_memory_exception;
  Label throw_normal_exception;

  // Call into the runtime system. Collect garbage before the call if
  // running with --gc-greedy set.
  if (FLAG_gc_greedy) {
    Failure* failure = Failure::RetryAfterGC(0);
    __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
  }
  GenerateCore(masm, &throw_normal_exception,
               &throw_out_of_memory_exception,
               frame_type,
               FLAG_gc_greedy,
               false);

  // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_out_of_memory_exception,
               frame_type,
               true,
               false);

  // Do full GC and retry runtime call one final time.
  Failure* failure = Failure::InternalError();
  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_out_of_memory_exception,
               frame_type,
               true,
               true);

  __ bind(&throw_out_of_memory_exception);
  GenerateThrowOutOfMemory(masm);
  // control flow for generated will not return.

  __ bind(&throw_normal_exception);
  GenerateThrowTOS(masm);
}
6990 | |
6991 | |
// Generates the JS entry stub: the C++ -> JavaScript transition.  Builds
// an ENTRY (or ENTRY_CONSTRUCT) frame, installs a JS_ENTRY try handler,
// invokes the JS entry trampoline, and tears everything down again,
// returning either the call result or a Failure::Exception sentinel in eax.
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  Label invoke, exit;

  // Setup frame.
  __ push(ebp);
  __ mov(ebp, Operand(esp));

  // Save callee-saved registers (C calling conventions).
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
  // Push something that is not an arguments adaptor.
  __ push(Immediate(~ArgumentsAdaptorFrame::SENTINEL));
  __ push(Immediate(Smi::FromInt(marker)));  // @ function offset
  __ push(edi);
  __ push(esi);
  __ push(ebx);

  // Save copies of the top frame descriptor on the stack.
  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
  __ push(Operand::StaticVariable(c_entry_fp));

  // Call a faked try-block that does the invoke.  The call pushes the
  // address of the catch code below as the "handler" return address.
  __ call(&invoke);

  // Caught exception: Store result (exception) in the pending
  // exception field in the JSEnv and return a failure sentinel.
  ExternalReference pending_exception(Top::k_pending_exception_address);
  __ mov(Operand::StaticVariable(pending_exception), eax);
  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
  __ jmp(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
  __ push(eax);  // flush TOS

  // Clear any pending exceptions (set the slot to the hole value).
  __ mov(edx,
         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
  __ mov(Operand::StaticVariable(pending_exception), edx);

  // Fake a receiver (NULL).
  __ push(Immediate(0));  // receiver

  // Invoke the function by calling through JS entry trampoline
  // builtin and pop the faked function when we return. Notice that we
  // cannot store a reference to the trampoline code directly in this
  // stub, because the builtin stubs may not have been generated yet.
  if (is_construct) {
    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
    __ mov(edx, Immediate(construct_entry));
  } else {
    ExternalReference entry(Builtins::JSEntryTrampoline);
    __ mov(edx, Immediate(entry));
  }
  __ mov(edx, Operand(edx, 0));  // deref address
  // Compute the entry point: skip the Code object header.
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ call(Operand(edx));

  // Unlink this frame from the handler chain.
  __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
  // Pop next_sp.
  __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));

  // Restore the top frame descriptor from the stack.
  __ bind(&exit);
  __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));

  // Restore callee-saved registers (C calling conventions).
  __ pop(ebx);
  __ pop(esi);
  __ pop(edi);
  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers

  // Restore frame pointer and return.
  __ pop(ebp);
  __ ret(0);
}
7069 | |
7070 | |
// Generates the instanceof stub.  Stack (above the return address):
// the object at [esp + 2 * kPointerSize] and the function at
// [esp + 1 * kPointerSize].  Returns 0 in eax when the object is an
// instance (its prototype chain contains the function's prototype) and
// Smi 1 when it is not; any non-fast case falls back to the
// INSTANCE_OF JavaScript builtin.
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Get the object - go slow case if it's a smi.
  Label slow;
  __ mov(eax, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, function
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &slow, not_taken);

  // Check that the left hand is a JS object.
  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));  // eax - object map
  __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset));  // ecx - type
  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
  __ j(less, &slow, not_taken);
  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
  __ j(greater, &slow, not_taken);

  // Get the prototype of the function.
  __ mov(edx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
  __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);

  // Check that the function prototype is a JS object.
  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
  __ j(less, &slow, not_taken);
  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
  __ j(greater, &slow, not_taken);

  // Register mapping: eax is object map and ebx is function prototype.
  __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));

  // Loop through the prototype chain looking for the function prototype.
  // The chain is terminated by the null value.
  Label loop, is_instance, is_not_instance;
  __ bind(&loop);
  __ cmp(ecx, Operand(ebx));
  __ j(equal, &is_instance);
  __ cmp(Operand(ecx), Immediate(Factory::null_value()));
  __ j(equal, &is_not_instance);
  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
  __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
  __ jmp(&loop);

  __ bind(&is_instance);
  __ Set(eax, Immediate(0));  // eax == 0 signals "is an instance".
  __ ret(2 * kPointerSize);   // Pop the two arguments on return.

  __ bind(&is_not_instance);
  __ Set(eax, Immediate(Smi::FromInt(1)));  // Non-zero: not an instance.
  __ ret(2 * kPointerSize);

  // Slow-case: Go through the JavaScript implementation.
  __ bind(&slow);
  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
7124 | |
7125 | |
7126 #undef __ | |
7127 | |
7128 } } // namespace v8::internal | |
OLD | NEW |