OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 121 matching lines...) |
132 owner_->set_state(this); | 132 owner_->set_state(this); |
133 } | 133 } |
134 | 134 |
135 | 135 |
136 CodeGenState::~CodeGenState() { | 136 CodeGenState::~CodeGenState() { |
137 ASSERT(owner_->state() == this); | 137 ASSERT(owner_->state() == this); |
138 owner_->set_state(previous_); | 138 owner_->set_state(previous_); |
139 } | 139 } |
140 | 140 |
141 | 141 |
142 // ------------------------------------------------------------------------- | |
143 // Deferred code objects | |
144 // | |
145 // These subclasses of DeferredCode add pieces of code to the end of generated | |
146 // code. They are branched to from the generated code, and | |
147 // keep some slower code out of the main body of the generated code. | |
148 // Many of them call a code stub or a runtime function. | |
149 | |
150 class DeferredInlineSmiAdd: public DeferredCode { | |
151 public: | |
152 DeferredInlineSmiAdd(Register dst, | |
153 Smi* value, | |
154 OverwriteMode overwrite_mode) | |
155 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | |
156 set_comment("[ DeferredInlineSmiAdd"); | |
157 } | |
158 | |
159 virtual void Generate(); | |
160 | |
161 private: | |
162 Register dst_; | |
163 Smi* value_; | |
164 OverwriteMode overwrite_mode_; | |
165 }; | |
166 | |
167 | |
168 // The result of value + src is in dst. It either overflowed or was not | |
169 // smi tagged. Undo the speculative addition and call the appropriate | |
170 // specialized stub for add. The result is left in dst. | |
171 class DeferredInlineSmiAddReversed: public DeferredCode { | |
172 public: | |
173 DeferredInlineSmiAddReversed(Register dst, | |
174 Smi* value, | |
175 OverwriteMode overwrite_mode) | |
176 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | |
177 set_comment("[ DeferredInlineSmiAddReversed"); | |
178 } | |
179 | |
180 virtual void Generate(); | |
181 | |
182 private: | |
183 Register dst_; | |
184 Smi* value_; | |
185 OverwriteMode overwrite_mode_; | |
186 }; | |
187 | |
188 | |
189 class DeferredInlineSmiSub: public DeferredCode { | |
190 public: | |
191 DeferredInlineSmiSub(Register dst, | |
192 Smi* value, | |
193 OverwriteMode overwrite_mode) | |
194 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | |
195 set_comment("[ DeferredInlineSmiSub"); | |
196 } | |
197 | |
198 virtual void Generate(); | |
199 | |
200 private: | |
201 Register dst_; | |
202 Smi* value_; | |
203 OverwriteMode overwrite_mode_; | |
204 }; | |
205 | |
206 | |
207 // Call the appropriate binary operation stub to compute src op value | |
208 // and leave the result in dst. | |
209 class DeferredInlineSmiOperation: public DeferredCode { | |
210 public: | |
211 DeferredInlineSmiOperation(Token::Value op, | |
212 Register dst, | |
213 Register src, | |
214 Smi* value, | |
215 OverwriteMode overwrite_mode) | |
216 : op_(op), | |
217 dst_(dst), | |
218 src_(src), | |
219 value_(value), | |
220 overwrite_mode_(overwrite_mode) { | |
221 set_comment("[ DeferredInlineSmiOperation"); | |
222 } | |
223 | |
224 virtual void Generate(); | |
225 | |
226 private: | |
227 Token::Value op_; | |
228 Register dst_; | |
229 Register src_; | |
230 Smi* value_; | |
231 OverwriteMode overwrite_mode_; | |
232 }; | |
233 | |
234 | |
235 // Call the appropriate binary operation stub to compute value op src | |
236 // and leave the result in dst. | |
237 class DeferredInlineSmiOperationReversed: public DeferredCode { | |
238 public: | |
239 DeferredInlineSmiOperationReversed(Token::Value op, | |
240 Register dst, | |
241 Smi* value, | |
242 Register src, | |
243 OverwriteMode overwrite_mode) | |
244 : op_(op), | |
245 dst_(dst), | |
246 value_(value), | |
247 src_(src), | |
248 overwrite_mode_(overwrite_mode) { | |
249 set_comment("[ DeferredInlineSmiOperationReversed"); | |
250 } | |
251 | |
252 virtual void Generate(); | |
253 | |
254 private: | |
255 Token::Value op_; | |
256 Register dst_; | |
257 Smi* value_; | |
258 Register src_; | |
259 OverwriteMode overwrite_mode_; | |
260 }; | |
261 | |
262 | |
263 class FloatingPointHelper : public AllStatic { | |
264 public: | |
265 // Load the operands from rdx and rax into xmm0 and xmm1, as doubles. | |
266 // If the operands are not both numbers, jump to not_numbers. | |
267 // Leaves rdx and rax unchanged. SmiOperands assumes both are smis. | |
268 // NumberOperands assumes both are smis or heap numbers. | |
269 static void LoadSSE2SmiOperands(MacroAssembler* masm); | |
270 static void LoadSSE2NumberOperands(MacroAssembler* masm); | |
271 static void LoadSSE2UnknownOperands(MacroAssembler* masm, | |
272 Label* not_numbers); | |
273 | |
274 // Takes the operands in rdx and rax and loads them as integers in rax | |
275 // and rcx. | |
276 static void LoadAsIntegers(MacroAssembler* masm, | |
277 Label* operand_conversion_failure, | |
278 Register heap_number_map); | |
279 // As above, but we know the operands to be numbers. In that case, | |
280 // conversion can't fail. | |
281 static void LoadNumbersAsIntegers(MacroAssembler* masm); | |
282 }; | |
283 | |
284 | |
285 // ----------------------------------------------------------------------------- | 142 // ----------------------------------------------------------------------------- |
286 // CodeGenerator implementation. | 143 // CodeGenerator implementation. |
287 | 144 |
288 CodeGenerator::CodeGenerator(MacroAssembler* masm) | 145 CodeGenerator::CodeGenerator(MacroAssembler* masm) |
289 : deferred_(8), | 146 : deferred_(8), |
290 masm_(masm), | 147 masm_(masm), |
291 info_(NULL), | 148 info_(NULL), |
292 frame_(NULL), | 149 frame_(NULL), |
293 allocator_(NULL), | 150 allocator_(NULL), |
294 state_(NULL), | 151 state_(NULL), |
295 loop_nesting_(0), | 152 loop_nesting_(0), |
296 function_return_is_shadowed_(false), | 153 function_return_is_shadowed_(false), |
297 in_spilled_code_(false) { | 154 in_spilled_code_(false) { |
298 } | 155 } |
299 | 156 |
300 | 157 |
301 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { | |
302 // Call the runtime to declare the globals. The inevitable call | |
303 // will sync frame elements to memory anyway, so we do it eagerly to | |
304 // allow us to push the arguments directly into place. | |
305 frame_->SyncRange(0, frame_->element_count() - 1); | |
306 | |
307 __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT); | |
308 frame_->EmitPush(rsi); // The context is the first argument. | |
309 frame_->EmitPush(kScratchRegister); | |
310 frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0)); | |
311 Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3); | |
312 // Return value is ignored. | |
313 } | |
314 | |
315 | |
316 void CodeGenerator::Generate(CompilationInfo* info) { | 158 void CodeGenerator::Generate(CompilationInfo* info) { |
317 // Record the position for debugging purposes. | 159 // Record the position for debugging purposes. |
318 CodeForFunctionPosition(info->function()); | 160 CodeForFunctionPosition(info->function()); |
319 Comment cmnt(masm_, "[ function compiled by virtual frame code generator"); | 161 Comment cmnt(masm_, "[ function compiled by virtual frame code generator"); |
320 | 162 |
321 // Initialize state. | 163 // Initialize state. |
322 info_ = info; | 164 info_ = info; |
323 ASSERT(allocator_ == NULL); | 165 ASSERT(allocator_ == NULL); |
324 RegisterAllocator register_allocator(this); | 166 RegisterAllocator register_allocator(this); |
325 allocator_ = ®ister_allocator; | 167 allocator_ = ®ister_allocator; |
(...skipping 210 matching lines...) |
536 JumpTarget::set_compiling_deferred_code(true); | 378 JumpTarget::set_compiling_deferred_code(true); |
537 ProcessDeferred(); | 379 ProcessDeferred(); |
538 JumpTarget::set_compiling_deferred_code(false); | 380 JumpTarget::set_compiling_deferred_code(false); |
539 } | 381 } |
540 | 382 |
541 // There is no need to delete the register allocator, it is a | 383 // There is no need to delete the register allocator, it is a |
542 // stack-allocated local. | 384 // stack-allocated local. |
543 allocator_ = NULL; | 385 allocator_ = NULL; |
544 } | 386 } |
545 | 387 |
546 void CodeGenerator::GenerateReturnSequence(Result* return_value) { | 388 |
547 // The return value is a live (but not currently reference counted) | 389 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { |
548 // reference to rax. This is safe because the current frame does not | 390 // Currently, this assertion will fail if we try to assign to |
549 // contain a reference to rax (it is prepared for the return by spilling | 391 // a constant variable that is constant because it is read-only |
550 // all registers). | 392 // (such as the variable referring to a named function expression). |
551 if (FLAG_trace) { | 393 // We need to implement assignments to read-only variables. |
552 frame_->Push(return_value); | 394 // Ideally, we should do this during AST generation (by converting |
553 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1); | 395 // such assignments into expression statements); however, in general |
554 } | 396 // we may not be able to make the decision until past AST generation, |
555 return_value->ToRegister(rax); | 397 // that is when the entire program is known. |
556 | 398 ASSERT(slot != NULL); |
557 // Add a label for checking the size of the code used for returning. | 399 int index = slot->index(); |
| 400 switch (slot->type()) { |
| 401 case Slot::PARAMETER: |
| 402 return frame_->ParameterAt(index); |
| 403 |
| 404 case Slot::LOCAL: |
| 405 return frame_->LocalAt(index); |
| 406 |
| 407 case Slot::CONTEXT: { |
| 408 // Follow the context chain if necessary. |
| 409 ASSERT(!tmp.is(rsi)); // do not overwrite context register |
| 410 Register context = rsi; |
| 411 int chain_length = scope()->ContextChainLength(slot->var()->scope()); |
| 412 for (int i = 0; i < chain_length; i++) { |
| 413 // Load the closure. |
| 414 // (All contexts, even 'with' contexts, have a closure, |
| 415 // and it is the same for all contexts inside a function. |
| 416 // There is no need to go to the function context first.) |
| 417 __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); |
| 418 // Load the function context (which is the incoming, outer context). |
| 419 __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset)); |
| 420 context = tmp; |
| 421 } |
| 422 // We may have a 'with' context now. Get the function context. |
| 423 // (In fact this mov may never be needed, since the scope analysis |
| 424 // may not permit a direct context access in this case and thus we are |
| 425 // always at a function context. However it is safe to dereference be- |
| 426 // cause the function context of a function context is itself. Before |
| 427 // deleting this mov we should try to create a counter-example first, |
| 428 // though...) |
| 429 __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); |
| 430 return ContextOperand(tmp, index); |
| 431 } |
| 432 |
| 433 default: |
| 434 UNREACHABLE(); |
| 435 return Operand(rsp, 0); |
| 436 } |
| 437 } |
| 438 |
| 439 |
| 440 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot, |
| 441 Result tmp, |
| 442 JumpTarget* slow) { |
| 443 ASSERT(slot->type() == Slot::CONTEXT); |
| 444 ASSERT(tmp.is_register()); |
| 445 Register context = rsi; |
| 446 |
| 447 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { |
| 448 if (s->num_heap_slots() > 0) { |
| 449 if (s->calls_eval()) { |
| 450 // Check that extension is NULL. |
| 451 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), |
| 452 Immediate(0)); |
| 453 slow->Branch(not_equal, not_taken); |
| 454 } |
| 455 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); |
| 456 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); |
| 457 context = tmp.reg(); |
| 458 } |
| 459 } |
| 460 // Check that last extension is NULL. |
| 461 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); |
| 462 slow->Branch(not_equal, not_taken); |
| 463 __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX)); |
| 464 return ContextOperand(tmp.reg(), slot->index()); |
| 465 } |
| 466 |
| 467 |
| 468 // Emit code to load the value of an expression to the top of the |
| 469 // frame. If the expression is boolean-valued it may be compiled (or |
| 470 // partially compiled) into control flow to the control destination. |
| 471 // If force_control is true, control flow is forced. |
| 472 void CodeGenerator::LoadCondition(Expression* x, |
| 473 ControlDestination* dest, |
| 474 bool force_control) { |
| 475 ASSERT(!in_spilled_code()); |
| 476 int original_height = frame_->height(); |
| 477 |
| 478 { CodeGenState new_state(this, dest); |
| 479 Visit(x); |
| 480 |
| 481 // If we hit a stack overflow, we may not have actually visited |
| 482 // the expression. In that case, we ensure that we have a |
| 483 // valid-looking frame state because we will continue to generate |
| 484 // code as we unwind the C++ stack. |
| 485 // |
| 486 // It's possible to have both a stack overflow and a valid frame |
| 487 // state (eg, a subexpression overflowed, visiting it returned |
| 488 // with a dummied frame state, and visiting this expression |
| 489 // returned with a normal-looking state). |
| 490 if (HasStackOverflow() && |
| 491 !dest->is_used() && |
| 492 frame_->height() == original_height) { |
| 493 dest->Goto(true); |
| 494 } |
| 495 } |
| 496 |
| 497 if (force_control && !dest->is_used()) { |
| 498 // Convert the TOS value into flow to the control destination. |
| 499 // TODO(X64): Make control flow to control destinations work. |
| 500 ToBoolean(dest); |
| 501 } |
| 502 |
| 503 ASSERT(!(force_control && !dest->is_used())); |
| 504 ASSERT(dest->is_used() || frame_->height() == original_height + 1); |
| 505 } |
| 506 |
| 507 |
| 508 void CodeGenerator::LoadAndSpill(Expression* expression) { |
| 509 // TODO(x64): No architecture specific code. Move to shared location. |
| 510 ASSERT(in_spilled_code()); |
| 511 set_in_spilled_code(false); |
| 512 Load(expression); |
| 513 frame_->SpillAll(); |
| 514 set_in_spilled_code(true); |
| 515 } |
| 516 |
| 517 |
| 518 void CodeGenerator::Load(Expression* expr) { |
558 #ifdef DEBUG | 519 #ifdef DEBUG |
559 Label check_exit_codesize; | 520 int original_height = frame_->height(); |
560 masm_->bind(&check_exit_codesize); | |
561 #endif | 521 #endif |
562 | 522 ASSERT(!in_spilled_code()); |
563 // Leave the frame and return popping the arguments and the | 523 JumpTarget true_target; |
564 // receiver. | 524 JumpTarget false_target; |
565 frame_->Exit(); | 525 ControlDestination dest(&true_target, &false_target, true); |
566 masm_->ret((scope()->num_parameters() + 1) * kPointerSize); | 526 LoadCondition(expr, &dest, false); |
567 #ifdef ENABLE_DEBUGGER_SUPPORT | 527 |
568 // Add padding that will be overwritten by a debugger breakpoint. | 528 if (dest.false_was_fall_through()) { |
569 // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k" | 529 // The false target was just bound. |
570 // with length 7 (3 + 1 + 3). | 530 JumpTarget loaded; |
571 const int kPadding = Assembler::kJSReturnSequenceLength - 7; | 531 frame_->Push(Factory::false_value()); |
572 for (int i = 0; i < kPadding; ++i) { | 532 // There may be dangling jumps to the true target. |
573 masm_->int3(); | 533 if (true_target.is_linked()) { |
574 } | 534 loaded.Jump(); |
575 // Check that the size of the code used for returning matches what is | 535 true_target.Bind(); |
576 // expected by the debugger. | 536 frame_->Push(Factory::true_value()); |
577 ASSERT_EQ(Assembler::kJSReturnSequenceLength, | 537 loaded.Bind(); |
578 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); | 538 } |
579 #endif | 539 |
580 DeleteFrame(); | 540 } else if (dest.is_used()) { |
581 } | 541 // There is true, and possibly false, control flow (with true as |
582 | 542 // the fall through). |
583 | 543 JumpTarget loaded; |
584 #ifdef DEBUG | 544 frame_->Push(Factory::true_value()); |
585 bool CodeGenerator::HasValidEntryRegisters() { | 545 if (false_target.is_linked()) { |
586 return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0)) | 546 loaded.Jump(); |
587 && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0)) | 547 false_target.Bind(); |
588 && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0)) | 548 frame_->Push(Factory::false_value()); |
589 && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0)) | 549 loaded.Bind(); |
590 && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0)) | 550 } |
591 && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0)) | 551 |
592 && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0)) | 552 } else { |
593 && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) | 553 // We have a valid value on top of the frame, but we still may |
594 && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) | 554 // have dangling jumps to the true and false targets from nested |
595 && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0)); | 555 // subexpressions (eg, the left subexpressions of the |
596 } | 556 // short-circuited boolean operators). |
597 #endif | 557 ASSERT(has_valid_frame()); |
598 | 558 if (true_target.is_linked() || false_target.is_linked()) { |
599 | 559 JumpTarget loaded; |
600 class DeferredReferenceGetKeyedValue: public DeferredCode { | 560 loaded.Jump(); // Don't lose the current TOS. |
| 561 if (true_target.is_linked()) { |
| 562 true_target.Bind(); |
| 563 frame_->Push(Factory::true_value()); |
| 564 if (false_target.is_linked()) { |
| 565 loaded.Jump(); |
| 566 } |
| 567 } |
| 568 if (false_target.is_linked()) { |
| 569 false_target.Bind(); |
| 570 frame_->Push(Factory::false_value()); |
| 571 } |
| 572 loaded.Bind(); |
| 573 } |
| 574 } |
| 575 |
| 576 ASSERT(has_valid_frame()); |
| 577 ASSERT(frame_->height() == original_height + 1); |
| 578 } |
| 579 |
| 580 |
| 581 void CodeGenerator::LoadGlobal() { |
| 582 if (in_spilled_code()) { |
| 583 frame_->EmitPush(GlobalObject()); |
| 584 } else { |
| 585 Result temp = allocator_->Allocate(); |
| 586 __ movq(temp.reg(), GlobalObject()); |
| 587 frame_->Push(&temp); |
| 588 } |
| 589 } |
| 590 |
| 591 |
| 592 void CodeGenerator::LoadGlobalReceiver() { |
| 593 Result temp = allocator_->Allocate(); |
| 594 Register reg = temp.reg(); |
| 595 __ movq(reg, GlobalObject()); |
| 596 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset)); |
| 597 frame_->Push(&temp); |
| 598 } |
| 599 |
| 600 |
| 601 void CodeGenerator::LoadTypeofExpression(Expression* expr) { |
| 602 // Special handling of identifiers as subexpressions of typeof. |
| 603 Variable* variable = expr->AsVariableProxy()->AsVariable(); |
| 604 if (variable != NULL && !variable->is_this() && variable->is_global()) { |
| 605 // For a global variable we build the property reference |
| 606 // <global>.<variable> and perform a (regular non-contextual) property |
| 607 // load to make sure we do not get reference errors. |
| 608 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); |
| 609 Literal key(variable->name()); |
| 610 Property property(&global, &key, RelocInfo::kNoPosition); |
| 611 Reference ref(this, &property); |
| 612 ref.GetValue(); |
| 613 } else if (variable != NULL && variable->slot() != NULL) { |
| 614 // For a variable that rewrites to a slot, we signal it is the immediate |
| 615 // subexpression of a typeof. |
| 616 LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF); |
| 617 } else { |
| 618 // Anything else can be handled normally. |
| 619 Load(expr); |
| 620 } |
| 621 } |
| 622 |
| 623 |
| 624 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() { |
| 625 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION; |
| 626 ASSERT(scope()->arguments_shadow() != NULL); |
| 627 // We don't want to do lazy arguments allocation for functions that |
| 628 // have heap-allocated contexts, because it interferes with the |
| 629 // uninitialized const tracking in the context objects. |
| 630 return (scope()->num_heap_slots() > 0) |
| 631 ? EAGER_ARGUMENTS_ALLOCATION |
| 632 : LAZY_ARGUMENTS_ALLOCATION; |
| 633 } |
| 634 |
| 635 |
| 636 Result CodeGenerator::StoreArgumentsObject(bool initial) { |
| 637 ArgumentsAllocationMode mode = ArgumentsMode(); |
| 638 ASSERT(mode != NO_ARGUMENTS_ALLOCATION); |
| 639 |
| 640 Comment cmnt(masm_, "[ store arguments object"); |
| 641 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) { |
| 642 // When using lazy arguments allocation, we store the hole value |
| 643 // as a sentinel indicating that the arguments object hasn't been |
| 644 // allocated yet. |
| 645 frame_->Push(Factory::the_hole_value()); |
| 646 } else { |
| 647 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); |
| 648 frame_->PushFunction(); |
| 649 frame_->PushReceiverSlotAddress(); |
| 650 frame_->Push(Smi::FromInt(scope()->num_parameters())); |
| 651 Result result = frame_->CallStub(&stub, 3); |
| 652 frame_->Push(&result); |
| 653 } |
| 654 |
| 655 |
| 656 Variable* arguments = scope()->arguments()->var(); |
| 657 Variable* shadow = scope()->arguments_shadow()->var(); |
| 658 ASSERT(arguments != NULL && arguments->slot() != NULL); |
| 659 ASSERT(shadow != NULL && shadow->slot() != NULL); |
| 660 JumpTarget done; |
| 661 bool skip_arguments = false; |
| 662 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { |
| 663 // We have to skip storing into the arguments slot if it has |
| 664 // already been written to. This can happen if the function |
| 665 // has a local variable named 'arguments'. |
| 666 LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); |
| 667 Result probe = frame_->Pop(); |
| 668 if (probe.is_constant()) { |
| 669 // We have to skip updating the arguments object if it has been |
| 670 // assigned a proper value. |
| 671 skip_arguments = !probe.handle()->IsTheHole(); |
| 672 } else { |
| 673 __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex); |
| 674 probe.Unuse(); |
| 675 done.Branch(not_equal); |
| 676 } |
| 677 } |
| 678 if (!skip_arguments) { |
| 679 StoreToSlot(arguments->slot(), NOT_CONST_INIT); |
| 680 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); |
| 681 } |
| 682 StoreToSlot(shadow->slot(), NOT_CONST_INIT); |
| 683 return frame_->Pop(); |
| 684 } |
| 685 |
| 686 // ----------------------------------------------------------------------------- |
| 687 // CodeGenerator implementation of variables, lookups, and stores. |
| 688 |
| 692 Reference::Reference(CodeGenerator* cgen, |
| 693 Expression* expression, |
| 694 bool persist_after_get) |
| 695 : cgen_(cgen), |
| 696 expression_(expression), |
| 697 type_(ILLEGAL), |
| 698 persist_after_get_(persist_after_get) { |
| 699 cgen->LoadReference(this); |
| 700 } |
| 701 |
| 702 |
| 703 Reference::~Reference() { |
| 704 ASSERT(is_unloaded() || is_illegal()); |
| 705 } |
| 706 |
| 707 |
| 708 void CodeGenerator::LoadReference(Reference* ref) { |
| 709 // References are loaded from both spilled and unspilled code. Set the |
| 710 // state to unspilled to allow that (and explicitly spill after |
| 711 // construction at the construction sites). |
| 712 bool was_in_spilled_code = in_spilled_code_; |
| 713 in_spilled_code_ = false; |
| 714 |
| 715 Comment cmnt(masm_, "[ LoadReference"); |
| 716 Expression* e = ref->expression(); |
| 717 Property* property = e->AsProperty(); |
| 718 Variable* var = e->AsVariableProxy()->AsVariable(); |
| 719 |
| 720 if (property != NULL) { |
| 721 // The expression is either a property or a variable proxy that rewrites |
| 722 // to a property. |
| 723 Load(property->obj()); |
| 724 if (property->key()->IsPropertyName()) { |
| 725 ref->set_type(Reference::NAMED); |
| 726 } else { |
| 727 Load(property->key()); |
| 728 ref->set_type(Reference::KEYED); |
| 729 } |
| 730 } else if (var != NULL) { |
| 731 // The expression is a variable proxy that does not rewrite to a |
| 732 // property. Global variables are treated as named property references. |
| 733 if (var->is_global()) { |
| 734 // If rax is free, the register allocator prefers it. Thus the code |
| 735 // generator will load the global object into rax, which is where |
| 736 // LoadIC wants it. Most uses of Reference call LoadIC directly |
| 737 // after the reference is created. |
| 738 frame_->Spill(rax); |
| 739 LoadGlobal(); |
| 740 ref->set_type(Reference::NAMED); |
| 741 } else { |
| 742 ASSERT(var->slot() != NULL); |
| 743 ref->set_type(Reference::SLOT); |
| 744 } |
| 745 } else { |
| 746 // Anything else is a runtime error. |
| 747 Load(e); |
| 748 frame_->CallRuntime(Runtime::kThrowReferenceError, 1); |
| 749 } |
| 750 |
| 751 in_spilled_code_ = was_in_spilled_code; |
| 752 } |
| 753 |
| 754 |
| 755 void CodeGenerator::UnloadReference(Reference* ref) { |
| 756 // Pop a reference from the stack while preserving TOS. |
| 757 Comment cmnt(masm_, "[ UnloadReference"); |
| 758 frame_->Nip(ref->size()); |
| 759 ref->set_unloaded(); |
| 760 } |
| 761 |
| 762 |
| 763 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and |
| 764 // convert it to a boolean in the condition code register or jump to |
| 765 // 'false_target'/'true_target' as appropriate. |
| 766 void CodeGenerator::ToBoolean(ControlDestination* dest) { |
| 767 Comment cmnt(masm_, "[ ToBoolean"); |
| 768 |
| 769 // The value to convert should be popped from the frame. |
| 770 Result value = frame_->Pop(); |
| 771 value.ToRegister(); |
| 772 |
| 773 if (value.is_number()) { |
| 774 // Fast case if TypeInfo indicates only numbers. |
| 775 if (FLAG_debug_code) { |
| 776 __ AbortIfNotNumber(value.reg()); |
| 777 } |
| 778 // Smi => false iff zero. |
| 779 __ SmiCompare(value.reg(), Smi::FromInt(0)); |
| 780 if (value.is_smi()) { |
| 781 value.Unuse(); |
| 782 dest->Split(not_zero); |
| 783 } else { |
| 784 dest->false_target()->Branch(equal); |
| 785 Condition is_smi = masm_->CheckSmi(value.reg()); |
| 786 dest->true_target()->Branch(is_smi); |
| 787 __ xorpd(xmm0, xmm0); |
| 788 __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset)); |
| 789 value.Unuse(); |
| 790 dest->Split(not_zero); |
| 791 } |
| 792 } else { |
| 793 // Fast case checks. |
| 794 // 'false' => false. |
| 795 __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex); |
| 796 dest->false_target()->Branch(equal); |
| 797 |
| 798 // 'true' => true. |
| 799 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex); |
| 800 dest->true_target()->Branch(equal); |
| 801 |
| 802 // 'undefined' => false. |
| 803 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex); |
| 804 dest->false_target()->Branch(equal); |
| 805 |
| 806 // Smi => false iff zero. |
| 807 __ SmiCompare(value.reg(), Smi::FromInt(0)); |
| 808 dest->false_target()->Branch(equal); |
| 809 Condition is_smi = masm_->CheckSmi(value.reg()); |
| 810 dest->true_target()->Branch(is_smi); |
| 811 |
| 812 // Call the stub for all other cases. |
| 813 frame_->Push(&value); // Undo the Pop() from above. |
| 814 ToBooleanStub stub; |
| 815 Result temp = frame_->CallStub(&stub, 1); |
| 816 // Convert the result to a condition code. |
| 817 __ testq(temp.reg(), temp.reg()); |
| 818 temp.Unuse(); |
| 819 dest->Split(not_equal); |
| 820 } |
| 821 } |
| 822 |
| 823 |
| 824 class FloatingPointHelper : public AllStatic { |
601 public: | 825 public: |
602 explicit DeferredReferenceGetKeyedValue(Register dst, | 826 // Load the operands from rdx and rax into xmm0 and xmm1, as doubles. |
603 Register receiver, | 827 // If the operands are not both numbers, jump to not_numbers. |
604 Register key) | 828 // Leaves rdx and rax unchanged. SmiOperands assumes both are smis. |
605 : dst_(dst), receiver_(receiver), key_(key) { | 829 // NumberOperands assumes both are smis or heap numbers. |
606 set_comment("[ DeferredReferenceGetKeyedValue"); | 830 static void LoadSSE2SmiOperands(MacroAssembler* masm); |
| 831 static void LoadSSE2NumberOperands(MacroAssembler* masm); |
| 832 static void LoadSSE2UnknownOperands(MacroAssembler* masm, |
| 833 Label* not_numbers); |
| 834 |
| 835 // Takes the operands in rdx and rax and loads them as integers in rax |
| 836 // and rcx. |
| 837 static void LoadAsIntegers(MacroAssembler* masm, |
| 838 Label* operand_conversion_failure, |
| 839 Register heap_number_map); |
| 840 // As above, but we know the operands to be numbers. In that case, |
| 841 // conversion can't fail. |
| 842 static void LoadNumbersAsIntegers(MacroAssembler* masm); |
| 843 }; |
| 844 |
| 845 |
| 846 const char* GenericBinaryOpStub::GetName() { |
| 847 if (name_ != NULL) return name_; |
| 848 const int len = 100; |
| 849 name_ = Bootstrapper::AllocateAutoDeletedArray(len); |
| 850 if (name_ == NULL) return "OOM"; |
| 851 const char* op_name = Token::Name(op_); |
| 852 const char* overwrite_name; |
| 853 switch (mode_) { |
| 854 case NO_OVERWRITE: overwrite_name = "Alloc"; break; |
| 855 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |
| 856 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |
| 857 default: overwrite_name = "UnknownOverwrite"; break; |
| 858 } |
| 859 |
| 860 OS::SNPrintF(Vector<char>(name_, len), |
| 861 "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", |
| 862 op_name, |
| 863 overwrite_name, |
| 864 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", |
| 865 args_in_registers_ ? "RegArgs" : "StackArgs", |
| 866 args_reversed_ ? "_R" : "", |
| 867 static_operands_type_.ToString(), |
| 868 BinaryOpIC::GetName(runtime_operands_type_)); |
| 869 return name_; |
| 870 } |
| 871 |
| 872 |
| 873 // Call the specialized stub for a binary operation. |
| 874 class DeferredInlineBinaryOperation: public DeferredCode { |
| 875 public: |
| 876 DeferredInlineBinaryOperation(Token::Value op, |
| 877 Register dst, |
| 878 Register left, |
| 879 Register right, |
| 880 OverwriteMode mode) |
| 881 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) { |
| 882 set_comment("[ DeferredInlineBinaryOperation"); |
607 } | 883 } |
608 | 884 |
609 virtual void Generate(); | 885 virtual void Generate(); |
610 | 886 |
611 Label* patch_site() { return &patch_site_; } | |
612 | |
613 private: | 887 private: |
614 Label patch_site_; | 888 Token::Value op_; |
615 Register dst_; | 889 Register dst_; |
616 Register receiver_; | 890 Register left_; |
617 Register key_; | 891 Register right_; |
| 892 OverwriteMode mode_; |
618 }; | 893 }; |
619 | 894 |
620 | 895 |
621 void DeferredReferenceGetKeyedValue::Generate() { | 896 void DeferredInlineBinaryOperation::Generate() { |
622 if (receiver_.is(rdx)) { | 897 Label done; |
623 if (!key_.is(rax)) { | 898 if ((op_ == Token::ADD) |
624 __ movq(rax, key_); | 899 || (op_ == Token::SUB) |
625 } // else do nothing. | 900 || (op_ == Token::MUL) |
626 } else if (receiver_.is(rax)) { | 901 || (op_ == Token::DIV)) { |
627 if (key_.is(rdx)) { | 902 Label call_runtime; |
628 __ xchg(rax, rdx); | 903 Label left_smi, right_smi, load_right, do_op; |
629 } else if (key_.is(rax)) { | 904 __ JumpIfSmi(left_, &left_smi); |
630 __ movq(rdx, receiver_); | 905 __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset), |
| 906 Heap::kHeapNumberMapRootIndex); |
| 907 __ j(not_equal, &call_runtime); |
| 908 __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset)); |
| 909 if (mode_ == OVERWRITE_LEFT) { |
| 910 __ movq(dst_, left_); |
| 911 } |
| 912 __ jmp(&load_right); |
| 913 |
| 914 __ bind(&left_smi); |
| 915 __ SmiToInteger32(left_, left_); |
| 916 __ cvtlsi2sd(xmm0, left_); |
| 917 __ Integer32ToSmi(left_, left_); |
| 918 if (mode_ == OVERWRITE_LEFT) { |
| 919 Label alloc_failure; |
| 920 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); |
| 921 } |
| 922 |
| 923 __ bind(&load_right); |
| 924 __ JumpIfSmi(right_, &right_smi); |
| 925 __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset), |
| 926 Heap::kHeapNumberMapRootIndex); |
| 927 __ j(not_equal, &call_runtime); |
| 928 __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset)); |
| 929 if (mode_ == OVERWRITE_RIGHT) { |
| 930 __ movq(dst_, right_); |
| 931 } else if (mode_ == NO_OVERWRITE) { |
| 932 Label alloc_failure; |
| 933 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); |
| 934 } |
| 935 __ jmp(&do_op); |
| 936 |
| 937 __ bind(&right_smi); |
| 938 __ SmiToInteger32(right_, right_); |
| 939 __ cvtlsi2sd(xmm1, right_); |
| 940 __ Integer32ToSmi(right_, right_); |
| 941 if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) { |
| 942 Label alloc_failure; |
| 943 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); |
| 944 } |
| 945 |
| 946 __ bind(&do_op); |
| 947 switch (op_) { |
| 948 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 949 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 950 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 951 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 952 default: UNREACHABLE(); |
| 953 } |
| 954 __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0); |
| 955 __ jmp(&done); |
| 956 |
| 957 __ bind(&call_runtime); |
| 958 } |
| 959 GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB); |
| 960 stub.GenerateCall(masm_, left_, right_); |
| 961 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 962 __ bind(&done); |
| 963 } |
| 964 |
| 965 |
| 966 static TypeInfo CalculateTypeInfo(TypeInfo operands_type, |
| 967 Token::Value op, |
| 968 const Result& right, |
| 969 const Result& left) { |
| 970 // Set TypeInfo of result according to the operation performed. |
| 971 // We rely on the fact that smis have a 32 bit payload on x64. |
| 972 STATIC_ASSERT(kSmiValueSize == 32); |
| 973 switch (op) { |
| 974 case Token::COMMA: |
| 975 return right.type_info(); |
| 976 case Token::OR: |
| 977 case Token::AND: |
| 978 // Result type can be either of the two input types. |
| 979 return operands_type; |
| 980 case Token::BIT_OR: |
| 981 case Token::BIT_XOR: |
| 982 case Token::BIT_AND: |
| 983 // Result is always a smi. |
| 984 return TypeInfo::Smi(); |
| 985 case Token::SAR: |
| 986 case Token::SHL: |
| 987 // Result is always a smi. |
| 988 return TypeInfo::Smi(); |
| 989 case Token::SHR: |
| 990 // Result of x >>> y is always a smi if masked y >= 1, otherwise a number. |
| 991 return (right.is_constant() && right.handle()->IsSmi() |
| 992 && (Smi::cast(*right.handle())->value() & 0x1F) >= 1) |
| 993 ? TypeInfo::Smi() |
| 994 : TypeInfo::Number(); |
| 995 case Token::ADD: |
| 996 if (operands_type.IsNumber()) { |
| 997 return TypeInfo::Number(); |
| 998 } else if (left.type_info().IsString() || right.type_info().IsString()) { |
| 999 return TypeInfo::String(); |
| 1000 } else { |
| 1001 return TypeInfo::Unknown(); |
| 1002 } |
| 1003 case Token::SUB: |
| 1004 case Token::MUL: |
| 1005 case Token::DIV: |
| 1006 case Token::MOD: |
| 1007 // Result is always a number. |
| 1008 return TypeInfo::Number(); |
| 1009 default: |
| 1010 UNREACHABLE(); |
| 1011 } |
| 1012 UNREACHABLE(); |
| 1013 return TypeInfo::Unknown(); |
| 1014 } |
| 1015 |
| 1016 |
| 1017 void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, |
| 1018 OverwriteMode overwrite_mode) { |
| 1019 Comment cmnt(masm_, "[ BinaryOperation"); |
| 1020 Token::Value op = expr->op(); |
| 1021 Comment cmnt_token(masm_, Token::String(op)); |
| 1022 |
| 1023 if (op == Token::COMMA) { |
| 1024 // Simply discard left value. |
| 1025 frame_->Nip(1); |
| 1026 return; |
| 1027 } |
| 1028 |
| 1029 Result right = frame_->Pop(); |
| 1030 Result left = frame_->Pop(); |
| 1031 |
| 1032 if (op == Token::ADD) { |
| 1033 const bool left_is_string = left.type_info().IsString(); |
| 1034 const bool right_is_string = right.type_info().IsString(); |
| 1035 // Make sure constant strings have string type info. |
| 1036 ASSERT(!(left.is_constant() && left.handle()->IsString()) || |
| 1037 left_is_string); |
| 1038 ASSERT(!(right.is_constant() && right.handle()->IsString()) || |
| 1039 right_is_string); |
| 1040 if (left_is_string || right_is_string) { |
| 1041 frame_->Push(&left); |
| 1042 frame_->Push(&right); |
| 1043 Result answer; |
| 1044 if (left_is_string) { |
| 1045 if (right_is_string) { |
| 1046 StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
| 1047 answer = frame_->CallStub(&stub, 2); |
| 1048 } else { |
| 1049 answer = |
| 1050 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2); |
| 1051 } |
| 1052 } else if (right_is_string) { |
| 1053 answer = |
| 1054 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2); |
| 1055 } |
| 1056 answer.set_type_info(TypeInfo::String()); |
| 1057 frame_->Push(&answer); |
| 1058 return; |
| 1059 } |
| 1060 // Neither operand is known to be a string. |
| 1061 } |
| 1062 |
| 1063 bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi(); |
| 1064 bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi(); |
| 1065 bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi(); |
| 1066 bool right_is_non_smi_constant = |
| 1067 right.is_constant() && !right.handle()->IsSmi(); |
| 1068 |
| 1069 if (left_is_smi_constant && right_is_smi_constant) { |
| 1070 // Compute the constant result at compile time, and leave it on the frame. |
| 1071 int left_int = Smi::cast(*left.handle())->value(); |
| 1072 int right_int = Smi::cast(*right.handle())->value(); |
| 1073 if (FoldConstantSmis(op, left_int, right_int)) return; |
| 1074 } |
| 1075 |
| 1076 // Get number type of left and right sub-expressions. |
| 1077 TypeInfo operands_type = |
| 1078 TypeInfo::Combine(left.type_info(), right.type_info()); |
| 1079 |
| 1080 TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left); |
| 1081 |
| 1082 Result answer; |
| 1083 if (left_is_non_smi_constant || right_is_non_smi_constant) { |
| 1084 // Go straight to the slow case, with no smi code. |
| 1085 GenericBinaryOpStub stub(op, |
| 1086 overwrite_mode, |
| 1087 NO_SMI_CODE_IN_STUB, |
| 1088 operands_type); |
| 1089 answer = stub.GenerateCall(masm_, frame_, &left, &right); |
| 1090 } else if (right_is_smi_constant) { |
| 1091 answer = ConstantSmiBinaryOperation(expr, &left, right.handle(), |
| 1092 false, overwrite_mode); |
| 1093 } else if (left_is_smi_constant) { |
| 1094 answer = ConstantSmiBinaryOperation(expr, &right, left.handle(), |
| 1095 true, overwrite_mode); |
| 1096 } else { |
| 1097 // Set the flags based on the operation, type and loop nesting level. |
| 1098 // Bit operations always assume they likely operate on Smis. Still only |
| 1099 // generate the inline Smi check code if this operation is part of a loop. |
| 1100 // For all other operations only inline the Smi check code for likely smis |
| 1101 // if the operation is part of a loop. |
| 1102 if (loop_nesting() > 0 && |
| 1103 (Token::IsBitOp(op) || |
| 1104 operands_type.IsInteger32() || |
| 1105 expr->type()->IsLikelySmi())) { |
| 1106 answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode); |
631 } else { | 1107 } else { |
632 __ movq(rdx, receiver_); | 1108 GenericBinaryOpStub stub(op, |
633 __ movq(rax, key_); | 1109 overwrite_mode, |
634 } | 1110 NO_GENERIC_BINARY_FLAGS, |
635 } else if (key_.is(rax)) { | 1111 operands_type); |
636 __ movq(rdx, receiver_); | 1112 answer = stub.GenerateCall(masm_, frame_, &left, &right); |
| 1113 } |
| 1114 } |
| 1115 |
| 1116 answer.set_type_info(result_type); |
| 1117 frame_->Push(&answer); |
| 1118 } |
| 1119 |
| 1120 |
| 1121 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { |
| 1122 Object* answer_object = Heap::undefined_value(); |
| 1123 switch (op) { |
| 1124 case Token::ADD: |
| 1125 // Use intptr_t to detect overflow of 32-bit int. |
| 1126 if (Smi::IsValid(static_cast<intptr_t>(left) + right)) { |
| 1127 answer_object = Smi::FromInt(left + right); |
| 1128 } |
| 1129 break; |
| 1130 case Token::SUB: |
| 1131 // Use intptr_t to detect overflow of 32-bit int. |
| 1132 if (Smi::IsValid(static_cast<intptr_t>(left) - right)) { |
| 1133 answer_object = Smi::FromInt(left - right); |
| 1134 } |
| 1135 break; |
| 1136 case Token::MUL: { |
| 1137 double answer = static_cast<double>(left) * right; |
| 1138 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { |
| 1139 // If the product is zero and the non-zero factor is negative, |
| 1140 // the spec requires us to return floating point negative zero. |
| 1141 if (answer != 0 || (left + right) >= 0) { |
| 1142 answer_object = Smi::FromInt(static_cast<int>(answer)); |
| 1143 } |
| 1144 } |
| 1145 } |
| 1146 break; |
| 1147 case Token::DIV: |
| 1148 case Token::MOD: |
| 1149 break; |
| 1150 case Token::BIT_OR: |
| 1151 answer_object = Smi::FromInt(left | right); |
| 1152 break; |
| 1153 case Token::BIT_AND: |
| 1154 answer_object = Smi::FromInt(left & right); |
| 1155 break; |
| 1156 case Token::BIT_XOR: |
| 1157 answer_object = Smi::FromInt(left ^ right); |
| 1158 break; |
| 1159 |
| 1160 case Token::SHL: { |
| 1161 int shift_amount = right & 0x1F; |
| 1162 if (Smi::IsValid(left << shift_amount)) { |
| 1163 answer_object = Smi::FromInt(left << shift_amount); |
| 1164 } |
| 1165 break; |
| 1166 } |
| 1167 case Token::SHR: { |
| 1168 int shift_amount = right & 0x1F; |
| 1169 unsigned int unsigned_left = left; |
| 1170 unsigned_left >>= shift_amount; |
| 1171 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) { |
| 1172 answer_object = Smi::FromInt(unsigned_left); |
| 1173 } |
| 1174 break; |
| 1175 } |
| 1176 case Token::SAR: { |
| 1177 int shift_amount = right & 0x1F; |
| 1178 unsigned int unsigned_left = left; |
| 1179 if (left < 0) { |
| 1180 // Perform arithmetic shift of a negative number by |
| 1181 // complementing number, logical shifting, complementing again. |
| 1182 unsigned_left = ~unsigned_left; |
| 1183 unsigned_left >>= shift_amount; |
| 1184 unsigned_left = ~unsigned_left; |
| 1185 } else { |
| 1186 unsigned_left >>= shift_amount; |
| 1187 } |
| 1188 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left))); |
| 1189 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left)); |
| 1190 break; |
| 1191 } |
| 1192 default: |
| 1193 UNREACHABLE(); |
| 1194 break; |
| 1195 } |
| 1196 if (answer_object == Heap::undefined_value()) { |
| 1197 return false; |
| 1198 } |
| 1199 frame_->Push(Handle<Object>(answer_object)); |
| 1200 return true; |
| 1201 } |
| 1202 |
| 1203 |
| 1204 void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg, |
| 1205 TypeInfo type, |
| 1206 DeferredCode* deferred) { |
| 1207 if (!type.IsSmi()) { |
| 1208 __ JumpIfNotSmi(reg, deferred->entry_label()); |
| 1209 } |
| 1210 if (FLAG_debug_code) { |
| 1211 __ AbortIfNotSmi(reg); |
| 1212 } |
| 1213 } |
| 1214 |
| 1215 |
| 1216 void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, |
| 1217 Register right, |
| 1218 TypeInfo left_info, |
| 1219 TypeInfo right_info, |
| 1220 DeferredCode* deferred) { |
| 1221 if (!left_info.IsSmi() && !right_info.IsSmi()) { |
| 1222 __ JumpIfNotBothSmi(left, right, deferred->entry_label()); |
| 1223 } else if (!left_info.IsSmi()) { |
| 1224 __ JumpIfNotSmi(left, deferred->entry_label()); |
| 1225 } else if (!right_info.IsSmi()) { |
| 1226 __ JumpIfNotSmi(right, deferred->entry_label()); |
| 1227 } |
| 1228 if (FLAG_debug_code) { |
| 1229 __ AbortIfNotSmi(left); |
| 1230 __ AbortIfNotSmi(right); |
| 1231 } |
| 1232 } |
| 1233 |
| 1234 |
| 1235 // Implements a binary operation using a deferred code object and some |
| 1236 // inline code to operate on smis quickly. |
| 1237 Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, |
| 1238 Result* left, |
| 1239 Result* right, |
| 1240 OverwriteMode overwrite_mode) { |
| 1241 // Copy the type info because left and right may be overwritten. |
| 1242 TypeInfo left_type_info = left->type_info(); |
| 1243 TypeInfo right_type_info = right->type_info(); |
| 1244 Token::Value op = expr->op(); |
| 1245 Result answer; |
| 1246 // Special handling of div and mod because they use fixed registers. |
| 1247 if (op == Token::DIV || op == Token::MOD) { |
| 1248 // We need rax as the quotient register, rdx as the remainder |
| 1249 // register, neither left nor right in rax or rdx, and left copied |
| 1250 // to rax. |
| 1251 Result quotient; |
| 1252 Result remainder; |
| 1253 bool left_is_in_rax = false; |
| 1254 // Step 1: get rax for quotient. |
| 1255 if ((left->is_register() && left->reg().is(rax)) || |
| 1256 (right->is_register() && right->reg().is(rax))) { |
| 1257 // One or both is in rax. Use a fresh non-rdx register for |
| 1258 // them. |
| 1259 Result fresh = allocator_->Allocate(); |
| 1260 ASSERT(fresh.is_valid()); |
| 1261 if (fresh.reg().is(rdx)) { |
| 1262 remainder = fresh; |
| 1263 fresh = allocator_->Allocate(); |
| 1264 ASSERT(fresh.is_valid()); |
| 1265 } |
| 1266 if (left->is_register() && left->reg().is(rax)) { |
| 1267 quotient = *left; |
| 1268 *left = fresh; |
| 1269 left_is_in_rax = true; |
| 1270 } |
| 1271 if (right->is_register() && right->reg().is(rax)) { |
| 1272 quotient = *right; |
| 1273 *right = fresh; |
| 1274 } |
| 1275 __ movq(fresh.reg(), rax); |
| 1276 } else { |
| 1277 // Neither left nor right is in rax. |
| 1278 quotient = allocator_->Allocate(rax); |
| 1279 } |
| 1280 ASSERT(quotient.is_register() && quotient.reg().is(rax)); |
| 1281 ASSERT(!(left->is_register() && left->reg().is(rax))); |
| 1282 ASSERT(!(right->is_register() && right->reg().is(rax))); |
| 1283 |
| 1284 // Step 2: get rdx for remainder if necessary. |
| 1285 if (!remainder.is_valid()) { |
| 1286 if ((left->is_register() && left->reg().is(rdx)) || |
| 1287 (right->is_register() && right->reg().is(rdx))) { |
| 1288 Result fresh = allocator_->Allocate(); |
| 1289 ASSERT(fresh.is_valid()); |
| 1290 if (left->is_register() && left->reg().is(rdx)) { |
| 1291 remainder = *left; |
| 1292 *left = fresh; |
| 1293 } |
| 1294 if (right->is_register() && right->reg().is(rdx)) { |
| 1295 remainder = *right; |
| 1296 *right = fresh; |
| 1297 } |
| 1298 __ movq(fresh.reg(), rdx); |
| 1299 } else { |
| 1300 // Neither left nor right is in rdx. |
| 1301 remainder = allocator_->Allocate(rdx); |
| 1302 } |
| 1303 } |
| 1304 ASSERT(remainder.is_register() && remainder.reg().is(rdx)); |
| 1305 ASSERT(!(left->is_register() && left->reg().is(rdx))); |
| 1306 ASSERT(!(right->is_register() && right->reg().is(rdx))); |
| 1307 |
| 1308 left->ToRegister(); |
| 1309 right->ToRegister(); |
| 1310 frame_->Spill(rax); |
| 1311 frame_->Spill(rdx); |
| 1312 |
| 1313 // Check that left and right are smi tagged. |
| 1314 DeferredInlineBinaryOperation* deferred = |
| 1315 new DeferredInlineBinaryOperation(op, |
| 1316 (op == Token::DIV) ? rax : rdx, |
| 1317 left->reg(), |
| 1318 right->reg(), |
| 1319 overwrite_mode); |
| 1320 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), |
| 1321 left_type_info, right_type_info, deferred); |
| 1322 |
| 1323 if (op == Token::DIV) { |
| 1324 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label()); |
| 1325 deferred->BindExit(); |
| 1326 left->Unuse(); |
| 1327 right->Unuse(); |
| 1328 answer = quotient; |
| 1329 } else { |
| 1330 ASSERT(op == Token::MOD); |
| 1331 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label()); |
| 1332 deferred->BindExit(); |
| 1333 left->Unuse(); |
| 1334 right->Unuse(); |
| 1335 answer = remainder; |
| 1336 } |
| 1337 ASSERT(answer.is_valid()); |
| 1338 return answer; |
| 1339 } |
| 1340 |
| 1341 // Special handling of shift operations because they use fixed |
| 1342 // registers. |
| 1343 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) { |
| 1344 // Move left out of rcx if necessary. |
| 1345 if (left->is_register() && left->reg().is(rcx)) { |
| 1346 *left = allocator_->Allocate(); |
| 1347 ASSERT(left->is_valid()); |
| 1348 __ movq(left->reg(), rcx); |
| 1349 } |
| 1350 right->ToRegister(rcx); |
| 1351 left->ToRegister(); |
| 1352 ASSERT(left->is_register() && !left->reg().is(rcx)); |
| 1353 ASSERT(right->is_register() && right->reg().is(rcx)); |
| 1354 |
| 1355 // We will modify right, it must be spilled. |
| 1356 frame_->Spill(rcx); |
| 1357 |
| 1358 // Use a fresh answer register to avoid spilling the left operand. |
| 1359 answer = allocator_->Allocate(); |
| 1360 ASSERT(answer.is_valid()); |
| 1361 // Check that both operands are smis using the answer register as a |
| 1362 // temporary. |
| 1363 DeferredInlineBinaryOperation* deferred = |
| 1364 new DeferredInlineBinaryOperation(op, |
| 1365 answer.reg(), |
| 1366 left->reg(), |
| 1367 rcx, |
| 1368 overwrite_mode); |
| 1369 |
| 1370 Label do_op; |
| 1371 if (right_type_info.IsSmi()) { |
| 1372 if (FLAG_debug_code) { |
| 1373 __ AbortIfNotSmi(right->reg()); |
| 1374 } |
| 1375 __ movq(answer.reg(), left->reg()); |
| 1376 // If left is not known to be a smi, check if it is. |
| 1377 // If left is not known to be a number, and it isn't a smi, check if |
| 1378 // it is a HeapNumber. |
| 1379 if (!left_type_info.IsSmi()) { |
| 1380 __ JumpIfSmi(answer.reg(), &do_op); |
| 1381 if (!left_type_info.IsNumber()) { |
| 1382 // Branch if not a heapnumber. |
| 1383 __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset), |
| 1384 Factory::heap_number_map()); |
| 1385 deferred->Branch(not_equal); |
| 1386 } |
| 1387 // Load integer value into answer register using truncation. |
| 1388 __ cvttsd2si(answer.reg(), |
| 1389 FieldOperand(answer.reg(), HeapNumber::kValueOffset)); |
| 1390 // Branch if we might have overflowed. |
| 1391 // (False negative for Smi::kMinValue) |
| 1392 __ cmpq(answer.reg(), Immediate(0x80000000)); |
| 1393 deferred->Branch(equal); |
| 1394 // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging. |
| 1395 __ Integer32ToSmi(answer.reg(), answer.reg()); |
| 1396 } else { |
| 1397 // Fast case - both are actually smis. |
| 1398 if (FLAG_debug_code) { |
| 1399 __ AbortIfNotSmi(left->reg()); |
| 1400 } |
| 1401 } |
| 1402 } else { |
| 1403 JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx, |
| 1404 left_type_info, right_type_info, deferred); |
| 1405 } |
| 1406 __ bind(&do_op); |
| 1407 |
| 1408 // Perform the operation. |
| 1409 switch (op) { |
| 1410 case Token::SAR: |
| 1411 __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx); |
| 1412 break; |
| 1413 case Token::SHR: { |
| 1414 __ SmiShiftLogicalRight(answer.reg(), |
| 1415 left->reg(), |
| 1416 rcx, |
| 1417 deferred->entry_label()); |
| 1418 break; |
| 1419 } |
| 1420 case Token::SHL: { |
| 1421 __ SmiShiftLeft(answer.reg(), |
| 1422 left->reg(), |
| 1423 rcx); |
| 1424 break; |
| 1425 } |
| 1426 default: |
| 1427 UNREACHABLE(); |
| 1428 } |
| 1429 deferred->BindExit(); |
| 1430 left->Unuse(); |
| 1431 right->Unuse(); |
| 1432 ASSERT(answer.is_valid()); |
| 1433 return answer; |
| 1434 } |
| 1435 |
| 1436 // Handle the other binary operations. |
| 1437 left->ToRegister(); |
| 1438 right->ToRegister(); |
| 1439 // A newly allocated register answer is used to hold the answer. The |
| 1440 // registers containing left and right are not modified so they don't |
| 1441 // need to be spilled in the fast case. |
| 1442 answer = allocator_->Allocate(); |
| 1443 ASSERT(answer.is_valid()); |
| 1444 |
| 1445 // Perform the smi tag check. |
| 1446 DeferredInlineBinaryOperation* deferred = |
| 1447 new DeferredInlineBinaryOperation(op, |
| 1448 answer.reg(), |
| 1449 left->reg(), |
| 1450 right->reg(), |
| 1451 overwrite_mode); |
| 1452 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), |
| 1453 left_type_info, right_type_info, deferred); |
| 1454 |
| 1455 switch (op) { |
| 1456 case Token::ADD: |
| 1457 __ SmiAdd(answer.reg(), |
| 1458 left->reg(), |
| 1459 right->reg(), |
| 1460 deferred->entry_label()); |
| 1461 break; |
| 1462 |
| 1463 case Token::SUB: |
| 1464 __ SmiSub(answer.reg(), |
| 1465 left->reg(), |
| 1466 right->reg(), |
| 1467 deferred->entry_label()); |
| 1468 break; |
| 1469 |
| 1470 case Token::MUL: { |
| 1471 __ SmiMul(answer.reg(), |
| 1472 left->reg(), |
| 1473 right->reg(), |
| 1474 deferred->entry_label()); |
| 1475 break; |
| 1476 } |
| 1477 |
| 1478 case Token::BIT_OR: |
| 1479 __ SmiOr(answer.reg(), left->reg(), right->reg()); |
| 1480 break; |
| 1481 |
| 1482 case Token::BIT_AND: |
| 1483 __ SmiAnd(answer.reg(), left->reg(), right->reg()); |
| 1484 break; |
| 1485 |
| 1486 case Token::BIT_XOR: |
| 1487 __ SmiXor(answer.reg(), left->reg(), right->reg()); |
| 1488 break; |
| 1489 |
| 1490 default: |
| 1491 UNREACHABLE(); |
| 1492 break; |
| 1493 } |
| 1494 deferred->BindExit(); |
| 1495 left->Unuse(); |
| 1496 right->Unuse(); |
| 1497 ASSERT(answer.is_valid()); |
| 1498 return answer; |
| 1499 } |
| 1500 |
| 1501 |
| 1502 // Call the appropriate binary operation stub to compute src op value |
| 1503 // and leave the result in dst. |
| 1504 class DeferredInlineSmiOperation: public DeferredCode { |
| 1505 public: |
| 1506 DeferredInlineSmiOperation(Token::Value op, |
| 1507 Register dst, |
| 1508 Register src, |
| 1509 Smi* value, |
| 1510 OverwriteMode overwrite_mode) |
| 1511 : op_(op), |
| 1512 dst_(dst), |
| 1513 src_(src), |
| 1514 value_(value), |
| 1515 overwrite_mode_(overwrite_mode) { |
| 1516 set_comment("[ DeferredInlineSmiOperation"); |
| 1517 } |
| 1518 |
| 1519 virtual void Generate(); |
| 1520 |
| 1521 private: |
| 1522 Token::Value op_; |
| 1523 Register dst_; |
| 1524 Register src_; |
| 1525 Smi* value_; |
| 1526 OverwriteMode overwrite_mode_; |
| 1527 }; |
| 1528 |
| 1529 |
| 1530 void DeferredInlineSmiOperation::Generate() { |
| 1531 // For mod we don't generate all the Smi code inline. |
| 1532 GenericBinaryOpStub stub( |
| 1533 op_, |
| 1534 overwrite_mode_, |
| 1535 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB); |
| 1536 stub.GenerateCall(masm_, src_, value_); |
| 1537 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 1538 } |
| 1539 |
| 1540 |
| 1541 // Call the appropriate binary operation stub to compute value op src |
| 1542 // and leave the result in dst. |
| 1543 class DeferredInlineSmiOperationReversed: public DeferredCode { |
| 1544 public: |
| 1545 DeferredInlineSmiOperationReversed(Token::Value op, |
| 1546 Register dst, |
| 1547 Smi* value, |
| 1548 Register src, |
| 1549 OverwriteMode overwrite_mode) |
| 1550 : op_(op), |
| 1551 dst_(dst), |
| 1552 value_(value), |
| 1553 src_(src), |
| 1554 overwrite_mode_(overwrite_mode) { |
| 1555 set_comment("[ DeferredInlineSmiOperationReversed"); |
| 1556 } |
| 1557 |
| 1558 virtual void Generate(); |
| 1559 |
| 1560 private: |
| 1561 Token::Value op_; |
| 1562 Register dst_; |
| 1563 Smi* value_; |
| 1564 Register src_; |
| 1565 OverwriteMode overwrite_mode_; |
| 1566 }; |
| 1567 |
| 1568 |
| 1569 void DeferredInlineSmiOperationReversed::Generate() { |
| 1570 GenericBinaryOpStub stub( |
| 1571 op_, |
| 1572 overwrite_mode_, |
| 1573 NO_SMI_CODE_IN_STUB); |
| 1574 stub.GenerateCall(masm_, value_, src_); |
| 1575 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 1576 } |
| 1577 class DeferredInlineSmiAdd: public DeferredCode { |
| 1578 public: |
| 1579 DeferredInlineSmiAdd(Register dst, |
| 1580 Smi* value, |
| 1581 OverwriteMode overwrite_mode) |
| 1582 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { |
| 1583 set_comment("[ DeferredInlineSmiAdd"); |
| 1584 } |
| 1585 |
| 1586 virtual void Generate(); |
| 1587 |
| 1588 private: |
| 1589 Register dst_; |
| 1590 Smi* value_; |
| 1591 OverwriteMode overwrite_mode_; |
| 1592 }; |
| 1593 |
| 1594 |
| 1595 void DeferredInlineSmiAdd::Generate() { |
| 1596 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); |
| 1597 igostub.GenerateCall(masm_, dst_, value_); |
| 1598 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 1599 } |
| 1600 |
| 1601 |
| 1602 // The result of value + src is in dst. It either overflowed or was not |
| 1603 // smi tagged. Undo the speculative addition and call the appropriate |
| 1604 // specialized stub for add. The result is left in dst. |
| 1605 class DeferredInlineSmiAddReversed: public DeferredCode { |
| 1606 public: |
| 1607 DeferredInlineSmiAddReversed(Register dst, |
| 1608 Smi* value, |
| 1609 OverwriteMode overwrite_mode) |
| 1610 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { |
| 1611 set_comment("[ DeferredInlineSmiAddReversed"); |
| 1612 } |
| 1613 |
| 1614 virtual void Generate(); |
| 1615 |
| 1616 private: |
| 1617 Register dst_; |
| 1618 Smi* value_; |
| 1619 OverwriteMode overwrite_mode_; |
| 1620 }; |
| 1621 |
| 1622 |
| 1623 void DeferredInlineSmiAddReversed::Generate() { |
| 1624 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); |
| 1625 igostub.GenerateCall(masm_, value_, dst_); |
| 1626 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 1627 } |
| 1628 |
| 1629 |
| 1630 class DeferredInlineSmiSub: public DeferredCode { |
| 1631 public: |
| 1632 DeferredInlineSmiSub(Register dst, |
| 1633 Smi* value, |
| 1634 OverwriteMode overwrite_mode) |
| 1635 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { |
| 1636 set_comment("[ DeferredInlineSmiSub"); |
| 1637 } |
| 1638 |
| 1639 virtual void Generate(); |
| 1640 |
| 1641 private: |
| 1642 Register dst_; |
| 1643 Smi* value_; |
| 1644 OverwriteMode overwrite_mode_; |
| 1645 }; |
| 1646 |
| 1647 |
| 1648 |
| 1649 void DeferredInlineSmiSub::Generate() { |
| 1650 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB); |
| 1651 igostub.GenerateCall(masm_, dst_, value_); |
| 1652 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 1653 } |
| 1654 |
| 1655 |
| 1656 Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr, |
| 1657 Result* operand, |
| 1658 Handle<Object> value, |
| 1659 bool reversed, |
| 1660 OverwriteMode overwrite_mode) { |
| 1661 // Generate inline code for a binary operation when one of the |
| 1662 // operands is a constant smi. Consumes the argument "operand". |
| 1663 if (IsUnsafeSmi(value)) { |
| 1664 Result unsafe_operand(value); |
| 1665 if (reversed) { |
| 1666 return LikelySmiBinaryOperation(expr, &unsafe_operand, operand, |
| 1667 overwrite_mode); |
| 1668 } else { |
| 1669 return LikelySmiBinaryOperation(expr, operand, &unsafe_operand, |
| 1670 overwrite_mode); |
| 1671 } |
| 1672 } |
| 1673 |
| 1674 // Get the literal value. |
| 1675 Smi* smi_value = Smi::cast(*value); |
| 1676 int int_value = smi_value->value(); |
| 1677 |
| 1678 Token::Value op = expr->op(); |
| 1679 Result answer; |
| 1680 switch (op) { |
| 1681 case Token::ADD: { |
| 1682 operand->ToRegister(); |
| 1683 frame_->Spill(operand->reg()); |
| 1684 DeferredCode* deferred = NULL; |
| 1685 if (reversed) { |
| 1686 deferred = new DeferredInlineSmiAddReversed(operand->reg(), |
| 1687 smi_value, |
| 1688 overwrite_mode); |
| 1689 } else { |
| 1690 deferred = new DeferredInlineSmiAdd(operand->reg(), |
| 1691 smi_value, |
| 1692 overwrite_mode); |
| 1693 } |
| 1694 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), |
| 1695 deferred); |
| 1696 __ SmiAddConstant(operand->reg(), |
| 1697 operand->reg(), |
| 1698 smi_value, |
| 1699 deferred->entry_label()); |
| 1700 deferred->BindExit(); |
| 1701 answer = *operand; |
| 1702 break; |
| 1703 } |
| 1704 |
| 1705 case Token::SUB: { |
| 1706 if (reversed) { |
| 1707 Result constant_operand(value); |
| 1708 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, |
| 1709 overwrite_mode); |
| 1710 } else { |
| 1711 operand->ToRegister(); |
| 1712 frame_->Spill(operand->reg()); |
| 1713 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(), |
| 1714 smi_value, |
| 1715 overwrite_mode); |
| 1716 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), |
| 1717 deferred); |
| 1718 // A smi currently fits in a 32-bit Immediate. |
| 1719 __ SmiSubConstant(operand->reg(), |
| 1720 operand->reg(), |
| 1721 smi_value, |
| 1722 deferred->entry_label()); |
| 1723 deferred->BindExit(); |
| 1724 answer = *operand; |
| 1725 } |
| 1726 break; |
| 1727 } |
| 1728 |
| 1729 case Token::SAR: |
| 1730 if (reversed) { |
| 1731 Result constant_operand(value); |
| 1732 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, |
| 1733 overwrite_mode); |
| 1734 } else { |
| 1735 // Only the least significant 5 bits of the shift value are used. |
| 1736 // In the slow case, this masking is done inside the runtime call. |
| 1737 int shift_value = int_value & 0x1f; |
| 1738 operand->ToRegister(); |
| 1739 frame_->Spill(operand->reg()); |
| 1740 DeferredInlineSmiOperation* deferred = |
| 1741 new DeferredInlineSmiOperation(op, |
| 1742 operand->reg(), |
| 1743 operand->reg(), |
| 1744 smi_value, |
| 1745 overwrite_mode); |
| 1746 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), |
| 1747 deferred); |
| 1748 __ SmiShiftArithmeticRightConstant(operand->reg(), |
| 1749 operand->reg(), |
| 1750 shift_value); |
| 1751 deferred->BindExit(); |
| 1752 answer = *operand; |
| 1753 } |
| 1754 break; |
| 1755 |
| 1756 case Token::SHR: |
| 1757 if (reversed) { |
| 1758 Result constant_operand(value); |
| 1759 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, |
| 1760 overwrite_mode); |
| 1761 } else { |
| 1762 // Only the least significant 5 bits of the shift value are used. |
| 1763 // In the slow case, this masking is done inside the runtime call. |
| 1764 int shift_value = int_value & 0x1f; |
| 1765 operand->ToRegister(); |
| 1766 answer = allocator()->Allocate(); |
| 1767 ASSERT(answer.is_valid()); |
| 1768 DeferredInlineSmiOperation* deferred = |
| 1769 new DeferredInlineSmiOperation(op, |
| 1770 answer.reg(), |
| 1771 operand->reg(), |
| 1772 smi_value, |
| 1773 overwrite_mode); |
| 1774 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), |
| 1775 deferred); |
| 1776 __ SmiShiftLogicalRightConstant(answer.reg(), |
| 1777 operand->reg(), |
| 1778 shift_value, |
| 1779 deferred->entry_label()); |
| 1780 deferred->BindExit(); |
| 1781 operand->Unuse(); |
| 1782 } |
| 1783 break; |
| 1784 |
| 1785 case Token::SHL: |
| 1786 if (reversed) { |
| 1787 operand->ToRegister(); |
| 1788 |
| 1789 // We need rcx to be available to hold operand, and to be spilled. |
| 1790 // SmiShiftLeft implicitly modifies rcx. |
| 1791 if (operand->reg().is(rcx)) { |
| 1792 frame_->Spill(operand->reg()); |
| 1793 answer = allocator()->Allocate(); |
| 1794 } else { |
| 1795 Result rcx_reg = allocator()->Allocate(rcx); |
| 1796 // answer must not be rcx. |
| 1797 answer = allocator()->Allocate(); |
| 1798 // rcx_reg goes out of scope. |
| 1799 } |
| 1800 |
| 1801 DeferredInlineSmiOperationReversed* deferred = |
| 1802 new DeferredInlineSmiOperationReversed(op, |
| 1803 answer.reg(), |
| 1804 smi_value, |
| 1805 operand->reg(), |
| 1806 overwrite_mode); |
| 1807 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), |
| 1808 deferred); |
| 1809 |
| 1810 __ Move(answer.reg(), smi_value); |
| 1811 __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg()); |
| 1812 operand->Unuse(); |
| 1813 |
| 1814 deferred->BindExit(); |
| 1815 } else { |
| 1816 // Only the least significant 5 bits of the shift value are used. |
| 1817 // In the slow case, this masking is done inside the runtime call. |
| 1818 int shift_value = int_value & 0x1f; |
| 1819 operand->ToRegister(); |
| 1820 if (shift_value == 0) { |
| 1821 // Spill operand so it can be overwritten in the slow case. |
| 1822 frame_->Spill(operand->reg()); |
| 1823 DeferredInlineSmiOperation* deferred = |
| 1824 new DeferredInlineSmiOperation(op, |
| 1825 operand->reg(), |
| 1826 operand->reg(), |
| 1827 smi_value, |
| 1828 overwrite_mode); |
| 1829 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), |
| 1830 deferred); |
| 1831 deferred->BindExit(); |
| 1832 answer = *operand; |
| 1833 } else { |
| 1834 // Use a fresh temporary for nonzero shift values. |
| 1835 answer = allocator()->Allocate(); |
| 1836 ASSERT(answer.is_valid()); |
| 1837 DeferredInlineSmiOperation* deferred = |
| 1838 new DeferredInlineSmiOperation(op, |
| 1839 answer.reg(), |
| 1840 operand->reg(), |
| 1841 smi_value, |
| 1842 overwrite_mode); |
| 1843 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), |
| 1844 deferred); |
| 1845 __ SmiShiftLeftConstant(answer.reg(), |
| 1846 operand->reg(), |
| 1847 shift_value); |
| 1848 deferred->BindExit(); |
| 1849 operand->Unuse(); |
| 1850 } |
| 1851 } |
| 1852 break; |
| 1853 |
| 1854 case Token::BIT_OR: |
| 1855 case Token::BIT_XOR: |
| 1856 case Token::BIT_AND: { |
| 1857 operand->ToRegister(); |
| 1858 frame_->Spill(operand->reg()); |
| 1859 if (reversed) { |
| 1860 // Bit operations with a constant smi are commutative. |
| 1861 // We can swap left and right operands with no problem. |
| 1862 // Swap left and right overwrite modes. 0->0, 1->2, 2->1. |
| 1863 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3); |
| 1864 } |
| 1865 DeferredCode* deferred = new DeferredInlineSmiOperation(op, |
| 1866 operand->reg(), |
| 1867 operand->reg(), |
| 1868 smi_value, |
| 1869 overwrite_mode); |
| 1870 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), |
| 1871 deferred); |
| 1872 if (op == Token::BIT_AND) { |
| 1873 __ SmiAndConstant(operand->reg(), operand->reg(), smi_value); |
| 1874 } else if (op == Token::BIT_XOR) { |
| 1875 if (int_value != 0) { |
| 1876 __ SmiXorConstant(operand->reg(), operand->reg(), smi_value); |
| 1877 } |
| 1878 } else { |
| 1879 ASSERT(op == Token::BIT_OR); |
| 1880 if (int_value != 0) { |
| 1881 __ SmiOrConstant(operand->reg(), operand->reg(), smi_value); |
| 1882 } |
| 1883 } |
| 1884 deferred->BindExit(); |
| 1885 answer = *operand; |
| 1886 break; |
| 1887 } |
| 1888 |
| 1889 // Generate inline code for mod of powers of 2 and negative powers of 2. |
| 1890 case Token::MOD: |
| 1891 if (!reversed && |
| 1892 int_value != 0 && |
| 1893 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) { |
| 1894 operand->ToRegister(); |
| 1895 frame_->Spill(operand->reg()); |
| 1896 DeferredCode* deferred = |
| 1897 new DeferredInlineSmiOperation(op, |
| 1898 operand->reg(), |
| 1899 operand->reg(), |
| 1900 smi_value, |
| 1901 overwrite_mode); |
| 1902 // Check for negative or non-Smi left hand side. |
| 1903 __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label()); |
| 1904 if (int_value < 0) int_value = -int_value; |
| 1905 if (int_value == 1) { |
| 1906 __ Move(operand->reg(), Smi::FromInt(0)); |
| 1907 } else { |
| 1908 __ SmiAndConstant(operand->reg(), |
| 1909 operand->reg(), |
| 1910 Smi::FromInt(int_value - 1)); |
| 1911 } |
| 1912 deferred->BindExit(); |
| 1913 answer = *operand; |
| 1914 break; // This break only applies if we generated code for MOD. |
| 1915 } |
| 1916 // Fall through if we did not find a power of 2 on the right hand side! |
| 1917 // The next case must be the default. |
| 1918 |
| 1919 default: { |
| 1920 Result constant_operand(value); |
| 1921 if (reversed) { |
| 1922 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, |
| 1923 overwrite_mode); |
| 1924 } else { |
| 1925 answer = LikelySmiBinaryOperation(expr, operand, &constant_operand, |
| 1926 overwrite_mode); |
| 1927 } |
| 1928 break; |
| 1929 } |
| 1930 } |
| 1931 ASSERT(answer.is_valid()); |
| 1932 return answer; |
| 1933 } |
| 1934 |
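Two of the constant-smi tricks used in ConstantSmiBinaryOperation above can be sanity-checked in isolation. Below is a minimal standalone sketch in plain C++, not V8 code: the 0/1/2 values stand for the overwrite modes named in the "0->0, 1->2, 2->1" comment, and the masking identity used for the MOD fast path only holds for non-negative left operands, which is why the generated code bails out through JumpIfNotPositiveSmi.

    #include <cassert>

    // Swapping the operands of a commutative operation also swaps which operand
    // may be overwritten: 0 -> 0, 1 -> 2, 2 -> 1, computed as (2 * m) % 3.
    static int SwapOverwriteMode(int m) { return (2 * m) % 3; }

    // The MOD fast path: x % 2^k == x & (2^k - 1), valid only for x >= 0.
    static int ModPowerOfTwo(int x, int power_of_two) {
      assert(x >= 0);
      assert(power_of_two > 0 && (power_of_two & (power_of_two - 1)) == 0);
      return x & (power_of_two - 1);
    }

    int main() {
      assert(SwapOverwriteMode(0) == 0);
      assert(SwapOverwriteMode(1) == 2);
      assert(SwapOverwriteMode(2) == 1);
      assert(ModPowerOfTwo(13, 8) == 13 % 8);   // 5
      assert(ModPowerOfTwo(64, 16) == 0);
      return 0;
    }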
| 1935 static bool CouldBeNaN(const Result& result) { |
| 1936 if (result.type_info().IsSmi()) return false; |
| 1937 if (result.type_info().IsInteger32()) return false; |
| 1938 if (!result.is_constant()) return true; |
| 1939 if (!result.handle()->IsHeapNumber()) return false; |
| 1940 return isnan(HeapNumber::cast(*result.handle())->value()); |
| 1941 } |
| 1942 |
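The short-circuits in CouldBeNaN follow from NaN's comparison semantics: a value known to be a smi or an int32 can never be NaN, so only untyped values and heap numbers that actually hold NaN force the general comparison path. A small standalone reminder of those semantics (plain C++, not V8 code):

    #include <cassert>
    #include <cmath>

    int main() {
      double nan = std::nan("");
      // NaN is not equal to anything, including itself, so identical operands
      // can only be treated as equal when at least one side cannot be NaN
      // (the kCantBothBeNaN case used further down).
      assert(!(nan == nan));
      assert(nan != nan);
      // Ordered comparisons involving NaN are always false.
      assert(!(nan < 1.0));
      assert(!(nan > 1.0));
      return 0;
    }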
| 1943 |
| 1944 // Convert from signed to unsigned comparison to match the way EFLAGS are set |
| 1945 // by FPU and XMM compare instructions. |
| 1946 static Condition DoubleCondition(Condition cc) { |
| 1947 switch (cc) { |
| 1948 case less: return below; |
| 1949 case equal: return equal; |
| 1950 case less_equal: return below_equal; |
| 1951 case greater: return above; |
| 1952 case greater_equal: return above_equal; |
| 1953 default: UNREACHABLE(); |
| 1954 } |
| 1955 UNREACHABLE(); |
| 1956 return equal; |
| 1957 } |
| 1958 |
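The reason DoubleCondition is needed: ucomisd (and the FPU compare it replaces) reports its result through ZF, PF and CF only and clears OF and SF, so the flags look like the outcome of an unsigned integer compare. Signed condition codes such as less (SF != OF) would therefore branch incorrectly, and each signed condition is swapped for its unsigned counterpart:

    less          -> below          (CF set)
    equal         -> equal          (ZF set)
    less_equal    -> below_equal    (CF or ZF set)
    greater       -> above          (CF and ZF clear)
    greater_equal -> above_equal    (CF clear)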
| 1959 |
| 1960 void CodeGenerator::Comparison(AstNode* node, |
| 1961 Condition cc, |
| 1962 bool strict, |
| 1963 ControlDestination* dest) { |
| 1964 // Strict only makes sense for equality comparisons. |
| 1965 ASSERT(!strict || cc == equal); |
| 1966 |
| 1967 Result left_side; |
| 1968 Result right_side; |
| 1969 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. |
| 1970 if (cc == greater || cc == less_equal) { |
| 1971 cc = ReverseCondition(cc); |
| 1972 left_side = frame_->Pop(); |
| 1973 right_side = frame_->Pop(); |
637 } else { | 1974 } else { |
638 __ movq(rax, key_); | 1975 right_side = frame_->Pop(); |
639 __ movq(rdx, receiver_); | 1976 left_side = frame_->Pop(); |
640 } | 1977 } |
641 // Calculate the delta from the IC call instruction to the map check | 1978 ASSERT(cc == less || cc == equal || cc == greater_equal); |
642 // movq instruction in the inlined version. This delta is stored in | 1979 |
643 // a test(rax, delta) instruction after the call so that we can find | 1980 // If either side is a constant smi, optimize the comparison. |
644 // it in the IC initialization code and patch the movq instruction. | 1981 bool left_side_constant_smi = false; |
645 // This means that we cannot allow test instructions after calls to | 1982 bool left_side_constant_null = false; |
646 // KeyedLoadIC stubs in other places. | 1983 bool left_side_constant_1_char_string = false; |
647 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); | 1984 if (left_side.is_constant()) { |
648 __ Call(ic, RelocInfo::CODE_TARGET); | 1985 left_side_constant_smi = left_side.handle()->IsSmi(); |
649 // The delta from the start of the map-compare instruction to the | 1986 left_side_constant_null = left_side.handle()->IsNull(); |
650 // test instruction. We use masm_-> directly here instead of the __ | 1987 left_side_constant_1_char_string = |
651 // macro because the macro sometimes uses macro expansion to turn | 1988 (left_side.handle()->IsString() && |
652 // into something that can't return a value. This is encountered | 1989 String::cast(*left_side.handle())->length() == 1 && |
653 // when doing generated code coverage tests. | 1990 String::cast(*left_side.handle())->IsAsciiRepresentation()); |
654 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | 1991 } |
655 // Here we use masm_-> instead of the __ macro because this is the | 1992 bool right_side_constant_smi = false; |
656 // instruction that gets patched and coverage code gets in the way. | 1993 bool right_side_constant_null = false; |
657 // TODO(X64): Consider whether it's worth switching the test to a | 1994 bool right_side_constant_1_char_string = false; |
658 // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't | 1995 if (right_side.is_constant()) { |
659 // be generated normally. | 1996 right_side_constant_smi = right_side.handle()->IsSmi(); |
660 masm_->testl(rax, Immediate(-delta_to_patch_site)); | 1997 right_side_constant_null = right_side.handle()->IsNull(); |
661 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); | 1998 right_side_constant_1_char_string = |
662 | 1999 (right_side.handle()->IsString() && |
663 if (!dst_.is(rax)) __ movq(dst_, rax); | 2000 String::cast(*right_side.handle())->length() == 1 && |
664 } | 2001 String::cast(*right_side.handle())->IsAsciiRepresentation()); |
665 | 2002 } |
666 | 2003 |
667 class DeferredReferenceSetKeyedValue: public DeferredCode { | 2004 if (left_side_constant_smi || right_side_constant_smi) { |
668 public: | 2005 if (left_side_constant_smi && right_side_constant_smi) { |
669 DeferredReferenceSetKeyedValue(Register value, | 2006 // Trivial case, comparing two constants. |
670 Register key, | 2007 int left_value = Smi::cast(*left_side.handle())->value(); |
671 Register receiver) | 2008 int right_value = Smi::cast(*right_side.handle())->value(); |
672 : value_(value), key_(key), receiver_(receiver) { | 2009 switch (cc) { |
673 set_comment("[ DeferredReferenceSetKeyedValue"); | 2010 case less: |
674 } | 2011 dest->Goto(left_value < right_value); |
675 | 2012 break; |
676 virtual void Generate(); | 2013 case equal: |
677 | 2014 dest->Goto(left_value == right_value); |
678 Label* patch_site() { return &patch_site_; } | 2015 break; |
679 | 2016 case greater_equal: |
680 private: | 2017 dest->Goto(left_value >= right_value); |
681 Register value_; | 2018 break; |
682 Register key_; | 2019 default: |
683 Register receiver_; | 2020 UNREACHABLE(); |
684 Label patch_site_; | 2021 } |
685 }; | |
686 | |
687 | |
688 void DeferredReferenceSetKeyedValue::Generate() { | |
689 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1); | |
690 // Move value, receiver, and key to registers rax, rdx, and rcx, as | |
691 // the IC stub expects. | |
692 // Move value to rax, using xchg if the receiver or key is in rax. | |
693 if (!value_.is(rax)) { | |
694 if (!receiver_.is(rax) && !key_.is(rax)) { | |
695 __ movq(rax, value_); | |
696 } else { | 2022 } else { |
697 __ xchg(rax, value_); | 2023 // Only one side is a constant Smi. |
698 // Update receiver_ and key_ if they are affected by the swap. | 2024 // If left side is a constant Smi, reverse the operands. |
699 if (receiver_.is(rax)) { | 2025 // Since one side is a constant Smi, conversion order does not matter. |
700 receiver_ = value_; | 2026 if (left_side_constant_smi) { |
701 } else if (receiver_.is(value_)) { | 2027 Result temp = left_side; |
702 receiver_ = rax; | 2028 left_side = right_side; |
703 } | 2029 right_side = temp; |
704 if (key_.is(rax)) { | 2030 cc = ReverseCondition(cc); |
705 key_ = value_; | 2031 // This may re-introduce greater or less_equal as the value of cc. |
706 } else if (key_.is(value_)) { | 2032 // CompareStub and the inline code both support all values of cc. |
707 key_ = rax; | 2033 } |
708 } | 2034 // Implement comparison against a constant Smi, inlining the case |
709 } | 2035 // where both sides are Smis. |
710 } | 2036 left_side.ToRegister(); |
711 // Value is now in rax. Its original location is remembered in value_, | 2037 Register left_reg = left_side.reg(); |
712 // and the value is restored to value_ before returning. | 2038 Handle<Object> right_val = right_side.handle(); |
713 // The variables receiver_ and key_ are not preserved. | 2039 |
714 // Move receiver and key to rdx and rcx, swapping if necessary. | 2040 // Here we split control flow to the stub call and inlined cases |
715 if (receiver_.is(rdx)) { | 2041 // before finally splitting it to the control destination. We use |
716 if (!key_.is(rcx)) { | 2042 // a jump target and branching to duplicate the virtual frame at |
717 __ movq(rcx, key_); | 2043 // the first split. We manually handle the off-frame references |
718 } // Else everything is already in the right place. | 2044 // by reconstituting them on the non-fall-through path. |
719 } else if (receiver_.is(rcx)) { | 2045 JumpTarget is_smi; |
720 if (key_.is(rdx)) { | 2046 |
721 __ xchg(rcx, rdx); | 2047 if (left_side.is_smi()) { |
722 } else if (key_.is(rcx)) { | 2048 if (FLAG_debug_code) { |
723 __ movq(rdx, receiver_); | 2049 __ AbortIfNotSmi(left_side.reg()); |
| 2050 } |
| 2051 } else { |
| 2052 Condition left_is_smi = masm_->CheckSmi(left_side.reg()); |
| 2053 is_smi.Branch(left_is_smi); |
| 2054 |
| 2055 bool is_loop_condition = (node->AsExpression() != NULL) && |
| 2056 node->AsExpression()->is_loop_condition(); |
| 2057 if (!is_loop_condition && right_val->IsSmi()) { |
| 2058 // Right side is a constant smi and left side has been checked |
| 2059 // not to be a smi. |
| 2060 JumpTarget not_number; |
| 2061 __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset), |
| 2062 Factory::heap_number_map()); |
| 2063 not_number.Branch(not_equal, &left_side); |
| 2064 __ movsd(xmm1, |
| 2065 FieldOperand(left_reg, HeapNumber::kValueOffset)); |
| 2066 int value = Smi::cast(*right_val)->value(); |
| 2067 if (value == 0) { |
| 2068 __ xorpd(xmm0, xmm0); |
| 2069 } else { |
| 2070 Result temp = allocator()->Allocate(); |
| 2071 __ movl(temp.reg(), Immediate(value)); |
| 2072 __ cvtlsi2sd(xmm0, temp.reg()); |
| 2073 temp.Unuse(); |
| 2074 } |
| 2075 __ ucomisd(xmm1, xmm0); |
| 2076 // Jump to builtin for NaN. |
| 2077 not_number.Branch(parity_even, &left_side); |
| 2078 left_side.Unuse(); |
| 2079 dest->true_target()->Branch(DoubleCondition(cc)); |
| 2080 dest->false_target()->Jump(); |
| 2081 not_number.Bind(&left_side); |
| 2082 } |
| 2083 |
 | 2084 // Set up and call the compare stub. |
| 2085 CompareStub stub(cc, strict, kCantBothBeNaN); |
| 2086 Result result = frame_->CallStub(&stub, &left_side, &right_side); |
| 2087 result.ToRegister(); |
| 2088 __ testq(result.reg(), result.reg()); |
| 2089 result.Unuse(); |
| 2090 dest->true_target()->Branch(cc); |
| 2091 dest->false_target()->Jump(); |
| 2092 |
| 2093 is_smi.Bind(); |
| 2094 } |
| 2095 |
| 2096 left_side = Result(left_reg); |
| 2097 right_side = Result(right_val); |
| 2098 // Test smi equality and comparison by signed int comparison. |
| 2099 // Both sides are smis, so we can use an Immediate. |
| 2100 __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle())); |
| 2101 left_side.Unuse(); |
| 2102 right_side.Unuse(); |
| 2103 dest->Split(cc); |
| 2104 } |
| 2105 } else if (cc == equal && |
| 2106 (left_side_constant_null || right_side_constant_null)) { |
| 2107 // To make null checks efficient, we check if either the left side or |
| 2108 // the right side is the constant 'null'. |
| 2109 // If so, we optimize the code by inlining a null check instead of |
| 2110 // calling the (very) general runtime routine for checking equality. |
| 2111 Result operand = left_side_constant_null ? right_side : left_side; |
| 2112 right_side.Unuse(); |
| 2113 left_side.Unuse(); |
| 2114 operand.ToRegister(); |
| 2115 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex); |
| 2116 if (strict) { |
| 2117 operand.Unuse(); |
| 2118 dest->Split(equal); |
724 } else { | 2119 } else { |
725 __ movq(rdx, receiver_); | 2120 // The 'null' value is only equal to 'undefined' if using non-strict |
726 __ movq(rcx, key_); | 2121 // comparisons. |
727 } | 2122 dest->true_target()->Branch(equal); |
728 } else if (key_.is(rcx)) { | 2123 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex); |
729 __ movq(rdx, receiver_); | 2124 dest->true_target()->Branch(equal); |
| 2125 Condition is_smi = masm_->CheckSmi(operand.reg()); |
| 2126 dest->false_target()->Branch(is_smi); |
| 2127 |
| 2128 // It can be an undetectable object. |
| 2129 // Use a scratch register in preference to spilling operand.reg(). |
| 2130 Result temp = allocator()->Allocate(); |
| 2131 ASSERT(temp.is_valid()); |
| 2132 __ movq(temp.reg(), |
| 2133 FieldOperand(operand.reg(), HeapObject::kMapOffset)); |
| 2134 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset), |
| 2135 Immediate(1 << Map::kIsUndetectable)); |
| 2136 temp.Unuse(); |
| 2137 operand.Unuse(); |
| 2138 dest->Split(not_zero); |
| 2139 } |
| 2140 } else if (left_side_constant_1_char_string || |
| 2141 right_side_constant_1_char_string) { |
| 2142 if (left_side_constant_1_char_string && right_side_constant_1_char_string) { |
| 2143 // Trivial case, comparing two constants. |
| 2144 int left_value = String::cast(*left_side.handle())->Get(0); |
| 2145 int right_value = String::cast(*right_side.handle())->Get(0); |
| 2146 switch (cc) { |
| 2147 case less: |
| 2148 dest->Goto(left_value < right_value); |
| 2149 break; |
| 2150 case equal: |
| 2151 dest->Goto(left_value == right_value); |
| 2152 break; |
| 2153 case greater_equal: |
| 2154 dest->Goto(left_value >= right_value); |
| 2155 break; |
| 2156 default: |
| 2157 UNREACHABLE(); |
| 2158 } |
| 2159 } else { |
| 2160 // Only one side is a constant 1 character string. |
| 2161 // If left side is a constant 1-character string, reverse the operands. |
| 2162 // Since one side is a constant string, conversion order does not matter. |
| 2163 if (left_side_constant_1_char_string) { |
| 2164 Result temp = left_side; |
| 2165 left_side = right_side; |
| 2166 right_side = temp; |
| 2167 cc = ReverseCondition(cc); |
| 2168 // This may reintroduce greater or less_equal as the value of cc. |
| 2169 // CompareStub and the inline code both support all values of cc. |
| 2170 } |
| 2171 // Implement comparison against a constant string, inlining the case |
| 2172 // where both sides are strings. |
| 2173 left_side.ToRegister(); |
| 2174 |
| 2175 // Here we split control flow to the stub call and inlined cases |
| 2176 // before finally splitting it to the control destination. We use |
| 2177 // a jump target and branching to duplicate the virtual frame at |
| 2178 // the first split. We manually handle the off-frame references |
| 2179 // by reconstituting them on the non-fall-through path. |
| 2180 JumpTarget is_not_string, is_string; |
| 2181 Register left_reg = left_side.reg(); |
| 2182 Handle<Object> right_val = right_side.handle(); |
| 2183 ASSERT(StringShape(String::cast(*right_val)).IsSymbol()); |
| 2184 Condition is_smi = masm()->CheckSmi(left_reg); |
| 2185 is_not_string.Branch(is_smi, &left_side); |
| 2186 Result temp = allocator_->Allocate(); |
| 2187 ASSERT(temp.is_valid()); |
| 2188 __ movq(temp.reg(), |
| 2189 FieldOperand(left_reg, HeapObject::kMapOffset)); |
| 2190 __ movzxbl(temp.reg(), |
| 2191 FieldOperand(temp.reg(), Map::kInstanceTypeOffset)); |
| 2192 // If we are testing for equality then make use of the symbol shortcut. |
| 2193 // Check if the left hand side has the same type as the right hand |
| 2194 // side (which is always a symbol). |
| 2195 if (cc == equal) { |
| 2196 Label not_a_symbol; |
| 2197 ASSERT(kSymbolTag != 0); |
| 2198 // Ensure that no non-strings have the symbol bit set. |
| 2199 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); |
| 2200 __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit. |
| 2201 __ j(zero, ¬_a_symbol); |
| 2202 // They are symbols, so do identity compare. |
| 2203 __ Cmp(left_reg, right_side.handle()); |
| 2204 dest->true_target()->Branch(equal); |
| 2205 dest->false_target()->Branch(not_equal); |
| 2206 __ bind(¬_a_symbol); |
| 2207 } |
| 2208 // Call the compare stub if the left side is not a flat ascii string. |
| 2209 __ andb(temp.reg(), |
| 2210 Immediate(kIsNotStringMask | |
| 2211 kStringRepresentationMask | |
| 2212 kStringEncodingMask)); |
| 2213 __ cmpb(temp.reg(), |
| 2214 Immediate(kStringTag | kSeqStringTag | kAsciiStringTag)); |
| 2215 temp.Unuse(); |
| 2216 is_string.Branch(equal, &left_side); |
| 2217 |
 | 2218 // Set up and call the compare stub. |
| 2219 is_not_string.Bind(&left_side); |
| 2220 CompareStub stub(cc, strict, kCantBothBeNaN); |
| 2221 Result result = frame_->CallStub(&stub, &left_side, &right_side); |
| 2222 result.ToRegister(); |
| 2223 __ testq(result.reg(), result.reg()); |
| 2224 result.Unuse(); |
| 2225 dest->true_target()->Branch(cc); |
| 2226 dest->false_target()->Jump(); |
| 2227 |
| 2228 is_string.Bind(&left_side); |
| 2229 // left_side is a sequential ASCII string. |
| 2230 ASSERT(left_side.reg().is(left_reg)); |
| 2231 right_side = Result(right_val); |
| 2232 Result temp2 = allocator_->Allocate(); |
| 2233 ASSERT(temp2.is_valid()); |
| 2234 // Test string equality and comparison. |
| 2235 if (cc == equal) { |
| 2236 Label comparison_done; |
| 2237 __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), |
| 2238 Smi::FromInt(1)); |
| 2239 __ j(not_equal, &comparison_done); |
| 2240 uint8_t char_value = |
| 2241 static_cast<uint8_t>(String::cast(*right_val)->Get(0)); |
| 2242 __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize), |
| 2243 Immediate(char_value)); |
| 2244 __ bind(&comparison_done); |
| 2245 } else { |
| 2246 __ movq(temp2.reg(), |
| 2247 FieldOperand(left_side.reg(), String::kLengthOffset)); |
| 2248 __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1)); |
| 2249 Label comparison; |
| 2250 // If the length is 0 then the subtraction gave -1 which compares less |
| 2251 // than any character. |
| 2252 __ j(negative, &comparison); |
| 2253 // Otherwise load the first character. |
| 2254 __ movzxbl(temp2.reg(), |
| 2255 FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize)); |
| 2256 __ bind(&comparison); |
| 2257 // Compare the first character of the string with the |
| 2258 // constant 1-character string. |
| 2259 uint8_t char_value = |
| 2260 static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0)); |
| 2261 __ cmpb(temp2.reg(), Immediate(char_value)); |
| 2262 Label characters_were_different; |
| 2263 __ j(not_equal, &characters_were_different); |
| 2264 // If the first character is the same then the long string sorts after |
| 2265 // the short one. |
| 2266 __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), |
| 2267 Smi::FromInt(1)); |
| 2268 __ bind(&characters_were_different); |
| 2269 } |
| 2270 temp2.Unuse(); |
| 2271 left_side.Unuse(); |
| 2272 right_side.Unuse(); |
| 2273 dest->Split(cc); |
| 2274 } |
730 } else { | 2275 } else { |
731 __ movq(rcx, key_); | 2276 // Neither side is a constant Smi, constant 1-char string, or constant null. |
732 __ movq(rdx, receiver_); | 2277 // If either side is a non-smi constant, skip the smi check. |
733 } | 2278 bool known_non_smi = |
734 | 2279 (left_side.is_constant() && !left_side.handle()->IsSmi()) || |
735 // Call the IC stub. | 2280 (right_side.is_constant() && !right_side.handle()->IsSmi()) || |
736 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); | 2281 left_side.type_info().IsDouble() || |
737 __ Call(ic, RelocInfo::CODE_TARGET); | 2282 right_side.type_info().IsDouble(); |
738 // The delta from the start of the map-compare instructions (initial movq) | 2283 |
739 // to the test instruction. We use masm_-> directly here instead of the | 2284 NaNInformation nan_info = |
 740 // __ macro because the macro sometimes uses macro expansion to turn | 2295 // with smi's (not heap numbers) and the code for comparing smi's is inlined |
 741 // into something that can't return a value. This is encountered | 2296 // separately. The same reasoning applies to for-loop comparisons, which will |
742 // when doing generated code coverage tests. | 2287 kCantBothBeNaN; |
743 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | 2288 |
744 // Here we use masm_-> instead of the __ macro because this is the | 2289 // Inline number comparison handling any combination of smi's and heap |
745 // instruction that gets patched and coverage code gets in the way. | 2290 // numbers if: |
746 masm_->testl(rax, Immediate(-delta_to_patch_site)); | 2291 // code is in a loop |
747 // Restore value (returned from store IC). | 2292 // the compare operation is different from equal |
748 if (!value_.is(rax)) __ movq(value_, rax); | 2293 // compare is not a for-loop comparison |
| 2294 // The reason for excluding equal is that it will most likely be done |
| 2295 // with smi's (not heap numbers) and the code to comparing smi's is inlined |
| 2296 // separately. The same reason applies for for-loop comparison which will |
| 2297 // also most likely be smi comparisons. |
| 2298 bool is_loop_condition = (node->AsExpression() != NULL) |
| 2299 && node->AsExpression()->is_loop_condition(); |
| 2300 bool inline_number_compare = |
| 2301 loop_nesting() > 0 && cc != equal && !is_loop_condition; |
| 2302 |
| 2303 left_side.ToRegister(); |
| 2304 right_side.ToRegister(); |
| 2305 |
| 2306 if (known_non_smi) { |
| 2307 // Inlined equality check: |
| 2308 // If at least one of the objects is not NaN, then if the objects |
| 2309 // are identical, they are equal. |
| 2310 if (nan_info == kCantBothBeNaN && cc == equal) { |
| 2311 __ cmpq(left_side.reg(), right_side.reg()); |
| 2312 dest->true_target()->Branch(equal); |
| 2313 } |
| 2314 |
| 2315 // Inlined number comparison: |
| 2316 if (inline_number_compare) { |
| 2317 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); |
| 2318 } |
| 2319 |
| 2320 CompareStub stub(cc, strict, nan_info, !inline_number_compare); |
| 2321 Result answer = frame_->CallStub(&stub, &left_side, &right_side); |
| 2322 __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag. |
| 2323 answer.Unuse(); |
| 2324 dest->Split(cc); |
| 2325 } else { |
| 2326 // Here we split control flow to the stub call and inlined cases |
| 2327 // before finally splitting it to the control destination. We use |
| 2328 // a jump target and branching to duplicate the virtual frame at |
| 2329 // the first split. We manually handle the off-frame references |
| 2330 // by reconstituting them on the non-fall-through path. |
| 2331 JumpTarget is_smi; |
| 2332 Register left_reg = left_side.reg(); |
| 2333 Register right_reg = right_side.reg(); |
| 2334 |
| 2335 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg); |
| 2336 is_smi.Branch(both_smi); |
| 2337 |
| 2338 // Inline the equality check if both operands can't be a NaN. If both |
| 2339 // objects are the same they are equal. |
| 2340 if (nan_info == kCantBothBeNaN && cc == equal) { |
| 2341 __ cmpq(left_side.reg(), right_side.reg()); |
| 2342 dest->true_target()->Branch(equal); |
| 2343 } |
| 2344 |
| 2345 // Inlined number comparison: |
| 2346 if (inline_number_compare) { |
| 2347 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); |
| 2348 } |
| 2349 |
| 2350 CompareStub stub(cc, strict, nan_info, !inline_number_compare); |
| 2351 Result answer = frame_->CallStub(&stub, &left_side, &right_side); |
| 2352 __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags. |
| 2353 answer.Unuse(); |
| 2354 dest->true_target()->Branch(cc); |
| 2355 dest->false_target()->Jump(); |
| 2356 |
| 2357 is_smi.Bind(); |
| 2358 left_side = Result(left_reg); |
| 2359 right_side = Result(right_reg); |
| 2360 __ SmiCompare(left_side.reg(), right_side.reg()); |
| 2361 right_side.Unuse(); |
| 2362 left_side.Unuse(); |
| 2363 dest->Split(cc); |
| 2364 } |
| 2365 } |
| 2366 } |
| 2367 |
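Two details of Comparison() that are easy to miss: '>' and '<=' are handled by swapping the already-evaluated operands and reversing the condition (so the operands are still evaluated in ECMA-262 order), and when both sides are constant smis the branch is folded away at compile time. A minimal C++ illustration of the equivalences relied on (not V8 code):

    #include <cassert>

    int main() {
      int x = 7, y = 3;
      // Reversal used at the top of Comparison(): x > y  <=>  y < x, and
      // x <= y  <=>  y >= x, so only less/equal/greater_equal need support.
      assert((x > y) == (y < x));
      assert((x <= y) == (y >= x));
      // Constant folding ("trivial case, comparing two constants"): with both
      // values known, the control destination is chosen without emitting code.
      const int left = 5, right = 5;
      assert(left >= right);  // greater_equal folds to true here
      return 0;
    }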
| 2368 |
 | 2369 // Load a comparison operand into an XMM register. Jump to the not_numbers jump |
 | 2370 // target, passing the left and right results, if the operand is not a number. |
| 2371 static void LoadComparisonOperand(MacroAssembler* masm_, |
| 2372 Result* operand, |
| 2373 XMMRegister xmm_reg, |
| 2374 Result* left_side, |
| 2375 Result* right_side, |
| 2376 JumpTarget* not_numbers) { |
| 2377 Label done; |
| 2378 if (operand->type_info().IsDouble()) { |
| 2379 // Operand is known to be a heap number, just load it. |
| 2380 __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); |
| 2381 } else if (operand->type_info().IsSmi()) { |
| 2382 // Operand is known to be a smi. Convert it to double and keep the original |
| 2383 // smi. |
| 2384 __ SmiToInteger32(kScratchRegister, operand->reg()); |
| 2385 __ cvtlsi2sd(xmm_reg, kScratchRegister); |
| 2386 } else { |
| 2387 // Operand type not known, check for smi or heap number. |
| 2388 Label smi; |
| 2389 __ JumpIfSmi(operand->reg(), &smi); |
| 2390 if (!operand->type_info().IsNumber()) { |
| 2391 __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex); |
| 2392 __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset), |
| 2393 kScratchRegister); |
| 2394 not_numbers->Branch(not_equal, left_side, right_side, taken); |
| 2395 } |
| 2396 __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); |
| 2397 __ jmp(&done); |
| 2398 |
| 2399 __ bind(&smi); |
 | 2400 // Convert smi to double and keep the original smi. |
| 2401 __ SmiToInteger32(kScratchRegister, operand->reg()); |
| 2402 __ cvtlsi2sd(xmm_reg, kScratchRegister); |
| 2403 __ jmp(&done); |
| 2404 } |
| 2405 __ bind(&done); |
| 2406 } |
| 2407 |
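In C++ terms LoadComparisonOperand has three cases: a known smi is untagged and widened to a double (SmiToInteger32 followed by cvtlsi2sd), a known heap number has its double loaded directly (movsd), and anything else is type-checked and may bail out to not_numbers. A hedged stand-in, with invented Operand fields rather than V8's real Result and type-info machinery:

    // Illustrative stand-in only; "is_smi"/"is_heap_number" replace the real
    // smi tag and map checks performed by the generated code.
    struct Operand {
      bool is_smi;
      bool is_heap_number;
      int smi_value;          // already untagged
      double heap_number;     // HeapNumber::kValueOffset contents
    };

    static bool LoadAsDouble(const Operand& op, double* out) {
      if (op.is_smi) {
        *out = static_cast<double>(op.smi_value);  // cvtlsi2sd equivalent
        return true;
      }
      if (op.is_heap_number) {
        *out = op.heap_number;                     // movsd equivalent
        return true;
      }
      return false;  // neither: the real code branches to not_numbers
    }

    int main() {
      Operand smi_op = {true, false, 42, 0.0};
      double d = 0;
      return (LoadAsDouble(smi_op, &d) && d == 42.0) ? 0 : 1;
    }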
| 2408 |
| 2409 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, |
| 2410 Result* right_side, |
| 2411 Condition cc, |
| 2412 ControlDestination* dest) { |
| 2413 ASSERT(left_side->is_register()); |
| 2414 ASSERT(right_side->is_register()); |
| 2415 |
| 2416 JumpTarget not_numbers; |
| 2417 // Load left and right operand into registers xmm0 and xmm1 and compare. |
| 2418 LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side, |
| 2419 ¬_numbers); |
| 2420 LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side, |
| 2421 ¬_numbers); |
| 2422 __ ucomisd(xmm0, xmm1); |
| 2423 // Bail out if a NaN is involved. |
| 2424 not_numbers.Branch(parity_even, left_side, right_side); |
| 2425 |
| 2426 // Split to destination targets based on comparison. |
| 2427 left_side->Unuse(); |
| 2428 right_side->Unuse(); |
| 2429 dest->true_target()->Branch(DoubleCondition(cc)); |
| 2430 dest->false_target()->Jump(); |
| 2431 |
| 2432 not_numbers.Bind(left_side, right_side); |
| 2433 } |
| 2434 |
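The parity_even branch above is the NaN escape hatch: ucomisd raises the parity flag when the comparison is unordered, i.e. when either operand is NaN, and in that case the code falls back to the stub instead of trusting the flags. The same control flow in plain C++ (a sketch, not V8 code):

    #include <cmath>

    enum Outcome { kTakeTrueTarget, kTakeFalseTarget, kFallBackToStub };

    // Mirrors the split above for the 'less' condition: bail out on NaN
    // (parity_even), otherwise branch on the double compare.
    static Outcome CompareOrBail(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) return kFallBackToStub;
      return left < right ? kTakeTrueTarget : kTakeFalseTarget;
    }

    int main() {
      return (CompareOrBail(1.0, 2.0) == kTakeTrueTarget &&
              CompareOrBail(std::nan(""), 2.0) == kFallBackToStub) ? 0 : 1;
    }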
| 2435 |
| 2436 // Call the function just below TOS on the stack with the given |
| 2437 // arguments. The receiver is the TOS. |
| 2438 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, |
| 2439 CallFunctionFlags flags, |
| 2440 int position) { |
| 2441 // Push the arguments ("left-to-right") on the stack. |
| 2442 int arg_count = args->length(); |
| 2443 for (int i = 0; i < arg_count; i++) { |
| 2444 Load(args->at(i)); |
| 2445 frame_->SpillTop(); |
| 2446 } |
| 2447 |
| 2448 // Record the position for debugging purposes. |
| 2449 CodeForSourcePosition(position); |
| 2450 |
| 2451 // Use the shared code stub to call the function. |
| 2452 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; |
| 2453 CallFunctionStub call_function(arg_count, in_loop, flags); |
| 2454 Result answer = frame_->CallStub(&call_function, arg_count + 1); |
| 2455 // Restore context and replace function on the stack with the |
| 2456 // result of the stub invocation. |
| 2457 frame_->RestoreContextRegister(); |
| 2458 frame_->SetElementAt(0, &answer); |
749 } | 2459 } |
750 | 2460 |
751 | 2461 |
752 void CodeGenerator::CallApplyLazy(Expression* applicand, | 2462 void CodeGenerator::CallApplyLazy(Expression* applicand, |
753 Expression* receiver, | 2463 Expression* receiver, |
754 VariableProxy* arguments, | 2464 VariableProxy* arguments, |
755 int position) { | 2465 int position) { |
756 // An optimized implementation of expressions of the form | 2466 // An optimized implementation of expressions of the form |
757 // x.apply(y, arguments). | 2467 // x.apply(y, arguments). |
758 // If the arguments object of the scope has not been allocated, | 2468 // If the arguments object of the scope has not been allocated, |
(...skipping 244 matching lines...)
1003 CodeForStatementPosition(node); | 2713 CodeForStatementPosition(node); |
1004 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | 2714 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); |
1005 VisitStatements(node->statements()); | 2715 VisitStatements(node->statements()); |
1006 if (node->break_target()->is_linked()) { | 2716 if (node->break_target()->is_linked()) { |
1007 node->break_target()->Bind(); | 2717 node->break_target()->Bind(); |
1008 } | 2718 } |
1009 node->break_target()->Unuse(); | 2719 node->break_target()->Unuse(); |
1010 } | 2720 } |
1011 | 2721 |
1012 | 2722 |
| 2723 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { |
| 2724 // Call the runtime to declare the globals. The inevitable call |
| 2725 // will sync frame elements to memory anyway, so we do it eagerly to |
| 2726 // allow us to push the arguments directly into place. |
| 2727 frame_->SyncRange(0, frame_->element_count() - 1); |
| 2728 |
| 2729 __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT); |
| 2730 frame_->EmitPush(rsi); // The context is the first argument. |
| 2731 frame_->EmitPush(kScratchRegister); |
| 2732 frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0)); |
| 2733 Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3); |
| 2734 // Return value is ignored. |
| 2735 } |
| 2736 |
| 2737 |
1013 void CodeGenerator::VisitDeclaration(Declaration* node) { | 2738 void CodeGenerator::VisitDeclaration(Declaration* node) { |
1014 Comment cmnt(masm_, "[ Declaration"); | 2739 Comment cmnt(masm_, "[ Declaration"); |
1015 Variable* var = node->proxy()->var(); | 2740 Variable* var = node->proxy()->var(); |
1016 ASSERT(var != NULL); // must have been resolved | 2741 ASSERT(var != NULL); // must have been resolved |
1017 Slot* slot = var->slot(); | 2742 Slot* slot = var->slot(); |
1018 | 2743 |
1019 // If it was not possible to allocate the variable at compile time, | 2744 // If it was not possible to allocate the variable at compile time, |
1020 // we need to "declare" it at runtime to make sure it actually | 2745 // we need to "declare" it at runtime to make sure it actually |
1021 // exists in the local context. | 2746 // exists in the local context. |
1022 if (slot != NULL && slot->type() == Slot::LOOKUP) { | 2747 if (slot != NULL && slot->type() == Slot::LOOKUP) { |
(...skipping 200 matching lines...)
1223 // code by jumping to the return site. | 2948 // code by jumping to the return site. |
1224 function_return_.Jump(&return_value); | 2949 function_return_.Jump(&return_value); |
1225 } else { | 2950 } else { |
1226 function_return_.Bind(&return_value); | 2951 function_return_.Bind(&return_value); |
1227 GenerateReturnSequence(&return_value); | 2952 GenerateReturnSequence(&return_value); |
1228 } | 2953 } |
1229 } | 2954 } |
1230 } | 2955 } |
1231 | 2956 |
1232 | 2957 |
| 2958 void CodeGenerator::GenerateReturnSequence(Result* return_value) { |
| 2959 // The return value is a live (but not currently reference counted) |
| 2960 // reference to rax. This is safe because the current frame does not |
| 2961 // contain a reference to rax (it is prepared for the return by spilling |
| 2962 // all registers). |
| 2963 if (FLAG_trace) { |
| 2964 frame_->Push(return_value); |
| 2965 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1); |
| 2966 } |
| 2967 return_value->ToRegister(rax); |
| 2968 |
| 2969 // Add a label for checking the size of the code used for returning. |
| 2970 #ifdef DEBUG |
| 2971 Label check_exit_codesize; |
| 2972 masm_->bind(&check_exit_codesize); |
| 2973 #endif |
| 2974 |
| 2975 // Leave the frame and return popping the arguments and the |
| 2976 // receiver. |
| 2977 frame_->Exit(); |
| 2978 masm_->ret((scope()->num_parameters() + 1) * kPointerSize); |
| 2979 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 2980 // Add padding that will be overwritten by a debugger breakpoint. |
| 2981 // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k" |
| 2982 // with length 7 (3 + 1 + 3). |
| 2983 const int kPadding = Assembler::kJSReturnSequenceLength - 7; |
| 2984 for (int i = 0; i < kPadding; ++i) { |
| 2985 masm_->int3(); |
| 2986 } |
| 2987 // Check that the size of the code used for returning matches what is |
| 2988 // expected by the debugger. |
| 2989 ASSERT_EQ(Assembler::kJSReturnSequenceLength, |
| 2990 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); |
| 2991 #endif |
| 2992 DeleteFrame(); |
| 2993 } |
| 2994 |
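The padding loop only has to make the exit sequence a fixed, known size so the debugger can later overwrite it in place with a breakpoint sequence. The arithmetic, spelled out (kJSReturnSequenceLength is defined elsewhere; the value used below is a placeholder, not the real constant):

    #include <cassert>

    int main() {
      // frame_->Exit() emits "movq rsp, rbp; pop rbp; ret k": 3 + 1 + 3 bytes.
      const int kExitSequenceSize = 3 + 1 + 3;
      const int kJSReturnSequenceLength = 13;  // placeholder value
      const int kPadding = kJSReturnSequenceLength - kExitSequenceSize;
      // One int3 byte per padding slot keeps the total at the expected length,
      // which the ASSERT_EQ above verifies in debug builds.
      assert(kExitSequenceSize + kPadding == kJSReturnSequenceLength);
      return 0;
    }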
| 2995 |
1233 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) { | 2996 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) { |
1234 ASSERT(!in_spilled_code()); | 2997 ASSERT(!in_spilled_code()); |
1235 Comment cmnt(masm_, "[ WithEnterStatement"); | 2998 Comment cmnt(masm_, "[ WithEnterStatement"); |
1236 CodeForStatementPosition(node); | 2999 CodeForStatementPosition(node); |
1237 Load(node->expression()); | 3000 Load(node->expression()); |
1238 Result context; | 3001 Result context; |
1239 if (node->is_catch_block()) { | 3002 if (node->is_catch_block()) { |
1240 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1); | 3003 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1); |
1241 } else { | 3004 } else { |
1242 context = frame_->CallRuntime(Runtime::kPushContext, 1); | 3005 context = frame_->CallRuntime(Runtime::kPushContext, 1); |
(...skipping 1281 matching lines...)
2524 exit.Jump(); | 4287 exit.Jump(); |
2525 else_.Bind(); | 4288 else_.Bind(); |
2526 Load(node->else_expression()); | 4289 Load(node->else_expression()); |
2527 } | 4290 } |
2528 } | 4291 } |
2529 | 4292 |
2530 exit.Bind(); | 4293 exit.Bind(); |
2531 } | 4294 } |
2532 | 4295 |
2533 | 4296 |
| 4297 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { |
| 4298 if (slot->type() == Slot::LOOKUP) { |
| 4299 ASSERT(slot->var()->is_dynamic()); |
| 4300 |
| 4301 JumpTarget slow; |
| 4302 JumpTarget done; |
| 4303 Result value; |
| 4304 |
| 4305 // Generate fast case for loading from slots that correspond to |
| 4306 // local/global variables or arguments unless they are shadowed by |
| 4307 // eval-introduced bindings. |
| 4308 EmitDynamicLoadFromSlotFastCase(slot, |
| 4309 typeof_state, |
| 4310 &value, |
| 4311 &slow, |
| 4312 &done); |
| 4313 |
| 4314 slow.Bind(); |
| 4315 // A runtime call is inevitable. We eagerly sync frame elements |
| 4316 // to memory so that we can push the arguments directly into place |
| 4317 // on top of the frame. |
| 4318 frame_->SyncRange(0, frame_->element_count() - 1); |
| 4319 frame_->EmitPush(rsi); |
| 4320 __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT); |
| 4321 frame_->EmitPush(kScratchRegister); |
| 4322 if (typeof_state == INSIDE_TYPEOF) { |
| 4323 value = |
| 4324 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); |
| 4325 } else { |
| 4326 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2); |
| 4327 } |
| 4328 |
| 4329 done.Bind(&value); |
| 4330 frame_->Push(&value); |
| 4331 |
| 4332 } else if (slot->var()->mode() == Variable::CONST) { |
| 4333 // Const slots may contain 'the hole' value (the constant hasn't been |
| 4334 // initialized yet) which needs to be converted into the 'undefined' |
| 4335 // value. |
| 4336 // |
| 4337 // We currently spill the virtual frame because constants use the |
| 4338 // potentially unsafe direct-frame access of SlotOperand. |
| 4339 VirtualFrame::SpilledScope spilled_scope; |
| 4340 Comment cmnt(masm_, "[ Load const"); |
| 4341 JumpTarget exit; |
| 4342 __ movq(rcx, SlotOperand(slot, rcx)); |
| 4343 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); |
| 4344 exit.Branch(not_equal); |
| 4345 __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex); |
| 4346 exit.Bind(); |
| 4347 frame_->EmitPush(rcx); |
| 4348 |
| 4349 } else if (slot->type() == Slot::PARAMETER) { |
| 4350 frame_->PushParameterAt(slot->index()); |
| 4351 |
| 4352 } else if (slot->type() == Slot::LOCAL) { |
| 4353 frame_->PushLocalAt(slot->index()); |
| 4354 |
| 4355 } else { |
| 4356 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach |
| 4357 // here. |
| 4358 // |
| 4359 // The use of SlotOperand below is safe for an unspilled frame |
| 4360 // because it will always be a context slot. |
| 4361 ASSERT(slot->type() == Slot::CONTEXT); |
| 4362 Result temp = allocator_->Allocate(); |
| 4363 ASSERT(temp.is_valid()); |
| 4364 __ movq(temp.reg(), SlotOperand(slot, temp.reg())); |
| 4365 frame_->Push(&temp); |
| 4366 } |
| 4367 } |
| 4368 |
| 4369 |
| 4370 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, |
| 4371 TypeofState state) { |
| 4372 LoadFromSlot(slot, state); |
| 4373 |
| 4374 // Bail out quickly if we're not using lazy arguments allocation. |
| 4375 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return; |
| 4376 |
| 4377 // ... or if the slot isn't a non-parameter arguments slot. |
| 4378 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return; |
| 4379 |
| 4380 // Pop the loaded value from the stack. |
| 4381 Result value = frame_->Pop(); |
| 4382 |
 | 4383 // If the loaded value is a constant, we know whether the arguments |
| 4384 // object has been lazily loaded yet. |
| 4385 if (value.is_constant()) { |
| 4386 if (value.handle()->IsTheHole()) { |
| 4387 Result arguments = StoreArgumentsObject(false); |
| 4388 frame_->Push(&arguments); |
| 4389 } else { |
| 4390 frame_->Push(&value); |
| 4391 } |
| 4392 return; |
| 4393 } |
| 4394 |
| 4395 // The loaded value is in a register. If it is the sentinel that |
| 4396 // indicates that we haven't loaded the arguments object yet, we |
| 4397 // need to do it now. |
| 4398 JumpTarget exit; |
| 4399 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); |
| 4400 frame_->Push(&value); |
| 4401 exit.Branch(not_equal); |
| 4402 Result arguments = StoreArgumentsObject(false); |
| 4403 frame_->SetElementAt(0, &arguments); |
| 4404 exit.Bind(); |
| 4405 } |
| 4406 |
| 4407 |
| 4408 Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( |
| 4409 Slot* slot, |
| 4410 TypeofState typeof_state, |
| 4411 JumpTarget* slow) { |
| 4412 // Check that no extension objects have been created by calls to |
| 4413 // eval from the current scope to the global scope. |
| 4414 Register context = rsi; |
| 4415 Result tmp = allocator_->Allocate(); |
| 4416 ASSERT(tmp.is_valid()); // All non-reserved registers were available. |
| 4417 |
| 4418 Scope* s = scope(); |
| 4419 while (s != NULL) { |
| 4420 if (s->num_heap_slots() > 0) { |
| 4421 if (s->calls_eval()) { |
| 4422 // Check that extension is NULL. |
| 4423 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), |
| 4424 Immediate(0)); |
| 4425 slow->Branch(not_equal, not_taken); |
| 4426 } |
| 4427 // Load next context in chain. |
| 4428 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); |
| 4429 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); |
| 4430 context = tmp.reg(); |
| 4431 } |
| 4432 // If no outer scope calls eval, we do not need to check more |
| 4433 // context extensions. If we have reached an eval scope, we check |
| 4434 // all extensions from this point. |
| 4435 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; |
| 4436 s = s->outer_scope(); |
| 4437 } |
| 4438 |
| 4439 if (s->is_eval_scope()) { |
| 4440 // Loop up the context chain. There is no frame effect so it is |
| 4441 // safe to use raw labels here. |
| 4442 Label next, fast; |
| 4443 if (!context.is(tmp.reg())) { |
| 4444 __ movq(tmp.reg(), context); |
| 4445 } |
| 4446 // Load map for comparison into register, outside loop. |
| 4447 __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex); |
| 4448 __ bind(&next); |
| 4449 // Terminate at global context. |
| 4450 __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset)); |
| 4451 __ j(equal, &fast); |
| 4452 // Check that extension is NULL. |
| 4453 __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0)); |
| 4454 slow->Branch(not_equal); |
| 4455 // Load next context in chain. |
| 4456 __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX)); |
| 4457 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); |
| 4458 __ jmp(&next); |
| 4459 __ bind(&fast); |
| 4460 } |
| 4461 tmp.Unuse(); |
| 4462 |
| 4463 // All extension objects were empty and it is safe to use a global |
| 4464 // load IC call. |
| 4465 LoadGlobal(); |
| 4466 frame_->Push(slot->var()->name()); |
| 4467 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) |
| 4468 ? RelocInfo::CODE_TARGET |
| 4469 : RelocInfo::CODE_TARGET_CONTEXT; |
| 4470 Result answer = frame_->CallLoadIC(mode); |
| 4471 // A test rax instruction following the call signals that the inobject |
| 4472 // property case was inlined. Ensure that there is not a test rax |
| 4473 // instruction here. |
| 4474 masm_->nop(); |
| 4475 return answer; |
| 4476 } |
| 4477 |
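The shape of the check above, reduced to its idea: walk outward through the contexts that might have been extended by eval, and take the slow path as soon as any of them carries an extension object; only if every extension slot is empty is the fast global load IC safe. A hedged stand-in (the struct below is illustrative, not V8's Context layout):

    #include <cstddef>

    struct Ctx {
      const void* extension;   // non-NULL if eval introduced bindings here
      const Ctx* previous;     // enclosing context (NULL past the global one)
    };

    // Returns true if the fast global load is safe, false for the slow path.
    static bool NoEvalIntroducedBindings(const Ctx* ctx) {
      for (const Ctx* c = ctx; c != NULL; c = c->previous) {
        if (c->extension != NULL) return false;
      }
      return true;
    }

    int main() {
      Ctx global = {NULL, NULL};
      Ctx local = {NULL, &global};
      return NoEvalIntroducedBindings(&local) ? 0 : 1;
    }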
| 4478 |
| 4479 void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot, |
| 4480 TypeofState typeof_state, |
| 4481 Result* result, |
| 4482 JumpTarget* slow, |
| 4483 JumpTarget* done) { |
| 4484 // Generate fast-case code for variables that might be shadowed by |
| 4485 // eval-introduced variables. Eval is used a lot without |
| 4486 // introducing variables. In those cases, we do not want to |
| 4487 // perform a runtime call for all variables in the scope |
| 4488 // containing the eval. |
| 4489 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { |
| 4490 *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow); |
| 4491 done->Jump(result); |
| 4492 |
| 4493 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { |
| 4494 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); |
| 4495 Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); |
| 4496 if (potential_slot != NULL) { |
| 4497 // Generate fast case for locals that rewrite to slots. |
| 4498 // Allocate a fresh register to use as a temp in |
| 4499 // ContextSlotOperandCheckExtensions and to hold the result |
| 4500 // value. |
| 4501 *result = allocator_->Allocate(); |
| 4502 ASSERT(result->is_valid()); |
| 4503 __ movq(result->reg(), |
| 4504 ContextSlotOperandCheckExtensions(potential_slot, |
| 4505 *result, |
| 4506 slow)); |
| 4507 if (potential_slot->var()->mode() == Variable::CONST) { |
| 4508 __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex); |
| 4509 done->Branch(not_equal, result); |
| 4510 __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex); |
| 4511 } |
| 4512 done->Jump(result); |
| 4513 } else if (rewrite != NULL) { |
| 4514 // Generate fast case for argument loads. |
| 4515 Property* property = rewrite->AsProperty(); |
| 4516 if (property != NULL) { |
| 4517 VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); |
| 4518 Literal* key_literal = property->key()->AsLiteral(); |
| 4519 if (obj_proxy != NULL && |
| 4520 key_literal != NULL && |
| 4521 obj_proxy->IsArguments() && |
| 4522 key_literal->handle()->IsSmi()) { |
| 4523 // Load arguments object if there are no eval-introduced |
| 4524 // variables. Then load the argument from the arguments |
| 4525 // object using keyed load. |
| 4526 Result arguments = allocator()->Allocate(); |
| 4527 ASSERT(arguments.is_valid()); |
| 4528 __ movq(arguments.reg(), |
| 4529 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), |
| 4530 arguments, |
| 4531 slow)); |
| 4532 frame_->Push(&arguments); |
| 4533 frame_->Push(key_literal->handle()); |
| 4534 *result = EmitKeyedLoad(); |
| 4535 done->Jump(result); |
| 4536 } |
| 4537 } |
| 4538 } |
| 4539 } |
| 4540 } |
| 4541 |
| 4542 |
| 4543 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { |
| 4544 if (slot->type() == Slot::LOOKUP) { |
| 4545 ASSERT(slot->var()->is_dynamic()); |
| 4546 |
| 4547 // For now, just do a runtime call. Since the call is inevitable, |
| 4548 // we eagerly sync the virtual frame so we can directly push the |
| 4549 // arguments into place. |
| 4550 frame_->SyncRange(0, frame_->element_count() - 1); |
| 4551 |
| 4552 frame_->EmitPush(rsi); |
| 4553 frame_->EmitPush(slot->var()->name()); |
| 4554 |
| 4555 Result value; |
| 4556 if (init_state == CONST_INIT) { |
| 4557 // Same as the case for a normal store, but ignores attribute |
| 4558 // (e.g. READ_ONLY) of context slot so that we can initialize const |
| 4559 // properties (introduced via eval("const foo = (some expr);")). Also, |
| 4560 // uses the current function context instead of the top context. |
| 4561 // |
| 4562 // Note that we must declare the foo upon entry of eval(), via a |
| 4563 // context slot declaration, but we cannot initialize it at the same |
| 4564 // time, because the const declaration may be at the end of the eval |
| 4565 // code (sigh...) and the const variable may have been used before |
| 4566 // (where its value is 'undefined'). Thus, we can only do the |
| 4567 // initialization when we actually encounter the expression and when |
| 4568 // the expression operands are defined and valid, and thus we need the |
| 4569 // split into 2 operations: declaration of the context slot followed |
| 4570 // by initialization. |
| 4571 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); |
| 4572 } else { |
| 4573 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3); |
| 4574 } |
| 4575 // Storing a variable must keep the (new) value on the expression |
| 4576 // stack. This is necessary for compiling chained assignment |
| 4577 // expressions. |
| 4578 frame_->Push(&value); |
| 4579 } else { |
| 4580 ASSERT(!slot->var()->is_dynamic()); |
| 4581 |
| 4582 JumpTarget exit; |
| 4583 if (init_state == CONST_INIT) { |
| 4584 ASSERT(slot->var()->mode() == Variable::CONST); |
| 4585 // Only the first const initialization must be executed (the slot |
| 4586 // still contains 'the hole' value). When the assignment is executed, |
| 4587 // the code is identical to a normal store (see below). |
| 4588 // |
| 4589 // We spill the frame in the code below because the direct-frame |
| 4590 // access of SlotOperand is potentially unsafe with an unspilled |
| 4591 // frame. |
| 4592 VirtualFrame::SpilledScope spilled_scope; |
| 4593 Comment cmnt(masm_, "[ Init const"); |
| 4594 __ movq(rcx, SlotOperand(slot, rcx)); |
| 4595 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); |
| 4596 exit.Branch(not_equal); |
| 4597 } |
| 4598 |
| 4599 // We must execute the store. Storing a variable must keep the (new) |
| 4600 // value on the stack. This is necessary for compiling assignment |
| 4601 // expressions. |
| 4602 // |
| 4603 // Note: We will reach here even with slot->var()->mode() == |
| 4604 // Variable::CONST because of const declarations which will initialize |
| 4605 // consts to 'the hole' value and by doing so, end up calling this code. |
| 4606 if (slot->type() == Slot::PARAMETER) { |
| 4607 frame_->StoreToParameterAt(slot->index()); |
| 4608 } else if (slot->type() == Slot::LOCAL) { |
| 4609 frame_->StoreToLocalAt(slot->index()); |
| 4610 } else { |
| 4611 // The other slot types (LOOKUP and GLOBAL) cannot reach here. |
| 4612 // |
| 4613 // The use of SlotOperand below is safe for an unspilled frame |
| 4614 // because the slot is a context slot. |
| 4615 ASSERT(slot->type() == Slot::CONTEXT); |
| 4616 frame_->Dup(); |
| 4617 Result value = frame_->Pop(); |
| 4618 value.ToRegister(); |
| 4619 Result start = allocator_->Allocate(); |
| 4620 ASSERT(start.is_valid()); |
| 4621 __ movq(SlotOperand(slot, start.reg()), value.reg()); |
| 4622 // RecordWrite may destroy the value registers. |
| 4623 // |
| 4624 // TODO(204): Avoid actually spilling when the value is not |
| 4625 // needed (probably the common case). |
| 4626 frame_->Spill(value.reg()); |
| 4627 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; |
| 4628 Result temp = allocator_->Allocate(); |
| 4629 ASSERT(temp.is_valid()); |
| 4630 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg()); |
| 4631 // The results start, value, and temp are unused by going out of |
| 4632 // scope. |
| 4633 } |
| 4634 |
| 4635 exit.Bind(); |
| 4636 } |
| 4637 } |
| 4638 |
| 4639 |
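The const-initialization path above relies on a sentinel check: the slot is only written while it still holds 'the hole', so only the first initialization has any effect. A minimal sketch of that guard, with hypothetical types rather than the V8 API:

struct Object;
static Object* const kTheHoleSentinel = nullptr;  // stand-in for the real hole value

void InitializeConstSlot(Object** slot, Object* value) {
  // Only the first initialization sees the hole; later ones fall through,
  // mirroring the exit.Branch(not_equal) in the code above.
  if (*slot != kTheHoleSentinel) return;
  *slot = value;
}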
2534 void CodeGenerator::VisitSlot(Slot* node) { | 4640 void CodeGenerator::VisitSlot(Slot* node) { |
2535 Comment cmnt(masm_, "[ Slot"); | 4641 Comment cmnt(masm_, "[ Slot"); |
2536 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF); | 4642 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF); |
2537 } | 4643 } |
2538 | 4644 |
2539 | 4645 |
2540 void CodeGenerator::VisitVariableProxy(VariableProxy* node) { | 4646 void CodeGenerator::VisitVariableProxy(VariableProxy* node) { |
2541 Comment cmnt(masm_, "[ VariableProxy"); | 4647 Comment cmnt(masm_, "[ VariableProxy"); |
2542 Variable* var = node->var(); | 4648 Variable* var = node->var(); |
2543 Expression* expr = var->rewrite(); | 4649 Expression* expr = var->rewrite(); |
2544 if (expr != NULL) { | 4650 if (expr != NULL) { |
2545 Visit(expr); | 4651 Visit(expr); |
2546 } else { | 4652 } else { |
2547 ASSERT(var->is_global()); | 4653 ASSERT(var->is_global()); |
2548 Reference ref(this, node); | 4654 Reference ref(this, node); |
2549 ref.GetValue(); | 4655 ref.GetValue(); |
2550 } | 4656 } |
2551 } | 4657 } |
2552 | 4658 |
2553 | 4659 |
2554 void CodeGenerator::VisitLiteral(Literal* node) { | 4660 void CodeGenerator::VisitLiteral(Literal* node) { |
2555 Comment cmnt(masm_, "[ Literal"); | 4661 Comment cmnt(masm_, "[ Literal"); |
2556 frame_->Push(node->handle()); | 4662 frame_->Push(node->handle()); |
2557 } | 4663 } |
2558 | 4664 |
2559 | 4665 |
| 4666 void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) { |
| 4667 UNIMPLEMENTED(); |
| 4668 // TODO(X64): Implement security policy for loads of smis. |
| 4669 } |
| 4670 |
| 4671 |
| 4672 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { |
| 4673 return false; |
| 4674 } |
| 4675 |
| 4676 |
2560 // Materialize the regexp literal 'node' in the literals array | 4677 // Materialize the regexp literal 'node' in the literals array |
2561 // 'literals' of the function. Leave the regexp boilerplate in | 4678 // 'literals' of the function. Leave the regexp boilerplate in |
2562 // 'boilerplate'. | 4679 // 'boilerplate'. |
2563 class DeferredRegExpLiteral: public DeferredCode { | 4680 class DeferredRegExpLiteral: public DeferredCode { |
2564 public: | 4681 public: |
2565 DeferredRegExpLiteral(Register boilerplate, | 4682 DeferredRegExpLiteral(Register boilerplate, |
2566 Register literals, | 4683 Register literals, |
2567 RegExpLiteral* node) | 4684 RegExpLiteral* node) |
2568 : boilerplate_(boilerplate), literals_(literals), node_(node) { | 4685 : boilerplate_(boilerplate), literals_(literals), node_(node) { |
2569 set_comment("[ DeferredRegExpLiteral"); | 4686 set_comment("[ DeferredRegExpLiteral"); |
(...skipping 668 matching lines...)
3238 | 5355 |
3239 // Call the construct call builtin that handles allocation and | 5356 // Call the construct call builtin that handles allocation and |
3240 // constructor invocation. | 5357 // constructor invocation. |
3241 CodeForSourcePosition(node->position()); | 5358 CodeForSourcePosition(node->position()); |
3242 Result result = frame_->CallConstructor(arg_count); | 5359 Result result = frame_->CallConstructor(arg_count); |
3243 // Replace the function on the stack with the result. | 5360 // Replace the function on the stack with the result. |
3244 frame_->SetElementAt(0, &result); | 5361 frame_->SetElementAt(0, &result); |
3245 } | 5362 } |
3246 | 5363 |
3247 | 5364 |
3248 void CodeGenerator::VisitCallRuntime(CallRuntime* node) { | 5365 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { |
3249 if (CheckForInlineRuntimeCall(node)) { | |
3250 return; | |
3251 } | |
3252 | |
3253 ZoneList<Expression*>* args = node->arguments(); | |
3254 Comment cmnt(masm_, "[ CallRuntime"); | |
3255 Runtime::Function* function = node->function(); | |
3256 | |
3257 if (function == NULL) { | |
3258 // Push the builtins object found in the current global object. | |
3259 Result temp = allocator()->Allocate(); | |
3260 ASSERT(temp.is_valid()); | |
3261 __ movq(temp.reg(), GlobalObject()); | |
3262 __ movq(temp.reg(), | |
3263 FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset)); | |
3264 frame_->Push(&temp); | |
3265 } | |
3266 | |
3267 // Push the arguments ("left-to-right"). | |
3268 int arg_count = args->length(); | |
3269 for (int i = 0; i < arg_count; i++) { | |
3270 Load(args->at(i)); | |
3271 } | |
3272 | |
3273 if (function == NULL) { | |
3274 // Call the JS runtime function. | |
3275 frame_->Push(node->name()); | |
3276 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET, | |
3277 arg_count, | |
3278 loop_nesting_); | |
3279 frame_->RestoreContextRegister(); | |
3280 frame_->Push(&answer); | |
3281 } else { | |
3282 // Call the C runtime function. | |
3283 Result answer = frame_->CallRuntime(function, arg_count); | |
3284 frame_->Push(&answer); | |
3285 } | |
3286 } | |
3287 | |
3288 | |
3289 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { | |
3290 Comment cmnt(masm_, "[ UnaryOperation"); | |
3291 | |
3292 Token::Value op = node->op(); | |
3293 | |
3294 if (op == Token::NOT) { | |
3295 // Swap the true and false targets but keep the same actual label | |
3296 // as the fall through. | |
3297 destination()->Invert(); | |
3298 LoadCondition(node->expression(), destination(), true); | |
3299 // Swap the labels back. | |
3300 destination()->Invert(); | |
3301 | |
3302 } else if (op == Token::DELETE) { | |
3303 Property* property = node->expression()->AsProperty(); | |
3304 if (property != NULL) { | |
3305 Load(property->obj()); | |
3306 Load(property->key()); | |
3307 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2); | |
3308 frame_->Push(&answer); | |
3309 return; | |
3310 } | |
3311 | |
3312 Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); | |
3313 if (variable != NULL) { | |
3314 Slot* slot = variable->slot(); | |
3315 if (variable->is_global()) { | |
3316 LoadGlobal(); | |
3317 frame_->Push(variable->name()); | |
3318 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, | |
3319 CALL_FUNCTION, 2); | |
3320 frame_->Push(&answer); | |
3321 return; | |
3322 | |
3323 } else if (slot != NULL && slot->type() == Slot::LOOKUP) { | |
3324 // Call the runtime to look up the context holding the named | |
3325 // variable. Sync the virtual frame eagerly so we can push the | |
3326 // arguments directly into place. | |
3327 frame_->SyncRange(0, frame_->element_count() - 1); | |
3328 frame_->EmitPush(rsi); | |
3329 frame_->EmitPush(variable->name()); | |
3330 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2); | |
3331 ASSERT(context.is_register()); | |
3332 frame_->EmitPush(context.reg()); | |
3333 context.Unuse(); | |
3334 frame_->EmitPush(variable->name()); | |
3335 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, | |
3336 CALL_FUNCTION, 2); | |
3337 frame_->Push(&answer); | |
3338 return; | |
3339 } | |
3340 | |
3341 // Default: Result of deleting non-global, not dynamically | |
3342 // introduced variables is false. | |
3343 frame_->Push(Factory::false_value()); | |
3344 | |
3345 } else { | |
3346 // Default: Result of deleting expressions is true. | |
3347 Load(node->expression()); // may have side-effects | |
3348 frame_->SetElementAt(0, Factory::true_value()); | |
3349 } | |
3350 | |
3351 } else if (op == Token::TYPEOF) { | |
3352 // Special case for loading the typeof expression; see comment on | |
3353 // LoadTypeofExpression(). | |
3354 LoadTypeofExpression(node->expression()); | |
3355 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1); | |
3356 frame_->Push(&answer); | |
3357 | |
3358 } else if (op == Token::VOID) { | |
3359 Expression* expression = node->expression(); | |
3360 if (expression && expression->AsLiteral() && ( | |
3361 expression->AsLiteral()->IsTrue() || | |
3362 expression->AsLiteral()->IsFalse() || | |
3363 expression->AsLiteral()->handle()->IsNumber() || | |
3364 expression->AsLiteral()->handle()->IsString() || | |
3365 expression->AsLiteral()->handle()->IsJSRegExp() || | |
3366 expression->AsLiteral()->IsNull())) { | |
3367 // Omit evaluating the value of the primitive literal. | |
3368 // It will be discarded anyway, and can have no side effect. | |
3369 frame_->Push(Factory::undefined_value()); | |
3370 } else { | |
3371 Load(node->expression()); | |
3372 frame_->SetElementAt(0, Factory::undefined_value()); | |
3373 } | |
3374 | |
3375 } else { | |
3376 bool can_overwrite = | |
3377 (node->expression()->AsBinaryOperation() != NULL && | |
3378 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); | |
3379 UnaryOverwriteMode overwrite = | |
3380 can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; | |
3381 bool no_negative_zero = node->expression()->no_negative_zero(); | |
3382 Load(node->expression()); | |
3383 switch (op) { | |
3384 case Token::NOT: | |
3385 case Token::DELETE: | |
3386 case Token::TYPEOF: | |
3387 UNREACHABLE(); // handled above | |
3388 break; | |
3389 | |
3390 case Token::SUB: { | |
3391 GenericUnaryOpStub stub( | |
3392 Token::SUB, | |
3393 overwrite, | |
3394 no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); | |
3395 Result operand = frame_->Pop(); | |
3396 Result answer = frame_->CallStub(&stub, &operand); | |
3397 answer.set_type_info(TypeInfo::Number()); | |
3398 frame_->Push(&answer); | |
3399 break; | |
3400 } | |
3401 | |
3402 case Token::BIT_NOT: { | |
3403 // Smi check. | |
3404 JumpTarget smi_label; | |
3405 JumpTarget continue_label; | |
3406 Result operand = frame_->Pop(); | |
3407 operand.ToRegister(); | |
3408 | |
3409 Condition is_smi = masm_->CheckSmi(operand.reg()); | |
3410 smi_label.Branch(is_smi, &operand); | |
3411 | |
3412 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); | |
3413 Result answer = frame_->CallStub(&stub, &operand); | |
3414 continue_label.Jump(&answer); | |
3415 | |
3416 smi_label.Bind(&answer); | |
3417 answer.ToRegister(); | |
3418 frame_->Spill(answer.reg()); | |
3419 __ SmiNot(answer.reg(), answer.reg()); | |
3420 continue_label.Bind(&answer); | |
3421 answer.set_type_info(TypeInfo::Smi()); | |
3422 frame_->Push(&answer); | |
3423 break; | |
3424 } | |
3425 | |
3426 case Token::ADD: { | |
3427 // Smi check. | |
3428 JumpTarget continue_label; | |
3429 Result operand = frame_->Pop(); | |
3430 TypeInfo operand_info = operand.type_info(); | |
3431 operand.ToRegister(); | |
3432 Condition is_smi = masm_->CheckSmi(operand.reg()); | |
3433 continue_label.Branch(is_smi, &operand); | |
3434 frame_->Push(&operand); | |
3435 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER, | |
3436 CALL_FUNCTION, 1); | |
3437 | |
3438 continue_label.Bind(&answer); | |
3439 if (operand_info.IsSmi()) { | |
3440 answer.set_type_info(TypeInfo::Smi()); | |
3441 } else if (operand_info.IsInteger32()) { | |
3442 answer.set_type_info(TypeInfo::Integer32()); | |
3443 } else { | |
3444 answer.set_type_info(TypeInfo::Number()); | |
3445 } | |
3446 frame_->Push(&answer); | |
3447 break; | |
3448 } | |
3449 default: | |
3450 UNREACHABLE(); | |
3451 } | |
3452 } | |
3453 } | |
3454 | |
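The Token::BIT_NOT fast path above uses SmiNot to compute the result without untagging. A sketch of why that works, modelled with a hypothetical one-bit 0 tag (smi n stored as n << 1); the actual x64 smi representation differs, but the idea is the same:

#include <cstdint>

inline int32_t SmiNotSketch(int32_t tagged_smi) {
  // ~(n << 1) flips every bit; clearing the tag bit again leaves (~n) << 1,
  // which is exactly the tagged bitwise-not of the original value.
  return ~tagged_smi & ~1;
}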
3455 | |
3456 // The value in dst was optimistically incremented or decremented. | |
3457 // The result overflowed or was not smi tagged. Call into the runtime | |
3458 // to convert the argument to a number, and call the specialized add | |
3459 // or subtract stub. The result is left in dst. | |
3460 class DeferredPrefixCountOperation: public DeferredCode { | |
3461 public: | |
3462 DeferredPrefixCountOperation(Register dst, | |
3463 bool is_increment, | |
3464 TypeInfo input_type) | |
3465 : dst_(dst), is_increment_(is_increment), input_type_(input_type) { | |
3466 set_comment("[ DeferredCountOperation"); | |
3467 } | |
3468 | |
3469 virtual void Generate(); | |
3470 | |
3471 private: | |
3472 Register dst_; | |
3473 bool is_increment_; | |
3474 TypeInfo input_type_; | |
3475 }; | |
3476 | |
3477 | |
3478 void DeferredPrefixCountOperation::Generate() { | |
3479 Register left; | |
3480 if (input_type_.IsNumber()) { | |
3481 left = dst_; | |
3482 } else { | |
3483 __ push(dst_); | |
3484 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); | |
3485 left = rax; | |
3486 } | |
3487 | |
3488 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, | |
3489 NO_OVERWRITE, | |
3490 NO_GENERIC_BINARY_FLAGS, | |
3491 TypeInfo::Number()); | |
3492 stub.GenerateCall(masm_, left, Smi::FromInt(1)); | |
3493 | |
3494 if (!dst_.is(rax)) __ movq(dst_, rax); | |
3495 } | |
3496 | |
3497 | |
3498 // The value in dst was optimistically incremented or decremented. | |
3499 // The result overflowed or was not smi tagged. Call into the runtime | |
3500 // to convert the argument to a number. Update the original value in | |
3501 // old. Call the specialized add or subtract stub. The result is | |
3502 // left in dst. | |
3503 class DeferredPostfixCountOperation: public DeferredCode { | |
3504 public: | |
3505 DeferredPostfixCountOperation(Register dst, | |
3506 Register old, | |
3507 bool is_increment, | |
3508 TypeInfo input_type) | |
3509 : dst_(dst), | |
3510 old_(old), | |
3511 is_increment_(is_increment), | |
3512 input_type_(input_type) { | |
3513 set_comment("[ DeferredCountOperation"); | |
3514 } | |
3515 | |
3516 virtual void Generate(); | |
3517 | |
3518 private: | |
3519 Register dst_; | |
3520 Register old_; | |
3521 bool is_increment_; | |
3522 TypeInfo input_type_; | |
3523 }; | |
3524 | |
3525 | |
3526 void DeferredPostfixCountOperation::Generate() { | |
3527 Register left; | |
3528 if (input_type_.IsNumber()) { | |
3529 __ push(dst_); // Save the input to use as the old value. | |
3530 left = dst_; | |
3531 } else { | |
3532 __ push(dst_); | |
3533 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); | |
3534 __ push(rax); // Save the result of ToNumber to use as the old value. | |
3535 left = rax; | |
3536 } | |
3537 | |
3538 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, | |
3539 NO_OVERWRITE, | |
3540 NO_GENERIC_BINARY_FLAGS, | |
3541 TypeInfo::Number()); | |
3542 stub.GenerateCall(masm_, left, Smi::FromInt(1)); | |
3543 | |
3544 if (!dst_.is(rax)) __ movq(dst_, rax); | |
3545 __ pop(old_); | |
3546 } | |
3547 | |
3548 | |
3549 void CodeGenerator::VisitCountOperation(CountOperation* node) { | |
3550 Comment cmnt(masm_, "[ CountOperation"); | |
3551 | |
3552 bool is_postfix = node->is_postfix(); | |
3553 bool is_increment = node->op() == Token::INC; | |
3554 | |
3555 Variable* var = node->expression()->AsVariableProxy()->AsVariable(); | |
3556 bool is_const = (var != NULL && var->mode() == Variable::CONST); | |
3557 | |
3558 // Postfix operations need a stack slot under the reference to hold | |
3559 // the old value while the new value is being stored. This is so that | |
3560 // in the case that storing the new value requires a call, the old | |
3561 // value will be in the frame to be spilled. | |
3562 if (is_postfix) frame_->Push(Smi::FromInt(0)); | |
3563 | |
3564 // A constant reference is not saved to, so the reference is not a | |
3565 // compound assignment reference. | |
3566 { Reference target(this, node->expression(), !is_const); | |
3567 if (target.is_illegal()) { | |
3568 // Spoof the virtual frame to have the expected height (one higher | |
3569 // than on entry). | |
3570 if (!is_postfix) frame_->Push(Smi::FromInt(0)); | |
3571 return; | |
3572 } | |
3573 target.TakeValue(); | |
3574 | |
3575 Result new_value = frame_->Pop(); | |
3576 new_value.ToRegister(); | |
3577 | |
3578 Result old_value; // Only allocated in the postfix case. | |
3579 if (is_postfix) { | |
3580 // Allocate a temporary to preserve the old value. | |
3581 old_value = allocator_->Allocate(); | |
3582 ASSERT(old_value.is_valid()); | |
3583 __ movq(old_value.reg(), new_value.reg()); | |
3584 | |
3585 // The return value for postfix operations is ToNumber(input). | |
3586 // Keep more precise type info if the input is some kind of | |
3587 // number already. If the input is not a number we have to wait | |
3588 // for the deferred code to convert it. | |
3589 if (new_value.type_info().IsNumber()) { | |
3590 old_value.set_type_info(new_value.type_info()); | |
3591 } | |
3592 } | |
3593 // Ensure the new value is writable. | |
3594 frame_->Spill(new_value.reg()); | |
3595 | |
3596 DeferredCode* deferred = NULL; | |
3597 if (is_postfix) { | |
3598 deferred = new DeferredPostfixCountOperation(new_value.reg(), | |
3599 old_value.reg(), | |
3600 is_increment, | |
3601 new_value.type_info()); | |
3602 } else { | |
3603 deferred = new DeferredPrefixCountOperation(new_value.reg(), | |
3604 is_increment, | |
3605 new_value.type_info()); | |
3606 } | |
3607 | |
3608 if (new_value.is_smi()) { | |
3609 if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); } | |
3610 } else { | |
3611 __ JumpIfNotSmi(new_value.reg(), deferred->entry_label()); | |
3612 } | |
3613 if (is_increment) { | |
3614 __ SmiAddConstant(new_value.reg(), | |
3615 new_value.reg(), | |
3616 Smi::FromInt(1), | |
3617 deferred->entry_label()); | |
3618 } else { | |
3619 __ SmiSubConstant(new_value.reg(), | |
3620 new_value.reg(), | |
3621 Smi::FromInt(1), | |
3622 deferred->entry_label()); | |
3623 } | |
3624 deferred->BindExit(); | |
3625 | |
3626 // Postfix count operations return their input converted to | |
3627 // number. The case when the input is already a number is covered | |
3628 // above in the allocation code for old_value. | |
3629 if (is_postfix && !new_value.type_info().IsNumber()) { | |
3630 old_value.set_type_info(TypeInfo::Number()); | |
3631 } | |
3632 | |
3633 new_value.set_type_info(TypeInfo::Number()); | |
3634 | |
3635 // Postfix: store the old value in the allocated slot under the | |
3636 // reference. | |
3637 if (is_postfix) frame_->SetElementAt(target.size(), &old_value); | |
3638 | |
3639 frame_->Push(&new_value); | |
3640 // Non-constant: update the reference. | |
3641 if (!is_const) target.SetValue(NOT_CONST_INIT); | |
3642 } | |
3643 | |
3644 // Postfix: drop the new value and use the old. | |
3645 if (is_postfix) frame_->Drop(); | |
3646 } | |
3647 | |
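The count operation above is optimistic: it assumes a smi input, adds or subtracts a tagged 1 directly, and only falls into the deferred code when the input is not a smi or the addition overflows. A rough standalone sketch of the increment side, with hypothetical names and a 32-bit word whose low bit is a 0 tag (so word overflow coincides with smi overflow):

#include <cstdint>
#include <optional>

inline bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }

// Returns the incremented tagged value on the fast path, or nullopt when the
// deferred code (ToNumber followed by the generic add stub) must run instead.
std::optional<int32_t> TryFastIncrement(int32_t tagged) {
  if (!IsSmi(tagged)) return std::nullopt;
  if (tagged > INT32_MAX - 2) return std::nullopt;  // (n + 1) no longer fits
  return tagged + 2;  // n << 1 plus 1 << 1 equals (n + 1) << 1
}

For postfix operations the original (ToNumber-converted) value is kept in the extra stack slot pushed on entry, which is why the old value rather than the incremented one ends up as the expression's result.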
3648 | |
3649 void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) { | |
3650 // According to ECMA-262 section 11.11, page 58, the binary logical | |
3651 // operators must yield the result of one of the two expressions | |
3652 // before any ToBoolean() conversions. This means that the value | |
3653 // produced by a && or || operator is not necessarily a boolean. | |
3654 | |
3655 // NOTE: If the left hand side produces a materialized value (not | |
3656 // control flow), we force the right hand side to do the same. This | |
3657 // is necessary because we assume that if we get control flow on the | |
3658 // last path out of an expression we got it on all paths. | |
3659 if (node->op() == Token::AND) { | |
3660 JumpTarget is_true; | |
3661 ControlDestination dest(&is_true, destination()->false_target(), true); | |
3662 LoadCondition(node->left(), &dest, false); | |
3663 | |
3664 if (dest.false_was_fall_through()) { | |
3665 // The current false target was used as the fall-through. If | |
3666 // there are no dangling jumps to is_true then the left | |
3667 // subexpression was unconditionally false. Otherwise we have | |
3668 // paths where we do have to evaluate the right subexpression. | |
3669 if (is_true.is_linked()) { | |
3670 // We need to compile the right subexpression. If the jump to | |
3671 // the current false target was a forward jump then we have a | |
3672 // valid frame, we have just bound the false target, and we | |
3673 // have to jump around the code for the right subexpression. | |
3674 if (has_valid_frame()) { | |
3675 destination()->false_target()->Unuse(); | |
3676 destination()->false_target()->Jump(); | |
3677 } | |
3678 is_true.Bind(); | |
3679 // The left subexpression compiled to control flow, so the | |
3680 // right one is free to do so as well. | |
3681 LoadCondition(node->right(), destination(), false); | |
3682 } else { | |
3683 // We have actually just jumped to or bound the current false | |
3684 // target but the current control destination is not marked as | |
3685 // used. | |
3686 destination()->Use(false); | |
3687 } | |
3688 | |
3689 } else if (dest.is_used()) { | |
3690 // The left subexpression compiled to control flow (and is_true | |
3691 // was just bound), so the right is free to do so as well. | |
3692 LoadCondition(node->right(), destination(), false); | |
3693 | |
3694 } else { | |
3695 // We have a materialized value on the frame, so we exit with | |
3696 // one on all paths. There are possibly also jumps to is_true | |
3697 // from nested subexpressions. | |
3698 JumpTarget pop_and_continue; | |
3699 JumpTarget exit; | |
3700 | |
3701 // Avoid popping the result if it converts to 'false' using the | |
3702 // standard ToBoolean() conversion as described in ECMA-262, | |
3703 // section 9.2, page 30. | |
3704 // | |
3705 // Duplicate the TOS value. The duplicate will be popped by | |
3706 // ToBoolean. | |
3707 frame_->Dup(); | |
3708 ControlDestination dest(&pop_and_continue, &exit, true); | |
3709 ToBoolean(&dest); | |
3710 | |
3711 // Pop the result of evaluating the first part. | |
3712 frame_->Drop(); | |
3713 | |
3714 // Compile right side expression. | |
3715 is_true.Bind(); | |
3716 Load(node->right()); | |
3717 | |
3718 // Exit (always with a materialized value). | |
3719 exit.Bind(); | |
3720 } | |
3721 | |
3722 } else { | |
3723 ASSERT(node->op() == Token::OR); | |
3724 JumpTarget is_false; | |
3725 ControlDestination dest(destination()->true_target(), &is_false, false); | |
3726 LoadCondition(node->left(), &dest, false); | |
3727 | |
3728 if (dest.true_was_fall_through()) { | |
3729 // The current true target was used as the fall-through. If | |
3730 // there are no dangling jumps to is_false then the left | |
3731 // subexpression was unconditionally true. Otherwise we have | |
3732 // paths where we do have to evaluate the right subexpression. | |
3733 if (is_false.is_linked()) { | |
3734 // We need to compile the right subexpression. If the jump to | |
3735 // the current true target was a forward jump then we have a | |
3736 // valid frame, we have just bound the true target, and we | |
3737 // have to jump around the code for the right subexpression. | |
3738 if (has_valid_frame()) { | |
3739 destination()->true_target()->Unuse(); | |
3740 destination()->true_target()->Jump(); | |
3741 } | |
3742 is_false.Bind(); | |
3743 // The left subexpression compiled to control flow, so the | |
3744 // right one is free to do so as well. | |
3745 LoadCondition(node->right(), destination(), false); | |
3746 } else { | |
3747 // We have just jumped to or bound the current true target but | |
3748 // the current control destination is not marked as used. | |
3749 destination()->Use(true); | |
3750 } | |
3751 | |
3752 } else if (dest.is_used()) { | |
3753 // The left subexpression compiled to control flow (and is_false | |
3754 // was just bound), so the right is free to do so as well. | |
3755 LoadCondition(node->right(), destination(), false); | |
3756 | |
3757 } else { | |
3758 // We have a materialized value on the frame, so we exit with | |
3759 // one on all paths. There are possibly also jumps to is_false | |
3760 // from nested subexpressions. | |
3761 JumpTarget pop_and_continue; | |
3762 JumpTarget exit; | |
3763 | |
3764 // Avoid popping the result if it converts to 'true' using the | |
3765 // standard ToBoolean() conversion as described in ECMA-262, | |
3766 // section 9.2, page 30. | |
3767 // | |
3768 // Duplicate the TOS value. The duplicate will be popped by | |
3769 // ToBoolean. | |
3770 frame_->Dup(); | |
3771 ControlDestination dest(&exit, &pop_and_continue, false); | |
3772 ToBoolean(&dest); | |
3773 | |
3774 // Pop the result of evaluating the first part. | |
3775 frame_->Drop(); | |
3776 | |
3777 // Compile right side expression. | |
3778 is_false.Bind(); | |
3779 Load(node->right()); | |
3780 | |
3781 // Exit (always with a materialized value). | |
3782 exit.Bind(); | |
3783 } | |
3784 } | |
3785 } | |
3786 | |
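As the ECMA-262 reference above notes, '&&' and '||' yield one of their operand values, not a boolean. The control-flow machinery obscures that rule, so here is a small sketch of the value-selection semantics with stand-in types; unlike the generated code, this version does not model short-circuit evaluation of the right operand:

struct Value { bool truthy; /* plus the actual payload in a real engine */ };
inline bool ToBoolean(const Value& v) { return v.truthy; }  // ECMA-262 section 9.2

Value LogicalAnd(const Value& left, const Value& right) {
  return ToBoolean(left) ? right : left;  // a false-ish left is itself the result
}

Value LogicalOr(const Value& left, const Value& right) {
  return ToBoolean(left) ? left : right;  // a true-ish left is itself the result
}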
3787 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { | |
3788 Comment cmnt(masm_, "[ BinaryOperation"); | |
3789 | |
3790 if (node->op() == Token::AND || node->op() == Token::OR) { | |
3791 GenerateLogicalBooleanOperation(node); | |
3792 } else { | |
3793 // NOTE: The code below assumes that the slow cases (calls to runtime) | |
3794 // never return a constant/immutable object. | |
3795 OverwriteMode overwrite_mode = NO_OVERWRITE; | |
3796 if (node->left()->AsBinaryOperation() != NULL && | |
3797 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { | |
3798 overwrite_mode = OVERWRITE_LEFT; | |
3799 } else if (node->right()->AsBinaryOperation() != NULL && | |
3800 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { | |
3801 overwrite_mode = OVERWRITE_RIGHT; | |
3802 } | |
3803 | |
3804 if (node->left()->IsTrivial()) { | |
3805 Load(node->right()); | |
3806 Result right = frame_->Pop(); | |
3807 frame_->Push(node->left()); | |
3808 frame_->Push(&right); | |
3809 } else { | |
3810 Load(node->left()); | |
3811 Load(node->right()); | |
3812 } | |
3813 GenericBinaryOperation(node, overwrite_mode); | |
3814 } | |
3815 } | |
3816 | |
3817 | |
3818 | |
3819 void CodeGenerator::VisitCompareOperation(CompareOperation* node) { | |
3820 Comment cmnt(masm_, "[ CompareOperation"); | |
3821 | |
3822 // Get the expressions from the node. | |
3823 Expression* left = node->left(); | |
3824 Expression* right = node->right(); | |
3825 Token::Value op = node->op(); | |
3826 // To make typeof testing for natives implemented in JavaScript really | |
3827 // efficient, we generate special code for expressions of the form: | |
3828 // 'typeof <expression> == <string>'. | |
3829 UnaryOperation* operation = left->AsUnaryOperation(); | |
3830 if ((op == Token::EQ || op == Token::EQ_STRICT) && | |
3831 (operation != NULL && operation->op() == Token::TYPEOF) && | |
3832 (right->AsLiteral() != NULL && | |
3833 right->AsLiteral()->handle()->IsString())) { | |
3834 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle())); | |
3835 | |
3836 // Load the operand and move it to a register. | |
3837 LoadTypeofExpression(operation->expression()); | |
3838 Result answer = frame_->Pop(); | |
3839 answer.ToRegister(); | |
3840 | |
3841 if (check->Equals(Heap::number_symbol())) { | |
3842 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
3843 destination()->true_target()->Branch(is_smi); | |
3844 frame_->Spill(answer.reg()); | |
3845 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); | |
3846 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex); | |
3847 answer.Unuse(); | |
3848 destination()->Split(equal); | |
3849 | |
3850 } else if (check->Equals(Heap::string_symbol())) { | |
3851 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
3852 destination()->false_target()->Branch(is_smi); | |
3853 | |
3854 // It can be an undetectable string object. | |
3855 __ movq(kScratchRegister, | |
3856 FieldOperand(answer.reg(), HeapObject::kMapOffset)); | |
3857 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), | |
3858 Immediate(1 << Map::kIsUndetectable)); | |
3859 destination()->false_target()->Branch(not_zero); | |
3860 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE); | |
3861 answer.Unuse(); | |
3862 destination()->Split(below); // Unsigned byte comparison needed. | |
3863 | |
3864 } else if (check->Equals(Heap::boolean_symbol())) { | |
3865 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex); | |
3866 destination()->true_target()->Branch(equal); | |
3867 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex); | |
3868 answer.Unuse(); | |
3869 destination()->Split(equal); | |
3870 | |
3871 } else if (check->Equals(Heap::undefined_symbol())) { | |
3872 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex); | |
3873 destination()->true_target()->Branch(equal); | |
3874 | |
3875 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
3876 destination()->false_target()->Branch(is_smi); | |
3877 | |
3878 // It can be an undetectable object. | |
3879 __ movq(kScratchRegister, | |
3880 FieldOperand(answer.reg(), HeapObject::kMapOffset)); | |
3881 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), | |
3882 Immediate(1 << Map::kIsUndetectable)); | |
3883 answer.Unuse(); | |
3884 destination()->Split(not_zero); | |
3885 | |
3886 } else if (check->Equals(Heap::function_symbol())) { | |
3887 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
3888 destination()->false_target()->Branch(is_smi); | |
3889 frame_->Spill(answer.reg()); | |
3890 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); | |
3891 destination()->true_target()->Branch(equal); | |
3892 // Regular expressions are callable so typeof == 'function'. | |
3893 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE); | |
3894 answer.Unuse(); | |
3895 destination()->Split(equal); | |
3896 | |
3897 } else if (check->Equals(Heap::object_symbol())) { | |
3898 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
3899 destination()->false_target()->Branch(is_smi); | |
3900 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex); | |
3901 destination()->true_target()->Branch(equal); | |
3902 | |
3903 // Regular expressions are typeof == 'function', not 'object'. | |
3904 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister); | |
3905 destination()->false_target()->Branch(equal); | |
3906 | |
3907 // It can be an undetectable object. | |
3908 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), | |
3909 Immediate(1 << Map::kIsUndetectable)); | |
3910 destination()->false_target()->Branch(not_zero); | |
3911 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE); | |
3912 destination()->false_target()->Branch(below); | |
3913 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); | |
3914 answer.Unuse(); | |
3915 destination()->Split(below_equal); | |
3916 } else { | |
3917 // Uncommon case: typeof testing against a string literal that is | |
3918 // never returned from the typeof operator. | |
3919 answer.Unuse(); | |
3920 destination()->Goto(false); | |
3921 } | |
3922 return; | |
3923 } | |
3924 | |
3925 Condition cc = no_condition; | |
3926 bool strict = false; | |
3927 switch (op) { | |
3928 case Token::EQ_STRICT: | |
3929 strict = true; | |
3930 // Fall through | |
3931 case Token::EQ: | |
3932 cc = equal; | |
3933 break; | |
3934 case Token::LT: | |
3935 cc = less; | |
3936 break; | |
3937 case Token::GT: | |
3938 cc = greater; | |
3939 break; | |
3940 case Token::LTE: | |
3941 cc = less_equal; | |
3942 break; | |
3943 case Token::GTE: | |
3944 cc = greater_equal; | |
3945 break; | |
3946 case Token::IN: { | |
3947 Load(left); | |
3948 Load(right); | |
3949 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2); | |
3950 frame_->Push(&answer); // push the result | |
3951 return; | |
3952 } | |
3953 case Token::INSTANCEOF: { | |
3954 Load(left); | |
3955 Load(right); | |
3956 InstanceofStub stub; | |
3957 Result answer = frame_->CallStub(&stub, 2); | |
3958 answer.ToRegister(); | |
3959 __ testq(answer.reg(), answer.reg()); | |
3960 answer.Unuse(); | |
3961 destination()->Split(zero); | |
3962 return; | |
3963 } | |
3964 default: | |
3965 UNREACHABLE(); | |
3966 } | |
3967 | |
3968 if (left->IsTrivial()) { | |
3969 Load(right); | |
3970 Result right_result = frame_->Pop(); | |
3971 frame_->Push(left); | |
3972 frame_->Push(&right_result); | |
3973 } else { | |
3974 Load(left); | |
3975 Load(right); | |
3976 } | |
3977 | |
3978 Comparison(node, cc, strict, destination()); | |
3979 } | |
3980 | |
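The typeof fast path above never materializes the typeof string; each recognized literal is lowered to a direct check on the value's tag, map, or instance type, and unknown literals constant-fold to false. A schematic of that dispatch (illustrative names, not V8 code):

#include <string>

enum class TypeofCheck {
  kNumber,      // smi, or map equals the heap-number map
  kString,      // instance type below FIRST_NONSTRING_TYPE, not undetectable
  kBoolean,     // value is the true or false oddball
  kUndefined,   // undefined, or an undetectable heap object
  kFunction,    // JS_FUNCTION_TYPE or JS_REGEXP_TYPE (regexps are callable)
  kObject,      // null, or a non-regexp object in the JS-object type range
  kAlwaysFalse  // a literal typeof can never produce
};

TypeofCheck ClassifyTypeofLiteral(const std::string& literal) {
  if (literal == "number")    return TypeofCheck::kNumber;
  if (literal == "string")    return TypeofCheck::kString;
  if (literal == "boolean")   return TypeofCheck::kBoolean;
  if (literal == "undefined") return TypeofCheck::kUndefined;
  if (literal == "function")  return TypeofCheck::kFunction;
  if (literal == "object")    return TypeofCheck::kObject;
  return TypeofCheck::kAlwaysFalse;
}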
3981 | |
3982 void CodeGenerator::VisitThisFunction(ThisFunction* node) { | |
3983 frame_->PushFunction(); | |
3984 } | |
3985 | |
3986 | |
3987 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) { | |
3988 ASSERT(args->length() == 1); | |
3989 | |
3990 // ArgumentsAccessStub expects the key in rdx and the formal | |
3991 // parameter count in rax. | |
3992 Load(args->at(0)); | |
3993 Result key = frame_->Pop(); | |
3994 // Explicitly create a constant result. | |
3995 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters()))); | |
3996 // Call the shared stub to get to arguments[key]. | |
3997 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); | |
3998 Result result = frame_->CallStub(&stub, &key, &count); | |
3999 frame_->Push(&result); | |
4000 } | |
4001 | |
4002 | |
4003 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { | |
4004 ASSERT(args->length() == 1); | 5366 ASSERT(args->length() == 1); |
4005 Load(args->at(0)); | 5367 Load(args->at(0)); |
4006 Result value = frame_->Pop(); | 5368 Result value = frame_->Pop(); |
4007 value.ToRegister(); | 5369 value.ToRegister(); |
4008 ASSERT(value.is_valid()); | 5370 ASSERT(value.is_valid()); |
4009 Condition is_smi = masm_->CheckSmi(value.reg()); | 5371 Condition is_smi = masm_->CheckSmi(value.reg()); |
4010 destination()->false_target()->Branch(is_smi); | |
4011 // It is a heap object - get map. | |
4012 // Check if the object is a JS array or not. | |
4013 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister); | |
4014 value.Unuse(); | 5372 value.Unuse(); |
4015 destination()->Split(equal); | 5373 destination()->Split(is_smi); |
4016 } | 5374 } |
4017 | 5375 |
4018 | 5376 |
4019 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) { | 5377 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) { |
| 5378 // Conditionally generate a log call. |
| 5379 // Args: |
| 5380 // 0 (literal string): The type of logging (corresponds to the flags). |
| 5381 // This is used to determine whether or not to generate the log call. |
| 5382 // 1 (string): Format string. Access the string at argument index 2 |
| 5383 // with '%2s' (see Logger::LogRuntime for all the formats). |
| 5384 // 2 (array): Arguments to the format string. |
| 5385 ASSERT_EQ(args->length(), 3); |
| 5386 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 5387 if (ShouldGenerateLog(args->at(0))) { |
| 5388 Load(args->at(1)); |
| 5389 Load(args->at(2)); |
| 5390 frame_->CallRuntime(Runtime::kLog, 2); |
| 5391 } |
| 5392 #endif |
| 5393 // Finally, we're expected to leave a value on the top of the stack. |
| 5394 frame_->Push(Factory::undefined_value()); |
| 5395 } |
| 5396 |
| 5397 |
| 5398 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { |
4020 ASSERT(args->length() == 1); | 5399 ASSERT(args->length() == 1); |
4021 Load(args->at(0)); | 5400 Load(args->at(0)); |
4022 Result value = frame_->Pop(); | 5401 Result value = frame_->Pop(); |
4023 value.ToRegister(); | 5402 value.ToRegister(); |
4024 ASSERT(value.is_valid()); | 5403 ASSERT(value.is_valid()); |
4025 Condition is_smi = masm_->CheckSmi(value.reg()); | 5404 Condition positive_smi = masm_->CheckPositiveSmi(value.reg()); |
4026 destination()->false_target()->Branch(is_smi); | |
4027 // It is a heap object - get map. | |
4028 // Check if the object is a regexp. | |
4029 __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister); | |
4030 value.Unuse(); | 5405 value.Unuse(); |
4031 destination()->Split(equal); | 5406 destination()->Split(positive_smi); |
4032 } | |
4033 | |
4034 | |
4035 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { | |
4036 // This generates a fast version of: | |
4037 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') | |
4038 ASSERT(args->length() == 1); | |
4039 Load(args->at(0)); | |
4040 Result obj = frame_->Pop(); | |
4041 obj.ToRegister(); | |
4042 Condition is_smi = masm_->CheckSmi(obj.reg()); | |
4043 destination()->false_target()->Branch(is_smi); | |
4044 | |
4045 __ Move(kScratchRegister, Factory::null_value()); | |
4046 __ cmpq(obj.reg(), kScratchRegister); | |
4047 destination()->true_target()->Branch(equal); | |
4048 | |
4049 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); | |
4050 // Undetectable objects behave like undefined when tested with typeof. | |
4051 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), | |
4052 Immediate(1 << Map::kIsUndetectable)); | |
4053 destination()->false_target()->Branch(not_zero); | |
4054 __ movzxbq(kScratchRegister, | |
4055 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); | |
4056 __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE)); | |
4057 destination()->false_target()->Branch(below); | |
4058 __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE)); | |
4059 obj.Unuse(); | |
4060 destination()->Split(below_equal); | |
4061 } | |
4062 | |
4063 | |
4064 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) { | |
4065 // This generates a fast version of: | |
4066 // (%_ClassOf(arg) === 'Function') | |
4067 ASSERT(args->length() == 1); | |
4068 Load(args->at(0)); | |
4069 Result obj = frame_->Pop(); | |
4070 obj.ToRegister(); | |
4071 Condition is_smi = masm_->CheckSmi(obj.reg()); | |
4072 destination()->false_target()->Branch(is_smi); | |
4073 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); | |
4074 obj.Unuse(); | |
4075 destination()->Split(equal); | |
4076 } | |
4077 | |
4078 | |
4079 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) { | |
4080 ASSERT(args->length() == 1); | |
4081 Load(args->at(0)); | |
4082 Result obj = frame_->Pop(); | |
4083 obj.ToRegister(); | |
4084 Condition is_smi = masm_->CheckSmi(obj.reg()); | |
4085 destination()->false_target()->Branch(is_smi); | |
4086 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); | |
4087 __ movzxbl(kScratchRegister, | |
4088 FieldOperand(kScratchRegister, Map::kBitFieldOffset)); | |
4089 __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable)); | |
4090 obj.Unuse(); | |
4091 destination()->Split(not_zero); | |
4092 } | |
4093 | |
4094 | |
4095 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { | |
4096 ASSERT(args->length() == 0); | |
4097 | |
4098 // Get the frame pointer for the calling frame. | |
4099 Result fp = allocator()->Allocate(); | |
4100 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | |
4101 | |
4102 // Skip the arguments adaptor frame if it exists. | |
4103 Label check_frame_marker; | |
4104 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset), | |
4105 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
4106 __ j(not_equal, &check_frame_marker); | |
4107 __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset)); | |
4108 | |
4109 // Check the marker in the calling frame. | |
4110 __ bind(&check_frame_marker); | |
4111 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset), | |
4112 Smi::FromInt(StackFrame::CONSTRUCT)); | |
4113 fp.Unuse(); | |
4114 destination()->Split(equal); | |
4115 } | |
4116 | |
4117 | |
4118 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { | |
4119 ASSERT(args->length() == 0); | |
4120 | |
4121 Result fp = allocator_->Allocate(); | |
4122 Result result = allocator_->Allocate(); | |
4123 ASSERT(fp.is_valid() && result.is_valid()); | |
4124 | |
4125 Label exit; | |
4126 | |
4127 // Get the number of formal parameters. | |
4128 __ Move(result.reg(), Smi::FromInt(scope()->num_parameters())); | |
4129 | |
4130 // Check if the calling frame is an arguments adaptor frame. | |
4131 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | |
4132 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset), | |
4133 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
4134 __ j(not_equal, &exit); | |
4135 | |
4136 // Arguments adaptor case: Read the arguments length from the | |
4137 // adaptor frame. | |
4138 __ movq(result.reg(), | |
4139 Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
4140 | |
4141 __ bind(&exit); | |
4142 result.set_type_info(TypeInfo::Smi()); | |
4143 if (FLAG_debug_code) { | |
4144 __ AbortIfNotSmi(result.reg()); | |
4145 } | |
4146 frame_->Push(&result); | |
4147 } | 5407 } |
4148 | 5408 |
4149 | 5409 |
4150 class DeferredStringCharCodeAt : public DeferredCode { | 5410 class DeferredStringCharCodeAt : public DeferredCode { |
4151 public: | 5411 public: |
4152 DeferredStringCharCodeAt(Register object, | 5412 DeferredStringCharCodeAt(Register object, |
4153 Register index, | 5413 Register index, |
4154 Register scratch, | 5414 Register scratch, |
4155 Register result) | 5415 Register result) |
4156 : result_(result), | 5416 : result_(result), |
(...skipping 188 matching lines...)
4345 index.reg(), | 5605 index.reg(), |
4346 scratch1.reg(), | 5606 scratch1.reg(), |
4347 scratch2.reg(), | 5607 scratch2.reg(), |
4348 result.reg()); | 5608 result.reg()); |
4349 deferred->fast_case_generator()->GenerateFast(masm_); | 5609 deferred->fast_case_generator()->GenerateFast(masm_); |
4350 deferred->BindExit(); | 5610 deferred->BindExit(); |
4351 frame_->Push(&result); | 5611 frame_->Push(&result); |
4352 } | 5612 } |
4353 | 5613 |
4354 | 5614 |
4355 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { | 5615 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { |
4356 ASSERT(args->length() == 1); | |
4357 Load(args->at(0)); | |
4358 Result value = frame_->Pop(); | |
4359 value.ToRegister(); | |
4360 ASSERT(value.is_valid()); | |
4361 Condition positive_smi = masm_->CheckPositiveSmi(value.reg()); | |
4362 value.Unuse(); | |
4363 destination()->Split(positive_smi); | |
4364 } | |
4365 | |
4366 | |
4367 // Generates the Math.pow method. Only handles special cases and | |
4368 // branches to the runtime system for everything else. Please note | |
4369 // that this function assumes that the callsite has executed ToNumber | |
4370 // on both arguments. | |
4371 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { | |
4372 ASSERT(args->length() == 2); | |
4373 Load(args->at(0)); | |
4374 Load(args->at(1)); | |
4375 | |
4376 Label allocate_return; | |
4377 // Load the two operands while leaving the values on the frame. | |
4378 frame()->Dup(); | |
4379 Result exponent = frame()->Pop(); | |
4380 exponent.ToRegister(); | |
4381 frame()->Spill(exponent.reg()); | |
4382 frame()->PushElementAt(1); | |
4383 Result base = frame()->Pop(); | |
4384 base.ToRegister(); | |
4385 frame()->Spill(base.reg()); | |
4386 | |
4387 Result answer = allocator()->Allocate(); | |
4388 ASSERT(answer.is_valid()); | |
4389 ASSERT(!exponent.reg().is(base.reg())); | |
4390 JumpTarget call_runtime; | |
4391 | |
4392 // Save 1 in xmm3 - we need this several times later on. | |
4393 __ movl(answer.reg(), Immediate(1)); | |
4394 __ cvtlsi2sd(xmm3, answer.reg()); | |
4395 | |
4396 Label exponent_nonsmi; | |
4397 Label base_nonsmi; | |
4398 // If the exponent is a heap number go to that specific case. | |
4399 __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi); | |
4400 __ JumpIfNotSmi(base.reg(), &base_nonsmi); | |
4401 | |
4402 // Optimized version when y is an integer. | |
4403 Label powi; | |
4404 __ SmiToInteger32(base.reg(), base.reg()); | |
4405 __ cvtlsi2sd(xmm0, base.reg()); | |
4406 __ jmp(&powi); | |
4407 // exponent is smi and base is a heapnumber. | |
4408 __ bind(&base_nonsmi); | |
4409 __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), | |
4410 Heap::kHeapNumberMapRootIndex); | |
4411 call_runtime.Branch(not_equal); | |
4412 | |
4413 __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); | |
4414 | |
4415 // Optimized version of pow if y is an integer. | |
4416 __ bind(&powi); | |
4417 __ SmiToInteger32(exponent.reg(), exponent.reg()); | |
4418 | |
4419 // Save exponent in base as we need to check if exponent is negative later. | |
4420 // We know that base and exponent are in different registers. | |
4421 __ movl(base.reg(), exponent.reg()); | |
4422 | |
4423 // Get absolute value of exponent. | |
4424 Label no_neg; | |
4425 __ cmpl(exponent.reg(), Immediate(0)); | |
4426 __ j(greater_equal, &no_neg); | |
4427 __ negl(exponent.reg()); | |
4428 __ bind(&no_neg); | |
4429 | |
4430 // Load xmm1 with 1. | |
4431 __ movsd(xmm1, xmm3); | |
4432 Label while_true; | |
4433 Label no_multiply; | |
4434 | |
4435 __ bind(&while_true); | |
4436 __ shrl(exponent.reg(), Immediate(1)); | |
4437 __ j(not_carry, &no_multiply); | |
4438 __ mulsd(xmm1, xmm0); | |
4439 __ bind(&no_multiply); | |
4440 __ testl(exponent.reg(), exponent.reg()); | |
4441 __ mulsd(xmm0, xmm0); | |
4442 __ j(not_zero, &while_true); | |
4443 | |
4444 // base.reg() holds the original exponent - if it is negative, return 1/result. | 
4445 __ testl(base.reg(), base.reg()); | |
4446 __ j(positive, &allocate_return); | |
4447 // Special case if xmm1 has reached infinity. | |
4448 __ movl(answer.reg(), Immediate(0x7FB00000)); | |
4449 __ movd(xmm0, answer.reg()); | |
4450 __ cvtss2sd(xmm0, xmm0); | |
4451 __ ucomisd(xmm0, xmm1); | |
4452 call_runtime.Branch(equal); | |
4453 __ divsd(xmm3, xmm1); | |
4454 __ movsd(xmm1, xmm3); | |
4455 __ jmp(&allocate_return); | |
4456 | |
4457 // exponent (or both) is a heapnumber - no matter what we should now work | |
4458 // on doubles. | |
4459 __ bind(&exponent_nonsmi); | |
4460 __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset), | |
4461 Heap::kHeapNumberMapRootIndex); | |
4462 call_runtime.Branch(not_equal); | |
4463 __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset)); | |
4464 // Test if exponent is nan. | |
4465 __ ucomisd(xmm1, xmm1); | |
4466 call_runtime.Branch(parity_even); | |
4467 | |
4468 Label base_not_smi; | |
4469 Label handle_special_cases; | |
4470 __ JumpIfNotSmi(base.reg(), &base_not_smi); | |
4471 __ SmiToInteger32(base.reg(), base.reg()); | |
4472 __ cvtlsi2sd(xmm0, base.reg()); | |
4473 __ jmp(&handle_special_cases); | |
4474 __ bind(&base_not_smi); | |
4475 __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), | |
4476 Heap::kHeapNumberMapRootIndex); | |
4477 call_runtime.Branch(not_equal); | |
4478 __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset)); | |
4479 __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask)); | |
4480 __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask)); | |
4481 // base is NaN or +/-Infinity | |
4482 call_runtime.Branch(greater_equal); | |
4483 __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); | |
4484 | |
4485 // base is in xmm0 and exponent is in xmm1. | |
4486 __ bind(&handle_special_cases); | |
4487 Label not_minus_half; | |
4488 // Test for -0.5. | |
4489 // Load xmm2 with -0.5. | |
4490 __ movl(answer.reg(), Immediate(0xBF000000)); | |
4491 __ movd(xmm2, answer.reg()); | |
4492 __ cvtss2sd(xmm2, xmm2); | |
4493 // xmm2 now has -0.5. | |
4494 __ ucomisd(xmm2, xmm1); | |
4495 __ j(not_equal, &not_minus_half); | 
4496 | |
4497 // Calculates reciprocal of square root. | |
4498 // Note that 1/sqrt(x) = sqrt(1/x). | 
4499 __ divsd(xmm3, xmm0); | |
4500 __ movsd(xmm1, xmm3); | |
4501 __ sqrtsd(xmm1, xmm1); | |
4502 __ jmp(&allocate_return); | |
4503 | |
4504 // Test for 0.5. | |
4505 __ bind(&not_minus_half); | 
4506 // Load xmm2 with 0.5. | |
4507 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3. | |
4508 __ addsd(xmm2, xmm3); | |
4509 // xmm2 now has 0.5. | |
4510 __ ucomisd(xmm2, xmm1); | |
4511 call_runtime.Branch(not_equal); | |
4512 | |
4513 // Calculates square root. | |
4514 __ movsd(xmm1, xmm0); | |
4515 __ sqrtsd(xmm1, xmm1); | |
4516 | |
4517 JumpTarget done; | |
4518 Label failure, success; | |
4519 __ bind(&allocate_return); | |
4520 // Make a copy of the frame to enable us to handle allocation | |
4521 // failure after the JumpTarget jump. | |
4522 VirtualFrame* clone = new VirtualFrame(frame()); | |
4523 __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure); | |
4524 __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1); | |
4525 // Remove the two original values from the frame - we only need those | |
4526 // in the case where we branch to runtime. | |
4527 frame()->Drop(2); | |
4528 exponent.Unuse(); | |
4529 base.Unuse(); | |
4530 done.Jump(&answer); | |
4531 // Use the copy of the original frame as our current frame. | |
4532 RegisterFile empty_regs; | |
4533 SetFrame(clone, &empty_regs); | |
4534 // If we experience an allocation failure we branch to runtime. | |
4535 __ bind(&failure); | |
4536 call_runtime.Bind(); | |
4537 answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2); | |
4538 | |
4539 done.Bind(&answer); | |
4540 frame()->Push(&answer); | |
4541 } | |
4542 | |
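The integer branch of GenerateMathPow above is exponentiation by squaring over doubles, with a final reciprocal when the exponent is negative; the non-integer branch only handles exponents of 0.5 (sqrt(x)) and -0.5 (sqrt(1/x), since 1/sqrt(x) = sqrt(1/x)) before giving up to the runtime. A standalone sketch of the integer loop, as an illustration rather than the generated code:

double PowInteger(double base, int exponent) {
  bool negative = exponent < 0;
  long long e = negative ? -static_cast<long long>(exponent) : exponent;
  double result = 1.0;  // plays the role of xmm1 above
  double x = base;      // plays the role of xmm0 above
  while (e != 0) {
    if (e & 1) result *= x;  // multiply in the current power when the bit is set
    x *= x;                  // square once per iteration
    e >>= 1;
  }
  return negative ? 1.0 / result : result;  // negative exponent: take reciprocal
}

The generated code additionally bails out to Runtime::kMath_pow_cfunction for a NaN exponent and when the negative-exponent result has overflowed to infinity, rather than relying on plain double arithmetic as this sketch does.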
4543 | |
4544 // Generates the Math.sqrt method. Please note - this function assumes that | |
4545 // the callsite has executed ToNumber on the argument. | |
4546 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { | |
4547 ASSERT(args->length() == 1); | |
4548 Load(args->at(0)); | |
4549 | |
4550 // Leave original value on the frame if we need to call runtime. | |
4551 frame()->Dup(); | |
4552 Result result = frame()->Pop(); | |
4553 result.ToRegister(); | |
4554 frame()->Spill(result.reg()); | |
4555 Label runtime; | |
4556 Label non_smi; | |
4557 Label load_done; | |
4558 JumpTarget end; | |
4559 | |
4560 __ JumpIfNotSmi(result.reg(), &non_smi); | |
4561 __ SmiToInteger32(result.reg(), result.reg()); | |
4562 __ cvtlsi2sd(xmm0, result.reg()); | |
4563 __ jmp(&load_done); | |
4564 __ bind(&non_smi); | |
4565 __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset), | |
4566 Heap::kHeapNumberMapRootIndex); | |
4567 __ j(not_equal, &runtime); | |
4568 __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset)); | |
4569 | |
4570 __ bind(&load_done); | |
4571 __ sqrtsd(xmm0, xmm0); | |
4572 // A copy of the virtual frame to allow us to go to runtime after the | |
4573 // JumpTarget jump. | |
4574 Result scratch = allocator()->Allocate(); | |
4575 VirtualFrame* clone = new VirtualFrame(frame()); | |
4576 __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime); | |
4577 | |
4578 __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0); | |
4579 frame()->Drop(1); | |
4580 scratch.Unuse(); | |
4581 end.Jump(&result); | |
4582 // We only branch to runtime if we have an allocation error. | |
4583 // Use the copy of the original frame as our current frame. | |
4584 RegisterFile empty_regs; | |
4585 SetFrame(clone, &empty_regs); | |
4586 __ bind(&runtime); | |
4587 result = frame()->CallRuntime(Runtime::kMath_sqrt, 1); | |
4588 | |
4589 end.Bind(&result); | |
4590 frame()->Push(&result); | |
4591 } | |
4592 | |
4593 | |
4594 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { | |
4595 ASSERT(args->length() == 1); | 5616 ASSERT(args->length() == 1); |
4596 Load(args->at(0)); | 5617 Load(args->at(0)); |
4597 Result value = frame_->Pop(); | 5618 Result value = frame_->Pop(); |
4598 value.ToRegister(); | 5619 value.ToRegister(); |
4599 ASSERT(value.is_valid()); | 5620 ASSERT(value.is_valid()); |
4600 Condition is_smi = masm_->CheckSmi(value.reg()); | 5621 Condition is_smi = masm_->CheckSmi(value.reg()); |
| 5622 destination()->false_target()->Branch(is_smi); |
| 5623 // It is a heap object - get map. |
| 5624 // Check if the object is a JS array or not. |
| 5625 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister); |
4601 value.Unuse(); | 5626 value.Unuse(); |
4602 destination()->Split(is_smi); | 5627 destination()->Split(equal); |
4603 } | 5628 } |
4604 | 5629 |
4605 | 5630 |
4606 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) { | 5631 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) { |
4607 // Conditionally generate a log call. | 5632 ASSERT(args->length() == 1); |
4608 // Args: | 5633 Load(args->at(0)); |
4609 // 0 (literal string): The type of logging (corresponds to the flags). | 5634 Result value = frame_->Pop(); |
4610 // This is used to determine whether or not to generate the log call. | 5635 value.ToRegister(); |
4611 // 1 (string): Format string. Access the string at argument index 2 | 5636 ASSERT(value.is_valid()); |
4612 // with '%2s' (see Logger::LogRuntime for all the formats). | 5637 Condition is_smi = masm_->CheckSmi(value.reg()); |
4613 // 2 (array): Arguments to the format string. | 5638 destination()->false_target()->Branch(is_smi); |
4614 ASSERT_EQ(args->length(), 3); | 5639 // It is a heap object - get map. |
4615 #ifdef ENABLE_LOGGING_AND_PROFILING | 5640 // Check if the object is a regexp. |
4616 if (ShouldGenerateLog(args->at(0))) { | 5641 __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister); |
4617 Load(args->at(1)); | 5642 value.Unuse(); |
4618 Load(args->at(2)); | 5643 destination()->Split(equal); |
4619 frame_->CallRuntime(Runtime::kLog, 2); | 5644 } |
| 5645 |
| 5646 |
| 5647 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { |
| 5648 // This generates a fast version of: |
| 5649 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') |
| 5650 ASSERT(args->length() == 1); |
| 5651 Load(args->at(0)); |
| 5652 Result obj = frame_->Pop(); |
| 5653 obj.ToRegister(); |
| 5654 Condition is_smi = masm_->CheckSmi(obj.reg()); |
| 5655 destination()->false_target()->Branch(is_smi); |
| 5656 |
| 5657 __ Move(kScratchRegister, Factory::null_value()); |
| 5658 __ cmpq(obj.reg(), kScratchRegister); |
| 5659 destination()->true_target()->Branch(equal); |
| 5660 |
| 5661 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); |
| 5662 // Undetectable objects behave like undefined when tested with typeof. |
| 5663 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), |
| 5664 Immediate(1 << Map::kIsUndetectable)); |
| 5665 destination()->false_target()->Branch(not_zero); |
| 5666 __ movzxbq(kScratchRegister, |
| 5667 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); |
| 5668 __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE)); |
| 5669 destination()->false_target()->Branch(below); |
| 5670 __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE)); |
| 5671 obj.Unuse(); |
| 5672 destination()->Split(below_equal); |
| 5673 } |
| 5674 |
| 5675 |
| 5676 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) { |
| 5677 // This generates a fast version of: |
| 5678 // (%_ClassOf(arg) === 'Function') |
| 5679 ASSERT(args->length() == 1); |
| 5680 Load(args->at(0)); |
| 5681 Result obj = frame_->Pop(); |
| 5682 obj.ToRegister(); |
| 5683 Condition is_smi = masm_->CheckSmi(obj.reg()); |
| 5684 destination()->false_target()->Branch(is_smi); |
| 5685 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); |
| 5686 obj.Unuse(); |
| 5687 destination()->Split(equal); |
| 5688 } |
| 5689 |
| 5690 |
| 5691 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) { |
| 5692 ASSERT(args->length() == 1); |
| 5693 Load(args->at(0)); |
| 5694 Result obj = frame_->Pop(); |
| 5695 obj.ToRegister(); |
| 5696 Condition is_smi = masm_->CheckSmi(obj.reg()); |
| 5697 destination()->false_target()->Branch(is_smi); |
| 5698 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); |
| 5699 __ movzxbl(kScratchRegister, |
| 5700 FieldOperand(kScratchRegister, Map::kBitFieldOffset)); |
| 5701 __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable)); |
| 5702 obj.Unuse(); |
| 5703 destination()->Split(not_zero); |
| 5704 } |
| 5705 |
| 5706 |
| 5707 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { |
| 5708 ASSERT(args->length() == 0); |
| 5709 |
| 5710 // Get the frame pointer for the calling frame. |
| 5711 Result fp = allocator()->Allocate(); |
| 5712 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| 5713 |
| 5714 // Skip the arguments adaptor frame if it exists. |
| 5715 Label check_frame_marker; |
| 5716 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset), |
| 5717 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
| 5718 __ j(not_equal, &check_frame_marker); |
| 5719 __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset)); |
| 5720 |
| 5721 // Check the marker in the calling frame. |
| 5722 __ bind(&check_frame_marker); |
| 5723 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset), |
| 5724 Smi::FromInt(StackFrame::CONSTRUCT)); |
| 5725 fp.Unuse(); |
| 5726 destination()->Split(equal); |
| 5727 } |
| 5728 |
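The frame walk above is a two-step lookup through saved frame pointers: start at the calling frame, hop over an arguments adaptor frame if one is present, then compare that frame's marker slot against the CONSTRUCT marker. A hedged C++ sketch of the traversal; the Frame struct and sentinel values are illustrative, not the real StandardFrameConstants layout:

    // Each stack frame is modeled as a node with a context/marker slot and a
    // link to its caller, which is what the generated code indexes off rbp.
    struct Frame {
      const Frame* caller;
      int context_or_marker;   // adaptor frames store a sentinel here
      int marker;              // construct frames store kConstructMarker here
    };

    constexpr int kArgumentsAdaptorSentinel = 1;  // assumed placeholder values
    constexpr int kConstructMarker = 2;

    bool IsConstructCall(const Frame* current) {
      const Frame* fp = current->caller;                   // calling frame
      if (fp->context_or_marker == kArgumentsAdaptorSentinel) {
        fp = fp->caller;                                   // skip adaptor frame
      }
      return fp->marker == kConstructMarker;
    }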
| 5729 |
| 5730 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { |
| 5731 ASSERT(args->length() == 0); |
| 5732 |
| 5733 Result fp = allocator_->Allocate(); |
| 5734 Result result = allocator_->Allocate(); |
| 5735 ASSERT(fp.is_valid() && result.is_valid()); |
| 5736 |
| 5737 Label exit; |
| 5738 |
| 5739 // Get the number of formal parameters. |
| 5740 __ Move(result.reg(), Smi::FromInt(scope()->num_parameters())); |
| 5741 |
| 5742 // Check if the calling frame is an arguments adaptor frame. |
| 5743 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| 5744 __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset), |
| 5745 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
| 5746 __ j(not_equal, &exit); |
| 5747 |
| 5748 // Arguments adaptor case: Read the arguments length from the |
| 5749 // adaptor frame. |
| 5750 __ movq(result.reg(), |
| 5751 Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 5752 |
| 5753 __ bind(&exit); |
| 5754 result.set_type_info(TypeInfo::Smi()); |
| 5755 if (FLAG_debug_code) { |
| 5756 __ AbortIfNotSmi(result.reg()); |
4620 } | 5757 } |
4621 #endif | 5758 frame_->Push(&result); |
4622 // Finally, we're expected to leave a value on the top of the stack. | 5759 } |
4623 frame_->Push(Factory::undefined_value()); | 5760 |
4624 } | 5761 |
4625 | 5762 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { |
4626 | 5763 ASSERT(args->length() == 1); |
| 5764 JumpTarget leave, null, function, non_function_constructor; |
| 5765 Load(args->at(0)); // Load the object. |
| 5766 Result obj = frame_->Pop(); |
| 5767 obj.ToRegister(); |
| 5768 frame_->Spill(obj.reg()); |
| 5769 |
| 5770 // If the object is a smi, we return null. |
| 5771 Condition is_smi = masm_->CheckSmi(obj.reg()); |
| 5772 null.Branch(is_smi); |
| 5773 |
| 5774 // Check that the object is a JS object but take special care of JS |
| 5775 // functions to make sure they have 'Function' as their class. |
| 5776 |
| 5777 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg()); |
| 5778 null.Branch(below); |
| 5779 |
| 5780 // As long as JS_FUNCTION_TYPE is the last instance type and it is |
| 5781 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for |
| 5782 // LAST_JS_OBJECT_TYPE. |
| 5783 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); |
| 5784 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); |
| 5785 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE); |
| 5786 function.Branch(equal); |
| 5787 |
| 5788 // Check if the constructor in the map is a function. |
| 5789 __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset)); |
| 5790 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); |
| 5791 non_function_constructor.Branch(not_equal); |
| 5792 |
| 5793 // The obj register now contains the constructor function. Grab the |
| 5794 // instance class name from there. |
| 5795 __ movq(obj.reg(), |
| 5796 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset)); |
| 5797 __ movq(obj.reg(), |
| 5798 FieldOperand(obj.reg(), |
| 5799 SharedFunctionInfo::kInstanceClassNameOffset)); |
| 5800 frame_->Push(&obj); |
| 5801 leave.Jump(); |
| 5802 |
| 5803 // Functions have class 'Function'. |
| 5804 function.Bind(); |
| 5805 frame_->Push(Factory::function_class_symbol()); |
| 5806 leave.Jump(); |
| 5807 |
| 5808 // Objects with a non-function constructor have class 'Object'. |
| 5809 non_function_constructor.Bind(); |
| 5810 frame_->Push(Factory::Object_symbol()); |
| 5811 leave.Jump(); |
| 5812 |
| 5813 // Non-JS objects have class null. |
| 5814 null.Bind(); |
| 5815 frame_->Push(Factory::null_value()); |
| 5816 |
| 5817 // All done. |
| 5818 leave.Bind(); |
| 5819 } |
| 5820 |
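The control flow above amounts to a four-way classification: smis and non-JS objects report null, JS functions report 'Function', objects whose map's constructor is a JS function report that constructor's instance class name, and anything with a non-function constructor falls back to 'Object'. A rough C++ sketch of that decision tree (the object model and accessors are hypothetical):

    #include <string>

    // Hypothetical mirror of the map/constructor reads done by the generated code.
    struct HeapObj {
      bool is_smi;
      bool is_js_object;
      bool is_function;
      bool constructor_is_function;
      std::string instance_class_name;  // from the constructor's SharedFunctionInfo
    };

    std::string ClassOf(const HeapObj& o) {
      if (o.is_smi || !o.is_js_object) return "null";      // non-JS values
      if (o.is_function) return "Function";                // functions
      if (!o.constructor_is_function) return "Object";     // odd constructors
      return o.instance_class_name;                        // e.g. "Array", "Date"
    }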
| 5821 |
| 5822 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { |
| 5823 ASSERT(args->length() == 1); |
| 5824 JumpTarget leave; |
| 5825 Load(args->at(0)); // Load the object. |
| 5826 frame_->Dup(); |
| 5827 Result object = frame_->Pop(); |
| 5828 object.ToRegister(); |
| 5829 ASSERT(object.is_valid()); |
| 5830 // if (object->IsSmi()) return object. |
| 5831 Condition is_smi = masm_->CheckSmi(object.reg()); |
| 5832 leave.Branch(is_smi); |
| 5833 // It is a heap object - get map. |
| 5834 Result temp = allocator()->Allocate(); |
| 5835 ASSERT(temp.is_valid()); |
| 5836 // if (!object->IsJSValue()) return object. |
| 5837 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg()); |
| 5838 leave.Branch(not_equal); |
| 5839 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset)); |
| 5840 object.Unuse(); |
| 5841 frame_->SetElementAt(0, &temp); |
| 5842 leave.Bind(); |
| 5843 } |
| 5844 |
| 5845 |
| 5846 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) { |
| 5847 ASSERT(args->length() == 2); |
| 5848 JumpTarget leave; |
| 5849 Load(args->at(0)); // Load the object. |
| 5850 Load(args->at(1)); // Load the value. |
| 5851 Result value = frame_->Pop(); |
| 5852 Result object = frame_->Pop(); |
| 5853 value.ToRegister(); |
| 5854 object.ToRegister(); |
| 5855 |
| 5856 // if (object->IsSmi()) return value. |
| 5857 Condition is_smi = masm_->CheckSmi(object.reg()); |
| 5858 leave.Branch(is_smi, &value); |
| 5859 |
| 5860 // It is a heap object - get its map. |
| 5861 Result scratch = allocator_->Allocate(); |
| 5862 ASSERT(scratch.is_valid()); |
| 5863 // if (!object->IsJSValue()) return value. |
| 5864 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg()); |
| 5865 leave.Branch(not_equal, &value); |
| 5866 |
| 5867 // Store the value. |
| 5868 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg()); |
| 5869 // Update the write barrier. Save the value as it will be |
| 5870 // overwritten by the write barrier code and is needed afterward. |
| 5871 Result duplicate_value = allocator_->Allocate(); |
| 5872 ASSERT(duplicate_value.is_valid()); |
| 5873 __ movq(duplicate_value.reg(), value.reg()); |
| 5874 // The object register is also overwritten by the write barrier and |
| 5875 // possibly aliased in the frame. |
| 5876 frame_->Spill(object.reg()); |
| 5877 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(), |
| 5878 scratch.reg()); |
| 5879 object.Unuse(); |
| 5880 scratch.Unuse(); |
| 5881 duplicate_value.Unuse(); |
| 5882 |
| 5883 // Leave. |
| 5884 leave.Bind(&value); |
| 5885 frame_->Push(&value); |
| 5886 } |
| 5887 |
| 5888 |
| 5889 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) { |
| 5890 ASSERT(args->length() == 1); |
| 5891 |
| 5892 // ArgumentsAccessStub expects the key in rdx and the formal |
| 5893 // parameter count in rax. |
| 5894 Load(args->at(0)); |
| 5895 Result key = frame_->Pop(); |
| 5896 // Explicitly create a constant result. |
| 5897 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters()))); |
| 5898 // Call the shared stub to get to arguments[key]. |
| 5899 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); |
| 5900 Result result = frame_->CallStub(&stub, &key, &count); |
| 5901 frame_->Push(&result); |
| 5902 } |
| 5903 |
| 5904 |
4627 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) { | 5905 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) { |
4628 ASSERT(args->length() == 2); | 5906 ASSERT(args->length() == 2); |
4629 | 5907 |
4630 // Load the two objects into registers and perform the comparison. | 5908 // Load the two objects into registers and perform the comparison. |
4631 Load(args->at(0)); | 5909 Load(args->at(0)); |
4632 Load(args->at(1)); | 5910 Load(args->at(1)); |
4633 Result right = frame_->Pop(); | 5911 Result right = frame_->Pop(); |
4634 Result left = frame_->Pop(); | 5912 Result left = frame_->Pop(); |
4635 right.ToRegister(); | 5913 right.ToRegister(); |
4636 left.ToRegister(); | 5914 left.ToRegister(); |
(...skipping 48 matching lines...) |
4685 __ xorpd(xmm0, xmm1); | 5963 __ xorpd(xmm0, xmm1); |
4686 __ subsd(xmm0, xmm1); | 5964 __ subsd(xmm0, xmm1); |
4687 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); | 5965 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); |
4688 | 5966 |
4689 __ movq(rax, rbx); | 5967 __ movq(rax, rbx); |
4690 Result result = allocator_->Allocate(rax); | 5968 Result result = allocator_->Allocate(rax); |
4691 frame_->Push(&result); | 5969 frame_->Push(&result); |
4692 } | 5970 } |
4693 | 5971 |
4694 | 5972 |
| 5973 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) { |
| 5974 ASSERT_EQ(2, args->length()); |
| 5975 |
| 5976 Load(args->at(0)); |
| 5977 Load(args->at(1)); |
| 5978 |
| 5979 StringAddStub stub(NO_STRING_ADD_FLAGS); |
| 5980 Result answer = frame_->CallStub(&stub, 2); |
| 5981 frame_->Push(&answer); |
| 5982 } |
| 5983 |
| 5984 |
| 5985 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) { |
| 5986 ASSERT_EQ(3, args->length()); |
| 5987 |
| 5988 Load(args->at(0)); |
| 5989 Load(args->at(1)); |
| 5990 Load(args->at(2)); |
| 5991 |
| 5992 SubStringStub stub; |
| 5993 Result answer = frame_->CallStub(&stub, 3); |
| 5994 frame_->Push(&answer); |
| 5995 } |
| 5996 |
| 5997 |
| 5998 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) { |
| 5999 ASSERT_EQ(2, args->length()); |
| 6000 |
| 6001 Load(args->at(0)); |
| 6002 Load(args->at(1)); |
| 6003 |
| 6004 StringCompareStub stub; |
| 6005 Result answer = frame_->CallStub(&stub, 2); |
| 6006 frame_->Push(&answer); |
| 6007 } |
| 6008 |
| 6009 |
4695 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) { | 6010 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) { |
4696 ASSERT_EQ(args->length(), 4); | 6011 ASSERT_EQ(args->length(), 4); |
4697 | 6012 |
4698 // Load the arguments on the stack and call the runtime system. | 6013 // Load the arguments on the stack and call the runtime system. |
4699 Load(args->at(0)); | 6014 Load(args->at(0)); |
4700 Load(args->at(1)); | 6015 Load(args->at(1)); |
4701 Load(args->at(2)); | 6016 Load(args->at(2)); |
4702 Load(args->at(3)); | 6017 Load(args->at(3)); |
4703 RegExpExecStub stub; | 6018 RegExpExecStub stub; |
4704 Result result = frame_->CallStub(&stub, 4); | 6019 Result result = frame_->CallStub(&stub, 4); |
(...skipping 421 matching lines...) |
5126 Load(args->at(0)); // receiver | 6441 Load(args->at(0)); // receiver |
5127 for (int i = 0; i < n_args; i++) { | 6442 for (int i = 0; i < n_args; i++) { |
5128 Load(args->at(i + 1)); | 6443 Load(args->at(i + 1)); |
5129 } | 6444 } |
5130 Load(args->at(n_args + 1)); // function | 6445 Load(args->at(n_args + 1)); // function |
5131 Result result = frame_->CallJSFunction(n_args); | 6446 Result result = frame_->CallJSFunction(n_args); |
5132 frame_->Push(&result); | 6447 frame_->Push(&result); |
5133 } | 6448 } |
5134 | 6449 |
5135 | 6450 |
| 6451 // Generates the Math.pow method. Only handles special cases and |
| 6452 // branches to the runtime system for everything else. Please note |
| 6453 // that this function assumes that the callsite has executed ToNumber |
| 6454 // on both arguments. |
| 6455 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { |
| 6456 ASSERT(args->length() == 2); |
| 6457 Load(args->at(0)); |
| 6458 Load(args->at(1)); |
| 6459 |
| 6460 Label allocate_return; |
| 6461 // Load the two operands while leaving the values on the frame. |
| 6462 frame()->Dup(); |
| 6463 Result exponent = frame()->Pop(); |
| 6464 exponent.ToRegister(); |
| 6465 frame()->Spill(exponent.reg()); |
| 6466 frame()->PushElementAt(1); |
| 6467 Result base = frame()->Pop(); |
| 6468 base.ToRegister(); |
| 6469 frame()->Spill(base.reg()); |
| 6470 |
| 6471 Result answer = allocator()->Allocate(); |
| 6472 ASSERT(answer.is_valid()); |
| 6473 ASSERT(!exponent.reg().is(base.reg())); |
| 6474 JumpTarget call_runtime; |
| 6475 |
| 6476 // Save 1 in xmm3 - we need this several times later on. |
| 6477 __ movl(answer.reg(), Immediate(1)); |
| 6478 __ cvtlsi2sd(xmm3, answer.reg()); |
| 6479 |
| 6480 Label exponent_nonsmi; |
| 6481 Label base_nonsmi; |
| 6482 // If the exponent is a heap number go to that specific case. |
| 6483 __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi); |
| 6484 __ JumpIfNotSmi(base.reg(), &base_nonsmi); |
| 6485 |
| 6486 // Optimized version when y is an integer. |
| 6487 Label powi; |
| 6488 __ SmiToInteger32(base.reg(), base.reg()); |
| 6489 __ cvtlsi2sd(xmm0, base.reg()); |
| 6490 __ jmp(&powi); |
| 6491 // The exponent is a smi and the base is not a smi - check that it is a heap number. |
| 6492 __ bind(&base_nonsmi); |
| 6493 __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), |
| 6494 Heap::kHeapNumberMapRootIndex); |
| 6495 call_runtime.Branch(not_equal); |
| 6496 |
| 6497 __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); |
| 6498 |
| 6499 // Optimized version of pow if y is an integer. |
| 6500 __ bind(&powi); |
| 6501 __ SmiToInteger32(exponent.reg(), exponent.reg()); |
| 6502 |
| 6503 // Save exponent in base as we need to check if exponent is negative later. |
| 6504 // We know that base and exponent are in different registers. |
| 6505 __ movl(base.reg(), exponent.reg()); |
| 6506 |
| 6507 // Get absolute value of exponent. |
| 6508 Label no_neg; |
| 6509 __ cmpl(exponent.reg(), Immediate(0)); |
| 6510 __ j(greater_equal, &no_neg); |
| 6511 __ negl(exponent.reg()); |
| 6512 __ bind(&no_neg); |
| 6513 |
| 6514 // Load xmm1 with 1. |
| 6515 __ movsd(xmm1, xmm3); |
| 6516 Label while_true; |
| 6517 Label no_multiply; |
| 6518 |
| 6519 __ bind(&while_true); |
| 6520 __ shrl(exponent.reg(), Immediate(1)); |
| 6521 __ j(not_carry, &no_multiply); |
| 6522 __ mulsd(xmm1, xmm0); |
| 6523 __ bind(&no_multiply); |
| 6524 __ testl(exponent.reg(), exponent.reg()); |
| 6525 __ mulsd(xmm0, xmm0); |
| 6526 __ j(not_zero, &while_true); |
| 6527 |
| 6528 // base holds the original exponent - if the exponent is negative, return 1/result. |
| 6529 __ testl(base.reg(), base.reg()); |
| 6530 __ j(positive, &allocate_return); |
| 6531 // Special case if xmm1 has reached infinity. |
| 6532 __ movl(answer.reg(), Immediate(0x7FB00000)); |
| 6533 __ movd(xmm0, answer.reg()); |
| 6534 __ cvtss2sd(xmm0, xmm0); |
| 6535 __ ucomisd(xmm0, xmm1); |
| 6536 call_runtime.Branch(equal); |
| 6537 __ divsd(xmm3, xmm1); |
| 6538 __ movsd(xmm1, xmm3); |
| 6539 __ jmp(&allocate_return); |
| 6540 |
| 6541 // The exponent (and possibly the base) is a heap number - either way we now |
| 6542 // work on doubles. |
| 6543 __ bind(&exponent_nonsmi); |
| 6544 __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset), |
| 6545 Heap::kHeapNumberMapRootIndex); |
| 6546 call_runtime.Branch(not_equal); |
| 6547 __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset)); |
| 6548 // Test if exponent is nan. |
| 6549 __ ucomisd(xmm1, xmm1); |
| 6550 call_runtime.Branch(parity_even); |
| 6551 |
| 6552 Label base_not_smi; |
| 6553 Label handle_special_cases; |
| 6554 __ JumpIfNotSmi(base.reg(), &base_not_smi); |
| 6555 __ SmiToInteger32(base.reg(), base.reg()); |
| 6556 __ cvtlsi2sd(xmm0, base.reg()); |
| 6557 __ jmp(&handle_special_cases); |
| 6558 __ bind(&base_not_smi); |
| 6559 __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), |
| 6560 Heap::kHeapNumberMapRootIndex); |
| 6561 call_runtime.Branch(not_equal); |
| 6562 __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset)); |
| 6563 __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask)); |
| 6564 __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask)); |
| 6565 // base is NaN or +/-Infinity |
| 6566 call_runtime.Branch(greater_equal); |
| 6567 __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); |
| 6568 |
| 6569 // base is in xmm0 and exponent is in xmm1. |
| 6570 __ bind(&handle_special_cases); |
| 6571 Label not_minus_half; |
| 6572 // Test for -0.5. |
| 6573 // Load xmm2 with -0.5. |
| 6574 __ movl(answer.reg(), Immediate(0xBF000000)); |
| 6575 __ movd(xmm2, answer.reg()); |
| 6576 __ cvtss2sd(xmm2, xmm2); |
| 6577 // xmm2 now has -0.5. |
| 6578 __ ucomisd(xmm2, xmm1); |
| 6579 __ j(not_equal, &not_minus_half); |
| 6580 |
| 6581 // Calculates reciprocal of square root. |
| 6582 // Note that 1/sqrt(x) = sqrt(1/x). |
| 6583 __ divsd(xmm3, xmm0); |
| 6584 __ movsd(xmm1, xmm3); |
| 6585 __ sqrtsd(xmm1, xmm1); |
| 6586 __ jmp(&allocate_return); |
| 6587 |
| 6588 // Test for 0.5. |
| 6589 __ bind(&not_minus_half); |
| 6590 // Load xmm2 with 0.5. |
| 6591 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3. |
| 6592 __ addsd(xmm2, xmm3); |
| 6593 // xmm2 now has 0.5. |
| 6594 __ ucomisd(xmm2, xmm1); |
| 6595 call_runtime.Branch(not_equal); |
| 6596 |
| 6597 // Calculates square root. |
| 6598 __ movsd(xmm1, xmm0); |
| 6599 __ sqrtsd(xmm1, xmm1); |
| 6600 |
| 6601 JumpTarget done; |
| 6602 Label failure, success; |
| 6603 __ bind(&allocate_return); |
| 6604 // Make a copy of the frame to enable us to handle allocation |
| 6605 // failure after the JumpTarget jump. |
| 6606 VirtualFrame* clone = new VirtualFrame(frame()); |
| 6607 __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure); |
| 6608 __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1); |
| 6609 // Remove the two original values from the frame - we only need those |
| 6610 // in the case where we branch to runtime. |
| 6611 frame()->Drop(2); |
| 6612 exponent.Unuse(); |
| 6613 base.Unuse(); |
| 6614 done.Jump(&answer); |
| 6615 // Use the copy of the original frame as our current frame. |
| 6616 RegisterFile empty_regs; |
| 6617 SetFrame(clone, &empty_regs); |
| 6618 // If we experience an allocation failure we branch to runtime. |
| 6619 __ bind(&failure); |
| 6620 call_runtime.Bind(); |
| 6621 answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2); |
| 6622 |
| 6623 done.Bind(&answer); |
| 6624 frame()->Push(&answer); |
| 6625 } |
| 6626 |
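For reference, the fast paths implemented above are: square-and-multiply when the exponent arrives as a smi (taking the reciprocal afterwards for a negative exponent, and bailing out if the intermediate result overflows to infinity), plus the exponent == -0.5 and exponent == 0.5 special cases; every other shape goes to Runtime::kMath_pow_cfunction. A plain C++ sketch of those paths under those assumptions, not the generated code itself:

    #include <cmath>

    // Sketch only - 'handled' is false where the generated code would branch
    // to the runtime instead of producing a result inline.
    double PowFastPath(double base, double exponent, bool is_int_exponent,
                       bool* handled) {
      *handled = true;
      if (is_int_exponent) {                      // smi exponent path
        int n = static_cast<int>(exponent);
        unsigned m = n < 0 ? static_cast<unsigned>(-n) : static_cast<unsigned>(n);
        double result = 1.0;
        double b = base;
        while (m != 0) {                          // square-and-multiply loop
          if (m & 1u) result *= b;
          b *= b;
          m >>= 1;
        }
        if (n >= 0) return result;
        if (std::isinf(result)) { *handled = false; return 0.0; }  // runtime case
        return 1.0 / result;                      // negative exponent
      }
      if (exponent == -0.5) return std::sqrt(1.0 / base);  // 1/sqrt(x) == sqrt(1/x)
      if (exponent == 0.5) return std::sqrt(base);
      *handled = false;                           // everything else -> runtime
      return 0.0;
    }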
| 6627 |
5136 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { | 6628 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { |
5137 ASSERT_EQ(args->length(), 1); | 6629 ASSERT_EQ(args->length(), 1); |
5138 Load(args->at(0)); | 6630 Load(args->at(0)); |
5139 TranscendentalCacheStub stub(TranscendentalCache::SIN); | 6631 TranscendentalCacheStub stub(TranscendentalCache::SIN); |
5140 Result result = frame_->CallStub(&stub, 1); | 6632 Result result = frame_->CallStub(&stub, 1); |
5141 frame_->Push(&result); | 6633 frame_->Push(&result); |
5142 } | 6634 } |
5143 | 6635 |
5144 | 6636 |
5145 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { | 6637 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { |
5146 ASSERT_EQ(args->length(), 1); | 6638 ASSERT_EQ(args->length(), 1); |
5147 Load(args->at(0)); | 6639 Load(args->at(0)); |
5148 TranscendentalCacheStub stub(TranscendentalCache::COS); | 6640 TranscendentalCacheStub stub(TranscendentalCache::COS); |
5149 Result result = frame_->CallStub(&stub, 1); | 6641 Result result = frame_->CallStub(&stub, 1); |
5150 frame_->Push(&result); | 6642 frame_->Push(&result); |
5151 } | 6643 } |
5152 | 6644 |
5153 | 6645 |
5154 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) { | 6646 // Generates the Math.sqrt method. Please note - this function assumes that |
5155 ASSERT_EQ(2, args->length()); | 6647 // the callsite has executed ToNumber on the argument. |
5156 | 6648 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { |
| 6649 ASSERT(args->length() == 1); |
5157 Load(args->at(0)); | 6650 Load(args->at(0)); |
5158 Load(args->at(1)); | 6651 |
5159 | 6652 // Leave original value on the frame if we need to call runtime. |
5160 StringAddStub stub(NO_STRING_ADD_FLAGS); | 6653 frame()->Dup(); |
5161 Result answer = frame_->CallStub(&stub, 2); | 6654 Result result = frame()->Pop(); |
5162 frame_->Push(&answer); | 6655 result.ToRegister(); |
5163 } | 6656 frame()->Spill(result.reg()); |
5164 | 6657 Label runtime; |
5165 | 6658 Label non_smi; |
5166 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) { | 6659 Label load_done; |
5167 ASSERT_EQ(3, args->length()); | 6660 JumpTarget end; |
5168 | 6661 |
5169 Load(args->at(0)); | 6662 __ JumpIfNotSmi(result.reg(), &non_smi); |
5170 Load(args->at(1)); | 6663 __ SmiToInteger32(result.reg(), result.reg()); |
5171 Load(args->at(2)); | 6664 __ cvtlsi2sd(xmm0, result.reg()); |
5172 | 6665 __ jmp(&load_done); |
5173 SubStringStub stub; | 6666 __ bind(&non_smi); |
5174 Result answer = frame_->CallStub(&stub, 3); | 6667 __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset), |
5175 frame_->Push(&answer); | 6668 Heap::kHeapNumberMapRootIndex); |
5176 } | 6669 __ j(not_equal, &runtime); |
5177 | 6670 __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset)); |
5178 | 6671 |
5179 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) { | 6672 __ bind(&load_done); |
5180 ASSERT_EQ(2, args->length()); | 6673 __ sqrtsd(xmm0, xmm0); |
5181 | 6674 // A copy of the virtual frame to allow us to go to runtime after the |
5182 Load(args->at(0)); | 6675 // JumpTarget jump. |
5183 Load(args->at(1)); | 6676 Result scratch = allocator()->Allocate(); |
5184 | 6677 VirtualFrame* clone = new VirtualFrame(frame()); |
5185 StringCompareStub stub; | 6678 __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime); |
5186 Result answer = frame_->CallStub(&stub, 2); | 6679 |
5187 frame_->Push(&answer); | 6680 __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0); |
5188 } | 6681 frame()->Drop(1); |
5189 | |
5190 | |
5191 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { | |
5192 ASSERT(args->length() == 1); | |
5193 JumpTarget leave, null, function, non_function_constructor; | |
5194 Load(args->at(0)); // Load the object. | |
5195 Result obj = frame_->Pop(); | |
5196 obj.ToRegister(); | |
5197 frame_->Spill(obj.reg()); | |
5198 | |
5199 // If the object is a smi, we return null. | |
5200 Condition is_smi = masm_->CheckSmi(obj.reg()); | |
5201 null.Branch(is_smi); | |
5202 | |
5203 // Check that the object is a JS object but take special care of JS | |
5204 // functions to make sure they have 'Function' as their class. | |
5205 | |
5206 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg()); | |
5207 null.Branch(below); | |
5208 | |
5209 // As long as JS_FUNCTION_TYPE is the last instance type and it is | |
5210 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for | |
5211 // LAST_JS_OBJECT_TYPE. | |
5212 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); | |
5213 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); | |
5214 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE); | |
5215 function.Branch(equal); | |
5216 | |
5217 // Check if the constructor in the map is a function. | |
5218 __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset)); | |
5219 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); | |
5220 non_function_constructor.Branch(not_equal); | |
5221 | |
5222 // The obj register now contains the constructor function. Grab the | |
5223 // instance class name from there. | |
5224 __ movq(obj.reg(), | |
5225 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset)); | |
5226 __ movq(obj.reg(), | |
5227 FieldOperand(obj.reg(), | |
5228 SharedFunctionInfo::kInstanceClassNameOffset)); | |
5229 frame_->Push(&obj); | |
5230 leave.Jump(); | |
5231 | |
5232 // Functions have class 'Function'. | |
5233 function.Bind(); | |
5234 frame_->Push(Factory::function_class_symbol()); | |
5235 leave.Jump(); | |
5236 | |
5237 // Objects with a non-function constructor have class 'Object'. | |
5238 non_function_constructor.Bind(); | |
5239 frame_->Push(Factory::Object_symbol()); | |
5240 leave.Jump(); | |
5241 | |
5242 // Non-JS objects have class null. | |
5243 null.Bind(); | |
5244 frame_->Push(Factory::null_value()); | |
5245 | |
5246 // All done. | |
5247 leave.Bind(); | |
5248 } | |
5249 | |
5250 | |
5251 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) { | |
5252 ASSERT(args->length() == 2); | |
5253 JumpTarget leave; | |
5254 Load(args->at(0)); // Load the object. | |
5255 Load(args->at(1)); // Load the value. | |
5256 Result value = frame_->Pop(); | |
5257 Result object = frame_->Pop(); | |
5258 value.ToRegister(); | |
5259 object.ToRegister(); | |
5260 | |
5261 // if (object->IsSmi()) return value. | |
5262 Condition is_smi = masm_->CheckSmi(object.reg()); | |
5263 leave.Branch(is_smi, &value); | |
5264 | |
5265 // It is a heap object - get its map. | |
5266 Result scratch = allocator_->Allocate(); | |
5267 ASSERT(scratch.is_valid()); | |
5268 // if (!object->IsJSValue()) return value. | |
5269 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg()); | |
5270 leave.Branch(not_equal, &value); | |
5271 | |
5272 // Store the value. | |
5273 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg()); | |
5274 // Update the write barrier. Save the value as it will be | |
5275 // overwritten by the write barrier code and is needed afterward. | |
5276 Result duplicate_value = allocator_->Allocate(); | |
5277 ASSERT(duplicate_value.is_valid()); | |
5278 __ movq(duplicate_value.reg(), value.reg()); | |
5279 // The object register is also overwritten by the write barrier and | |
5280 // possibly aliased in the frame. | |
5281 frame_->Spill(object.reg()); | |
5282 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(), | |
5283 scratch.reg()); | |
5284 object.Unuse(); | |
5285 scratch.Unuse(); | 6682 scratch.Unuse(); |
5286 duplicate_value.Unuse(); | 6683 end.Jump(&result); |
5287 | 6684 // We only branch to runtime if we have an allocation error. |
5288 // Leave. | 6685 // Use the copy of the original frame as our current frame. |
5289 leave.Bind(&value); | 6686 RegisterFile empty_regs; |
5290 frame_->Push(&value); | 6687 SetFrame(clone, &empty_regs); |
5291 } | 6688 __ bind(&runtime); |
5292 | 6689 result = frame()->CallRuntime(Runtime::kMath_sqrt, 1); |
5293 | 6690 |
5294 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { | 6691 end.Bind(&result); |
5295 ASSERT(args->length() == 1); | 6692 frame()->Push(&result); |
5296 JumpTarget leave; | 6693 } |
5297 Load(args->at(0)); // Load the object. | 6694 |
5298 frame_->Dup(); | 6695 |
5299 Result object = frame_->Pop(); | 6696 void CodeGenerator::VisitCallRuntime(CallRuntime* node) { |
5300 object.ToRegister(); | 6697 if (CheckForInlineRuntimeCall(node)) { |
5301 ASSERT(object.is_valid()); | 6698 return; |
5302 // if (object->IsSmi()) return object. | 6699 } |
5303 Condition is_smi = masm_->CheckSmi(object.reg()); | 6700 |
5304 leave.Branch(is_smi); | 6701 ZoneList<Expression*>* args = node->arguments(); |
5305 // It is a heap object - get map. | 6702 Comment cmnt(masm_, "[ CallRuntime"); |
5306 Result temp = allocator()->Allocate(); | 6703 Runtime::Function* function = node->function(); |
5307 ASSERT(temp.is_valid()); | 6704 |
5308 // if (!object->IsJSValue()) return object. | 6705 if (function == NULL) { |
5309 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg()); | 6706 // Push the builtins object found in the current global object. |
5310 leave.Branch(not_equal); | 6707 Result temp = allocator()->Allocate(); |
5311 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset)); | 6708 ASSERT(temp.is_valid()); |
5312 object.Unuse(); | 6709 __ movq(temp.reg(), GlobalObject()); |
5313 frame_->SetElementAt(0, &temp); | 6710 __ movq(temp.reg(), |
5314 leave.Bind(); | 6711 FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset)); |
5315 } | 6712 frame_->Push(&temp); |
5316 | 6713 } |
5317 | 6714 |
5318 // ----------------------------------------------------------------------------- | 6715 // Push the arguments ("left-to-right"). |
5319 // CodeGenerator implementation of Expressions | 6716 int arg_count = args->length(); |
5320 | 6717 for (int i = 0; i < arg_count; i++) { |
5321 void CodeGenerator::LoadAndSpill(Expression* expression) { | 6718 Load(args->at(i)); |
5322 // TODO(x64): No architecture specific code. Move to shared location. | 6719 } |
5323 ASSERT(in_spilled_code()); | 6720 |
5324 set_in_spilled_code(false); | 6721 if (function == NULL) { |
5325 Load(expression); | 6722 // Call the JS runtime function. |
5326 frame_->SpillAll(); | 6723 frame_->Push(node->name()); |
5327 set_in_spilled_code(true); | 6724 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET, |
5328 } | 6725 arg_count, |
5329 | 6726 loop_nesting_); |
5330 | 6727 frame_->RestoreContextRegister(); |
5331 void CodeGenerator::Load(Expression* expr) { | 6728 frame_->Push(&answer); |
5332 #ifdef DEBUG | 6729 } else { |
5333 int original_height = frame_->height(); | 6730 // Call the C runtime function. |
5334 #endif | 6731 Result answer = frame_->CallRuntime(function, arg_count); |
5335 ASSERT(!in_spilled_code()); | 6732 frame_->Push(&answer); |
5336 JumpTarget true_target; | 6733 } |
5337 JumpTarget false_target; | 6734 } |
5338 ControlDestination dest(&true_target, &false_target, true); | 6735 |
5339 LoadCondition(expr, &dest, false); | 6736 |
5340 | 6737 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { |
5341 if (dest.false_was_fall_through()) { | 6738 Comment cmnt(masm_, "[ UnaryOperation"); |
5342 // The false target was just bound. | 6739 |
5343 JumpTarget loaded; | 6740 Token::Value op = node->op(); |
5344 frame_->Push(Factory::false_value()); | 6741 |
5345 // There may be dangling jumps to the true target. | 6742 if (op == Token::NOT) { |
5346 if (true_target.is_linked()) { | 6743 // Swap the true and false targets but keep the same actual label |
5347 loaded.Jump(); | 6744 // as the fall through. |
5348 true_target.Bind(); | 6745 destination()->Invert(); |
5349 frame_->Push(Factory::true_value()); | 6746 LoadCondition(node->expression(), destination(), true); |
5350 loaded.Bind(); | 6747 // Swap the labels back. |
5351 } | 6748 destination()->Invert(); |
5352 | 6749 |
5353 } else if (dest.is_used()) { | 6750 } else if (op == Token::DELETE) { |
5354 // There is true, and possibly false, control flow (with true as | 6751 Property* property = node->expression()->AsProperty(); |
5355 // the fall through). | 6752 if (property != NULL) { |
5356 JumpTarget loaded; | 6753 Load(property->obj()); |
5357 frame_->Push(Factory::true_value()); | 6754 Load(property->key()); |
5358 if (false_target.is_linked()) { | 6755 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2); |
5359 loaded.Jump(); | 6756 frame_->Push(&answer); |
5360 false_target.Bind(); | 6757 return; |
| 6758 } |
| 6759 |
| 6760 Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); |
| 6761 if (variable != NULL) { |
| 6762 Slot* slot = variable->slot(); |
| 6763 if (variable->is_global()) { |
| 6764 LoadGlobal(); |
| 6765 frame_->Push(variable->name()); |
| 6766 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, |
| 6767 CALL_FUNCTION, 2); |
| 6768 frame_->Push(&answer); |
| 6769 return; |
| 6770 |
| 6771 } else if (slot != NULL && slot->type() == Slot::LOOKUP) { |
| 6772 // Call the runtime to look up the context holding the named |
| 6773 // variable. Sync the virtual frame eagerly so we can push the |
| 6774 // arguments directly into place. |
| 6775 frame_->SyncRange(0, frame_->element_count() - 1); |
| 6776 frame_->EmitPush(rsi); |
| 6777 frame_->EmitPush(variable->name()); |
| 6778 Result context = frame_->CallRuntime(Runtime::kLookupContext, 2); |
| 6779 ASSERT(context.is_register()); |
| 6780 frame_->EmitPush(context.reg()); |
| 6781 context.Unuse(); |
| 6782 frame_->EmitPush(variable->name()); |
| 6783 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, |
| 6784 CALL_FUNCTION, 2); |
| 6785 frame_->Push(&answer); |
| 6786 return; |
| 6787 } |
| 6788 |
| 6789 // Default: Result of deleting non-global, not dynamically |
| 6790 // introduced variables is false. |
5361 frame_->Push(Factory::false_value()); | 6791 frame_->Push(Factory::false_value()); |
5362 loaded.Bind(); | 6792 |
| 6793 } else { |
| 6794 // Default: Result of deleting expressions is true. |
| 6795 Load(node->expression()); // may have side-effects |
| 6796 frame_->SetElementAt(0, Factory::true_value()); |
| 6797 } |
| 6798 |
| 6799 } else if (op == Token::TYPEOF) { |
| 6800 // Special case for loading the typeof expression; see comment on |
| 6801 // LoadTypeofExpression(). |
| 6802 LoadTypeofExpression(node->expression()); |
| 6803 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1); |
| 6804 frame_->Push(&answer); |
| 6805 |
| 6806 } else if (op == Token::VOID) { |
| 6807 Expression* expression = node->expression(); |
| 6808 if (expression && expression->AsLiteral() && ( |
| 6809 expression->AsLiteral()->IsTrue() || |
| 6810 expression->AsLiteral()->IsFalse() || |
| 6811 expression->AsLiteral()->handle()->IsNumber() || |
| 6812 expression->AsLiteral()->handle()->IsString() || |
| 6813 expression->AsLiteral()->handle()->IsJSRegExp() || |
| 6814 expression->AsLiteral()->IsNull())) { |
| 6815 // Omit evaluating the value of the primitive literal. |
| 6816 // It will be discarded anyway, and can have no side effect. |
| 6817 frame_->Push(Factory::undefined_value()); |
| 6818 } else { |
| 6819 Load(node->expression()); |
| 6820 frame_->SetElementAt(0, Factory::undefined_value()); |
5363 } | 6821 } |
5364 | 6822 |
5365 } else { | 6823 } else { |
5366 // We have a valid value on top of the frame, but we still may | 6824 bool can_overwrite = |
5367 // have dangling jumps to the true and false targets from nested | 6825 (node->expression()->AsBinaryOperation() != NULL && |
5368 // subexpressions (eg, the left subexpressions of the | 6826 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); |
5369 // short-circuited boolean operators). | 6827 UnaryOverwriteMode overwrite = |
5370 ASSERT(has_valid_frame()); | 6828 can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; |
5371 if (true_target.is_linked() || false_target.is_linked()) { | 6829 bool no_negative_zero = node->expression()->no_negative_zero(); |
5372 JumpTarget loaded; | 6830 Load(node->expression()); |
5373 loaded.Jump(); // Don't lose the current TOS. | 6831 switch (op) { |
5374 if (true_target.is_linked()) { | 6832 case Token::NOT: |
5375 true_target.Bind(); | 6833 case Token::DELETE: |
5376 frame_->Push(Factory::true_value()); | 6834 case Token::TYPEOF: |
5377 if (false_target.is_linked()) { | 6835 UNREACHABLE(); // handled above |
5378 loaded.Jump(); | 6836 break; |
| 6837 |
| 6838 case Token::SUB: { |
| 6839 GenericUnaryOpStub stub( |
| 6840 Token::SUB, |
| 6841 overwrite, |
| 6842 no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); |
| 6843 Result operand = frame_->Pop(); |
| 6844 Result answer = frame_->CallStub(&stub, &operand); |
| 6845 answer.set_type_info(TypeInfo::Number()); |
| 6846 frame_->Push(&answer); |
| 6847 break; |
| 6848 } |
| 6849 |
| 6850 case Token::BIT_NOT: { |
| 6851 // Smi check. |
| 6852 JumpTarget smi_label; |
| 6853 JumpTarget continue_label; |
| 6854 Result operand = frame_->Pop(); |
| 6855 operand.ToRegister(); |
| 6856 |
| 6857 Condition is_smi = masm_->CheckSmi(operand.reg()); |
| 6858 smi_label.Branch(is_smi, &operand); |
| 6859 |
| 6860 GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); |
| 6861 Result answer = frame_->CallStub(&stub, &operand); |
| 6862 continue_label.Jump(&answer); |
| 6863 |
| 6864 smi_label.Bind(&answer); |
| 6865 answer.ToRegister(); |
| 6866 frame_->Spill(answer.reg()); |
| 6867 __ SmiNot(answer.reg(), answer.reg()); |
| 6868 continue_label.Bind(&answer); |
| 6869 answer.set_type_info(TypeInfo::Smi()); |
| 6870 frame_->Push(&answer); |
| 6871 break; |
| 6872 } |
| 6873 |
| 6874 case Token::ADD: { |
| 6875 // Smi check. |
| 6876 JumpTarget continue_label; |
| 6877 Result operand = frame_->Pop(); |
| 6878 TypeInfo operand_info = operand.type_info(); |
| 6879 operand.ToRegister(); |
| 6880 Condition is_smi = masm_->CheckSmi(operand.reg()); |
| 6881 continue_label.Branch(is_smi, &operand); |
| 6882 frame_->Push(&operand); |
| 6883 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER, |
| 6884 CALL_FUNCTION, 1); |
| 6885 |
| 6886 continue_label.Bind(&answer); |
| 6887 if (operand_info.IsSmi()) { |
| 6888 answer.set_type_info(TypeInfo::Smi()); |
| 6889 } else if (operand_info.IsInteger32()) { |
| 6890 answer.set_type_info(TypeInfo::Integer32()); |
| 6891 } else { |
| 6892 answer.set_type_info(TypeInfo::Number()); |
5379 } | 6893 } |
| 6894 frame_->Push(&answer); |
| 6895 break; |
5380 } | 6896 } |
5381 if (false_target.is_linked()) { | 6897 default: |
5382 false_target.Bind(); | 6898 UNREACHABLE(); |
5383 frame_->Push(Factory::false_value()); | 6899 } |
| 6900 } |
| 6901 } |
| 6902 |
| 6903 |
| 6904 // The value in dst was optimistically incremented or decremented. |
| 6905 // The result overflowed or was not smi tagged. Call into the runtime |
| 6906 // to convert the argument to a number, and call the specialized add |
| 6907 // or subtract stub. The result is left in dst. |
| 6908 class DeferredPrefixCountOperation: public DeferredCode { |
| 6909 public: |
| 6910 DeferredPrefixCountOperation(Register dst, |
| 6911 bool is_increment, |
| 6912 TypeInfo input_type) |
| 6913 : dst_(dst), is_increment_(is_increment), input_type_(input_type) { |
| 6914 set_comment("[ DeferredCountOperation"); |
| 6915 } |
| 6916 |
| 6917 virtual void Generate(); |
| 6918 |
| 6919 private: |
| 6920 Register dst_; |
| 6921 bool is_increment_; |
| 6922 TypeInfo input_type_; |
| 6923 }; |
| 6924 |
| 6925 |
| 6926 void DeferredPrefixCountOperation::Generate() { |
| 6927 Register left; |
| 6928 if (input_type_.IsNumber()) { |
| 6929 left = dst_; |
| 6930 } else { |
| 6931 __ push(dst_); |
| 6932 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); |
| 6933 left = rax; |
| 6934 } |
| 6935 |
| 6936 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, |
| 6937 NO_OVERWRITE, |
| 6938 NO_GENERIC_BINARY_FLAGS, |
| 6939 TypeInfo::Number()); |
| 6940 stub.GenerateCall(masm_, left, Smi::FromInt(1)); |
| 6941 |
| 6942 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 6943 } |
| 6944 |
| 6945 |
| 6946 // The value in dst was optimistically incremented or decremented. |
| 6947 // The result overflowed or was not smi tagged. Call into the runtime |
| 6948 // to convert the argument to a number. Update the original value in |
| 6949 // old. Call the specialized add or subtract stub. The result is |
| 6950 // left in dst. |
| 6951 class DeferredPostfixCountOperation: public DeferredCode { |
| 6952 public: |
| 6953 DeferredPostfixCountOperation(Register dst, |
| 6954 Register old, |
| 6955 bool is_increment, |
| 6956 TypeInfo input_type) |
| 6957 : dst_(dst), |
| 6958 old_(old), |
| 6959 is_increment_(is_increment), |
| 6960 input_type_(input_type) { |
| 6961 set_comment("[ DeferredCountOperation"); |
| 6962 } |
| 6963 |
| 6964 virtual void Generate(); |
| 6965 |
| 6966 private: |
| 6967 Register dst_; |
| 6968 Register old_; |
| 6969 bool is_increment_; |
| 6970 TypeInfo input_type_; |
| 6971 }; |
| 6972 |
| 6973 |
| 6974 void DeferredPostfixCountOperation::Generate() { |
| 6975 Register left; |
| 6976 if (input_type_.IsNumber()) { |
| 6977 __ push(dst_); // Save the input to use as the old value. |
| 6978 left = dst_; |
| 6979 } else { |
| 6980 __ push(dst_); |
| 6981 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); |
| 6982 __ push(rax); // Save the result of ToNumber to use as the old value. |
| 6983 left = rax; |
| 6984 } |
| 6985 |
| 6986 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, |
| 6987 NO_OVERWRITE, |
| 6988 NO_GENERIC_BINARY_FLAGS, |
| 6989 TypeInfo::Number()); |
| 6990 stub.GenerateCall(masm_, left, Smi::FromInt(1)); |
| 6991 |
| 6992 if (!dst_.is(rax)) __ movq(dst_, rax); |
| 6993 __ pop(old_); |
| 6994 } |
| 6995 |
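Both deferred classes back up the same pattern: the main code path optimistically performs the smi increment or decrement inline and only jumps to the deferred code when the operand was not a smi or the operation overflowed. A scalar analogue in plain C++; the slow-path helper is hypothetical, and real x64 smis are tagged words rather than raw int32_t values:

    #include <cstdint>
    #include <limits>

    // Hypothetical stand-in for the ToNumber + generic stub fallback that the
    // deferred code performs.
    int64_t SlowPathAdd(int64_t value, int64_t delta) { return value + delta; }

    int64_t IncrementOptimistically(int32_t value, bool is_smi) {
      // Fast path: inline add guarded against overflow, mirroring SmiAddConstant
      // jumping to the deferred entry label when the smi operation fails.
      if (is_smi && value < std::numeric_limits<int32_t>::max()) {
        return value + 1;
      }
      // Slow path: roughly what DeferredPrefixCountOperation::Generate() does.
      return SlowPathAdd(value, 1);
    }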
| 6996 |
| 6997 void CodeGenerator::VisitCountOperation(CountOperation* node) { |
| 6998 Comment cmnt(masm_, "[ CountOperation"); |
| 6999 |
| 7000 bool is_postfix = node->is_postfix(); |
| 7001 bool is_increment = node->op() == Token::INC; |
| 7002 |
| 7003 Variable* var = node->expression()->AsVariableProxy()->AsVariable(); |
| 7004 bool is_const = (var != NULL && var->mode() == Variable::CONST); |
| 7005 |
| 7006 // Postfix operations need a stack slot under the reference to hold |
| 7007 // the old value while the new value is being stored. This is so that |
| 7008 // in the case that storing the new value requires a call, the old |
| 7009 // value will be in the frame to be spilled. |
| 7010 if (is_postfix) frame_->Push(Smi::FromInt(0)); |
| 7011 |
| 7012 // A constant reference is not saved to, so the reference is not a |
| 7013 // compound assignment reference. |
| 7014 { Reference target(this, node->expression(), !is_const); |
| 7015 if (target.is_illegal()) { |
| 7016 // Spoof the virtual frame to have the expected height (one higher |
| 7017 // than on entry). |
| 7018 if (!is_postfix) frame_->Push(Smi::FromInt(0)); |
| 7019 return; |
| 7020 } |
| 7021 target.TakeValue(); |
| 7022 |
| 7023 Result new_value = frame_->Pop(); |
| 7024 new_value.ToRegister(); |
| 7025 |
| 7026 Result old_value; // Only allocated in the postfix case. |
| 7027 if (is_postfix) { |
| 7028 // Allocate a temporary to preserve the old value. |
| 7029 old_value = allocator_->Allocate(); |
| 7030 ASSERT(old_value.is_valid()); |
| 7031 __ movq(old_value.reg(), new_value.reg()); |
| 7032 |
| 7033 // The return value for postfix operations is ToNumber(input). |
| 7034 // Keep more precise type info if the input is some kind of |
| 7035 // number already. If the input is not a number we have to wait |
| 7036 // for the deferred code to convert it. |
| 7037 if (new_value.type_info().IsNumber()) { |
| 7038 old_value.set_type_info(new_value.type_info()); |
5384 } | 7039 } |
5385 loaded.Bind(); | 7040 } |
5386 } | 7041 // Ensure the new value is writable. |
5387 } | 7042 frame_->Spill(new_value.reg()); |
5388 | 7043 |
5389 ASSERT(has_valid_frame()); | 7044 DeferredCode* deferred = NULL; |
5390 ASSERT(frame_->height() == original_height + 1); | 7045 if (is_postfix) { |
5391 } | 7046 deferred = new DeferredPostfixCountOperation(new_value.reg(), |
5392 | 7047 old_value.reg(), |
5393 | 7048 is_increment, |
5394 // Emit code to load the value of an expression to the top of the | 7049 new_value.type_info()); |
5395 // frame. If the expression is boolean-valued it may be compiled (or | |
5396 // partially compiled) into control flow to the control destination. | |
5397 // If force_control is true, control flow is forced. | |
5398 void CodeGenerator::LoadCondition(Expression* x, | |
5399 ControlDestination* dest, | |
5400 bool force_control) { | |
5401 ASSERT(!in_spilled_code()); | |
5402 int original_height = frame_->height(); | |
5403 | |
5404 { CodeGenState new_state(this, dest); | |
5405 Visit(x); | |
5406 | |
5407 // If we hit a stack overflow, we may not have actually visited | |
5408 // the expression. In that case, we ensure that we have a | |
5409 // valid-looking frame state because we will continue to generate | |
5410 // code as we unwind the C++ stack. | |
5411 // | |
5412 // It's possible to have both a stack overflow and a valid frame | |
5413 // state (eg, a subexpression overflowed, visiting it returned | |
5414 // with a dummied frame state, and visiting this expression | |
5415 // returned with a normal-looking state). | |
5416 if (HasStackOverflow() && | |
5417 !dest->is_used() && | |
5418 frame_->height() == original_height) { | |
5419 dest->Goto(true); | |
5420 } | |
5421 } | |
5422 | |
5423 if (force_control && !dest->is_used()) { | |
5424 // Convert the TOS value into flow to the control destination. | |
5425 // TODO(X64): Make control flow to control destinations work. | |
5426 ToBoolean(dest); | |
5427 } | |
5428 | |
5429 ASSERT(!(force_control && !dest->is_used())); | |
5430 ASSERT(dest->is_used() || frame_->height() == original_height + 1); | |
5431 } | |
5432 | |
5433 | |
5434 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and | |
5435 // convert it to a boolean in the condition code register or jump to | |
5436 // 'false_target'/'true_target' as appropriate. | |
5437 void CodeGenerator::ToBoolean(ControlDestination* dest) { | |
5438 Comment cmnt(masm_, "[ ToBoolean"); | |
5439 | |
5440 // The value to convert should be popped from the frame. | |
5441 Result value = frame_->Pop(); | |
5442 value.ToRegister(); | |
5443 | |
5444 if (value.is_number()) { | |
5445 // Fast case if TypeInfo indicates only numbers. | |
5446 if (FLAG_debug_code) { | |
5447 __ AbortIfNotNumber(value.reg()); | |
5448 } | |
5449 // Smi => false iff zero. | |
5450 __ SmiCompare(value.reg(), Smi::FromInt(0)); | |
5451 if (value.is_smi()) { | |
5452 value.Unuse(); | |
5453 dest->Split(not_zero); | |
5454 } else { | 7050 } else { |
5455 dest->false_target()->Branch(equal); | 7051 deferred = new DeferredPrefixCountOperation(new_value.reg(), |
5456 Condition is_smi = masm_->CheckSmi(value.reg()); | 7052 is_increment, |
5457 dest->true_target()->Branch(is_smi); | 7053 new_value.type_info()); |
5458 __ xorpd(xmm0, xmm0); | 7054 } |
5459 __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset)); | 7055 |
5460 value.Unuse(); | 7056 if (new_value.is_smi()) { |
5461 dest->Split(not_zero); | 7057 if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); } |
5462 } | 7058 } else { |
| 7059 __ JumpIfNotSmi(new_value.reg(), deferred->entry_label()); |
| 7060 } |
| 7061 if (is_increment) { |
| 7062 __ SmiAddConstant(new_value.reg(), |
| 7063 new_value.reg(), |
| 7064 Smi::FromInt(1), |
| 7065 deferred->entry_label()); |
| 7066 } else { |
| 7067 __ SmiSubConstant(new_value.reg(), |
| 7068 new_value.reg(), |
| 7069 Smi::FromInt(1), |
| 7070 deferred->entry_label()); |
| 7071 } |
| 7072 deferred->BindExit(); |
| 7073 |
| 7074 // Postfix count operations return their input converted to |
| 7075 // number. The case when the input is already a number is covered |
| 7076 // above in the allocation code for old_value. |
| 7077 if (is_postfix && !new_value.type_info().IsNumber()) { |
| 7078 old_value.set_type_info(TypeInfo::Number()); |
| 7079 } |
| 7080 |
| 7081 new_value.set_type_info(TypeInfo::Number()); |
| 7082 |
| 7083 // Postfix: store the old value in the allocated slot under the |
| 7084 // reference. |
| 7085 if (is_postfix) frame_->SetElementAt(target.size(), &old_value); |
| 7086 |
| 7087 frame_->Push(&new_value); |
| 7088 // Non-constant: update the reference. |
| 7089 if (!is_const) target.SetValue(NOT_CONST_INIT); |
| 7090 } |
| 7091 |
| 7092 // Postfix: drop the new value and use the old. |
| 7093 if (is_postfix) frame_->Drop(); |
| 7094 } |
| 7095 |
| 7096 |
| 7097 void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) { |
| 7098 // According to ECMA-262 section 11.11, page 58, the binary logical |
| 7099 // operators must yield the result of one of the two expressions |
| 7100 // before any ToBoolean() conversions. This means that the value |
| 7101 // produced by a && or || operator is not necessarily a boolean. |
| 7102 |
| 7103 // NOTE: If the left hand side produces a materialized value (not |
| 7104 // control flow), we force the right hand side to do the same. This |
| 7105 // is necessary because we assume that if we get control flow on the |
| 7106 // last path out of an expression we got it on all paths. |
| 7107 if (node->op() == Token::AND) { |
| 7108 JumpTarget is_true; |
| 7109 ControlDestination dest(&is_true, destination()->false_target(), true); |
| 7110 LoadCondition(node->left(), &dest, false); |
| 7111 |
| 7112 if (dest.false_was_fall_through()) { |
| 7113 // The current false target was used as the fall-through. If |
| 7114 // there are no dangling jumps to is_true then the left |
| 7115 // subexpression was unconditionally false. Otherwise we have |
| 7116 // paths where we do have to evaluate the right subexpression. |
| 7117 if (is_true.is_linked()) { |
| 7118 // We need to compile the right subexpression. If the jump to |
| 7119 // the current false target was a forward jump then we have a |
| 7120 // valid frame, we have just bound the false target, and we |
| 7121 // have to jump around the code for the right subexpression. |
| 7122 if (has_valid_frame()) { |
| 7123 destination()->false_target()->Unuse(); |
| 7124 destination()->false_target()->Jump(); |
| 7125 } |
| 7126 is_true.Bind(); |
| 7127 // The left subexpression compiled to control flow, so the |
| 7128 // right one is free to do so as well. |
| 7129 LoadCondition(node->right(), destination(), false); |
| 7130 } else { |
| 7131 // We have actually just jumped to or bound the current false |
| 7132 // target but the current control destination is not marked as |
| 7133 // used. |
| 7134 destination()->Use(false); |
| 7135 } |
| 7136 |
| 7137 } else if (dest.is_used()) { |
| 7138 // The left subexpression compiled to control flow (and is_true |
| 7139 // was just bound), so the right is free to do so as well. |
| 7140 LoadCondition(node->right(), destination(), false); |
| 7141 |
| 7142 } else { |
| 7143 // We have a materialized value on the frame, so we exit with |
| 7144 // one on all paths. There are possibly also jumps to is_true |
| 7145 // from nested subexpressions. |
| 7146 JumpTarget pop_and_continue; |
| 7147 JumpTarget exit; |
| 7148 |
| 7149 // Avoid popping the result if it converts to 'false' using the |
| 7150 // standard ToBoolean() conversion as described in ECMA-262, |
| 7151 // section 9.2, page 30. |
| 7152 // |
| 7153 // Duplicate the TOS value. The duplicate will be popped by |
| 7154 // ToBoolean. |
| 7155 frame_->Dup(); |
| 7156 ControlDestination dest(&pop_and_continue, &exit, true); |
| 7157 ToBoolean(&dest); |
| 7158 |
| 7159 // Pop the result of evaluating the first part. |
| 7160 frame_->Drop(); |
| 7161 |
| 7162 // Compile right side expression. |
| 7163 is_true.Bind(); |
| 7164 Load(node->right()); |
| 7165 |
| 7166 // Exit (always with a materialized value). |
| 7167 exit.Bind(); |
| 7168 } |
| 7169 |
5463 } else { | 7170 } else { |
5464 // Fast case checks. | 7171 ASSERT(node->op() == Token::OR); |
5465 // 'false' => false. | 7172 JumpTarget is_false; |
5466 __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex); | 7173 ControlDestination dest(destination()->true_target(), &is_false, false); |
5467 dest->false_target()->Branch(equal); | 7174 LoadCondition(node->left(), &dest, false); |
5468 | 7175 |
5469 // 'true' => true. | 7176 if (dest.true_was_fall_through()) { |
5470 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex); | 7177 // The current true target was used as the fall-through. If |
5471 dest->true_target()->Branch(equal); | 7178 // there are no dangling jumps to is_false then the left |
5472 | 7179 // subexpression was unconditionally true. Otherwise we have |
5473 // 'undefined' => false. | 7180 // paths where we do have to evaluate the right subexpression. |
5474 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex); | 7181 if (is_false.is_linked()) { |
5475 dest->false_target()->Branch(equal); | 7182 // We need to compile the right subexpression. If the jump to |
5476 | 7183 // the current true target was a forward jump then we have a |
5477 // Smi => false iff zero. | 7184 // valid frame, we have just bound the true target, and we |
5478 __ SmiCompare(value.reg(), Smi::FromInt(0)); | 7185 // have to jump around the code for the right subexpression. |
5479 dest->false_target()->Branch(equal); | 7186 if (has_valid_frame()) { |
5480 Condition is_smi = masm_->CheckSmi(value.reg()); | 7187 destination()->true_target()->Unuse(); |
5481 dest->true_target()->Branch(is_smi); | 7188 destination()->true_target()->Jump(); |
5482 | 7189 } |
5483 // Call the stub for all other cases. | 7190 is_false.Bind(); |
5484 frame_->Push(&value); // Undo the Pop() from above. | 7191 // The left subexpression compiled to control flow, so the |
5485 ToBooleanStub stub; | 7192 // right one is free to do so as well. |
5486 Result temp = frame_->CallStub(&stub, 1); | 7193 LoadCondition(node->right(), destination(), false); |
5487 // Convert the result to a condition code. | 7194 } else { |
5488 __ testq(temp.reg(), temp.reg()); | 7195 // We have just jumped to or bound the current true target but |
5489 temp.Unuse(); | 7196 // the current control destination is not marked as used. |
5490 dest->Split(not_equal); | 7197 destination()->Use(true); |
5491 } | 7198 } |
5492 } | 7199 |
5493 | 7200 } else if (dest.is_used()) { |
5494 | 7201 // The left subexpression compiled to control flow (and is_false |
5495 void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) { | 7202 // was just bound), so the right is free to do so as well. |
5496 UNIMPLEMENTED(); | 7203 LoadCondition(node->right(), destination(), false); |
5497 // TODO(X64): Implement security policy for loads of smis. | 7204 |
5498 } | |
5499 | |
5500 | |
5501 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { | |
5502 return false; | |
5503 } | |
5504 | |
5505 //------------------------------------------------------------------------------ | |
5506 // CodeGenerator implementation of variables, lookups, and stores. | |
5507 | |
5508 Reference::Reference(CodeGenerator* cgen, | |
5509 Expression* expression, | |
5510 bool persist_after_get) | |
5511 : cgen_(cgen), | |
5512 expression_(expression), | |
5513 type_(ILLEGAL), | |
5514 persist_after_get_(persist_after_get) { | |
5515 cgen->LoadReference(this); | |
5516 } | |
5517 | |
5518 | |
5519 Reference::~Reference() { | |
5520 ASSERT(is_unloaded() || is_illegal()); | |
5521 } | |
5522 | |
5523 | |
5524 void CodeGenerator::LoadReference(Reference* ref) { | |
5525 // References are loaded from both spilled and unspilled code. Set the | |
5526 // state to unspilled to allow that (and explicitly spill after | |
5527 // construction at the construction sites). | |
5528 bool was_in_spilled_code = in_spilled_code_; | |
5529 in_spilled_code_ = false; | |
5530 | |
5531 Comment cmnt(masm_, "[ LoadReference"); | |
5532 Expression* e = ref->expression(); | |
5533 Property* property = e->AsProperty(); | |
5534 Variable* var = e->AsVariableProxy()->AsVariable(); | |
5535 | |
5536 if (property != NULL) { | |
5537 // The expression is either a property or a variable proxy that rewrites | |
5538 // to a property. | |
5539 Load(property->obj()); | |
5540 if (property->key()->IsPropertyName()) { | |
5541 ref->set_type(Reference::NAMED); | |
5542 } else { | 7205 } else { |
5543 Load(property->key()); | 7206 // We have a materialized value on the frame, so we exit with |
5544 ref->set_type(Reference::KEYED); | 7207 // one on all paths. There are possibly also jumps to is_false |
5545 } | 7208 // from nested subexpressions. |
5546 } else if (var != NULL) { | 7209 JumpTarget pop_and_continue; |
5547 // The expression is a variable proxy that does not rewrite to a | 7210 JumpTarget exit; |
5548 // property. Global variables are treated as named property references. | 7211 |
5549 if (var->is_global()) { | 7212 // Avoid popping the result if it converts to 'true' using the |
5550 // If rax is free, the register allocator prefers it. Thus the code | 7213 // standard ToBoolean() conversion as described in ECMA-262, |
5551 // generator will load the global object into rax, which is where | 7214 // section 9.2, page 30. |
5552 // LoadIC wants it. Most uses of Reference call LoadIC directly | 7215 // |
5553 // after the reference is created. | 7216 // Duplicate the TOS value. The duplicate will be popped by |
5554 frame_->Spill(rax); | 7217 // ToBoolean. |
5555 LoadGlobal(); | 7218 frame_->Dup(); |
5556 ref->set_type(Reference::NAMED); | 7219 ControlDestination dest(&exit, &pop_and_continue, false); |
| 7220 ToBoolean(&dest); |
| 7221 |
| 7222 // Pop the result of evaluating the first part. |
| 7223 frame_->Drop(); |
| 7224 |
| 7225 // Compile right side expression. |
| 7226 is_false.Bind(); |
| 7227 Load(node->right()); |
| 7228 |
| 7229 // Exit (always with a materialized value). |
| 7230 exit.Bind(); |
| 7231 } |
| 7232 } |
| 7233 } |
| 7234 |
| 7235 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { |
| 7236 Comment cmnt(masm_, "[ BinaryOperation"); |
| 7237 |
| 7238 if (node->op() == Token::AND || node->op() == Token::OR) { |
| 7239 GenerateLogicalBooleanOperation(node); |
| 7240 } else { |
| 7241 // NOTE: The code below assumes that the slow cases (calls to runtime) |
| 7242 // never return a constant/immutable object. |
| 7243 OverwriteMode overwrite_mode = NO_OVERWRITE; |
| 7244 if (node->left()->AsBinaryOperation() != NULL && |
| 7245 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { |
| 7246 overwrite_mode = OVERWRITE_LEFT; |
| 7247 } else if (node->right()->AsBinaryOperation() != NULL && |
| 7248 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { |
| 7249 overwrite_mode = OVERWRITE_RIGHT; |
| 7250 } |
| 7251 |
| 7252 if (node->left()->IsTrivial()) { |
| 7253 Load(node->right()); |
| 7254 Result right = frame_->Pop(); |
| 7255 frame_->Push(node->left()); |
| 7256 frame_->Push(&right); |
5557 } else { | 7257 } else { |
5558 ASSERT(var->slot() != NULL); | 7258 Load(node->left()); |
5559 ref->set_type(Reference::SLOT); | 7259 Load(node->right()); |
5560 } | 7260 } |
5561 } else { | 7261 GenericBinaryOperation(node, overwrite_mode); |
5562 // Anything else is a runtime error. | 7262 } |
5563 Load(e); | 7263 } |
5564 frame_->CallRuntime(Runtime::kThrowReferenceError, 1); | 7264 |
5565 } | 7265 |
5566 | 7266 void CodeGenerator::VisitThisFunction(ThisFunction* node) { |
5567 in_spilled_code_ = was_in_spilled_code; | 7267 frame_->PushFunction(); |
5568 } | 7268 } |
5569 | 7269 |
5570 | 7270 |
5571 void CodeGenerator::UnloadReference(Reference* ref) { | 7271 void CodeGenerator::VisitCompareOperation(CompareOperation* node) { |
5572 // Pop a reference from the stack while preserving TOS. | 7272 Comment cmnt(masm_, "[ CompareOperation"); |
5573 Comment cmnt(masm_, "[ UnloadReference"); | 7273 |
5574 frame_->Nip(ref->size()); | 7274 // Get the expressions from the node. |
5575 ref->set_unloaded(); | 7275 Expression* left = node->left(); |
5576 } | 7276 Expression* right = node->right(); |
5577 | 7277 Token::Value op = node->op(); |
5578 | 7278 // To make typeof testing for natives implemented in JavaScript really |
5579 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { | 7279 // efficient, we generate special code for expressions of the form: |
5580 // Currently, this assertion will fail if we try to assign to | 7280 // 'typeof <expression> == <string>'. |
5581 // a constant variable that is constant because it is read-only | 7281 UnaryOperation* operation = left->AsUnaryOperation(); |
5582 // (such as the variable referring to a named function expression). | 7282 if ((op == Token::EQ || op == Token::EQ_STRICT) && |
5583 // We need to implement assignments to read-only variables. | 7283 (operation != NULL && operation->op() == Token::TYPEOF) && |
5584 // Ideally, we should do this during AST generation (by converting | 7284 (right->AsLiteral() != NULL && |
5585 // such assignments into expression statements); however, in general | 7285 right->AsLiteral()->handle()->IsString())) { |
5586 // we may not be able to make the decision until past AST generation, | 7286 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle())); |
5587 // that is when the entire program is known. | 7287 |
5588 ASSERT(slot != NULL); | 7288 // Load the operand and move it to a register. |
5589 int index = slot->index(); | 7289 LoadTypeofExpression(operation->expression()); |
5590 switch (slot->type()) { | 7290 Result answer = frame_->Pop(); |
5591 case Slot::PARAMETER: | 7291 answer.ToRegister(); |
5592 return frame_->ParameterAt(index); | 7292 |
5593 | 7293 if (check->Equals(Heap::number_symbol())) { |
5594 case Slot::LOCAL: | 7294 Condition is_smi = masm_->CheckSmi(answer.reg()); |
5595 return frame_->LocalAt(index); | 7295 destination()->true_target()->Branch(is_smi); |
5596 | 7296 frame_->Spill(answer.reg()); |
5597 case Slot::CONTEXT: { | 7297 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); |
5598 // Follow the context chain if necessary. | 7298 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex); |
5599 ASSERT(!tmp.is(rsi)); // do not overwrite context register | 7299 answer.Unuse(); |
5600 Register context = rsi; | 7300 destination()->Split(equal); |
5601 int chain_length = scope()->ContextChainLength(slot->var()->scope()); | 7301 |
5602 for (int i = 0; i < chain_length; i++) { | 7302 } else if (check->Equals(Heap::string_symbol())) { |
5603 // Load the closure. | 7303 Condition is_smi = masm_->CheckSmi(answer.reg()); |
5604 // (All contexts, even 'with' contexts, have a closure, | 7304 destination()->false_target()->Branch(is_smi); |
5605 // and it is the same for all contexts inside a function. | 7305 |
5606 // There is no need to go to the function context first.) | 7306 // It can be an undetectable string object. |
5607 __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); | 7307 __ movq(kScratchRegister, |
5608 // Load the function context (which is the incoming, outer context). | 7308 FieldOperand(answer.reg(), HeapObject::kMapOffset)); |
5609 __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset)); | 7309 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), |
5610 context = tmp; | 7310 Immediate(1 << Map::kIsUndetectable)); |
5611 } | 7311 destination()->false_target()->Branch(not_zero); |
5612 // We may have a 'with' context now. Get the function context. | 7312 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE); |
5613 // (In fact this mov may never be needed, since the scope analysis | 7312 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE); |
5614 // may not permit a direct context access in this case and thus we are | 7314 destination()->Split(below); // Unsigned byte comparison needed. |
5615 // always at a function context. However it is safe to dereference be- | 7315 |
5616 // cause the function context of a function context is itself. Before | 7316 } else if (check->Equals(Heap::boolean_symbol())) { |
5617 // deleting this mov we should try to create a counter-example first, | 7317 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex); |
5618 // though...) | 7318 destination()->true_target()->Branch(equal); |
5619 __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); | 7319 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex); |
5620 return ContextOperand(tmp, index); | 7320 answer.Unuse(); |
5621 } | 7321 destination()->Split(equal); |
5622 | 7322 |
| 7323 } else if (check->Equals(Heap::undefined_symbol())) { |
| 7324 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex); |
| 7325 destination()->true_target()->Branch(equal); |
| 7326 |
| 7327 Condition is_smi = masm_->CheckSmi(answer.reg()); |
| 7328 destination()->false_target()->Branch(is_smi); |
| 7329 |
| 7330 // It can be an undetectable object. |
| 7331 __ movq(kScratchRegister, |
| 7332 FieldOperand(answer.reg(), HeapObject::kMapOffset)); |
| 7333 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), |
| 7334 Immediate(1 << Map::kIsUndetectable)); |
| 7335 answer.Unuse(); |
| 7336 destination()->Split(not_zero); |
| 7337 |
| 7338 } else if (check->Equals(Heap::function_symbol())) { |
| 7339 Condition is_smi = masm_->CheckSmi(answer.reg()); |
| 7340 destination()->false_target()->Branch(is_smi); |
| 7341 frame_->Spill(answer.reg()); |
| 7342 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); |
| 7343 destination()->true_target()->Branch(equal); |
| 7344 // Regular expressions are callable so typeof == 'function'. |
| 7345 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE); |
| 7346 answer.Unuse(); |
| 7347 destination()->Split(equal); |
| 7348 |
| 7349 } else if (check->Equals(Heap::object_symbol())) { |
| 7350 Condition is_smi = masm_->CheckSmi(answer.reg()); |
| 7351 destination()->false_target()->Branch(is_smi); |
| 7352 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex); |
| 7353 destination()->true_target()->Branch(equal); |
| 7354 |
| 7355 // Regular expressions are typeof == 'function', not 'object'. |
| 7356 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister); |
| 7357 destination()->false_target()->Branch(equal); |
| 7358 |
| 7359 // It can be an undetectable object. |
| 7360 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), |
| 7361 Immediate(1 << Map::kIsUndetectable)); |
| 7362 destination()->false_target()->Branch(not_zero); |
| 7363 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE); |
| 7364 destination()->false_target()->Branch(below); |
| 7365 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); |
| 7366 answer.Unuse(); |
| 7367 destination()->Split(below_equal); |
| 7368 } else { |
| 7369 // Uncommon case: typeof testing against a string literal that is |
| 7370 // never returned from the typeof operator. |
| 7371 answer.Unuse(); |
| 7372 destination()->Goto(false); |
| 7373 } |
| 7374 return; |
| 7375 } |
| 7376 |
| 7377 Condition cc = no_condition; |
| 7378 bool strict = false; |
| 7379 switch (op) { |
| 7380 case Token::EQ_STRICT: |
| 7381 strict = true; |
| 7382 // Fall through |
| 7383 case Token::EQ: |
| 7384 cc = equal; |
| 7385 break; |
| 7386 case Token::LT: |
| 7387 cc = less; |
| 7388 break; |
| 7389 case Token::GT: |
| 7390 cc = greater; |
| 7391 break; |
| 7392 case Token::LTE: |
| 7393 cc = less_equal; |
| 7394 break; |
| 7395 case Token::GTE: |
| 7396 cc = greater_equal; |
| 7397 break; |
| 7398 case Token::IN: { |
| 7399 Load(left); |
| 7400 Load(right); |
| 7401 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2); |
| 7402 frame_->Push(&answer); // push the result |
| 7403 return; |
| 7404 } |
| 7405 case Token::INSTANCEOF: { |
| 7406 Load(left); |
| 7407 Load(right); |
| 7408 InstanceofStub stub; |
| 7409 Result answer = frame_->CallStub(&stub, 2); |
| 7410 answer.ToRegister(); |
| 7411 __ testq(answer.reg(), answer.reg()); |
| 7412 answer.Unuse(); |
| 7413 destination()->Split(zero); |
| 7414 return; |
| 7415 } |
5623 default: | 7416 default: |
5624 UNREACHABLE(); | 7417 UNREACHABLE(); |
5625 return Operand(rsp, 0); | 7418 } |
5626 } | 7419 |
5627 } | 7420 if (left->IsTrivial()) { |
5628 | 7421 Load(right); |
5629 | 7422 Result right_result = frame_->Pop(); |
5630 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot, | 7423 frame_->Push(left); |
5631 Result tmp, | 7424 frame_->Push(&right_result); |
5632 JumpTarget* slow) { | |
5633 ASSERT(slot->type() == Slot::CONTEXT); | |
5634 ASSERT(tmp.is_register()); | |
5635 Register context = rsi; | |
5636 | |
5637 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { | |
5638 if (s->num_heap_slots() > 0) { | |
5639 if (s->calls_eval()) { | |
5640 // Check that extension is NULL. | |
5641 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), | |
5642 Immediate(0)); | |
5643 slow->Branch(not_equal, not_taken); | |
5644 } | |
5645 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); | |
5646 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); | |
5647 context = tmp.reg(); | |
5648 } | |
5649 } | |
5650 // Check that last extension is NULL. | |
5651 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); | |
5652 slow->Branch(not_equal, not_taken); | |
5653 __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX)); | |
5654 return ContextOperand(tmp.reg(), slot->index()); | |
5655 } | |
5656 | |
5657 | |
5658 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { | |
5659 if (slot->type() == Slot::LOOKUP) { | |
5660 ASSERT(slot->var()->is_dynamic()); | |
5661 | |
5662 JumpTarget slow; | |
5663 JumpTarget done; | |
5664 Result value; | |
5665 | |
5666 // Generate fast case for loading from slots that correspond to | |
5667 // local/global variables or arguments unless they are shadowed by | |
5668 // eval-introduced bindings. | |
5669 EmitDynamicLoadFromSlotFastCase(slot, | |
5670 typeof_state, | |
5671 &value, | |
5672 &slow, | |
5673 &done); | |
5674 | |
5675 slow.Bind(); | |
5676 // A runtime call is inevitable. We eagerly sync frame elements | |
5677 // to memory so that we can push the arguments directly into place | |
5678 // on top of the frame. | |
5679 frame_->SyncRange(0, frame_->element_count() - 1); | |
5680 frame_->EmitPush(rsi); | |
5681 __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT); | |
5682 frame_->EmitPush(kScratchRegister); | |
5683 if (typeof_state == INSIDE_TYPEOF) { | |
5684 value = | |
5685 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); | |
5686 } else { | |
5687 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2); | |
5688 } | |
5689 | |
5690 done.Bind(&value); | |
5691 frame_->Push(&value); | |
5692 | |
5693 } else if (slot->var()->mode() == Variable::CONST) { | |
5694 // Const slots may contain 'the hole' value (the constant hasn't been | |
5695 // initialized yet) which needs to be converted into the 'undefined' | |
5696 // value. | |
5697 // | |
5698 // We currently spill the virtual frame because constants use the | |
5699 // potentially unsafe direct-frame access of SlotOperand. | |
5700 VirtualFrame::SpilledScope spilled_scope; | |
5701 Comment cmnt(masm_, "[ Load const"); | |
5702 JumpTarget exit; | |
5703 __ movq(rcx, SlotOperand(slot, rcx)); | |
5704 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); | |
5705 exit.Branch(not_equal); | |
5706 __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex); | |
5707 exit.Bind(); | |
5708 frame_->EmitPush(rcx); | |
5709 | |
5710 } else if (slot->type() == Slot::PARAMETER) { | |
5711 frame_->PushParameterAt(slot->index()); | |
5712 | |
5713 } else if (slot->type() == Slot::LOCAL) { | |
5714 frame_->PushLocalAt(slot->index()); | |
5715 | |
5716 } else { | 7425 } else { |
5717 // The other remaining slot types (LOOKUP and GLOBAL) cannot reach | 7426 Load(left); |
5718 // here. | 7427 Load(right); |
5719 // | 7428 } |
5720 // The use of SlotOperand below is safe for an unspilled frame | 7429 |
5721 // because it will always be a context slot. | 7430 Comparison(node, cc, strict, destination()); |
5722 ASSERT(slot->type() == Slot::CONTEXT); | 7431 } |
5723 Result temp = allocator_->Allocate(); | 7432 |
5724 ASSERT(temp.is_valid()); | 7433 |
5725 __ movq(temp.reg(), SlotOperand(slot, temp.reg())); | 7434 #ifdef DEBUG |
5726 frame_->Push(&temp); | 7435 bool CodeGenerator::HasValidEntryRegisters() { |
5727 } | 7436 return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0)) |
5728 } | 7437 && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0)) |
5729 | 7438 && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0)) |
5730 | 7439 && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0)) |
5731 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, | 7440 && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0)) |
5732 TypeofState state) { | 7441 && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0)) |
5733 LoadFromSlot(slot, state); | 7442 && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0)) |
5734 | 7443 && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) |
5735 // Bail out quickly if we're not using lazy arguments allocation. | 7444 && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) |
5736 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return; | 7445 && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0)); |
5737 | 7446 } |
5738 // ... or if the slot isn't a non-parameter arguments slot. | 7447 #endif |
5739 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return; | 7448 |
5740 | |
5741 // Pop the loaded value from the stack. | |
5742 Result value = frame_->Pop(); | |
5743 | |
5744 // If the loaded value is a constant, we know whether the arguments |
5745 // object has been lazily loaded yet. | |
5746 if (value.is_constant()) { | |
5747 if (value.handle()->IsTheHole()) { | |
5748 Result arguments = StoreArgumentsObject(false); | |
5749 frame_->Push(&arguments); | |
5750 } else { | |
5751 frame_->Push(&value); | |
5752 } | |
5753 return; | |
5754 } | |
5755 | |
5756 // The loaded value is in a register. If it is the sentinel that | |
5757 // indicates that we haven't loaded the arguments object yet, we | |
5758 // need to do it now. | |
5759 JumpTarget exit; | |
5760 __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex); | |
5761 frame_->Push(&value); | |
5762 exit.Branch(not_equal); | |
5763 Result arguments = StoreArgumentsObject(false); | |
5764 frame_->SetElementAt(0, &arguments); | |
5765 exit.Bind(); | |
5766 } | |
5767 | |
5768 | |
5769 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { | |
5770 if (slot->type() == Slot::LOOKUP) { | |
5771 ASSERT(slot->var()->is_dynamic()); | |
5772 | |
5773 // For now, just do a runtime call. Since the call is inevitable, | |
5774 // we eagerly sync the virtual frame so we can directly push the | |
5775 // arguments into place. | |
5776 frame_->SyncRange(0, frame_->element_count() - 1); | |
5777 | |
5778 frame_->EmitPush(rsi); | |
5779 frame_->EmitPush(slot->var()->name()); | |
5780 | |
5781 Result value; | |
5782 if (init_state == CONST_INIT) { | |
5783 // Same as the case for a normal store, but ignores attribute | |
5784 // (e.g. READ_ONLY) of context slot so that we can initialize const | |
5785 // properties (introduced via eval("const foo = (some expr);")). Also, | |
5786 // uses the current function context instead of the top context. | |
5787 // | |
5788 // Note that we must declare foo upon entry to eval(), via a |
5789 // context slot declaration, but we cannot initialize it at the same | |
5790 // time, because the const declaration may be at the end of the eval | |
5791 // code (sigh...) and the const variable may have been used before | |
5792 // (where its value is 'undefined'). Thus, we can only do the | |
5793 // initialization when we actually encounter the expression and when | |
5794 // the expression operands are defined and valid, and thus we need to |
5795 // split this into two operations: declaration of the context slot followed |
5796 // by initialization. | |
5797 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); | |
5798 } else { | |
5799 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3); | |
5800 } | |
5801 // Storing a variable must keep the (new) value on the expression | |
5802 // stack. This is necessary for compiling chained assignment | |
5803 // expressions. | |
5804 frame_->Push(&value); | |
5805 } else { | |
5806 ASSERT(!slot->var()->is_dynamic()); | |
5807 | |
5808 JumpTarget exit; | |
5809 if (init_state == CONST_INIT) { | |
5810 ASSERT(slot->var()->mode() == Variable::CONST); | |
5811 // Only the first const initialization must be executed (the slot | |
5812 // still contains 'the hole' value). When the assignment is executed, | |
5813 // the code is identical to a normal store (see below). | |
5814 // | |
5815 // We spill the frame in the code below because the direct-frame | |
5816 // access of SlotOperand is potentially unsafe with an unspilled | |
5817 // frame. | |
5818 VirtualFrame::SpilledScope spilled_scope; | |
5819 Comment cmnt(masm_, "[ Init const"); | |
5820 __ movq(rcx, SlotOperand(slot, rcx)); | |
5821 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); | |
5822 exit.Branch(not_equal); | |
5823 } | |
5824 | |
5825 // We must execute the store. Storing a variable must keep the (new) | |
5826 // value on the stack. This is necessary for compiling assignment | |
5827 // expressions. | |
5828 // | |
5829 // Note: We will reach here even with slot->var()->mode() == | |
5830 // Variable::CONST because of const declarations which will initialize | |
5831 // consts to 'the hole' value and by doing so, end up calling this code. | |
5832 if (slot->type() == Slot::PARAMETER) { | |
5833 frame_->StoreToParameterAt(slot->index()); | |
5834 } else if (slot->type() == Slot::LOCAL) { | |
5835 frame_->StoreToLocalAt(slot->index()); | |
5836 } else { | |
5837 // The other slot types (LOOKUP and GLOBAL) cannot reach here. | |
5838 // | |
5839 // The use of SlotOperand below is safe for an unspilled frame | |
5840 // because the slot is a context slot. | |
5841 ASSERT(slot->type() == Slot::CONTEXT); | |
5842 frame_->Dup(); | |
5843 Result value = frame_->Pop(); | |
5844 value.ToRegister(); | |
5845 Result start = allocator_->Allocate(); | |
5846 ASSERT(start.is_valid()); | |
5847 __ movq(SlotOperand(slot, start.reg()), value.reg()); | |
5848 // RecordWrite may destroy the value registers. | |
5849 // | |
5850 // TODO(204): Avoid actually spilling when the value is not | |
5851 // needed (probably the common case). | |
5852 frame_->Spill(value.reg()); | |
5853 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; | |
5854 Result temp = allocator_->Allocate(); | |
5855 ASSERT(temp.is_valid()); | |
5856 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg()); | |
5857 // The results start, value, and temp are released by going out of |
5858 // scope. |
5859 } | |
5860 | |
5861 exit.Bind(); | |
5862 } | |
5863 } | |
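The "Init const" path above only performs the store while the slot still holds 'the hole'. A minimal standalone C++ sketch of that sentinel idea (std::optional and the init_const helper are hypothetical stand-ins, not V8 types):

    #include <cassert>
    #include <optional>

    int main() {
      std::optional<int> slot;                        // nullopt plays the role of 'the hole'
      auto init_const = [&](int v) { if (!slot) slot = v; };
      init_const(1);                                  // the first initialization wins
      init_const(2);                                  // later initializations are ignored
      assert(slot.value() == 1);
      return 0;
    }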
5864 | |
5865 | |
5866 Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( | |
5867 Slot* slot, | |
5868 TypeofState typeof_state, | |
5869 JumpTarget* slow) { | |
5870 // Check that no extension objects have been created by calls to | |
5871 // eval from the current scope to the global scope. | |
5872 Register context = rsi; | |
5873 Result tmp = allocator_->Allocate(); | |
5874 ASSERT(tmp.is_valid()); // All non-reserved registers were available. | |
5875 | |
5876 Scope* s = scope(); | |
5877 while (s != NULL) { | |
5878 if (s->num_heap_slots() > 0) { | |
5879 if (s->calls_eval()) { | |
5880 // Check that extension is NULL. | |
5881 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), | |
5882 Immediate(0)); | |
5883 slow->Branch(not_equal, not_taken); | |
5884 } | |
5885 // Load next context in chain. | |
5886 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); | |
5887 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); | |
5888 context = tmp.reg(); | |
5889 } | |
5890 // If no outer scope calls eval, we do not need to check more | |
5891 // context extensions. If we have reached an eval scope, we check | |
5892 // all extensions from this point. | |
5893 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; | |
5894 s = s->outer_scope(); | |
5895 } | |
5896 | |
5897 if (s->is_eval_scope()) { | |
5898 // Loop up the context chain. There is no frame effect so it is | |
5899 // safe to use raw labels here. | |
5900 Label next, fast; | |
5901 if (!context.is(tmp.reg())) { | |
5902 __ movq(tmp.reg(), context); | |
5903 } | |
5904 // Load map for comparison into register, outside loop. | |
5905 __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex); | |
5906 __ bind(&next); | |
5907 // Terminate at global context. | |
5908 __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset)); | |
5909 __ j(equal, &fast); | |
5910 // Check that extension is NULL. | |
5911 __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0)); | |
5912 slow->Branch(not_equal); | |
5913 // Load next context in chain. | |
5914 __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX)); | |
5915 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); | |
5916 __ jmp(&next); | |
5917 __ bind(&fast); | |
5918 } | |
5919 tmp.Unuse(); | |
5920 | |
5921 // All extension objects were empty and it is safe to use a global | |
5922 // load IC call. | |
5923 LoadGlobal(); | |
5924 frame_->Push(slot->var()->name()); | |
5925 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) | |
5926 ? RelocInfo::CODE_TARGET | |
5927 : RelocInfo::CODE_TARGET_CONTEXT; | |
5928 Result answer = frame_->CallLoadIC(mode); | |
5929 // A test rax instruction following the call signals that the inobject | |
5930 // property case was inlined. Ensure that there is not a test rax | |
5931 // instruction here. | |
5932 masm_->nop(); | |
5933 return answer; | |
5934 } | |
5935 | |
5936 | |
5937 void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot, | |
5938 TypeofState typeof_state, | |
5939 Result* result, | |
5940 JumpTarget* slow, | |
5941 JumpTarget* done) { | |
5942 // Generate fast-case code for variables that might be shadowed by | |
5943 // eval-introduced variables. Eval is used a lot without | |
5944 // introducing variables. In those cases, we do not want to | |
5945 // perform a runtime call for all variables in the scope | |
5946 // containing the eval. | |
5947 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { | |
5948 *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow); | |
5949 done->Jump(result); | |
5950 | |
5951 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { | |
5952 Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); | |
5953 Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); | |
5954 if (potential_slot != NULL) { | |
5955 // Generate fast case for locals that rewrite to slots. | |
5956 // Allocate a fresh register to use as a temp in | |
5957 // ContextSlotOperandCheckExtensions and to hold the result | |
5958 // value. | |
5959 *result = allocator_->Allocate(); | |
5960 ASSERT(result->is_valid()); | |
5961 __ movq(result->reg(), | |
5962 ContextSlotOperandCheckExtensions(potential_slot, | |
5963 *result, | |
5964 slow)); | |
5965 if (potential_slot->var()->mode() == Variable::CONST) { | |
5966 __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex); | |
5967 done->Branch(not_equal, result); | |
5968 __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex); | |
5969 } | |
5970 done->Jump(result); | |
5971 } else if (rewrite != NULL) { | |
5972 // Generate fast case for argument loads. | |
5973 Property* property = rewrite->AsProperty(); | |
5974 if (property != NULL) { | |
5975 VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); | |
5976 Literal* key_literal = property->key()->AsLiteral(); | |
5977 if (obj_proxy != NULL && | |
5978 key_literal != NULL && | |
5979 obj_proxy->IsArguments() && | |
5980 key_literal->handle()->IsSmi()) { | |
5981 // Load arguments object if there are no eval-introduced | |
5982 // variables. Then load the argument from the arguments | |
5983 // object using keyed load. | |
5984 Result arguments = allocator()->Allocate(); | |
5985 ASSERT(arguments.is_valid()); | |
5986 __ movq(arguments.reg(), | |
5987 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), | |
5988 arguments, | |
5989 slow)); | |
5990 frame_->Push(&arguments); | |
5991 frame_->Push(key_literal->handle()); | |
5992 *result = EmitKeyedLoad(); | |
5993 done->Jump(result); | |
5994 } | |
5995 } | |
5996 } | |
5997 } | |
5998 } | |
5999 | |
6000 | |
6001 void CodeGenerator::LoadGlobal() { | |
6002 if (in_spilled_code()) { | |
6003 frame_->EmitPush(GlobalObject()); | |
6004 } else { | |
6005 Result temp = allocator_->Allocate(); | |
6006 __ movq(temp.reg(), GlobalObject()); | |
6007 frame_->Push(&temp); | |
6008 } | |
6009 } | |
6010 | |
6011 | |
6012 void CodeGenerator::LoadGlobalReceiver() { | |
6013 Result temp = allocator_->Allocate(); | |
6014 Register reg = temp.reg(); | |
6015 __ movq(reg, GlobalObject()); | |
6016 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset)); | |
6017 frame_->Push(&temp); | |
6018 } | |
6019 | |
6020 | |
6021 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() { | |
6022 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION; | |
6023 ASSERT(scope()->arguments_shadow() != NULL); | |
6024 // We don't want to do lazy arguments allocation for functions that | |
6025 // have heap-allocated contexts, because it interferes with the |
6026 // uninitialized const tracking in the context objects. | |
6027 return (scope()->num_heap_slots() > 0) | |
6028 ? EAGER_ARGUMENTS_ALLOCATION | |
6029 : LAZY_ARGUMENTS_ALLOCATION; | |
6030 } | |
6031 | |
6032 | |
6033 Result CodeGenerator::StoreArgumentsObject(bool initial) { | |
6034 ArgumentsAllocationMode mode = ArgumentsMode(); | |
6035 ASSERT(mode != NO_ARGUMENTS_ALLOCATION); | |
6036 | |
6037 Comment cmnt(masm_, "[ store arguments object"); | |
6038 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) { | |
6039 // When using lazy arguments allocation, we store the hole value | |
6040 // as a sentinel indicating that the arguments object hasn't been | |
6041 // allocated yet. | |
6042 frame_->Push(Factory::the_hole_value()); | |
6043 } else { | |
6044 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); | |
6045 frame_->PushFunction(); | |
6046 frame_->PushReceiverSlotAddress(); | |
6047 frame_->Push(Smi::FromInt(scope()->num_parameters())); | |
6048 Result result = frame_->CallStub(&stub, 3); | |
6049 frame_->Push(&result); | |
6050 } | |
6051 | |
6052 | |
6053 Variable* arguments = scope()->arguments()->var(); | |
6054 Variable* shadow = scope()->arguments_shadow()->var(); | |
6055 ASSERT(arguments != NULL && arguments->slot() != NULL); | |
6056 ASSERT(shadow != NULL && shadow->slot() != NULL); | |
6057 JumpTarget done; | |
6058 bool skip_arguments = false; | |
6059 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { | |
6060 // We have to skip storing into the arguments slot if it has | |
6061 // already been written to. This can happen if a function |
6062 // has a local variable named 'arguments'. | |
6063 LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); | |
6064 Result probe = frame_->Pop(); | |
6065 if (probe.is_constant()) { | |
6066 // We have to skip updating the arguments object if it has been | |
6067 // assigned a proper value. | |
6068 skip_arguments = !probe.handle()->IsTheHole(); | |
6069 } else { | |
6070 __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex); | |
6071 probe.Unuse(); | |
6072 done.Branch(not_equal); | |
6073 } | |
6074 } | |
6075 if (!skip_arguments) { | |
6076 StoreToSlot(arguments->slot(), NOT_CONST_INIT); | |
6077 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); | |
6078 } | |
6079 StoreToSlot(shadow->slot(), NOT_CONST_INIT); | |
6080 return frame_->Pop(); | |
6081 } | |
6082 | |
6083 | |
6084 void CodeGenerator::LoadTypeofExpression(Expression* expr) { | |
6085 // Special handling of identifiers as subexpressions of typeof. | |
6086 Variable* variable = expr->AsVariableProxy()->AsVariable(); | |
6087 if (variable != NULL && !variable->is_this() && variable->is_global()) { | |
6088 // For a global variable we build the property reference | |
6089 // <global>.<variable> and perform a (regular non-contextual) property | |
6090 // load to make sure we do not get reference errors. | |
6091 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); | |
6092 Literal key(variable->name()); | |
6093 Property property(&global, &key, RelocInfo::kNoPosition); | |
6094 Reference ref(this, &property); | |
6095 ref.GetValue(); | |
6096 } else if (variable != NULL && variable->slot() != NULL) { | |
6097 // For a variable that rewrites to a slot, we signal it is the immediate | |
6098 // subexpression of a typeof. | |
6099 LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF); | |
6100 } else { | |
6101 // Anything else can be handled normally. | |
6102 Load(expr); | |
6103 } | |
6104 } | |
6105 | |
6106 | |
6107 static bool CouldBeNaN(const Result& result) { | |
6108 if (result.type_info().IsSmi()) return false; | |
6109 if (result.type_info().IsInteger32()) return false; | |
6110 if (!result.is_constant()) return true; | |
6111 if (!result.handle()->IsHeapNumber()) return false; | |
6112 return isnan(HeapNumber::cast(*result.handle())->value()); | |
6113 } | |
6114 | |
6115 | |
6116 // Convert from signed to unsigned comparison to match the way EFLAGS are set | |
6117 // by FPU and XMM compare instructions. | |
6118 static Condition DoubleCondition(Condition cc) { | |
6119 switch (cc) { | |
6120 case less: return below; | |
6121 case equal: return equal; | |
6122 case less_equal: return below_equal; | |
6123 case greater: return above; | |
6124 case greater_equal: return above_equal; | |
6125 default: UNREACHABLE(); | |
6126 } | |
6127 UNREACHABLE(); | |
6128 return equal; | |
6129 } | |
6130 | |
6131 | |
6132 void CodeGenerator::Comparison(AstNode* node, | |
6133 Condition cc, | |
6134 bool strict, | |
6135 ControlDestination* dest) { | |
6136 // Strict only makes sense for equality comparisons. | |
6137 ASSERT(!strict || cc == equal); | |
6138 | |
6139 Result left_side; | |
6140 Result right_side; | |
6141 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. | |
6142 if (cc == greater || cc == less_equal) { | |
6143 cc = ReverseCondition(cc); | |
6144 left_side = frame_->Pop(); | |
6145 right_side = frame_->Pop(); | |
6146 } else { | |
6147 right_side = frame_->Pop(); | |
6148 left_side = frame_->Pop(); | |
6149 } | |
6150 ASSERT(cc == less || cc == equal || cc == greater_equal); | |
6151 | |
6152 // If either side is a constant smi, optimize the comparison. | |
6153 bool left_side_constant_smi = false; | |
6154 bool left_side_constant_null = false; | |
6155 bool left_side_constant_1_char_string = false; | |
6156 if (left_side.is_constant()) { | |
6157 left_side_constant_smi = left_side.handle()->IsSmi(); | |
6158 left_side_constant_null = left_side.handle()->IsNull(); | |
6159 left_side_constant_1_char_string = | |
6160 (left_side.handle()->IsString() && | |
6161 String::cast(*left_side.handle())->length() == 1 && | |
6162 String::cast(*left_side.handle())->IsAsciiRepresentation()); | |
6163 } | |
6164 bool right_side_constant_smi = false; | |
6165 bool right_side_constant_null = false; | |
6166 bool right_side_constant_1_char_string = false; | |
6167 if (right_side.is_constant()) { | |
6168 right_side_constant_smi = right_side.handle()->IsSmi(); | |
6169 right_side_constant_null = right_side.handle()->IsNull(); | |
6170 right_side_constant_1_char_string = | |
6171 (right_side.handle()->IsString() && | |
6172 String::cast(*right_side.handle())->length() == 1 && | |
6173 String::cast(*right_side.handle())->IsAsciiRepresentation()); | |
6174 } | |
6175 | |
6176 if (left_side_constant_smi || right_side_constant_smi) { | |
6177 if (left_side_constant_smi && right_side_constant_smi) { | |
6178 // Trivial case, comparing two constants. | |
6179 int left_value = Smi::cast(*left_side.handle())->value(); | |
6180 int right_value = Smi::cast(*right_side.handle())->value(); | |
6181 switch (cc) { | |
6182 case less: | |
6183 dest->Goto(left_value < right_value); | |
6184 break; | |
6185 case equal: | |
6186 dest->Goto(left_value == right_value); | |
6187 break; | |
6188 case greater_equal: | |
6189 dest->Goto(left_value >= right_value); | |
6190 break; | |
6191 default: | |
6192 UNREACHABLE(); | |
6193 } | |
6194 } else { | |
6195 // Only one side is a constant Smi. | |
6196 // If left side is a constant Smi, reverse the operands. | |
6197 // Since one side is a constant Smi, conversion order does not matter. | |
6198 if (left_side_constant_smi) { | |
6199 Result temp = left_side; | |
6200 left_side = right_side; | |
6201 right_side = temp; | |
6202 cc = ReverseCondition(cc); | |
6203 // This may re-introduce greater or less_equal as the value of cc. | |
6204 // CompareStub and the inline code both support all values of cc. | |
6205 } | |
6206 // Implement comparison against a constant Smi, inlining the case | |
6207 // where both sides are Smis. | |
6208 left_side.ToRegister(); | |
6209 Register left_reg = left_side.reg(); | |
6210 Handle<Object> right_val = right_side.handle(); | |
6211 | |
6212 // Here we split control flow to the stub call and inlined cases | |
6213 // before finally splitting it to the control destination. We use | |
6214 // a jump target and branching to duplicate the virtual frame at | |
6215 // the first split. We manually handle the off-frame references | |
6216 // by reconstituting them on the non-fall-through path. | |
6217 JumpTarget is_smi; | |
6218 | |
6219 if (left_side.is_smi()) { | |
6220 if (FLAG_debug_code) { | |
6221 __ AbortIfNotSmi(left_side.reg()); | |
6222 } | |
6223 } else { | |
6224 Condition left_is_smi = masm_->CheckSmi(left_side.reg()); | |
6225 is_smi.Branch(left_is_smi); | |
6226 | |
6227 bool is_loop_condition = (node->AsExpression() != NULL) && | |
6228 node->AsExpression()->is_loop_condition(); | |
6229 if (!is_loop_condition && right_val->IsSmi()) { | |
6230 // Right side is a constant smi and left side has been checked | |
6231 // not to be a smi. | |
6232 JumpTarget not_number; | |
6233 __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset), | |
6234 Factory::heap_number_map()); | |
6235 not_number.Branch(not_equal, &left_side); | |
6236 __ movsd(xmm1, | |
6237 FieldOperand(left_reg, HeapNumber::kValueOffset)); | |
6238 int value = Smi::cast(*right_val)->value(); | |
6239 if (value == 0) { | |
6240 __ xorpd(xmm0, xmm0); | |
6241 } else { | |
6242 Result temp = allocator()->Allocate(); | |
6243 __ movl(temp.reg(), Immediate(value)); | |
6244 __ cvtlsi2sd(xmm0, temp.reg()); | |
6245 temp.Unuse(); | |
6246 } | |
6247 __ ucomisd(xmm1, xmm0); | |
6248 // Jump to builtin for NaN. | |
6249 not_number.Branch(parity_even, &left_side); | |
6250 left_side.Unuse(); | |
6251 dest->true_target()->Branch(DoubleCondition(cc)); | |
6252 dest->false_target()->Jump(); | |
6253 not_number.Bind(&left_side); | |
6254 } | |
6255 | |
6256 // Set up and call the compare stub. |
6257 CompareStub stub(cc, strict, kCantBothBeNaN); | |
6258 Result result = frame_->CallStub(&stub, &left_side, &right_side); | |
6259 result.ToRegister(); | |
6260 __ testq(result.reg(), result.reg()); | |
6261 result.Unuse(); | |
6262 dest->true_target()->Branch(cc); | |
6263 dest->false_target()->Jump(); | |
6264 | |
6265 is_smi.Bind(); | |
6266 } | |
6267 | |
6268 left_side = Result(left_reg); | |
6269 right_side = Result(right_val); | |
6270 // Test smi equality and comparison by signed int comparison. | |
6271 // Both sides are smis, so we can use an Immediate. | |
6272 __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle())); | |
6273 left_side.Unuse(); | |
6274 right_side.Unuse(); | |
6275 dest->Split(cc); | |
6276 } | |
6277 } else if (cc == equal && | |
6278 (left_side_constant_null || right_side_constant_null)) { | |
6279 // To make null checks efficient, we check if either the left side or | |
6280 // the right side is the constant 'null'. | |
6281 // If so, we optimize the code by inlining a null check instead of | |
6282 // calling the (very) general runtime routine for checking equality. | |
6283 Result operand = left_side_constant_null ? right_side : left_side; | |
6284 right_side.Unuse(); | |
6285 left_side.Unuse(); | |
6286 operand.ToRegister(); | |
6287 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex); | |
6288 if (strict) { | |
6289 operand.Unuse(); | |
6290 dest->Split(equal); | |
6291 } else { | |
6292 // The 'null' value is only equal to 'undefined' if using non-strict | |
6293 // comparisons. | |
6294 dest->true_target()->Branch(equal); | |
6295 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex); | |
6296 dest->true_target()->Branch(equal); | |
6297 Condition is_smi = masm_->CheckSmi(operand.reg()); | |
6298 dest->false_target()->Branch(is_smi); | |
6299 | |
6300 // It can be an undetectable object. | |
6301 // Use a scratch register in preference to spilling operand.reg(). | |
6302 Result temp = allocator()->Allocate(); | |
6303 ASSERT(temp.is_valid()); | |
6304 __ movq(temp.reg(), | |
6305 FieldOperand(operand.reg(), HeapObject::kMapOffset)); | |
6306 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset), | |
6307 Immediate(1 << Map::kIsUndetectable)); | |
6308 temp.Unuse(); | |
6309 operand.Unuse(); | |
6310 dest->Split(not_zero); | |
6311 } | |
6312 } else if (left_side_constant_1_char_string || | |
6313 right_side_constant_1_char_string) { | |
6314 if (left_side_constant_1_char_string && right_side_constant_1_char_string) { | |
6315 // Trivial case, comparing two constants. | |
6316 int left_value = String::cast(*left_side.handle())->Get(0); | |
6317 int right_value = String::cast(*right_side.handle())->Get(0); | |
6318 switch (cc) { | |
6319 case less: | |
6320 dest->Goto(left_value < right_value); | |
6321 break; | |
6322 case equal: | |
6323 dest->Goto(left_value == right_value); | |
6324 break; | |
6325 case greater_equal: | |
6326 dest->Goto(left_value >= right_value); | |
6327 break; | |
6328 default: | |
6329 UNREACHABLE(); | |
6330 } | |
6331 } else { | |
6332 // Only one side is a constant 1 character string. | |
6333 // If left side is a constant 1-character string, reverse the operands. | |
6334 // Since one side is a constant string, conversion order does not matter. | |
6335 if (left_side_constant_1_char_string) { | |
6336 Result temp = left_side; | |
6337 left_side = right_side; | |
6338 right_side = temp; | |
6339 cc = ReverseCondition(cc); | |
6340 // This may reintroduce greater or less_equal as the value of cc. | |
6341 // CompareStub and the inline code both support all values of cc. | |
6342 } | |
6343 // Implement comparison against a constant string, inlining the case | |
6344 // where both sides are strings. | |
6345 left_side.ToRegister(); | |
6346 | |
6347 // Here we split control flow to the stub call and inlined cases | |
6348 // before finally splitting it to the control destination. We use | |
6349 // a jump target and branching to duplicate the virtual frame at | |
6350 // the first split. We manually handle the off-frame references | |
6351 // by reconstituting them on the non-fall-through path. | |
6352 JumpTarget is_not_string, is_string; | |
6353 Register left_reg = left_side.reg(); | |
6354 Handle<Object> right_val = right_side.handle(); | |
6355 ASSERT(StringShape(String::cast(*right_val)).IsSymbol()); | |
6356 Condition is_smi = masm()->CheckSmi(left_reg); | |
6357 is_not_string.Branch(is_smi, &left_side); | |
6358 Result temp = allocator_->Allocate(); | |
6359 ASSERT(temp.is_valid()); | |
6360 __ movq(temp.reg(), | |
6361 FieldOperand(left_reg, HeapObject::kMapOffset)); | |
6362 __ movzxbl(temp.reg(), | |
6363 FieldOperand(temp.reg(), Map::kInstanceTypeOffset)); | |
6364 // If we are testing for equality then make use of the symbol shortcut. | |
6365 // Check if the left hand side has the same type as the right hand | |
6366 // side (which is always a symbol). | |
6367 if (cc == equal) { | |
6368 Label not_a_symbol; | |
6369 ASSERT(kSymbolTag != 0); | |
6370 // Ensure that no non-strings have the symbol bit set. | |
6371 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); | |
6372 __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit. | |
6373 __ j(zero, &not_a_symbol); |
6374 // They are symbols, so do identity compare. | |
6375 __ Cmp(left_reg, right_side.handle()); | |
6376 dest->true_target()->Branch(equal); | |
6377 dest->false_target()->Branch(not_equal); | |
6378 __ bind(&not_a_symbol); |
6379 } | |
6380 // Call the compare stub if the left side is not a flat ascii string. | |
6381 __ andb(temp.reg(), | |
6382 Immediate(kIsNotStringMask | | |
6383 kStringRepresentationMask | | |
6384 kStringEncodingMask)); | |
6385 __ cmpb(temp.reg(), | |
6386 Immediate(kStringTag | kSeqStringTag | kAsciiStringTag)); | |
6387 temp.Unuse(); | |
6388 is_string.Branch(equal, &left_side); | |
6389 | |
6390 // Set up and call the compare stub. |
6391 is_not_string.Bind(&left_side); | |
6392 CompareStub stub(cc, strict, kCantBothBeNaN); | |
6393 Result result = frame_->CallStub(&stub, &left_side, &right_side); | |
6394 result.ToRegister(); | |
6395 __ testq(result.reg(), result.reg()); | |
6396 result.Unuse(); | |
6397 dest->true_target()->Branch(cc); | |
6398 dest->false_target()->Jump(); | |
6399 | |
6400 is_string.Bind(&left_side); | |
6401 // left_side is a sequential ASCII string. | |
6402 ASSERT(left_side.reg().is(left_reg)); | |
6403 right_side = Result(right_val); | |
6404 Result temp2 = allocator_->Allocate(); | |
6405 ASSERT(temp2.is_valid()); | |
6406 // Test string equality and comparison. | |
6407 if (cc == equal) { | |
6408 Label comparison_done; | |
6409 __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), | |
6410 Smi::FromInt(1)); | |
6411 __ j(not_equal, &comparison_done); | |
6412 uint8_t char_value = | |
6413 static_cast<uint8_t>(String::cast(*right_val)->Get(0)); | |
6414 __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize), | |
6415 Immediate(char_value)); | |
6416 __ bind(&comparison_done); | |
6417 } else { | |
6418 __ movq(temp2.reg(), | |
6419 FieldOperand(left_side.reg(), String::kLengthOffset)); | |
6420 __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1)); | |
6421 Label comparison; | |
6422 // If the length is 0 then the subtraction gave -1 which compares less | |
6423 // than any character. | |
6424 __ j(negative, &comparison); | |
6425 // Otherwise load the first character. | |
6426 __ movzxbl(temp2.reg(), | |
6427 FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize)); | |
6428 __ bind(&comparison); | |
6429 // Compare the first character of the string with the | |
6430 // constant 1-character string. | |
6431 uint8_t char_value = | |
6432 static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0)); | |
6433 __ cmpb(temp2.reg(), Immediate(char_value)); | |
6434 Label characters_were_different; | |
6435 __ j(not_equal, &characters_were_different); | |
6436 // If the first character is the same then the long string sorts after | |
6437 // the short one. | |
6438 __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), | |
6439 Smi::FromInt(1)); | |
6440 __ bind(&characters_were_different); | |
6441 } | |
6442 temp2.Unuse(); | |
6443 left_side.Unuse(); | |
6444 right_side.Unuse(); | |
6445 dest->Split(cc); | |
6446 } | |
6447 } else { | |
6448 // Neither side is a constant Smi, constant 1-char string, or constant null. | |
6449 // If either side is a non-smi constant, skip the smi check. | |
6450 bool known_non_smi = | |
6451 (left_side.is_constant() && !left_side.handle()->IsSmi()) || | |
6452 (right_side.is_constant() && !right_side.handle()->IsSmi()) || | |
6453 left_side.type_info().IsDouble() || | |
6454 right_side.type_info().IsDouble(); | |
6455 | |
6456 NaNInformation nan_info = | |
6457 (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ? | |
6458 kBothCouldBeNaN : | |
6459 kCantBothBeNaN; | |
6460 | |
6461 // Inline number comparison handling any combination of smi's and heap | |
6462 // numbers if: | |
6463 // code is in a loop | |
6464 // the compare operation is different from equal | |
6465 // compare is not a for-loop comparison | |
6466 // The reason for excluding equal is that it will most likely be done | |
6467 // with smi's (not heap numbers) and the code for comparing smi's is inlined |
6468 // separately. The same reason applies for for-loop comparison which will | |
6469 // also most likely be smi comparisons. | |
6470 bool is_loop_condition = (node->AsExpression() != NULL) | |
6471 && node->AsExpression()->is_loop_condition(); | |
6472 bool inline_number_compare = | |
6473 loop_nesting() > 0 && cc != equal && !is_loop_condition; | |
6474 | |
6475 left_side.ToRegister(); | |
6476 right_side.ToRegister(); | |
6477 | |
6478 if (known_non_smi) { | |
6479 // Inlined equality check: | |
6480 // If at least one of the objects is not NaN, then if the objects | |
6481 // are identical, they are equal. | |
6482 if (nan_info == kCantBothBeNaN && cc == equal) { | |
6483 __ cmpq(left_side.reg(), right_side.reg()); | |
6484 dest->true_target()->Branch(equal); | |
6485 } | |
6486 | |
6487 // Inlined number comparison: | |
6488 if (inline_number_compare) { | |
6489 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); | |
6490 } | |
6491 | |
6492 CompareStub stub(cc, strict, nan_info, !inline_number_compare); | |
6493 Result answer = frame_->CallStub(&stub, &left_side, &right_side); | |
6494 __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags. |
6495 answer.Unuse(); | |
6496 dest->Split(cc); | |
6497 } else { | |
6498 // Here we split control flow to the stub call and inlined cases | |
6499 // before finally splitting it to the control destination. We use | |
6500 // a jump target and branching to duplicate the virtual frame at | |
6501 // the first split. We manually handle the off-frame references | |
6502 // by reconstituting them on the non-fall-through path. | |
6503 JumpTarget is_smi; | |
6504 Register left_reg = left_side.reg(); | |
6505 Register right_reg = right_side.reg(); | |
6506 | |
6507 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg); | |
6508 is_smi.Branch(both_smi); | |
6509 | |
6510 // Inline the equality check if both operands can't be a NaN. If both | |
6511 // objects are the same they are equal. | |
6512 if (nan_info == kCantBothBeNaN && cc == equal) { | |
6513 __ cmpq(left_side.reg(), right_side.reg()); | |
6514 dest->true_target()->Branch(equal); | |
6515 } | |
6516 | |
6517 // Inlined number comparison: | |
6518 if (inline_number_compare) { | |
6519 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); | |
6520 } | |
6521 | |
6522 CompareStub stub(cc, strict, nan_info, !inline_number_compare); | |
6523 Result answer = frame_->CallStub(&stub, &left_side, &right_side); | |
6524 __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags. | |
6525 answer.Unuse(); | |
6526 dest->true_target()->Branch(cc); | |
6527 dest->false_target()->Jump(); | |
6528 | |
6529 is_smi.Bind(); | |
6530 left_side = Result(left_reg); | |
6531 right_side = Result(right_reg); | |
6532 __ SmiCompare(left_side.reg(), right_side.reg()); | |
6533 right_side.Unuse(); | |
6534 left_side.Unuse(); | |
6535 dest->Split(cc); | |
6536 } | |
6537 } | |
6538 } | |
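A standalone C++ sketch (illustrative only, not part of this patch) of the ordering facts the inlined one-character string case above relies on: the first byte decides the order, an empty string compares below any character, and when the first characters match the longer string sorts after the one-character constant:

    #include <cassert>
    #include <string>

    int main() {
      const std::string one("b");          // stands in for the constant 1-character string
      assert(std::string("a") < one);      // first characters differ
      assert(std::string("") < one);       // length 0 compares below any character
      assert(std::string("ba") > one);     // equal first character: the longer string sorts after
      return 0;
    }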
6539 | |
6540 | |
6541 // Load a comparison operand into an XMM register. Jump to the not_numbers |
6542 // jump target, passing the left and right results, if the operand is not a number. |
6543 static void LoadComparisonOperand(MacroAssembler* masm_, | |
6544 Result* operand, | |
6545 XMMRegister xmm_reg, | |
6546 Result* left_side, | |
6547 Result* right_side, | |
6548 JumpTarget* not_numbers) { | |
6549 Label done; | |
6550 if (operand->type_info().IsDouble()) { | |
6551 // Operand is known to be a heap number, just load it. | |
6552 __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); | |
6553 } else if (operand->type_info().IsSmi()) { | |
6554 // Operand is known to be a smi. Convert it to double and keep the original | |
6555 // smi. | |
6556 __ SmiToInteger32(kScratchRegister, operand->reg()); | |
6557 __ cvtlsi2sd(xmm_reg, kScratchRegister); | |
6558 } else { | |
6559 // Operand type not known, check for smi or heap number. | |
6560 Label smi; | |
6561 __ JumpIfSmi(operand->reg(), &smi); | |
6562 if (!operand->type_info().IsNumber()) { | |
6563 __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex); | |
6564 __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset), | |
6565 kScratchRegister); | |
6566 not_numbers->Branch(not_equal, left_side, right_side, taken); | |
6567 } | |
6568 __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); | |
6569 __ jmp(&done); | |
6570 | |
6571 __ bind(&smi); | |
6572 // Convert smi to double and keep the original smi. |
6573 __ SmiToInteger32(kScratchRegister, operand->reg()); | |
6574 __ cvtlsi2sd(xmm_reg, kScratchRegister); | |
6575 __ jmp(&done); | |
6576 } | |
6577 __ bind(&done); | |
6578 } | |
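The cvtlsi2sd above turns the untagged 32-bit smi payload into a double, and that conversion is exact, so the later ucomisd cannot be skewed by rounding. A minimal illustration of the round-trip (plain standard C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // A double's 53-bit mantissa represents every 32-bit integer exactly.
      assert(static_cast<int32_t>(static_cast<double>(INT32_MAX)) == INT32_MAX);
      assert(static_cast<int32_t>(static_cast<double>(INT32_MIN)) == INT32_MIN);
      return 0;
    }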
6579 | |
6580 | |
6581 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, | |
6582 Result* right_side, | |
6583 Condition cc, | |
6584 ControlDestination* dest) { | |
6585 ASSERT(left_side->is_register()); | |
6586 ASSERT(right_side->is_register()); | |
6587 | |
6588 JumpTarget not_numbers; | |
6589 // Load the left and right operands into registers xmm0 and xmm1 and compare. |
6590 LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side, | |
6591 &not_numbers); |
6592 LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side, | |
6593 &not_numbers); |
6594 __ ucomisd(xmm0, xmm1); | |
6595 // Bail out if a NaN is involved. | |
6596 not_numbers.Branch(parity_even, left_side, right_side); | |
6597 | |
6598 // Split to destination targets based on comparison. | |
6599 left_side->Unuse(); | |
6600 right_side->Unuse(); | |
6601 dest->true_target()->Branch(DoubleCondition(cc)); | |
6602 dest->false_target()->Jump(); | |
6603 | |
6604 not_numbers.Bind(left_side, right_side); | |
6605 } | |
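The parity_even bail-out above exists because ucomisd reports any comparison involving NaN as unordered, and every ordered relation on NaN is false, so the generic stub has to take over. A plain C++ sketch of the semantics being preserved (standard library only):

    #include <cassert>
    #include <cmath>

    int main() {
      const double nan = std::nan("");
      assert(std::isunordered(nan, 1.0));      // the case the parity_even branch catches
      assert(!(nan < 1.0) && !(nan >= 1.0));   // all ordered comparisons with NaN are false
      assert(!(nan == nan));                   // even self-equality fails
      return 0;
    }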
6606 | |
6607 | |
6608 class DeferredInlineBinaryOperation: public DeferredCode { | |
6609 public: | |
6610 DeferredInlineBinaryOperation(Token::Value op, | |
6611 Register dst, | |
6612 Register left, | |
6613 Register right, | |
6614 OverwriteMode mode) | |
6615 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) { | |
6616 set_comment("[ DeferredInlineBinaryOperation"); | |
6617 } | |
6618 | |
6619 virtual void Generate(); | |
6620 | |
6621 private: | |
6622 Token::Value op_; | |
6623 Register dst_; | |
6624 Register left_; | |
6625 Register right_; | |
6626 OverwriteMode mode_; | |
6627 }; | |
6628 | |
6629 | |
6630 void DeferredInlineBinaryOperation::Generate() { | |
6631 Label done; | |
6632 if ((op_ == Token::ADD) | |
6633 || (op_ == Token::SUB) | |
6634 || (op_ == Token::MUL) | |
6635 || (op_ == Token::DIV)) { | |
6636 Label call_runtime; | |
6637 Label left_smi, right_smi, load_right, do_op; | |
6638 __ JumpIfSmi(left_, &left_smi); | |
6639 __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset), | |
6640 Heap::kHeapNumberMapRootIndex); | |
6641 __ j(not_equal, &call_runtime); | |
6642 __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset)); | |
6643 if (mode_ == OVERWRITE_LEFT) { | |
6644 __ movq(dst_, left_); | |
6645 } | |
6646 __ jmp(&load_right); | |
6647 | |
6648 __ bind(&left_smi); | |
6649 __ SmiToInteger32(left_, left_); | |
6650 __ cvtlsi2sd(xmm0, left_); | |
6651 __ Integer32ToSmi(left_, left_); | |
6652 if (mode_ == OVERWRITE_LEFT) { | |
6653 Label alloc_failure; | |
6654 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); | |
6655 } | |
6656 | |
6657 __ bind(&load_right); | |
6658 __ JumpIfSmi(right_, &right_smi); | |
6659 __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset), | |
6660 Heap::kHeapNumberMapRootIndex); | |
6661 __ j(not_equal, &call_runtime); | |
6662 __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset)); | |
6663 if (mode_ == OVERWRITE_RIGHT) { | |
6664 __ movq(dst_, right_); | |
6665 } else if (mode_ == NO_OVERWRITE) { | |
6666 Label alloc_failure; | |
6667 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); | |
6668 } | |
6669 __ jmp(&do_op); | |
6670 | |
6671 __ bind(&right_smi); | |
6672 __ SmiToInteger32(right_, right_); | |
6673 __ cvtlsi2sd(xmm1, right_); | |
6674 __ Integer32ToSmi(right_, right_); | |
6675 if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) { | |
6676 Label alloc_failure; | |
6677 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); | |
6678 } | |
6679 | |
6680 __ bind(&do_op); | |
6681 switch (op_) { | |
6682 case Token::ADD: __ addsd(xmm0, xmm1); break; | |
6683 case Token::SUB: __ subsd(xmm0, xmm1); break; | |
6684 case Token::MUL: __ mulsd(xmm0, xmm1); break; | |
6685 case Token::DIV: __ divsd(xmm0, xmm1); break; | |
6686 default: UNREACHABLE(); | |
6687 } | |
6688 __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0); | |
6689 __ jmp(&done); | |
6690 | |
6691 __ bind(&call_runtime); | |
6692 } | |
6693 GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB); | |
6694 stub.GenerateCall(masm_, left_, right_); | |
6695 if (!dst_.is(rax)) __ movq(dst_, rax); | |
6696 __ bind(&done); | |
6697 } | |
6698 | |
6699 | |
6700 static TypeInfo CalculateTypeInfo(TypeInfo operands_type, | |
6701 Token::Value op, | |
6702 const Result& right, | |
6703 const Result& left) { | |
6704 // Set TypeInfo of result according to the operation performed. | |
6705 // We rely on the fact that smis have a 32 bit payload on x64. | |
6706 STATIC_ASSERT(kSmiValueSize == 32); | |
6707 switch (op) { | |
6708 case Token::COMMA: | |
6709 return right.type_info(); | |
6710 case Token::OR: | |
6711 case Token::AND: | |
6712 // Result type can be either of the two input types. | |
6713 return operands_type; | |
6714 case Token::BIT_OR: | |
6715 case Token::BIT_XOR: | |
6716 case Token::BIT_AND: | |
6717 // Result is always a smi. | |
6718 return TypeInfo::Smi(); | |
6719 case Token::SAR: | |
6720 case Token::SHL: | |
6721 // Result is always a smi. | |
6722 return TypeInfo::Smi(); | |
6723 case Token::SHR: | |
6724 // Result of x >>> y is always a smi if masked y >= 1, otherwise a number. | |
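// (E.g. any x >>> 1 is at most 0x7FFFFFFF and fits the 32-bit smi payload, | |
// while x >>> 0 can be up to 0xFFFFFFFF and may need a heap number.) | |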
6725 return (right.is_constant() && right.handle()->IsSmi() | |
6726 && (Smi::cast(*right.handle())->value() & 0x1F) >= 1) | |
6727 ? TypeInfo::Smi() | |
6728 : TypeInfo::Number(); | |
6729 case Token::ADD: | |
6730 if (operands_type.IsNumber()) { | |
6731 return TypeInfo::Number(); | |
6732 } else if (left.type_info().IsString() || right.type_info().IsString()) { | |
6733 return TypeInfo::String(); | |
6734 } else { | |
6735 return TypeInfo::Unknown(); | |
6736 } | |
6737 case Token::SUB: | |
6738 case Token::MUL: | |
6739 case Token::DIV: | |
6740 case Token::MOD: | |
6741 // Result is always a number. | |
6742 return TypeInfo::Number(); | |
6743 default: | |
6744 UNREACHABLE(); | |
6745 } | |
6746 UNREACHABLE(); | |
6747 return TypeInfo::Unknown(); | |
6748 } | |
6749 | |
6750 | |
6751 void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, | |
6752 OverwriteMode overwrite_mode) { | |
6753 Comment cmnt(masm_, "[ BinaryOperation"); | |
6754 Token::Value op = expr->op(); | |
6755 Comment cmnt_token(masm_, Token::String(op)); | |
6756 | |
6757 if (op == Token::COMMA) { | |
6758 // Simply discard left value. | |
6759 frame_->Nip(1); | |
6760 return; | |
6761 } | |
6762 | |
6763 Result right = frame_->Pop(); | |
6764 Result left = frame_->Pop(); | |
6765 | |
6766 if (op == Token::ADD) { | |
6767 const bool left_is_string = left.type_info().IsString(); | |
6768 const bool right_is_string = right.type_info().IsString(); | |
6769 // Make sure constant strings have string type info. | |
6770 ASSERT(!(left.is_constant() && left.handle()->IsString()) || | |
6771 left_is_string); | |
6772 ASSERT(!(right.is_constant() && right.handle()->IsString()) || | |
6773 right_is_string); | |
6774 if (left_is_string || right_is_string) { | |
6775 frame_->Push(&left); | |
6776 frame_->Push(&right); | |
6777 Result answer; | |
6778 if (left_is_string) { | |
6779 if (right_is_string) { | |
6780 StringAddStub stub(NO_STRING_CHECK_IN_STUB); | |
6781 answer = frame_->CallStub(&stub, 2); | |
6782 } else { | |
6783 answer = | |
6784 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2); | |
6785 } | |
6786 } else if (right_is_string) { | |
6787 answer = | |
6788 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2); | |
6789 } | |
6790 answer.set_type_info(TypeInfo::String()); | |
6791 frame_->Push(&answer); | |
6792 return; | |
6793 } | |
6794 // Neither operand is known to be a string. | |
6795 } | |
6796 | |
6797 bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi(); | |
6798 bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi(); | |
6799 bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi(); | |
6800 bool right_is_non_smi_constant = | |
6801 right.is_constant() && !right.handle()->IsSmi(); | |
6802 | |
6803 if (left_is_smi_constant && right_is_smi_constant) { | |
6804 // Compute the constant result at compile time, and leave it on the frame. | |
6805 int left_int = Smi::cast(*left.handle())->value(); | |
6806 int right_int = Smi::cast(*right.handle())->value(); | |
6807 if (FoldConstantSmis(op, left_int, right_int)) return; | |
6808 } | |
6809 | |
6810 // Get number type of left and right sub-expressions. | |
6811 TypeInfo operands_type = | |
6812 TypeInfo::Combine(left.type_info(), right.type_info()); | |
6813 | |
6814 TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left); | |
6815 | |
6816 Result answer; | |
6817 if (left_is_non_smi_constant || right_is_non_smi_constant) { | |
6818 // Go straight to the slow case, with no smi code. | |
6819 GenericBinaryOpStub stub(op, | |
6820 overwrite_mode, | |
6821 NO_SMI_CODE_IN_STUB, | |
6822 operands_type); | |
6823 answer = stub.GenerateCall(masm_, frame_, &left, &right); | |
6824 } else if (right_is_smi_constant) { | |
6825 answer = ConstantSmiBinaryOperation(expr, &left, right.handle(), | |
6826 false, overwrite_mode); | |
6827 } else if (left_is_smi_constant) { | |
6828 answer = ConstantSmiBinaryOperation(expr, &right, left.handle(), | |
6829 true, overwrite_mode); | |
6830 } else { | |
6831 // Set the flags based on the operation, type and loop nesting level. | |
6832 // Bit operations always assume they likely operate on Smis. Still only | |
6833 // generate the inline Smi check code if this operation is part of a loop. | |
6834 // For all other operations only inline the Smi check code for likely smis | |
6835 // if the operation is part of a loop. | |
6836 if (loop_nesting() > 0 && | |
6837 (Token::IsBitOp(op) || | |
6838 operands_type.IsInteger32() || | |
6839 expr->type()->IsLikelySmi())) { | |
6840 answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode); | |
6841 } else { | |
6842 GenericBinaryOpStub stub(op, | |
6843 overwrite_mode, | |
6844 NO_GENERIC_BINARY_FLAGS, | |
6845 operands_type); | |
6846 answer = stub.GenerateCall(masm_, frame_, &left, &right); | |
6847 } | |
6848 } | |
6849 | |
6850 answer.set_type_info(result_type); | |
6851 frame_->Push(&answer); | |
6852 } | |
6853 | 7449 |
6854 | 7450 |
6855 // Emit a LoadIC call to get the value from receiver and leave it in | 7451 // Emit a LoadIC call to get the value from receiver and leave it in |
6856 // dst. The receiver register is restored after the call. | 7452 // dst. The receiver register is restored after the call. |
6857 class DeferredReferenceGetNamedValue: public DeferredCode { | 7453 class DeferredReferenceGetNamedValue: public DeferredCode { |
6858 public: | 7454 public: |
6859 DeferredReferenceGetNamedValue(Register dst, | 7455 DeferredReferenceGetNamedValue(Register dst, |
6860 Register receiver, | 7456 Register receiver, |
6861 Handle<String> name) | 7457 Handle<String> name) |
6862 : dst_(dst), receiver_(receiver), name_(name) { | 7458 : dst_(dst), receiver_(receiver), name_(name) { |
(...skipping 28 matching lines...) |
6891 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | 7487 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
6892 // Here we use masm_-> instead of the __ macro because this is the | 7488 // Here we use masm_-> instead of the __ macro because this is the |
6893 // instruction that gets patched and coverage code gets in the way. | 7489 // instruction that gets patched and coverage code gets in the way. |
6894 masm_->testl(rax, Immediate(-delta_to_patch_site)); | 7490 masm_->testl(rax, Immediate(-delta_to_patch_site)); |
6895 __ IncrementCounter(&Counters::named_load_inline_miss, 1); | 7491 __ IncrementCounter(&Counters::named_load_inline_miss, 1); |
6896 | 7492 |
6897 if (!dst_.is(rax)) __ movq(dst_, rax); | 7493 if (!dst_.is(rax)) __ movq(dst_, rax); |
6898 } | 7494 } |
6899 | 7495 |
6900 | 7496 |
6901 void DeferredInlineSmiAdd::Generate() { | 7497 class DeferredReferenceGetKeyedValue: public DeferredCode { |
6902 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); | 7498 public: |
6903 igostub.GenerateCall(masm_, dst_, value_); | 7499 explicit DeferredReferenceGetKeyedValue(Register dst, |
| 7500 Register receiver, |
| 7501 Register key) |
| 7502 : dst_(dst), receiver_(receiver), key_(key) { |
| 7503 set_comment("[ DeferredReferenceGetKeyedValue"); |
| 7504 } |
| 7505 |
| 7506 virtual void Generate(); |
| 7507 |
| 7508 Label* patch_site() { return &patch_site_; } |
| 7509 |
| 7510 private: |
| 7511 Label patch_site_; |
| 7512 Register dst_; |
| 7513 Register receiver_; |
| 7514 Register key_; |
| 7515 }; |
| 7516 |
| 7517 |
| 7518 void DeferredReferenceGetKeyedValue::Generate() { |
| 7519 if (receiver_.is(rdx)) { |
| 7520 if (!key_.is(rax)) { |
| 7521 __ movq(rax, key_); |
| 7522 } // else do nothing. |
| 7523 } else if (receiver_.is(rax)) { |
| 7524 if (key_.is(rdx)) { |
| 7525 __ xchg(rax, rdx); |
| 7526 } else if (key_.is(rax)) { |
| 7527 __ movq(rdx, receiver_); |
| 7528 } else { |
| 7529 __ movq(rdx, receiver_); |
| 7530 __ movq(rax, key_); |
| 7531 } |
| 7532 } else if (key_.is(rax)) { |
| 7533 __ movq(rdx, receiver_); |
| 7534 } else { |
| 7535 __ movq(rax, key_); |
| 7536 __ movq(rdx, receiver_); |
| 7537 } |
| 7538 // Calculate the delta from the IC call instruction to the map check |
| 7539 // movq instruction in the inlined version. This delta is stored in |
| 7540 // a test(rax, delta) instruction after the call so that we can find |
| 7541 // it in the IC initialization code and patch the movq instruction. |
| 7542 // This means that we cannot allow test instructions after calls to |
| 7543 // KeyedLoadIC stubs in other places. |
| 7544 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); |
| 7545 __ Call(ic, RelocInfo::CODE_TARGET); |
| 7546 // The delta from the start of the map-compare instruction to the |
| 7547 // test instruction. We use masm_-> directly here instead of the __ |
| 7548 // macro because the macro sometimes uses macro expansion to turn |
| 7549 // into something that can't return a value. This is encountered |
| 7550 // when doing generated code coverage tests. |
| 7551 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
| 7552 // Here we use masm_-> instead of the __ macro because this is the |
| 7553 // instruction that gets patched and coverage code gets in the way. |
| 7554 // TODO(X64): Consider whether it's worth switching the test to a |
| 7555 // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't |
| 7556 // be generated normally. |
| 7557 masm_->testl(rax, Immediate(-delta_to_patch_site)); |
| 7558 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); |
| 7559 |
6904 if (!dst_.is(rax)) __ movq(dst_, rax); | 7560 if (!dst_.is(rax)) __ movq(dst_, rax); |
6905 } | 7561 } |
6906 | 7562 |
6907 | 7563 |
6908 void DeferredInlineSmiAddReversed::Generate() { | 7564 class DeferredReferenceSetKeyedValue: public DeferredCode { |
6909 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); | 7565 public: |
6910 igostub.GenerateCall(masm_, value_, dst_); | 7566 DeferredReferenceSetKeyedValue(Register value, |
6911 if (!dst_.is(rax)) __ movq(dst_, rax); | 7567 Register key, |
| 7568 Register receiver) |
| 7569 : value_(value), key_(key), receiver_(receiver) { |
| 7570 set_comment("[ DeferredReferenceSetKeyedValue"); |
| 7571 } |
| 7572 |
| 7573 virtual void Generate(); |
| 7574 |
| 7575 Label* patch_site() { return &patch_site_; } |
| 7576 |
| 7577 private: |
| 7578 Register value_; |
| 7579 Register key_; |
| 7580 Register receiver_; |
| 7581 Label patch_site_; |
| 7582 }; |
| 7583 |
| 7584 |
| 7585 void DeferredReferenceSetKeyedValue::Generate() { |
| 7586 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1); |
| 7587 // Move value, receiver, and key to registers rax, rdx, and rcx, as |
| 7588 // the IC stub expects. |
| 7589 // Move value to rax, using xchg if the receiver or key is in rax. |
| 7590 if (!value_.is(rax)) { |
| 7591 if (!receiver_.is(rax) && !key_.is(rax)) { |
| 7592 __ movq(rax, value_); |
| 7593 } else { |
| 7594 __ xchg(rax, value_); |
| 7595 // Update receiver_ and key_ if they are affected by the swap. |
| 7596 if (receiver_.is(rax)) { |
| 7597 receiver_ = value_; |
| 7598 } else if (receiver_.is(value_)) { |
| 7599 receiver_ = rax; |
| 7600 } |
| 7601 if (key_.is(rax)) { |
| 7602 key_ = value_; |
| 7603 } else if (key_.is(value_)) { |
| 7604 key_ = rax; |
| 7605 } |
| 7606 } |
| 7607 } |
| 7608 // Value is now in rax. Its original location is remembered in value_, |
| 7609 // and the value is restored to value_ before returning. |
| 7610 // The variables receiver_ and key_ are not preserved. |
| 7611 // Move receiver and key to rdx and rcx, swapping if necessary. |
| 7612 if (receiver_.is(rdx)) { |
| 7613 if (!key_.is(rcx)) { |
| 7614 __ movq(rcx, key_); |
| 7615 } // Else everything is already in the right place. |
| 7616 } else if (receiver_.is(rcx)) { |
| 7617 if (key_.is(rdx)) { |
| 7618 __ xchg(rcx, rdx); |
| 7619 } else if (key_.is(rcx)) { |
| 7620 __ movq(rdx, receiver_); |
| 7621 } else { |
| 7622 __ movq(rdx, receiver_); |
| 7623 __ movq(rcx, key_); |
| 7624 } |
| 7625 } else if (key_.is(rcx)) { |
| 7626 __ movq(rdx, receiver_); |
| 7627 } else { |
| 7628 __ movq(rcx, key_); |
| 7629 __ movq(rdx, receiver_); |
| 7630 } |
| 7631 |
| 7632 // Call the IC stub. |
| 7633 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); |
| 7634 __ Call(ic, RelocInfo::CODE_TARGET); |
| 7635 // The delta from the start of the map-compare instructions (initial movq) |
| 7636 // to the test instruction. We use masm_-> directly here instead of the |
| 7637 // __ macro because the macro sometimes uses macro expansion to turn |
| 7638 // into something that can't return a value. This is encountered |
| 7639 // when doing generated code coverage tests. |
| 7640 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
| 7641 // Here we use masm_-> instead of the __ macro because this is the |
| 7642 // instruction that gets patched and coverage code gets in the way. |
| 7643 masm_->testl(rax, Immediate(-delta_to_patch_site)); |
| 7644 // Restore value (returned from store IC). |
| 7645 if (!value_.is(rax)) __ movq(value_, rax); |
6912 } | 7646 } |
6913 | 7647 |
6914 | 7648 |
6915 void DeferredInlineSmiSub::Generate() { | |
6916 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB); | |
6917 igostub.GenerateCall(masm_, dst_, value_); | |
6918 if (!dst_.is(rax)) __ movq(dst_, rax); | |
6919 } | |
6920 | |
6921 | |
6922 void DeferredInlineSmiOperation::Generate() { | |
6923 // For mod we don't generate all the Smi code inline. | |
6924 GenericBinaryOpStub stub( | |
6925 op_, | |
6926 overwrite_mode_, | |
6927 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB); | |
6928 stub.GenerateCall(masm_, src_, value_); | |
6929 if (!dst_.is(rax)) __ movq(dst_, rax); | |
6930 } | |
6931 | |
6932 | |
6933 void DeferredInlineSmiOperationReversed::Generate() { | |
6934 GenericBinaryOpStub stub( | |
6935 op_, | |
6936 overwrite_mode_, | |
6937 NO_SMI_CODE_IN_STUB); | |
6938 stub.GenerateCall(masm_, value_, src_); | |
6939 if (!dst_.is(rax)) __ movq(dst_, rax); | |
6940 } | |
6941 | |
6942 | |
6943 Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr, | |
6944 Result* operand, | |
6945 Handle<Object> value, | |
6946 bool reversed, | |
6947 OverwriteMode overwrite_mode) { | |
6948 // Generate inline code for a binary operation when one of the | |
6949 // operands is a constant smi. Consumes the argument "operand". | |
6950 if (IsUnsafeSmi(value)) { | |
6951 Result unsafe_operand(value); | |
6952 if (reversed) { | |
6953 return LikelySmiBinaryOperation(expr, &unsafe_operand, operand, | |
6954 overwrite_mode); | |
6955 } else { | |
6956 return LikelySmiBinaryOperation(expr, operand, &unsafe_operand, | |
6957 overwrite_mode); | |
6958 } | |
6959 } | |
6960 | |
6961 // Get the literal value. | |
6962 Smi* smi_value = Smi::cast(*value); | |
6963 int int_value = smi_value->value(); | |
6964 | |
6965 Token::Value op = expr->op(); | |
6966 Result answer; | |
6967 switch (op) { | |
6968 case Token::ADD: { | |
6969 operand->ToRegister(); | |
6970 frame_->Spill(operand->reg()); | |
6971 DeferredCode* deferred = NULL; | |
6972 if (reversed) { | |
6973 deferred = new DeferredInlineSmiAddReversed(operand->reg(), | |
6974 smi_value, | |
6975 overwrite_mode); | |
6976 } else { | |
6977 deferred = new DeferredInlineSmiAdd(operand->reg(), | |
6978 smi_value, | |
6979 overwrite_mode); | |
6980 } | |
6981 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
6982 deferred); | |
6983 __ SmiAddConstant(operand->reg(), | |
6984 operand->reg(), | |
6985 smi_value, | |
6986 deferred->entry_label()); | |
6987 deferred->BindExit(); | |
6988 answer = *operand; | |
6989 break; | |
6990 } | |
6991 | |
6992 case Token::SUB: { | |
6993 if (reversed) { | |
6994 Result constant_operand(value); | |
6995 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, | |
6996 overwrite_mode); | |
6997 } else { | |
6998 operand->ToRegister(); | |
6999 frame_->Spill(operand->reg()); | |
7000 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(), | |
7001 smi_value, | |
7002 overwrite_mode); | |
7003 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
7004 deferred); | |
7005 // A smi currently fits in a 32-bit Immediate. | |
7006 __ SmiSubConstant(operand->reg(), | |
7007 operand->reg(), | |
7008 smi_value, | |
7009 deferred->entry_label()); | |
7010 deferred->BindExit(); | |
7011 answer = *operand; | |
7012 } | |
7013 break; | |
7014 } | |
7015 | |
7016 case Token::SAR: | |
7017 if (reversed) { | |
7018 Result constant_operand(value); | |
7019 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, | |
7020 overwrite_mode); | |
7021 } else { | |
7022 // Only the least significant 5 bits of the shift value are used. | |
7023 // In the slow case, this masking is done inside the runtime call. | |
7024 int shift_value = int_value & 0x1f; | |
7025 operand->ToRegister(); | |
7026 frame_->Spill(operand->reg()); | |
7027 DeferredInlineSmiOperation* deferred = | |
7028 new DeferredInlineSmiOperation(op, | |
7029 operand->reg(), | |
7030 operand->reg(), | |
7031 smi_value, | |
7032 overwrite_mode); | |
7033 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
7034 deferred); | |
7035 __ SmiShiftArithmeticRightConstant(operand->reg(), | |
7036 operand->reg(), | |
7037 shift_value); | |
7038 deferred->BindExit(); | |
7039 answer = *operand; | |
7040 } | |
7041 break; | |
7042 | |
7043 case Token::SHR: | |
7044 if (reversed) { | |
7045 Result constant_operand(value); | |
7046 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, | |
7047 overwrite_mode); | |
7048 } else { | |
7049 // Only the least significant 5 bits of the shift value are used. | |
7050 // In the slow case, this masking is done inside the runtime call. | |
7051 int shift_value = int_value & 0x1f; | |
7052 operand->ToRegister(); | |
7053 answer = allocator()->Allocate(); | |
7054 ASSERT(answer.is_valid()); | |
7055 DeferredInlineSmiOperation* deferred = | |
7056 new DeferredInlineSmiOperation(op, | |
7057 answer.reg(), | |
7058 operand->reg(), | |
7059 smi_value, | |
7060 overwrite_mode); | |
7061 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
7062 deferred); | |
7063 __ SmiShiftLogicalRightConstant(answer.reg(), | |
7064 operand->reg(), | |
7065 shift_value, | |
7066 deferred->entry_label()); | |
7067 deferred->BindExit(); | |
7068 operand->Unuse(); | |
7069 } | |
7070 break; | |
7071 | |
7072 case Token::SHL: | |
7073 if (reversed) { | |
7074 operand->ToRegister(); | |
7075 | |
7076 // We need rcx to be available to hold operand, and to be spilled. | |
7077 // SmiShiftLeft implicitly modifies rcx. | |
7078 if (operand->reg().is(rcx)) { | |
7079 frame_->Spill(operand->reg()); | |
7080 answer = allocator()->Allocate(); | |
7081 } else { | |
7082 Result rcx_reg = allocator()->Allocate(rcx); | |
7083 // answer must not be rcx. | |
7084 answer = allocator()->Allocate(); | |
7085 // rcx_reg goes out of scope. | |
7086 } | |
7087 | |
7088 DeferredInlineSmiOperationReversed* deferred = | |
7089 new DeferredInlineSmiOperationReversed(op, | |
7090 answer.reg(), | |
7091 smi_value, | |
7092 operand->reg(), | |
7093 overwrite_mode); | |
7094 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
7095 deferred); | |
7096 | |
7097 __ Move(answer.reg(), smi_value); | |
7098 __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg()); | |
7099 operand->Unuse(); | |
7100 | |
7101 deferred->BindExit(); | |
7102 } else { | |
7103 // Only the least significant 5 bits of the shift value are used. | |
7104 // In the slow case, this masking is done inside the runtime call. | |
7105 int shift_value = int_value & 0x1f; | |
7106 operand->ToRegister(); | |
7107 if (shift_value == 0) { | |
7108 // Spill operand so it can be overwritten in the slow case. | |
7109 frame_->Spill(operand->reg()); | |
7110 DeferredInlineSmiOperation* deferred = | |
7111 new DeferredInlineSmiOperation(op, | |
7112 operand->reg(), | |
7113 operand->reg(), | |
7114 smi_value, | |
7115 overwrite_mode); | |
7116 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
7117 deferred); | |
7118 deferred->BindExit(); | |
7119 answer = *operand; | |
7120 } else { | |
7121 // Use a fresh temporary for nonzero shift values. | |
7122 answer = allocator()->Allocate(); | |
7123 ASSERT(answer.is_valid()); | |
7124 DeferredInlineSmiOperation* deferred = | |
7125 new DeferredInlineSmiOperation(op, | |
7126 answer.reg(), | |
7127 operand->reg(), | |
7128 smi_value, | |
7129 overwrite_mode); | |
7130 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
7131 deferred); | |
7132 __ SmiShiftLeftConstant(answer.reg(), | |
7133 operand->reg(), | |
7134 shift_value); | |
7135 deferred->BindExit(); | |
7136 operand->Unuse(); | |
7137 } | |
7138 } | |
7139 break; | |
7140 | |
7141 case Token::BIT_OR: | |
7142 case Token::BIT_XOR: | |
7143 case Token::BIT_AND: { | |
7144 operand->ToRegister(); | |
7145 frame_->Spill(operand->reg()); | |
7146 if (reversed) { | |
7147 // Bit operations with a constant smi are commutative. | |
7148 // We can swap left and right operands with no problem. | |
7149 // Swap left and right overwrite modes. 0->0, 1->2, 2->1. | |
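// (Assuming the usual enum order NO_OVERWRITE == 0, OVERWRITE_LEFT == 1, | |
// OVERWRITE_RIGHT == 2, the expression (2 * mode) % 3 realizes exactly | |
// that mapping: 0->0, 1->2, 2->1.) | |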
7150 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3); | |
7151 } | |
7152 DeferredCode* deferred = new DeferredInlineSmiOperation(op, | |
7153 operand->reg(), | |
7154 operand->reg(), | |
7155 smi_value, | |
7156 overwrite_mode); | |
7157 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
7158 deferred); | |
7159 if (op == Token::BIT_AND) { | |
7160 __ SmiAndConstant(operand->reg(), operand->reg(), smi_value); | |
7161 } else if (op == Token::BIT_XOR) { | |
7162 if (int_value != 0) { | |
7163 __ SmiXorConstant(operand->reg(), operand->reg(), smi_value); | |
7164 } | |
7165 } else { | |
7166 ASSERT(op == Token::BIT_OR); | |
7167 if (int_value != 0) { | |
7168 __ SmiOrConstant(operand->reg(), operand->reg(), smi_value); | |
7169 } | |
7170 } | |
7171 deferred->BindExit(); | |
7172 answer = *operand; | |
7173 break; | |
7174 } | |
7175 | |
7176 // Generate inline code for mod of powers of 2 and negative powers of 2. | |
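// (For a non-negative smi x and a power of two m, x % m == x & (m - 1); | |
// since the sign of a JS modulus follows the dividend, x % -m gives the | |
// same result, which is why negative powers of two are handled too.) | |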
7177 case Token::MOD: | |
7178 if (!reversed && | |
7179 int_value != 0 && | |
7180 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) { | |
7181 operand->ToRegister(); | |
7182 frame_->Spill(operand->reg()); | |
7183 DeferredCode* deferred = | |
7184 new DeferredInlineSmiOperation(op, | |
7185 operand->reg(), | |
7186 operand->reg(), | |
7187 smi_value, | |
7188 overwrite_mode); | |
7189 // Check for negative or non-Smi left hand side. | |
7190 __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label()); | |
7191 if (int_value < 0) int_value = -int_value; | |
7192 if (int_value == 1) { | |
7193 __ Move(operand->reg(), Smi::FromInt(0)); | |
7194 } else { | |
7195 __ SmiAndConstant(operand->reg(), | |
7196 operand->reg(), | |
7197 Smi::FromInt(int_value - 1)); | |
7198 } | |
7199 deferred->BindExit(); | |
7200 answer = *operand; | |
7201 break; // This break only applies if we generated code for MOD. | |
7202 } | |
7203 // Fall through if we did not find a power of 2 on the right hand side! | |
7204 // The next case must be the default. | |
7205 | |
7206 default: { | |
7207 Result constant_operand(value); | |
7208 if (reversed) { | |
7209 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, | |
7210 overwrite_mode); | |
7211 } else { | |
7212 answer = LikelySmiBinaryOperation(expr, operand, &constant_operand, | |
7213 overwrite_mode); | |
7214 } | |
7215 break; | |
7216 } | |
7217 } | |
7218 ASSERT(answer.is_valid()); | |
7219 return answer; | |
7220 } | |
7221 | |
7222 | |
7223 void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg, | |
7224 TypeInfo type, | |
7225 DeferredCode* deferred) { | |
7226 if (!type.IsSmi()) { | |
7227 __ JumpIfNotSmi(reg, deferred->entry_label()); | |
7228 } | |
7229 if (FLAG_debug_code) { | |
7230 __ AbortIfNotSmi(reg); | |
7231 } | |
7232 } | |
7233 | |
7234 | |
7235 void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, | |
7236 Register right, | |
7237 TypeInfo left_info, | |
7238 TypeInfo right_info, | |
7239 DeferredCode* deferred) { | |
7240 if (!left_info.IsSmi() && !right_info.IsSmi()) { | |
7241 __ JumpIfNotBothSmi(left, right, deferred->entry_label()); | |
7242 } else if (!left_info.IsSmi()) { | |
7243 __ JumpIfNotSmi(left, deferred->entry_label()); | |
7244 } else if (!right_info.IsSmi()) { | |
7245 __ JumpIfNotSmi(right, deferred->entry_label()); | |
7246 } | |
7247 if (FLAG_debug_code) { | |
7248 __ AbortIfNotSmi(left); | |
7249 __ AbortIfNotSmi(right); | |
7250 } | |
7251 } | |
7252 | |
7253 | |
7254 // Implements a binary operation using a deferred code object and some | |
7255 // inline code to operate on smis quickly. | |
7256 Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, | |
7257 Result* left, | |
7258 Result* right, | |
7259 OverwriteMode overwrite_mode) { | |
7260 // Copy the type info because left and right may be overwritten. | |
7261 TypeInfo left_type_info = left->type_info(); | |
7262 TypeInfo right_type_info = right->type_info(); | |
7263 Token::Value op = expr->op(); | |
7264 Result answer; | |
7265 // Special handling of div and mod because they use fixed registers. | |
7266 if (op == Token::DIV || op == Token::MOD) { | |
7267 // We need rax as the quotient register, rdx as the remainder | |
7268 // register, neither left nor right in rax or rdx, and left copied | |
7269 // to rax. | |
7270 Result quotient; | |
7271 Result remainder; | |
7272 bool left_is_in_rax = false; | |
7273 // Step 1: get rax for quotient. | |
7274 if ((left->is_register() && left->reg().is(rax)) || | |
7275 (right->is_register() && right->reg().is(rax))) { | |
7276 // One or both is in rax. Use a fresh non-rdx register for | |
7277 // them. | |
7278 Result fresh = allocator_->Allocate(); | |
7279 ASSERT(fresh.is_valid()); | |
7280 if (fresh.reg().is(rdx)) { | |
7281 remainder = fresh; | |
7282 fresh = allocator_->Allocate(); | |
7283 ASSERT(fresh.is_valid()); | |
7284 } | |
7285 if (left->is_register() && left->reg().is(rax)) { | |
7286 quotient = *left; | |
7287 *left = fresh; | |
7288 left_is_in_rax = true; | |
7289 } | |
7290 if (right->is_register() && right->reg().is(rax)) { | |
7291 quotient = *right; | |
7292 *right = fresh; | |
7293 } | |
7294 __ movq(fresh.reg(), rax); | |
7295 } else { | |
7296 // Neither left nor right is in rax. | |
7297 quotient = allocator_->Allocate(rax); | |
7298 } | |
7299 ASSERT(quotient.is_register() && quotient.reg().is(rax)); | |
7300 ASSERT(!(left->is_register() && left->reg().is(rax))); | |
7301 ASSERT(!(right->is_register() && right->reg().is(rax))); | |
7302 | |
7303 // Step 2: get rdx for remainder if necessary. | |
7304 if (!remainder.is_valid()) { | |
7305 if ((left->is_register() && left->reg().is(rdx)) || | |
7306 (right->is_register() && right->reg().is(rdx))) { | |
7307 Result fresh = allocator_->Allocate(); | |
7308 ASSERT(fresh.is_valid()); | |
7309 if (left->is_register() && left->reg().is(rdx)) { | |
7310 remainder = *left; | |
7311 *left = fresh; | |
7312 } | |
7313 if (right->is_register() && right->reg().is(rdx)) { | |
7314 remainder = *right; | |
7315 *right = fresh; | |
7316 } | |
7317 __ movq(fresh.reg(), rdx); | |
7318 } else { | |
7319 // Neither left nor right is in rdx. | |
7320 remainder = allocator_->Allocate(rdx); | |
7321 } | |
7322 } | |
7323 ASSERT(remainder.is_register() && remainder.reg().is(rdx)); | |
7324 ASSERT(!(left->is_register() && left->reg().is(rdx))); | |
7325 ASSERT(!(right->is_register() && right->reg().is(rdx))); | |
7326 | |
7327 left->ToRegister(); | |
7328 right->ToRegister(); | |
7329 frame_->Spill(rax); | |
7330 frame_->Spill(rdx); | |
7331 | |
7332 // Check that left and right are smi tagged. | |
7333 DeferredInlineBinaryOperation* deferred = | |
7334 new DeferredInlineBinaryOperation(op, | |
7335 (op == Token::DIV) ? rax : rdx, | |
7336 left->reg(), | |
7337 right->reg(), | |
7338 overwrite_mode); | |
7339 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), | |
7340 left_type_info, right_type_info, deferred); | |
7341 | |
7342 if (op == Token::DIV) { | |
7343 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label()); | |
7344 deferred->BindExit(); | |
7345 left->Unuse(); | |
7346 right->Unuse(); | |
7347 answer = quotient; | |
7348 } else { | |
7349 ASSERT(op == Token::MOD); | |
7350 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label()); | |
7351 deferred->BindExit(); | |
7352 left->Unuse(); | |
7353 right->Unuse(); | |
7354 answer = remainder; | |
7355 } | |
7356 ASSERT(answer.is_valid()); | |
7357 return answer; | |
7358 } | |
7359 | |
7360 // Special handling of shift operations because they use fixed | |
7361 // registers. | |
7362 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) { | |
7363 // Move left out of rcx if necessary. | |
7364 if (left->is_register() && left->reg().is(rcx)) { | |
7365 *left = allocator_->Allocate(); | |
7366 ASSERT(left->is_valid()); | |
7367 __ movq(left->reg(), rcx); | |
7368 } | |
7369 right->ToRegister(rcx); | |
7370 left->ToRegister(); | |
7371 ASSERT(left->is_register() && !left->reg().is(rcx)); | |
7372 ASSERT(right->is_register() && right->reg().is(rcx)); | |
7373 | |
7374 // We will modify right, it must be spilled. | |
7375 frame_->Spill(rcx); | |
7376 | |
7377 // Use a fresh answer register to avoid spilling the left operand. | |
7378 answer = allocator_->Allocate(); | |
7379 ASSERT(answer.is_valid()); | |
7380 // Check that both operands are smis using the answer register as a | |
7381 // temporary. | |
7382 DeferredInlineBinaryOperation* deferred = | |
7383 new DeferredInlineBinaryOperation(op, | |
7384 answer.reg(), | |
7385 left->reg(), | |
7386 rcx, | |
7387 overwrite_mode); | |
7388 | |
7389 Label do_op; | |
7390 if (right_type_info.IsSmi()) { | |
7391 if (FLAG_debug_code) { | |
7392 __ AbortIfNotSmi(right->reg()); | |
7393 } | |
7394 __ movq(answer.reg(), left->reg()); | |
7395 // If left is not known to be a smi, check if it is. | |
7396 // If left is not known to be a number, and it isn't a smi, check if | |
7397 // it is a HeapNumber. | |
7398 if (!left_type_info.IsSmi()) { | |
7399 __ JumpIfSmi(answer.reg(), &do_op); | |
7400 if (!left_type_info.IsNumber()) { | |
7401 // Branch if not a heapnumber. | |
7402 __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset), | |
7403 Factory::heap_number_map()); | |
7404 deferred->Branch(not_equal); | |
7405 } | |
7406 // Load integer value into answer register using truncation. | |
7407 __ cvttsd2si(answer.reg(), | |
7408 FieldOperand(answer.reg(), HeapNumber::kValueOffset)); | |
7409 // Branch if we might have overflowed. | |
7410 // (False negative for Smi::kMinValue) | |
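// cvttsd2si yields 0x80000000 for NaN and out-of-range inputs, so | |
// comparing against that sentinel catches failed conversions. | |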
7411 __ cmpq(answer.reg(), Immediate(0x80000000)); | |
7412 deferred->Branch(equal); | |
7413 // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging. | |
7414 __ Integer32ToSmi(answer.reg(), answer.reg()); | |
7415 } else { | |
7416 // Fast case - both are actually smis. | |
7417 if (FLAG_debug_code) { | |
7418 __ AbortIfNotSmi(left->reg()); | |
7419 } | |
7420 } | |
7421 } else { | |
7422 JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx, | |
7423 left_type_info, right_type_info, deferred); | |
7424 } | |
7425 __ bind(&do_op); | |
7426 | |
7427 // Perform the operation. | |
7428 switch (op) { | |
7429 case Token::SAR: | |
7430 __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx); | |
7431 break; | |
7432 case Token::SHR: { | |
7433 __ SmiShiftLogicalRight(answer.reg(), | |
7434 left->reg(), | |
7435 rcx, | |
7436 deferred->entry_label()); | |
7437 break; | |
7438 } | |
7439 case Token::SHL: { | |
7440 __ SmiShiftLeft(answer.reg(), | |
7441 left->reg(), | |
7442 rcx); | |
7443 break; | |
7444 } | |
7445 default: | |
7446 UNREACHABLE(); | |
7447 } | |
7448 deferred->BindExit(); | |
7449 left->Unuse(); | |
7450 right->Unuse(); | |
7451 ASSERT(answer.is_valid()); | |
7452 return answer; | |
7453 } | |
7454 | |
7455 // Handle the other binary operations. | |
7456 left->ToRegister(); | |
7457 right->ToRegister(); | |
7458 // A newly allocated register answer is used to hold the answer. The | |
7459 // registers containing left and right are not modified so they don't | |
7460 // need to be spilled in the fast case. | |
7461 answer = allocator_->Allocate(); | |
7462 ASSERT(answer.is_valid()); | |
7463 | |
7464 // Perform the smi tag check. | |
7465 DeferredInlineBinaryOperation* deferred = | |
7466 new DeferredInlineBinaryOperation(op, | |
7467 answer.reg(), | |
7468 left->reg(), | |
7469 right->reg(), | |
7470 overwrite_mode); | |
7471 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), | |
7472 left_type_info, right_type_info, deferred); | |
7473 | |
7474 switch (op) { | |
7475 case Token::ADD: | |
7476 __ SmiAdd(answer.reg(), | |
7477 left->reg(), | |
7478 right->reg(), | |
7479 deferred->entry_label()); | |
7480 break; | |
7481 | |
7482 case Token::SUB: | |
7483 __ SmiSub(answer.reg(), | |
7484 left->reg(), | |
7485 right->reg(), | |
7486 deferred->entry_label()); | |
7487 break; | |
7488 | |
7489 case Token::MUL: { | |
7490 __ SmiMul(answer.reg(), | |
7491 left->reg(), | |
7492 right->reg(), | |
7493 deferred->entry_label()); | |
7494 break; | |
7495 } | |
7496 | |
7497 case Token::BIT_OR: | |
7498 __ SmiOr(answer.reg(), left->reg(), right->reg()); | |
7499 break; | |
7500 | |
7501 case Token::BIT_AND: | |
7502 __ SmiAnd(answer.reg(), left->reg(), right->reg()); | |
7503 break; | |
7504 | |
7505 case Token::BIT_XOR: | |
7506 __ SmiXor(answer.reg(), left->reg(), right->reg()); | |
7507 break; | |
7508 | |
7509 default: | |
7510 UNREACHABLE(); | |
7511 break; | |
7512 } | |
7513 deferred->BindExit(); | |
7514 left->Unuse(); | |
7515 right->Unuse(); | |
7516 ASSERT(answer.is_valid()); | |
7517 return answer; | |
7518 } | |
7519 | |
7520 | |
7521 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { | 7649 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { |
7522 #ifdef DEBUG | 7650 #ifdef DEBUG |
7523 int original_height = frame()->height(); | 7651 int original_height = frame()->height(); |
7524 #endif | 7652 #endif |
7525 Result result; | 7653 Result result; |
7526 // Do not inline the inobject property case for loads from the global | 7654 // Do not inline the inobject property case for loads from the global |
7527 // object. Also do not inline for unoptimized code. This saves time | 7655 // object. Also do not inline for unoptimized code. This saves time |
7528 // in the code generator. Unoptimized code is toplevel code or code | 7656 // in the code generator. Unoptimized code is toplevel code or code |
7529 // that is not in a loop. | 7657 // that is not in a loop. |
7530 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { | 7658 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { |
(...skipping 602 matching lines...) |
8133 // Return 1/0 for true/false in rax. | 8261 // Return 1/0 for true/false in rax. |
8134 __ bind(&true_result); | 8262 __ bind(&true_result); |
8135 __ movq(rax, Immediate(1)); | 8263 __ movq(rax, Immediate(1)); |
8136 __ ret(1 * kPointerSize); | 8264 __ ret(1 * kPointerSize); |
8137 __ bind(&false_result); | 8265 __ bind(&false_result); |
8138 __ xor_(rax, rax); | 8266 __ xor_(rax, rax); |
8139 __ ret(1 * kPointerSize); | 8267 __ ret(1 * kPointerSize); |
8140 } | 8268 } |
8141 | 8269 |
8142 | 8270 |
8143 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { | 8271 void GenericBinaryOpStub::GenerateCall( |
8144 Object* answer_object = Heap::undefined_value(); | 8272 MacroAssembler* masm, |
8145 switch (op) { | 8273 Register left, |
| 8274 Register right) { |
| 8275 if (!ArgsInRegistersSupported()) { |
| 8276 // Pass arguments on the stack. |
| 8277 __ push(left); |
| 8278 __ push(right); |
| 8279 } else { |
| 8280 // The calling convention with registers is left in rdx and right in rax. |
| 8281 Register left_arg = rdx; |
| 8282 Register right_arg = rax; |
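| // The moves below permute {left, right} into {rdx, rax} without a scratch
| // register, using xchg or SetArgsReversed() when the inputs are crossed.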
| 8283 if (!(left.is(left_arg) && right.is(right_arg))) { |
| 8284 if (left.is(right_arg) && right.is(left_arg)) { |
| 8285 if (IsOperationCommutative()) { |
| 8286 SetArgsReversed(); |
| 8287 } else { |
| 8288 __ xchg(left, right); |
| 8289 } |
| 8290 } else if (left.is(left_arg)) { |
| 8291 __ movq(right_arg, right); |
| 8292 } else if (right.is(right_arg)) { |
| 8293 __ movq(left_arg, left); |
| 8294 } else if (left.is(right_arg)) { |
| 8295 if (IsOperationCommutative()) { |
| 8296 __ movq(left_arg, right); |
| 8297 SetArgsReversed(); |
| 8298 } else { |
| 8299 // Order of moves important to avoid destroying left argument. |
| 8300 __ movq(left_arg, left); |
| 8301 __ movq(right_arg, right); |
| 8302 } |
| 8303 } else if (right.is(left_arg)) { |
| 8304 if (IsOperationCommutative()) { |
| 8305 __ movq(right_arg, left); |
| 8306 SetArgsReversed(); |
| 8307 } else { |
| 8308 // Order of moves important to avoid destroying right argument. |
| 8309 __ movq(right_arg, right); |
| 8310 __ movq(left_arg, left); |
| 8311 } |
| 8312 } else { |
| 8313 // Order of moves is not important. |
| 8314 __ movq(left_arg, left); |
| 8315 __ movq(right_arg, right); |
| 8316 } |
| 8317 } |
| 8318 |
| 8319 // Update flags to indicate that arguments are in registers. |
| 8320 SetArgsInRegisters(); |
| 8321 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); |
| 8322 } |
| 8323 |
| 8324 // Call the stub. |
| 8325 __ CallStub(this); |
| 8326 } |
| 8327 |
| 8328 |
| 8329 void GenericBinaryOpStub::GenerateCall( |
| 8330 MacroAssembler* masm, |
| 8331 Register left, |
| 8332 Smi* right) { |
| 8333 if (!ArgsInRegistersSupported()) { |
| 8334 // Pass arguments on the stack. |
| 8335 __ push(left); |
| 8336 __ Push(right); |
| 8337 } else { |
| 8338 // The calling convention with registers is left in rdx and right in rax. |
| 8339 Register left_arg = rdx; |
| 8340 Register right_arg = rax; |
| 8341 if (left.is(left_arg)) { |
| 8342 __ Move(right_arg, right); |
| 8343 } else if (left.is(right_arg) && IsOperationCommutative()) { |
| 8344 __ Move(left_arg, right); |
| 8345 SetArgsReversed(); |
| 8346 } else { |
| 8347 // For non-commutative operations, left and right_arg might be |
| 8348 // the same register. Therefore, the order of the moves is |
| 8349 // important here in order to not overwrite left before moving |
| 8350 // it to left_arg. |
| 8351 __ movq(left_arg, left); |
| 8352 __ Move(right_arg, right); |
| 8353 } |
| 8354 |
| 8355 // Update flags to indicate that arguments are in registers. |
| 8356 SetArgsInRegisters(); |
| 8357 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); |
| 8358 } |
| 8359 |
| 8360 // Call the stub. |
| 8361 __ CallStub(this); |
| 8362 } |
| 8363 |
| 8364 |
| 8365 void GenericBinaryOpStub::GenerateCall( |
| 8366 MacroAssembler* masm, |
| 8367 Smi* left, |
| 8368 Register right) { |
| 8369 if (!ArgsInRegistersSupported()) { |
| 8370 // Pass arguments on the stack. |
| 8371 __ Push(left); |
| 8372 __ push(right); |
| 8373 } else { |
| 8374 // The calling convention with registers is left in rdx and right in rax. |
| 8375 Register left_arg = rdx; |
| 8376 Register right_arg = rax; |
| 8377 if (right.is(right_arg)) { |
| 8378 __ Move(left_arg, left); |
| 8379 } else if (right.is(left_arg) && IsOperationCommutative()) { |
| 8380 __ Move(right_arg, left); |
| 8381 SetArgsReversed(); |
| 8382 } else { |
| 8383 // For non-commutative operations, right and left_arg might be |
| 8384 // the same register. Therefore, the order of the moves is |
| 8385 // important here in order to not overwrite right before moving |
| 8386 // it to right_arg. |
| 8387 __ movq(right_arg, right); |
| 8388 __ Move(left_arg, left); |
| 8389 } |
| 8390 // Update flags to indicate that arguments are in registers. |
| 8391 SetArgsInRegisters(); |
| 8392 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); |
| 8393 } |
| 8394 |
| 8395 // Call the stub. |
| 8396 __ CallStub(this); |
| 8397 } |
| 8398 |
| 8399 |
| 8400 Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, |
| 8401 VirtualFrame* frame, |
| 8402 Result* left, |
| 8403 Result* right) { |
| 8404 if (ArgsInRegistersSupported()) { |
| 8405 SetArgsInRegisters(); |
| 8406 return frame->CallStub(this, left, right); |
| 8407 } else { |
| 8408 frame->Push(left); |
| 8409 frame->Push(right); |
| 8410 return frame->CallStub(this, 2); |
| 8411 } |
| 8412 } |
| 8413 |
| 8414 |
| 8415 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
| 8416 // 1. Move arguments into rdx, rax except for DIV and MOD, which need the |
| 8417 // dividend in rax and rdx free for the division. Use rax, rbx for those. |
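| // (idiv takes its dividend in rdx:rax and leaves the quotient in rax and
| // the remainder in rdx, so both registers must be kept free here.)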
| 8418 Comment load_comment(masm, "-- Load arguments"); |
| 8419 Register left = rdx; |
| 8420 Register right = rax; |
| 8421 if (op_ == Token::DIV || op_ == Token::MOD) { |
| 8422 left = rax; |
| 8423 right = rbx; |
| 8424 if (HasArgsInRegisters()) { |
| 8425 __ movq(rbx, rax); |
| 8426 __ movq(rax, rdx); |
| 8427 } |
| 8428 } |
| 8429 if (!HasArgsInRegisters()) { |
| 8430 __ movq(right, Operand(rsp, 1 * kPointerSize)); |
| 8431 __ movq(left, Operand(rsp, 2 * kPointerSize)); |
| 8432 } |
| 8433 |
| 8434 Label not_smis; |
| 8435 // 2. Smi check both operands. |
| 8436 if (static_operands_type_.IsSmi()) { |
| 8437 // Skip smi check if we know that both arguments are smis. |
| 8438 if (FLAG_debug_code) { |
| 8439 __ AbortIfNotSmi(left); |
| 8440 __ AbortIfNotSmi(right); |
| 8441 } |
| 8442 if (op_ == Token::BIT_OR) { |
| 8443 // Handle OR here, since we do extra smi-checking in the or code below. |
| 8444 __ SmiOr(right, right, left); |
| 8445 GenerateReturn(masm); |
| 8446 return; |
| 8447 } |
| 8448 } else { |
| 8449 if (op_ != Token::BIT_OR) { |
| 8450 // Skip the check for OR as it is better combined with the |
| 8451 // actual operation. |
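| // (Two values are both smis exactly when the low tag bit of their bitwise
| // OR is clear, so the smi check falls out of the OR result itself.)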
| 8452 Comment smi_check_comment(masm, "-- Smi check arguments"); |
| 8453 __ JumpIfNotBothSmi(left, right, &not_smis);
| 8454 } |
| 8455 } |
| 8456 |
| 8457 // 3. Operands are both smis (except for OR), perform the operation leaving |
| 8458 // the result in rax and check the result if necessary. |
| 8459 Comment perform_smi(masm, "-- Perform smi operation"); |
| 8460 Label use_fp_on_smis; |
| 8461 switch (op_) { |
| 8462 case Token::ADD: { |
| 8463 ASSERT(right.is(rax)); |
| 8464 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. |
| 8465 break; |
| 8466 } |
| 8467 |
| 8468 case Token::SUB: { |
| 8469 __ SmiSub(left, left, right, &use_fp_on_smis); |
| 8470 __ movq(rax, left); |
| 8471 break; |
| 8472 } |
| 8473 |
| 8474 case Token::MUL: |
| 8475 ASSERT(right.is(rax)); |
| 8476 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. |
| 8477 break; |
| 8478 |
| 8479 case Token::DIV: |
| 8480 ASSERT(left.is(rax)); |
| 8481 __ SmiDiv(left, left, right, &use_fp_on_smis); |
| 8482 break; |
| 8483 |
| 8484 case Token::MOD: |
| 8485 ASSERT(left.is(rax)); |
| 8486 __ SmiMod(left, left, right, slow); |
| 8487 break; |
| 8488 |
| 8489 case Token::BIT_OR: |
| 8490 ASSERT(right.is(rax)); |
| 8491 __ movq(rcx, right); // Save the right operand. |
| 8492 __ SmiOr(right, right, left); // BIT_OR is commutative. |
| 8493 __ testb(right, Immediate(kSmiTagMask)); |
| 8494 __ j(not_zero, &not_smis);
| 8495 break; |
| 8496 |
| 8497 case Token::BIT_AND: |
| 8498 ASSERT(right.is(rax)); |
| 8499 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
| 8500 break; |
| 8501 |
| 8502 case Token::BIT_XOR: |
| 8503 ASSERT(right.is(rax)); |
| 8504 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
| 8505 break; |
| 8506 |
| 8507 case Token::SHL: |
| 8508 case Token::SHR: |
| 8509 case Token::SAR: |
| 8510 switch (op_) { |
| 8511 case Token::SAR: |
| 8512 __ SmiShiftArithmeticRight(left, left, right); |
| 8513 break; |
| 8514 case Token::SHR: |
| 8515 __ SmiShiftLogicalRight(left, left, right, slow); |
| 8516 break; |
| 8517 case Token::SHL: |
| 8518 __ SmiShiftLeft(left, left, right); |
| 8519 break; |
| 8520 default: |
| 8521 UNREACHABLE(); |
| 8522 } |
| 8523 __ movq(rax, left); |
| 8524 break; |
| 8525 |
| 8526 default: |
| 8527 UNREACHABLE(); |
| 8528 break; |
| 8529 } |
| 8530 |
| 8531 // 4. Emit return of result in rax. |
| 8532 GenerateReturn(masm); |
| 8533 |
| 8534 // 5. For some operations emit inline code to perform floating point |
| 8535 // operations on known smis (e.g., if the result of the operation |
| 8536 // overflowed the smi range). |
| 8537 switch (op_) { |
8146 case Token::ADD: | 8538 case Token::ADD: |
8147 // Use intptr_t to detect overflow of 32-bit int. | |
8148 if (Smi::IsValid(static_cast<intptr_t>(left) + right)) { | |
8149 answer_object = Smi::FromInt(left + right); | |
8150 } | |
8151 break; | |
8152 case Token::SUB: | 8539 case Token::SUB: |
8153 // Use intptr_t to detect overflow of 32-bit int. | 8540 case Token::MUL: |
8154 if (Smi::IsValid(static_cast<intptr_t>(left) - right)) { | 8541 case Token::DIV: { |
8155 answer_object = Smi::FromInt(left - right); | 8542 ASSERT(use_fp_on_smis.is_linked()); |
8156 } | 8543 __ bind(&use_fp_on_smis); |
8157 break; | 8544 if (op_ == Token::DIV) { |
8158 case Token::MUL: { | 8545 __ movq(rdx, rax); |
8159 double answer = static_cast<double>(left) * right; | 8546 __ movq(rax, rbx); |
8160 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { | 8547 } |
8161 // If the product is zero and the non-zero factor is negative, | 8548 // left is rdx, right is rax. |
8162 // the spec requires us to return floating point negative zero. | 8549 __ AllocateHeapNumber(rbx, rcx, slow); |
8163 if (answer != 0 || (left + right) >= 0) { | 8550 FloatingPointHelper::LoadSSE2SmiOperands(masm); |
8164 answer_object = Smi::FromInt(static_cast<int>(answer)); | 8551 switch (op_) { |
8165 } | 8552 case Token::ADD: __ addsd(xmm0, xmm1); break; |
8166 } | 8553 case Token::SUB: __ subsd(xmm0, xmm1); break; |
8167 } | 8554 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
8168 break; | 8555 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 8556 default: UNREACHABLE(); |
| 8557 } |
| 8558 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); |
| 8559 __ movq(rax, rbx); |
| 8560 GenerateReturn(masm); |
| 8561 } |
| 8562 default: |
| 8563 break; |
| 8564 } |
| 8565 |
| 8566 // 6. Non-smi operands, fall out to the non-smi code with the operands in |
| 8567 // rdx and rax. |
| 8568 Comment done_comment(masm, "-- Enter non-smi code"); |
| 8569 __ bind(&not_smis);
| 8570 |
| 8571 switch (op_) { |
8169 case Token::DIV: | 8572 case Token::DIV: |
8170 case Token::MOD: | 8573 case Token::MOD: |
8171 break; | 8574 // Operands are in rax, rbx at this point. |
| 8575 __ movq(rdx, rax); |
| 8576 __ movq(rax, rbx); |
| 8577 break; |
| 8578 |
8172 case Token::BIT_OR: | 8579 case Token::BIT_OR: |
8173 answer_object = Smi::FromInt(left | right); | 8580 // Right operand is saved in rcx and rax was destroyed by the smi |
| 8581 // operation. |
| 8582 __ movq(rax, rcx); |
| 8583 break; |
| 8584 |
| 8585 default: |
| 8586 break; |
| 8587 } |
| 8588 } |
| 8589 |
| 8590 |
| 8591 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
| 8592 Label call_runtime; |
| 8593 |
| 8594 if (ShouldGenerateSmiCode()) { |
| 8595 GenerateSmiCode(masm, &call_runtime); |
| 8596 } else if (op_ != Token::MOD) { |
| 8597 if (!HasArgsInRegisters()) { |
| 8598 GenerateLoadArguments(masm); |
| 8599 } |
| 8600 } |
| 8601 // Floating point case. |
| 8602 if (ShouldGenerateFPCode()) { |
| 8603 switch (op_) { |
| 8604 case Token::ADD: |
| 8605 case Token::SUB: |
| 8606 case Token::MUL: |
| 8607 case Token::DIV: { |
| 8608 if (runtime_operands_type_ == BinaryOpIC::DEFAULT && |
| 8609 HasSmiCodeInStub()) { |
| 8610 // Execution reaches this point when the first non-smi argument occurs |
| 8611 // (and only if smi code is generated). This is the right moment to |
| 8612 // patch to HEAP_NUMBERS state. The transition is attempted only for |
| 8613 // the four basic operations. The stub stays in the DEFAULT state |
| 8614 // forever for all other operations (also if smi code is skipped). |
| 8615 GenerateTypeTransition(masm); |
| 8616 break; |
| 8617 } |
| 8618 |
| 8619 Label not_floats; |
| 8620 // rax: y |
| 8621 // rdx: x |
| 8622 if (static_operands_type_.IsNumber()) { |
| 8623 if (FLAG_debug_code) { |
| 8624 // Assert at runtime that inputs are only numbers. |
| 8625 __ AbortIfNotNumber(rdx); |
| 8626 __ AbortIfNotNumber(rax); |
| 8627 } |
| 8628 FloatingPointHelper::LoadSSE2NumberOperands(masm); |
| 8629 } else { |
| 8630 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime); |
| 8631 } |
| 8632 |
| 8633 switch (op_) { |
| 8634 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 8635 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 8636 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 8637 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 8638 default: UNREACHABLE(); |
| 8639 } |
| 8640 // Allocate a heap number, if needed. |
| 8641 Label skip_allocation; |
| 8642 OverwriteMode mode = mode_; |
| 8643 if (HasArgsReversed()) { |
| 8644 if (mode == OVERWRITE_RIGHT) { |
| 8645 mode = OVERWRITE_LEFT; |
| 8646 } else if (mode == OVERWRITE_LEFT) { |
| 8647 mode = OVERWRITE_RIGHT; |
| 8648 } |
| 8649 } |
| 8650 switch (mode) { |
| 8651 case OVERWRITE_LEFT: |
| 8652 __ JumpIfNotSmi(rdx, &skip_allocation); |
| 8653 __ AllocateHeapNumber(rbx, rcx, &call_runtime); |
| 8654 __ movq(rdx, rbx); |
| 8655 __ bind(&skip_allocation); |
| 8656 __ movq(rax, rdx); |
| 8657 break; |
| 8658 case OVERWRITE_RIGHT: |
| 8659 // If the argument in rax is already an object, we skip the |
| 8660 // allocation of a heap number. |
| 8661 __ JumpIfNotSmi(rax, &skip_allocation); |
| 8662 // Fall through! |
| 8663 case NO_OVERWRITE: |
| 8664 // Allocate a heap number for the result. Keep rax and rdx intact |
| 8665 // for the possible runtime call. |
| 8666 __ AllocateHeapNumber(rbx, rcx, &call_runtime); |
| 8667 __ movq(rax, rbx); |
| 8668 __ bind(&skip_allocation); |
| 8669 break; |
| 8670 default: UNREACHABLE(); |
| 8671 } |
| 8672 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 8673 GenerateReturn(masm); |
| 8674 __ bind(&not_floats);
| 8675 if (runtime_operands_type_ == BinaryOpIC::DEFAULT && |
| 8676 !HasSmiCodeInStub()) { |
| 8677 // Execution reaches this point when the first non-number argument |
| 8678 // occurs (and only if smi code is skipped from the stub, otherwise |
| 8679 // the patching has already been done earlier in this case branch). |
| 8680 // A perfect moment to try patching to STRINGS for ADD operation. |
| 8681 if (op_ == Token::ADD) { |
| 8682 GenerateTypeTransition(masm); |
| 8683 } |
| 8684 } |
| 8685 break; |
| 8686 } |
| 8687 case Token::MOD: { |
| 8688 // For MOD we go directly to runtime in the non-smi case. |
| 8689 break; |
| 8690 } |
| 8691 case Token::BIT_OR: |
| 8692 case Token::BIT_AND: |
| 8693 case Token::BIT_XOR: |
| 8694 case Token::SAR: |
| 8695 case Token::SHL: |
| 8696 case Token::SHR: { |
| 8697 Label skip_allocation, non_smi_shr_result; |
| 8698 Register heap_number_map = r9; |
| 8699 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 8700 if (static_operands_type_.IsNumber()) { |
| 8701 if (FLAG_debug_code) { |
| 8702 // Assert at runtime that inputs are only numbers. |
| 8703 __ AbortIfNotNumber(rdx); |
| 8704 __ AbortIfNotNumber(rax); |
| 8705 } |
| 8706 FloatingPointHelper::LoadNumbersAsIntegers(masm); |
| 8707 } else { |
| 8708 FloatingPointHelper::LoadAsIntegers(masm, |
| 8709 &call_runtime, |
| 8710 heap_number_map); |
| 8711 } |
| 8712 switch (op_) { |
| 8713 case Token::BIT_OR: __ orl(rax, rcx); break; |
| 8714 case Token::BIT_AND: __ andl(rax, rcx); break; |
| 8715 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
| 8716 case Token::SAR: __ sarl_cl(rax); break; |
| 8717 case Token::SHL: __ shll_cl(rax); break; |
| 8718 case Token::SHR: { |
| 8719 __ shrl_cl(rax); |
| 8720 // Check if result is negative. This can only happen for a shift |
| 8721 // by zero. |
| 8722 __ testl(rax, rax); |
| 8723 __ j(negative, &non_smi_shr_result); |
| 8724 break; |
| 8725 } |
| 8726 default: UNREACHABLE(); |
| 8727 } |
| 8728 |
| 8729 STATIC_ASSERT(kSmiValueSize == 32); |
| 8730 // Tag smi result and return. |
| 8731 __ Integer32ToSmi(rax, rax); |
| 8732 GenerateReturn(masm); |
| 8733 |
| 8734 // All bit-ops except SHR return a signed int32 that can be |
| 8735 // returned immediately as a smi. |
| 8736 // We might need to allocate a HeapNumber if we shift a negative |
| 8737 // number right by zero (i.e., convert to UInt32). |
| 8738 if (op_ == Token::SHR) { |
| 8739 ASSERT(non_smi_shr_result.is_linked()); |
| 8740 __ bind(&non_smi_shr_result); |
| 8741 // Allocate a heap number if needed. |
| 8742 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
| 8743 switch (mode_) { |
| 8744 case OVERWRITE_LEFT: |
| 8745 case OVERWRITE_RIGHT: |
| 8746 // If the operand was an object, we skip the |
| 8747 // allocation of a heap number. |
| 8748 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ? |
| 8749 1 * kPointerSize : 2 * kPointerSize)); |
| 8750 __ JumpIfNotSmi(rax, &skip_allocation); |
| 8751 // Fall through! |
| 8752 case NO_OVERWRITE: |
| 8753 // Allocate heap number in new space. |
| 8754 // Not using AllocateHeapNumber macro in order to reuse |
| 8755 // already loaded heap_number_map. |
| 8756 __ AllocateInNewSpace(HeapNumber::kSize, |
| 8757 rax, |
| 8758 rcx, |
| 8759 no_reg, |
| 8760 &call_runtime, |
| 8761 TAG_OBJECT); |
| 8762 // Set the map. |
| 8763 if (FLAG_debug_code) { |
| 8764 __ AbortIfNotRootValue(heap_number_map, |
| 8765 Heap::kHeapNumberMapRootIndex, |
| 8766 "HeapNumberMap register clobbered."); |
| 8767 } |
| 8768 __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| 8769 heap_number_map); |
| 8770 __ bind(&skip_allocation); |
| 8771 break; |
| 8772 default: UNREACHABLE(); |
| 8773 } |
| 8774 // Store the result in the HeapNumber and return. |
| 8775 __ cvtqsi2sd(xmm0, rbx); |
| 8776 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 8777 GenerateReturn(masm); |
| 8778 } |
| 8779 |
| 8780 break; |
| 8781 } |
| 8782 default: UNREACHABLE(); break; |
| 8783 } |
| 8784 } |
| 8785 |
| 8786 // If all else fails, use the runtime system to get the correct |
| 8787 // result. If arguments were passed in registers, place them on the
| 8788 // stack now, in the correct order below the return address.
| 8789 __ bind(&call_runtime); |
| 8790 |
| 8791 if (HasArgsInRegisters()) { |
| 8792 GenerateRegisterArgsPush(masm); |
| 8793 } |
| 8794 |
| 8795 switch (op_) { |
| 8796 case Token::ADD: { |
| 8797 // Registers containing left and right operands respectively. |
| 8798 Register lhs, rhs; |
| 8799 |
| 8800 if (HasArgsReversed()) { |
| 8801 lhs = rax; |
| 8802 rhs = rdx; |
| 8803 } else { |
| 8804 lhs = rdx; |
| 8805 rhs = rax; |
| 8806 } |
| 8807 |
| 8808 // Test for string arguments before calling runtime. |
| 8809 Label not_strings, both_strings, not_string1, string1, string1_smi2; |
| 8810 |
| 8811 // If this stub has already generated FP-specific code then the arguments |
| 8812 // are already in rdx and rax. |
| 8813 if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { |
| 8814 GenerateLoadArguments(masm); |
| 8815 } |
| 8816 |
| 8817 Condition is_smi; |
| 8818 is_smi = masm->CheckSmi(lhs); |
| 8819 __ j(is_smi, &not_string1);
| 8820 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8); |
| 8821 __ j(above_equal, &not_string1);
| 8822 |
| 8823 // First argument is a string, test second. |
| 8824 is_smi = masm->CheckSmi(rhs); |
| 8825 __ j(is_smi, &string1_smi2); |
| 8826 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); |
| 8827 __ j(above_equal, &string1); |
| 8828 |
| 8829 // First and second argument are strings. |
| 8830 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
| 8831 __ TailCallStub(&string_add_stub); |
| 8832 |
| 8833 __ bind(&string1_smi2); |
| 8834 // First argument is a string, second is a smi. Try to look up the number |
| 8835 // string for the smi in the number string cache. |
| 8836 NumberToStringStub::GenerateLookupNumberStringCache( |
| 8837 masm, rhs, rbx, rcx, r8, true, &string1); |
| 8838 |
| 8839 // Replace second argument on stack and tailcall string add stub to make |
| 8840 // the result. |
| 8841 __ movq(Operand(rsp, 1 * kPointerSize), rbx); |
| 8842 __ TailCallStub(&string_add_stub); |
| 8843 |
| 8844 // Only first argument is a string. |
| 8845 __ bind(&string1); |
| 8846 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); |
| 8847 |
| 8848 // First argument was not a string, test second. |
| 8849 __ bind(¬_string1); |
| 8850 is_smi = masm->CheckSmi(rhs); |
| 8851 __ j(is_smi, ¬_strings); |
| 8852 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs); |
| 8853 __ j(above_equal, ¬_strings); |
| 8854 |
| 8855 // Only second argument is a string. |
| 8856 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); |
| 8857 |
| 8858 __ bind(¬_strings); |
| 8859 // Neither argument is a string. |
| 8860 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
| 8861 break; |
| 8862 } |
| 8863 case Token::SUB: |
| 8864 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
| 8865 break; |
| 8866 case Token::MUL: |
| 8867 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
| 8868 break; |
| 8869 case Token::DIV: |
| 8870 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
| 8871 break; |
| 8872 case Token::MOD: |
| 8873 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
| 8874 break; |
| 8875 case Token::BIT_OR: |
| 8876 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
8174 break; | 8877 break; |
8175 case Token::BIT_AND: | 8878 case Token::BIT_AND: |
8176 answer_object = Smi::FromInt(left & right); | 8879 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
8177 break; | 8880 break; |
8178 case Token::BIT_XOR: | 8881 case Token::BIT_XOR: |
8179 answer_object = Smi::FromInt(left ^ right); | 8882 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
8180 break; | 8883 break; |
8181 | 8884 case Token::SAR: |
8182 case Token::SHL: { | 8885 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
8183 int shift_amount = right & 0x1F; | 8886 break; |
8184 if (Smi::IsValid(left << shift_amount)) { | 8887 case Token::SHL: |
8185 answer_object = Smi::FromInt(left << shift_amount); | 8888 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
8186 } | 8889 break; |
8187 break; | 8890 case Token::SHR: |
8188 } | 8891 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
8189 case Token::SHR: { | 8892 break; |
8190 int shift_amount = right & 0x1F; | |
8191 unsigned int unsigned_left = left; | |
8192 unsigned_left >>= shift_amount; | |
8193 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) { | |
8194 answer_object = Smi::FromInt(unsigned_left); | |
8195 } | |
8196 break; | |
8197 } | |
8198 case Token::SAR: { | |
8199 int shift_amount = right & 0x1F; | |
8200 unsigned int unsigned_left = left; | |
8201 if (left < 0) { | |
8202 // Perform arithmetic shift of a negative number by | |
8203 // complementing number, logical shifting, complementing again. | |
8204 unsigned_left = ~unsigned_left; | |
8205 unsigned_left >>= shift_amount; | |
8206 unsigned_left = ~unsigned_left; | |
8207 } else { | |
8208 unsigned_left >>= shift_amount; | |
8209 } | |
8210 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left))); | |
8211 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left)); | |
8212 break; | |
8213 } | |
8214 default: | 8893 default: |
8215 UNREACHABLE(); | 8894 UNREACHABLE(); |
8216 break; | 8895 } |
8217 } | 8896 } |
8218 if (answer_object == Heap::undefined_value()) { | 8897 |
8219 return false; | 8898 |
8220 } | 8899 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { |
8221 frame_->Push(Handle<Object>(answer_object)); | 8900 ASSERT(!HasArgsInRegisters()); |
8222 return true; | 8901 __ movq(rax, Operand(rsp, 1 * kPointerSize)); |
8223 } | 8902 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); |
8224 | 8903 } |
8225 | 8904 |
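The constant-folding SAR case in the removed code above handles negative values by complementing, shifting logically, and complementing again. A self-contained C++ check of that identity (illustrative only, not V8 code):

#include <cstdint>

int32_t ArithmeticShiftRight(int32_t left, int shift_amount) {
  uint32_t bits = static_cast<uint32_t>(left);
  if (left < 0) {
    // ~(~x >> n) gives a sign-extending shift using only logical shifts.
    return static_cast<int32_t>(~(~bits >> shift_amount));
  }
  return static_cast<int32_t>(bits >> shift_amount);
}

// ArithmeticShiftRight(-8, 2) == -2, the same result as a sign-extended shift.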
8226 // End of CodeGenerator implementation. | 8905 |
| 8906 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { |
| 8907 // If arguments are not passed in registers, remove them from the stack before |
| 8908 // returning. |
| 8909 if (!HasArgsInRegisters()) { |
| 8910 __ ret(2 * kPointerSize); // Remove both operands |
| 8911 } else { |
| 8912 __ ret(0); |
| 8913 } |
| 8914 } |
| 8915 |
| 8916 |
| 8917 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 8918 ASSERT(HasArgsInRegisters()); |
| 8919 __ pop(rcx); |
| 8920 if (HasArgsReversed()) { |
| 8921 __ push(rax); |
| 8922 __ push(rdx); |
| 8923 } else { |
| 8924 __ push(rdx); |
| 8925 __ push(rax); |
| 8926 } |
| 8927 __ push(rcx); |
| 8928 } |
| 8929 |
| 8930 |
| 8931 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 8932 Label get_result; |
| 8933 |
| 8934 // Ensure the operands are on the stack. |
| 8935 if (HasArgsInRegisters()) { |
| 8936 GenerateRegisterArgsPush(masm); |
| 8937 } |
| 8938 |
| 8939 // Left and right arguments are already on stack. |
| 8940 __ pop(rcx); // Save the return address. |
| 8941 |
| 8942 // Push this stub's key. |
| 8943 __ Push(Smi::FromInt(MinorKey())); |
| 8944 |
| 8945 // Although the operation and the type info are encoded into the key, |
| 8946 // the encoding is opaque, so push them too. |
| 8947 __ Push(Smi::FromInt(op_)); |
| 8948 |
| 8949 __ Push(Smi::FromInt(runtime_operands_type_)); |
| 8950 |
| 8951 __ push(rcx); // The return address. |
| 8952 |
| 8953 // Perform patching to an appropriate fast case and return the result. |
| 8954 __ TailCallExternalReference( |
| 8955 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), |
| 8956 5, |
| 8957 1); |
| 8958 } |
| 8959 |
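For reference, a sketch of the frame that GenerateTypeTransition above hands to IC::kBinaryOp_Patch; the five entries match the argument count passed to TailCallExternalReference, and the byte offsets assume the unreversed operand order and x64's 8-byte kPointerSize:

//   rsp[0]  : return address (rcx, pushed back last)
//   rsp[8]  : runtime_operands_type_ (as a smi)
//   rsp[16] : op_ (as a smi)
//   rsp[24] : MinorKey() (as a smi)
//   rsp[32] : right operand (rax)
//   rsp[40] : left operand (rdx)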
| 8960 |
| 8961 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { |
| 8962 GenericBinaryOpStub stub(key, type_info); |
| 8963 return stub.GetCode(); |
| 8964 } |
| 8965 |
8227 | 8966 |
8228 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 8967 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
8229 // Input on stack: | 8968 // Input on stack: |
8230 // rsp[8]: argument (should be number). | 8969 // rsp[8]: argument (should be number). |
8231 // rsp[0]: return address. | 8970 // rsp[0]: return address. |
8232 Label runtime_call; | 8971 Label runtime_call; |
8233 Label runtime_call_clear_stack; | 8972 Label runtime_call_clear_stack; |
8234 Label input_not_smi; | 8973 Label input_not_smi; |
8235 Label loaded; | 8974 Label loaded; |
8236 // Test that rax is a number. | 8975 // Test that rax is a number. |
(...skipping 257 matching lines...)
8494 // As the then-branch, but move double-value to result before shifting. | 9233 // As the then-branch, but move double-value to result before shifting. |
8495 __ xorl(result, double_value); | 9234 __ xorl(result, double_value); |
8496 __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1)); | 9235 __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1)); |
8497 __ shll_cl(result); | 9236 __ shll_cl(result); |
8498 } | 9237 } |
8499 | 9238 |
8500 __ bind(&done); | 9239 __ bind(&done); |
8501 } | 9240 } |
8502 | 9241 |
8503 | 9242 |
| 9243 // Input: rdx, rax are the left and right objects of a bit op. |
| 9244 // Output: rax, rcx are left and right integers for a bit op. |
| 9245 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { |
| 9246 // Check float operands. |
| 9247 Label done; |
| 9248 Label rax_is_smi; |
| 9249 Label rax_is_object; |
| 9250 Label rdx_is_object; |
| 9251 |
| 9252 __ JumpIfNotSmi(rdx, &rdx_is_object); |
| 9253 __ SmiToInteger32(rdx, rdx); |
| 9254 __ JumpIfSmi(rax, &rax_is_smi); |
| 9255 |
| 9256 __ bind(&rax_is_object); |
| 9257 IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx. |
| 9258 __ jmp(&done); |
| 9259 |
| 9260 __ bind(&rdx_is_object); |
| 9261 IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx. |
| 9262 __ JumpIfNotSmi(rax, &rax_is_object); |
| 9263 __ bind(&rax_is_smi); |
| 9264 __ SmiToInteger32(rcx, rax); |
| 9265 |
| 9266 __ bind(&done); |
| 9267 __ movl(rax, rdx); |
| 9268 } |
| 9269 |
| 9270 |
| 9271 // Input: rdx, rax are the left and right objects of a bit op. |
| 9272 // Output: rax, rcx are left and right integers for a bit op. |
| 9273 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, |
| 9274 Label* conversion_failure, |
| 9275 Register heap_number_map) { |
| 9276 // Check float operands. |
| 9277 Label arg1_is_object, check_undefined_arg1; |
| 9278 Label arg2_is_object, check_undefined_arg2; |
| 9279 Label load_arg2, done; |
| 9280 |
| 9281 __ JumpIfNotSmi(rdx, &arg1_is_object); |
| 9282 __ SmiToInteger32(rdx, rdx); |
| 9283 __ jmp(&load_arg2); |
| 9284 |
| 9285 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
| 9286 __ bind(&check_undefined_arg1); |
| 9287 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); |
| 9288 __ j(not_equal, conversion_failure); |
| 9289 __ movl(rdx, Immediate(0)); |
| 9290 __ jmp(&load_arg2); |
| 9291 |
| 9292 __ bind(&arg1_is_object); |
| 9293 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); |
| 9294 __ j(not_equal, &check_undefined_arg1); |
| 9295 // Get the untagged integer version of the rdx heap number in rdx. |
| 9296 IntegerConvert(masm, rdx, rdx); |
| 9297 |
| 9298 // Here rdx has the untagged integer, rax has a Smi or a heap number. |
| 9299 __ bind(&load_arg2); |
| 9300 // Test if arg2 is a Smi. |
| 9301 __ JumpIfNotSmi(rax, &arg2_is_object); |
| 9302 __ SmiToInteger32(rax, rax); |
| 9303 __ movl(rcx, rax); |
| 9304 __ jmp(&done); |
| 9305 |
| 9306 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
| 9307 __ bind(&check_undefined_arg2); |
| 9308 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
| 9309 __ j(not_equal, conversion_failure); |
| 9310 __ movl(rcx, Immediate(0)); |
| 9311 __ jmp(&done); |
| 9312 |
| 9313 __ bind(&arg2_is_object); |
| 9314 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); |
| 9315 __ j(not_equal, &check_undefined_arg2); |
| 9316 // Get the untagged integer version of the rax heap number in rcx. |
| 9317 IntegerConvert(masm, rcx, rax); |
| 9318 __ bind(&done); |
| 9319 __ movl(rax, rdx); |
| 9320 } |
| 9321 |
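LoadAsIntegers above feeds the bit-op code with 32-bit integers, mapping undefined to zero and truncating heap numbers. A hedged, self-contained C++ sketch of the ECMA-262 ToInt32 semantics that the IntegerConvert path provides (this is the specification behaviour, not V8's actual implementation):

#include <cmath>
#include <cstdint>

int32_t ToInt32(double value) {
  if (std::isnan(value) || std::isinf(value) || value == 0.0) return 0;
  double pos_int = std::trunc(value);                   // sign(v) * floor(|v|)
  double int32bit = std::fmod(pos_int, 4294967296.0);   // modulo 2^32
  if (int32bit < 0) int32bit += 4294967296.0;
  return int32bit >= 2147483648.0
             ? static_cast<int32_t>(int32bit - 4294967296.0)
             : static_cast<int32_t>(int32bit);
}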
| 9322 |
| 9323 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { |
| 9324 __ SmiToInteger32(kScratchRegister, rdx); |
| 9325 __ cvtlsi2sd(xmm0, kScratchRegister); |
| 9326 __ SmiToInteger32(kScratchRegister, rax); |
| 9327 __ cvtlsi2sd(xmm1, kScratchRegister); |
| 9328 } |
| 9329 |
| 9330 |
| 9331 void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) { |
| 9332 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done; |
| 9333 // Load operand in rdx into xmm0. |
| 9334 __ JumpIfSmi(rdx, &load_smi_rdx); |
| 9335 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); |
| 9336 // Load operand in rax into xmm1. |
| 9337 __ JumpIfSmi(rax, &load_smi_rax); |
| 9338 __ bind(&load_nonsmi_rax); |
| 9339 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); |
| 9340 __ jmp(&done); |
| 9341 |
| 9342 __ bind(&load_smi_rdx); |
| 9343 __ SmiToInteger32(kScratchRegister, rdx); |
| 9344 __ cvtlsi2sd(xmm0, kScratchRegister); |
| 9345 __ JumpIfNotSmi(rax, &load_nonsmi_rax); |
| 9346 |
| 9347 __ bind(&load_smi_rax); |
| 9348 __ SmiToInteger32(kScratchRegister, rax); |
| 9349 __ cvtlsi2sd(xmm1, kScratchRegister); |
| 9350 |
| 9351 __ bind(&done); |
| 9352 } |
| 9353 |
| 9354 |
| 9355 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, |
| 9356 Label* not_numbers) { |
| 9357 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; |
| 9358 // Load operand in rdx into xmm0, or branch to not_numbers. |
| 9359 __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex); |
| 9360 __ JumpIfSmi(rdx, &load_smi_rdx); |
| 9361 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx); |
| 9362 __ j(not_equal, not_numbers); // Argument in rdx is not a number. |
| 9363 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); |
| 9364 // Load operand in rax into xmm1, or branch to not_numbers. |
| 9365 __ JumpIfSmi(rax, &load_smi_rax); |
| 9366 |
| 9367 __ bind(&load_nonsmi_rax); |
| 9368 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx); |
| 9369 __ j(not_equal, not_numbers); |
| 9370 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); |
| 9371 __ jmp(&done); |
| 9372 |
| 9373 __ bind(&load_smi_rdx); |
| 9374 __ SmiToInteger32(kScratchRegister, rdx); |
| 9375 __ cvtlsi2sd(xmm0, kScratchRegister); |
| 9376 __ JumpIfNotSmi(rax, &load_nonsmi_rax); |
| 9377 |
| 9378 __ bind(&load_smi_rax); |
| 9379 __ SmiToInteger32(kScratchRegister, rax); |
| 9380 __ cvtlsi2sd(xmm1, kScratchRegister); |
| 9381 __ bind(&done); |
| 9382 } |
| 9383 |
| 9384 |
8504 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { | 9385 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { |
8505 Label slow, done; | 9386 Label slow, done; |
8506 | 9387 |
8507 if (op_ == Token::SUB) { | 9388 if (op_ == Token::SUB) { |
8508 // Check whether the value is a smi. | 9389 // Check whether the value is a smi. |
8509 Label try_float; | 9390 Label try_float; |
8510 __ JumpIfNotSmi(rax, &try_float); | 9391 __ JumpIfNotSmi(rax, &try_float); |
8511 | 9392 |
8512 if (negative_zero_ == kIgnoreNegativeZero) { | 9393 if (negative_zero_ == kIgnoreNegativeZero) { |
8513 __ SmiCompare(rax, Smi::FromInt(0)); | 9394 __ SmiCompare(rax, Smi::FromInt(0)); |
(...skipping 64 matching lines...)
8578 break; | 9459 break; |
8579 case Token::BIT_NOT: | 9460 case Token::BIT_NOT: |
8580 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); | 9461 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
8581 break; | 9462 break; |
8582 default: | 9463 default: |
8583 UNREACHABLE(); | 9464 UNREACHABLE(); |
8584 } | 9465 } |
8585 } | 9466 } |
8586 | 9467 |
8587 | 9468 |
| 9469 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
| 9470 // The key is in rdx and the parameter count is in rax. |
| 9471 |
| 9472 // The displacement is used for skipping the frame pointer on the |
| 9473 // stack. It is the offset of the last parameter (if any) relative |
| 9474 // to the frame pointer. |
| 9475 static const int kDisplacement = 1 * kPointerSize; |
| 9476 |
| 9477 // Check that the key is a smi. |
| 9478 Label slow; |
| 9479 __ JumpIfNotSmi(rdx, &slow); |
| 9480 |
| 9481 // Check if the calling frame is an arguments adaptor frame. |
| 9482 Label adaptor; |
| 9483 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| 9484 __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset), |
| 9485 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
| 9486 __ j(equal, &adaptor); |
| 9487 |
| 9488 // Check index against formal parameters count limit passed in |
| 9489 // through register rax. Use unsigned comparison to get negative |
| 9490 // check for free. |
| 9491 __ cmpq(rdx, rax); |
| 9492 __ j(above_equal, &slow); |
| 9493 |
| 9494 // Read the argument from the stack and return it. |
| 9495 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); |
| 9496 __ lea(rbx, Operand(rbp, index.reg, index.scale, 0)); |
| 9497 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); |
| 9498 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); |
| 9499 __ Ret(); |
| 9500 |
| 9501 // Arguments adaptor case: Check index against actual arguments |
| 9502 // limit found in the arguments adaptor frame. Use unsigned |
| 9503 // comparison to get negative check for free. |
| 9504 __ bind(&adaptor); |
| 9505 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 9506 __ cmpq(rdx, rcx); |
| 9507 __ j(above_equal, &slow); |
| 9508 |
| 9509 // Read the argument from the stack and return it. |
| 9510 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); |
| 9511 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); |
| 9512 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); |
| 9513 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); |
| 9514 __ Ret(); |
| 9515 |
| 9516 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
| 9517 // by calling the runtime system. |
| 9518 __ bind(&slow); |
| 9519 __ pop(rbx); // Return address. |
| 9520 __ push(rdx); |
| 9521 __ push(rbx); |
| 9522 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
| 9523 } |
| 9524 |
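The two SmiToIndex/SmiToNegativeIndex steps in the fast path above fold into one effective address. Under the standard JS frame layout (return address at rbp + kPointerSize, parameters above it), the element read amounts to:

//   arguments[key] is loaded from
//     rbp + kDisplacement + (parameter_count - key) * kPointerSize
//   i.e. rbx = rbp + parameter_count * 8, then read rbx - key * 8 + 8.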
| 9525 |
| 9526 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { |
| 9527 // rsp[0] : return address |
| 9528 // rsp[8] : number of parameters |
| 9529 // rsp[16] : receiver displacement |
| 9530 // rsp[24] : function |
| 9531 |
| 9532 // The displacement is used for skipping the return address and the |
| 9533 // frame pointer on the stack. It is the offset of the last |
| 9534 // parameter (if any) relative to the frame pointer. |
| 9535 static const int kDisplacement = 2 * kPointerSize; |
| 9536 |
| 9537 // Check if the calling frame is an arguments adaptor frame. |
| 9538 Label adaptor_frame, try_allocate, runtime; |
| 9539 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| 9540 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset), |
| 9541 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
| 9542 __ j(equal, &adaptor_frame); |
| 9543 |
| 9544 // Get the length from the frame. |
| 9545 __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); |
| 9546 __ jmp(&try_allocate); |
| 9547 |
| 9548 // Patch the arguments.length and the parameters pointer. |
| 9549 __ bind(&adaptor_frame); |
| 9550 __ SmiToInteger32(rcx, |
| 9551 Operand(rdx, |
| 9552 ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 9553 // Space on stack must already hold a smi. |
| 9554 __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx); |
| 9555 // Do not clobber the length index for the indexing operation since |
| 9556 // it is used to compute the size for allocation later. |
| 9557 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement)); |
| 9558 __ movq(Operand(rsp, 2 * kPointerSize), rdx); |
| 9559 |
| 9560 // Try the new space allocation. Start out with computing the size of |
| 9561 // the arguments object and the elements array. |
| 9562 Label add_arguments_object; |
| 9563 __ bind(&try_allocate); |
| 9564 __ testl(rcx, rcx); |
| 9565 __ j(zero, &add_arguments_object); |
| 9566 __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); |
| 9567 __ bind(&add_arguments_object); |
| 9568 __ addl(rcx, Immediate(Heap::kArgumentsObjectSize)); |
| 9569 |
| 9570 // Do the allocation of both objects in one go. |
| 9571 __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); |
| 9572 |
| 9573 // Get the arguments boilerplate from the current (global) context. |
| 9574 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); |
| 9575 __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 9576 __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset)); |
| 9577 __ movq(rdi, Operand(rdi, offset)); |
| 9578 |
| 9579 // Copy the JS object part. |
| 9580 STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); |
| 9581 __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize)); |
| 9582 __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize)); |
| 9583 __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize)); |
| 9584 __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister); |
| 9585 __ movq(FieldOperand(rax, 1 * kPointerSize), rdx); |
| 9586 __ movq(FieldOperand(rax, 2 * kPointerSize), rbx); |
| 9587 |
| 9588 // Setup the callee in-object property. |
| 9589 ASSERT(Heap::arguments_callee_index == 0); |
| 9590 __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize)); |
| 9591 __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister); |
| 9592 |
| 9593 // Get the length (smi tagged) and set that as an in-object property too. |
| 9594 ASSERT(Heap::arguments_length_index == 1); |
| 9595 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); |
| 9596 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); |
| 9597 |
| 9598 // If there are no actual arguments, we're done. |
| 9599 Label done; |
| 9600 __ SmiTest(rcx); |
| 9601 __ j(zero, &done); |
| 9602 |
| 9603 // Get the parameters pointer from the stack and untag the length. |
| 9604 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); |
| 9605 |
| 9606 // Setup the elements pointer in the allocated arguments object and |
| 9607 // initialize the header in the elements fixed array. |
| 9608 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); |
| 9609 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); |
| 9610 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); |
| 9611 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); |
| 9612 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); |
| 9613 __ SmiToInteger32(rcx, rcx); // Untag length for the loop below. |
| 9614 |
| 9615 // Copy the fixed array slots. |
| 9616 Label loop; |
| 9617 __ bind(&loop); |
| 9618 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. |
| 9619 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); |
| 9620 __ addq(rdi, Immediate(kPointerSize)); |
| 9621 __ subq(rdx, Immediate(kPointerSize)); |
| 9622 __ decl(rcx); |
| 9623 __ j(not_zero, &loop); |
| 9624 |
| 9625 // Return and remove the on-stack parameters. |
| 9626 __ bind(&done); |
| 9627 __ ret(3 * kPointerSize); |
| 9628 |
| 9629 // Do the runtime call to allocate the arguments object. |
| 9630 __ bind(&runtime); |
| 9631 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
| 9632 } |
| 9633 |
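A minimal sketch of the size computation performed just before the single AllocateInNewSpace call above; parameter names are stand-ins for the V8 constants used in the stub:

int ArgumentsAllocationSize(int argc, int pointer_size,
                            int fixed_array_header_size,
                            int arguments_object_size) {
  int size = 0;
  if (argc > 0) {
    // Elements backing store: FixedArray header plus one slot per argument.
    size += fixed_array_header_size + argc * pointer_size;
  }
  // JSObject header plus the callee and length in-object properties.
  return size + arguments_object_size;
}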
| 9634 |
8588 void RegExpExecStub::Generate(MacroAssembler* masm) { | 9635 void RegExpExecStub::Generate(MacroAssembler* masm) { |
8589 // Just jump directly to runtime if native RegExp is not selected at compile | 9636 // Just jump directly to runtime if native RegExp is not selected at compile |
8590 // time, or if the regexp entry in generated code is turned off by a runtime switch or | 9637 // time, or if the regexp entry in generated code is turned off by a runtime switch or |
8591 // at compilation. | 9638 // at compilation. |
8592 #ifdef V8_INTERPRETED_REGEXP | 9639 #ifdef V8_INTERPRETED_REGEXP |
8593 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 9640 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
8594 #else // V8_INTERPRETED_REGEXP | 9641 #else // V8_INTERPRETED_REGEXP |
8595 if (!FLAG_regexp_entry_native) { | 9642 if (!FLAG_regexp_entry_native) { |
8596 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 9643 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
8597 return; | 9644 return; |
(...skipping 327 matching lines...)
8925 __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); | 9972 __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); |
8926 __ ret(4 * kPointerSize); | 9973 __ ret(4 * kPointerSize); |
8927 | 9974 |
8928 // Do the runtime call to execute the regexp. | 9975 // Do the runtime call to execute the regexp. |
8929 __ bind(&runtime); | 9976 __ bind(&runtime); |
8930 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 9977 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
8931 #endif // V8_INTERPRETED_REGEXP | 9978 #endif // V8_INTERPRETED_REGEXP |
8932 } | 9979 } |
8933 | 9980 |
8934 | 9981 |
8935 void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm, | |
8936 Register hash, | |
8937 Register mask) { | |
8938 __ and_(hash, mask); | |
8939 // Each entry in string cache consists of two pointer sized fields, | |
8940 // but times_twice_pointer_size (multiplication by 16) scale factor | |
8941 // is not supported by addrmode on x64 platform. | |
8942 // So we have to premultiply entry index before lookup. | |
8943 __ shl(hash, Immediate(kPointerSizeLog2 + 1)); | |
8944 } | |
8945 | |
8946 | |
8947 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, | 9982 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
8948 Register object, | 9983 Register object, |
8949 Register result, | 9984 Register result, |
8950 Register scratch1, | 9985 Register scratch1, |
8951 Register scratch2, | 9986 Register scratch2, |
8952 bool object_is_smi, | 9987 bool object_is_smi, |
8953 Label* not_found) { | 9988 Label* not_found) { |
8954 // Use of registers. Register result is used as a temporary. | 9989 // Use of registers. Register result is used as a temporary. |
8955 Register number_string_cache = result; | 9990 Register number_string_cache = result; |
8956 Register mask = scratch1; | 9991 Register mask = scratch1; |
(...skipping 59 matching lines...)
9016 __ bind(&load_result_from_cache); | 10051 __ bind(&load_result_from_cache); |
9017 __ movq(result, | 10052 __ movq(result, |
9018 FieldOperand(number_string_cache, | 10053 FieldOperand(number_string_cache, |
9019 index, | 10054 index, |
9020 times_1, | 10055 times_1, |
9021 FixedArray::kHeaderSize + kPointerSize)); | 10056 FixedArray::kHeaderSize + kPointerSize)); |
9022 __ IncrementCounter(&Counters::number_to_string_native, 1); | 10057 __ IncrementCounter(&Counters::number_to_string_native, 1); |
9023 } | 10058 } |
9024 | 10059 |
9025 | 10060 |
| 10061 void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm, |
| 10062 Register hash, |
| 10063 Register mask) { |
| 10064 __ and_(hash, mask); |
| 10065 // Each entry in string cache consists of two pointer sized fields, |
| 10066 // but times_twice_pointer_size (multiplication by 16) scale factor |
| 10067 // is not supported by addrmode on x64 platform. |
| 10068 // So we have to premultiply entry index before lookup. |
| 10069 __ shl(hash, Immediate(kPointerSizeLog2 + 1)); |
| 10070 } |
| 10071 |
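The pre-shift above exists because each number-string-cache entry spans two pointers (16 bytes on x64) while x64 addressing modes only scale by 1, 2, 4, or 8. A tiny sketch of the byte offset it computes, with kPointerSizeLog2 == 3 on x64:

int CacheEntryByteOffset(int hash, int mask, int pointer_size_log2) {
  int index = hash & mask;                  // entry index within the cache
  return index << (pointer_size_log2 + 1);  // index * 16 == byte offset
}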
| 10072 |
9026 void NumberToStringStub::Generate(MacroAssembler* masm) { | 10073 void NumberToStringStub::Generate(MacroAssembler* masm) { |
9027 Label runtime; | 10074 Label runtime; |
9028 | 10075 |
9029 __ movq(rbx, Operand(rsp, kPointerSize)); | 10076 __ movq(rbx, Operand(rsp, kPointerSize)); |
9030 | 10077 |
9031 // Generate code to lookup number in the number string cache. | 10078 // Generate code to lookup number in the number string cache. |
9032 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime); | 10079 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime); |
9033 __ ret(1 * kPointerSize); | 10080 __ ret(1 * kPointerSize); |
9034 | 10081 |
9035 __ bind(&runtime); | 10082 __ bind(&runtime); |
9036 // Handle number to string in the runtime system if not found in the cache. | 10083 // Handle number to string in the runtime system if not found in the cache. |
9037 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); | 10084 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); |
9038 } | 10085 } |
9039 | 10086 |
9040 | 10087 |
9041 void RecordWriteStub::Generate(MacroAssembler* masm) { | |
9042 masm->RecordWriteHelper(object_, addr_, scratch_); | |
9043 masm->ret(0); | |
9044 } | |
9045 | |
9046 | |
9047 static int NegativeComparisonResult(Condition cc) { | 10088 static int NegativeComparisonResult(Condition cc) { |
9048 ASSERT(cc != equal); | 10089 ASSERT(cc != equal); |
9049 ASSERT((cc == less) || (cc == less_equal) | 10090 ASSERT((cc == less) || (cc == less_equal) |
9050 || (cc == greater) || (cc == greater_equal)); | 10091 || (cc == greater) || (cc == greater_equal)); |
9051 return (cc == greater || cc == greater_equal) ? LESS : GREATER; | 10092 return (cc == greater || cc == greater_equal) ? LESS : GREATER; |
9052 } | 10093 } |
9053 | 10094 |
9054 | 10095 |
9055 void CompareStub::Generate(MacroAssembler* masm) { | 10096 void CompareStub::Generate(MacroAssembler* masm) { |
9056 Label check_unequal_objects, done; | 10097 Label check_unequal_objects, done; |
(...skipping 255 matching lines...)
9312 __ movzxbq(scratch, | 10353 __ movzxbq(scratch, |
9313 FieldOperand(scratch, Map::kInstanceTypeOffset)); | 10354 FieldOperand(scratch, Map::kInstanceTypeOffset)); |
9314 // Ensure that no non-strings have the symbol bit set. | 10355 // Ensure that no non-strings have the symbol bit set. |
9315 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); | 10356 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); |
9316 ASSERT(kSymbolTag != 0); | 10357 ASSERT(kSymbolTag != 0); |
9317 __ testb(scratch, Immediate(kIsSymbolMask)); | 10358 __ testb(scratch, Immediate(kIsSymbolMask)); |
9318 __ j(zero, label); | 10359 __ j(zero, label); |
9319 } | 10360 } |
9320 | 10361 |
9321 | 10362 |
9322 // Call the function just below TOS on the stack with the given | 10363 void StackCheckStub::Generate(MacroAssembler* masm) { |
9323 // arguments. The receiver is the TOS. | 10364 // Because builtins always remove the receiver from the stack, we |
9324 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, | 10365 // have to fake one to avoid underflowing the stack. The receiver |
9325 CallFunctionFlags flags, | 10366 // must be inserted below the return address on the stack so we |
9326 int position) { | 10367 // temporarily store that in a register. |
9327 // Push the arguments ("left-to-right") on the stack. | 10368 __ pop(rax); |
9328 int arg_count = args->length(); | 10369 __ Push(Smi::FromInt(0)); |
9329 for (int i = 0; i < arg_count; i++) { | 10370 __ push(rax); |
9330 Load(args->at(i)); | |
9331 frame_->SpillTop(); | |
9332 } | |
9333 | 10371 |
9334 // Record the position for debugging purposes. | 10372 // Do tail-call to runtime routine. |
9335 CodeForSourcePosition(position); | 10373 __ TailCallRuntime(Runtime::kStackGuard, 1, 1); |
9336 | |
9337 // Use the shared code stub to call the function. | |
9338 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; | |
9339 CallFunctionStub call_function(arg_count, in_loop, flags); | |
9340 Result answer = frame_->CallStub(&call_function, arg_count + 1); | |
9341 // Restore context and replace function on the stack with the | |
9342 // result of the stub invocation. | |
9343 frame_->RestoreContextRegister(); | |
9344 frame_->SetElementAt(0, &answer); | |
9345 } | 10374 } |
9346 | 10375 |
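A sketch of the stack rewrite StackCheckStub performs above so that Runtime::kStackGuard, which like every builtin pops a receiver, finds one to remove:

//   before:  rsp[0] return address, rsp[8] caller data ...
//   after:   rsp[0] return address, rsp[8] Smi(0) fake receiver, rsp[16] caller data ...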
9347 | 10376 |
9348 void InstanceofStub::Generate(MacroAssembler* masm) { | 10377 void CallFunctionStub::Generate(MacroAssembler* masm) { |
9349 // Implements "value instanceof function" operator. | 10378 Label slow; |
9350 // Expected input state: | |
9351 // rsp[0] : return address | |
9352 // rsp[8] : function pointer | |
9353 // rsp[16] : value | |
9354 // Returns a bitwise zero to indicate that the value | |
9355 // is an instance of the function and anything else to | |
9356 // indicate that the value is not an instance. | |
9357 | 10379 |
9358 // Get the object - go slow case if it's a smi. | 10380 // If the receiver might be a value (string, number or boolean) check for this |
9359 Label slow; | 10381 // and box it if it is. |
9360 __ movq(rax, Operand(rsp, 2 * kPointerSize)); | 10382 if (ReceiverMightBeValue()) { |
9361 __ JumpIfSmi(rax, &slow); | 10383 // Get the receiver from the stack. |
| 10384 // +1 ~ return address |
| 10385 Label receiver_is_value, receiver_is_js_object; |
| 10386 __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize)); |
9362 | 10387 |
9363 // Check that the left hand is a JS object. Leave its map in rax. | 10388 // Check if receiver is a smi (which is a number value). |
9364 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); | 10389 __ JumpIfSmi(rax, &receiver_is_value); |
9365 __ j(below, &slow); | |
9366 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE); | |
9367 __ j(above, &slow); | |
9368 | 10390 |
9369 // Get the prototype of the function. | 10391 // Check if the receiver is a valid JS object. |
9370 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); | 10392 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi); |
9371 // rdx is function, rax is map. | 10393 __ j(above_equal, &receiver_is_js_object); |
9372 | 10394 |
9373 // Look up the function and the map in the instanceof cache. | 10395 // Call the runtime to box the value. |
9374 Label miss; | 10396 __ bind(&receiver_is_value); |
9375 __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); | 10397 __ EnterInternalFrame(); |
9376 __ j(not_equal, &miss); | 10398 __ push(rax); |
9377 __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex); | 10399 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
9378 __ j(not_equal, &miss); | 10400 __ LeaveInternalFrame(); |
9379 __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); | 10401 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax); |
9380 __ ret(2 * kPointerSize); | |
9381 | 10402 |
9382 __ bind(&miss); | 10403 __ bind(&receiver_is_js_object); |
9383 __ TryGetFunctionPrototype(rdx, rbx, &slow); | 10404 } |
9384 | 10405 |
9385 // Check that the function prototype is a JS object. | 10406 // Get the function to call from the stack. |
9386 __ JumpIfSmi(rbx, &slow); | 10407 // +2 ~ receiver, return address |
9387 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister); | 10408 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize)); |
9388 __ j(below, &slow); | |
9389 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); | |
9390 __ j(above, &slow); | |
9391 | 10409 |
9392 // Register mapping: | 10410 // Check that the function really is a JavaScript function. |
9393 // rax is object map. | 10411 __ JumpIfSmi(rdi, &slow); |
9394 // rdx is function. | 10412 // Go to the slow case if we do not have a function. |
9395 // rbx is function prototype. | 10413 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); |
9396 __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); | 10414 __ j(not_equal, &slow); |
9397 __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); | |
9398 | 10415 |
9399 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset)); | 10416 // Fast-case: Just invoke the function. |
| 10417 ParameterCount actual(argc_); |
| 10418 __ InvokeFunction(rdi, actual, JUMP_FUNCTION); |
9400 | 10419 |
9401 // Loop through the prototype chain looking for the function prototype. | 10420 // Slow-case: Non-function called. |
9402 Label loop, is_instance, is_not_instance; | |
9403 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex); | |
9404 __ bind(&loop); | |
9405 __ cmpq(rcx, rbx); | |
9406 __ j(equal, &is_instance); | |
9407 __ cmpq(rcx, kScratchRegister); | |
9408 // The code at is_not_instance assumes that kScratchRegister contains a | |
9409 // non-zero GCable value (the null object in this case). | |
9410 __ j(equal, &is_not_instance); | |
9411 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset)); | |
9412 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset)); | |
9413 __ jmp(&loop); | |
9414 | |
9415 __ bind(&is_instance); | |
9416 __ xorl(rax, rax); | |
9417 // Store bitwise zero in the cache. This is a Smi in GC terms. | |
9418 ASSERT_EQ(0, kSmiTag); | |
9419 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); | |
9420 __ ret(2 * kPointerSize); | |
9421 | |
9422 __ bind(&is_not_instance); | |
9423 // We have to store a non-zero value in the cache. | |
9424 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); | |
9425 __ ret(2 * kPointerSize); | |
9426 | |
9427 // Slow-case: Go through the JavaScript implementation. | |
9428 __ bind(&slow); | 10421 __ bind(&slow); |
9429 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 10422 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead |
9430 } | 10423 // of the original receiver from the call site). |
9431 | 10424 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi); |
9432 | 10425 __ Set(rax, argc_); |
9433 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { | 10426 __ Set(rbx, 0); |
9434 // rsp[0] : return address | 10427 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); |
9435 // rsp[8] : number of parameters | 10428 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); |
9436 // rsp[16] : receiver displacement | 10429 __ Jump(adaptor, RelocInfo::CODE_TARGET); |
9437 // rsp[24] : function | |
9438 | |
9439 // The displacement is used for skipping the return address and the | |
9440 // frame pointer on the stack. It is the offset of the last | |
9441 // parameter (if any) relative to the frame pointer. | |
9442 static const int kDisplacement = 2 * kPointerSize; | |
9443 | |
9444 // Check if the calling frame is an arguments adaptor frame. | |
9445 Label adaptor_frame, try_allocate, runtime; | |
9446 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | |
9447 __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset), | |
9448 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
9449 __ j(equal, &adaptor_frame); | |
9450 | |
9451 // Get the length from the frame. | |
9452 __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); | |
9453 __ jmp(&try_allocate); | |
9454 | |
9455 // Patch the arguments.length and the parameters pointer. | |
9456 __ bind(&adaptor_frame); | |
9457 __ SmiToInteger32(rcx, | |
9458 Operand(rdx, | |
9459 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
9460 // Space on stack must already hold a smi. | |
9461 __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx); | |
9462 // Do not clobber the length index for the indexing operation since | |
9463 // it is used to compute the size for allocation later. | |
9464 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement)); | |
9465 __ movq(Operand(rsp, 2 * kPointerSize), rdx); | |
9466 | |
9467 // Try the new space allocation. Start out with computing the size of | |
9468 // the arguments object and the elements array. | |
9469 Label add_arguments_object; | |
9470 __ bind(&try_allocate); | |
9471 __ testl(rcx, rcx); | |
9472 __ j(zero, &add_arguments_object); | |
9473 __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); | |
9474 __ bind(&add_arguments_object); | |
9475 __ addl(rcx, Immediate(Heap::kArgumentsObjectSize)); | |
9476 | |
9477 // Do the allocation of both objects in one go. | |
9478 __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); | |
9479 | |
9480 // Get the arguments boilerplate from the current (global) context. | |
9481 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); | |
9482 __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
9483 __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset)); | |
9484 __ movq(rdi, Operand(rdi, offset)); | |
9485 | |
9486 // Copy the JS object part. | |
9487 STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); | |
9488 __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize)); | |
9489 __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize)); | |
9490 __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize)); | |
9491 __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister); | |
9492 __ movq(FieldOperand(rax, 1 * kPointerSize), rdx); | |
9493 __ movq(FieldOperand(rax, 2 * kPointerSize), rbx); | |
9494 | |
9495 // Setup the callee in-object property. | |
9496 ASSERT(Heap::arguments_callee_index == 0); | |
9497 __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize)); | |
9498 __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister); | |
9499 | |
9500 // Get the length (smi tagged) and set that as an in-object property too. | |
9501 ASSERT(Heap::arguments_length_index == 1); | |
9502 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | |
9503 __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx); | |
9504 | |
9505 // If there are no actual arguments, we're done. | |
9506 Label done; | |
9507 __ SmiTest(rcx); | |
9508 __ j(zero, &done); | |
9509 | |
9510 // Get the parameters pointer from the stack and untag the length. | |
9511 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | |
9512 | |
9513 // Setup the elements pointer in the allocated arguments object and | |
9514 // initialize the header in the elements fixed array. | |
9515 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize)); | |
9516 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); | |
9517 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); | |
9518 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); | |
9519 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); | |
9520 __ SmiToInteger32(rcx, rcx); // Untag length for the loop below. | |
9521 | |
9522 // Copy the fixed array slots. | |
9523 Label loop; | |
9524 __ bind(&loop); | |
9525 __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver. | |
9526 __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister); | |
9527 __ addq(rdi, Immediate(kPointerSize)); | |
9528 __ subq(rdx, Immediate(kPointerSize)); | |
9529 __ decl(rcx); | |
9530 __ j(not_zero, &loop); | |
9531 | |
9532 // Return and remove the on-stack parameters. | |
9533 __ bind(&done); | |
9534 __ ret(3 * kPointerSize); | |
9535 | |
9536 // Do the runtime call to allocate the arguments object. | |
9537 __ bind(&runtime); | |
9538 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | |
9539 } | |
9540 | |
9541 | |
9542 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | |
9543 // The key is in rdx and the parameter count is in rax. | |
9544 | |
9545 // The displacement is used for skipping the frame pointer on the | |
9546 // stack. It is the offset of the last parameter (if any) relative | |
9547 // to the frame pointer. | |
9548 static const int kDisplacement = 1 * kPointerSize; | |
9549 | |
9550 // Check that the key is a smi. | |
9551 Label slow; | |
9552 __ JumpIfNotSmi(rdx, &slow); | |
9553 | |
9554 // Check if the calling frame is an arguments adaptor frame. | |
9555 Label adaptor; | |
9556 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | |
9557 __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset), | |
9558 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
9559 __ j(equal, &adaptor); | |
9560 | |
9561 // Check index against formal parameters count limit passed in | |
9562 // through register rax. Use unsigned comparison to get negative | |
9563 // check for free. | |
9564 __ cmpq(rdx, rax); | |
9565 __ j(above_equal, &slow); | |
9566 | |
9567 // Read the argument from the stack and return it. | |
9568 SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); | |
9569 __ lea(rbx, Operand(rbp, index.reg, index.scale, 0)); | |
9570 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); | |
9571 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); | |
9572 __ Ret(); | |
9573 | |
9574 // Arguments adaptor case: Check index against actual arguments | |
9575 // limit found in the arguments adaptor frame. Use unsigned | |
9576 // comparison to get negative check for free. | |
9577 __ bind(&adaptor); | |
9578 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
9579 __ cmpq(rdx, rcx); | |
9580 __ j(above_equal, &slow); | |
9581 | |
9582 // Read the argument from the stack and return it. | |
9583 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); | |
9584 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); | |
9585 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); | |
9586 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); | |
9587 __ Ret(); | |
9588 | |
9589 // Slow-case: Handle non-smi or out-of-bounds access to arguments | |
9590 // by calling the runtime system. | |
9591 __ bind(&slow); | |
9592 __ pop(rbx); // Return address. | |
9593 __ push(rdx); | |
9594 __ push(rbx); | |
9595 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | |
9596 } | 10430 } |
9597 | 10431 |
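One JS-level situation the boxing path in CallFunctionStub above exists for (illustrative only):

//   var n = 42;      // a smi receiver, so the JumpIfSmi check takes the boxing path
//   n.toString();    // TO_OBJECT wraps 42 so the callee sees an object receiver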
9598 | 10432 |
9599 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { | 10433 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { |
9600 // Check that the stack contains the next handler, frame pointer, state and | 10434 // Check that the stack contains the next handler, frame pointer, state and |
9601 // return address in that order. | 10435 // return address in that order. |
9602 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize, | 10436 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize, |
9603 StackHandlerConstants::kStateOffset); | 10437 StackHandlerConstants::kStateOffset); |
9604 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize, | 10438 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize, |
9605 StackHandlerConstants::kPCOffset); | 10439 StackHandlerConstants::kPCOffset); |
(...skipping 12 matching lines...)
9618 __ xor_(rsi, rsi); // tentatively set context pointer to NULL | 10452 __ xor_(rsi, rsi); // tentatively set context pointer to NULL |
9619 Label skip; | 10453 Label skip; |
9620 __ cmpq(rbp, Immediate(0)); | 10454 __ cmpq(rbp, Immediate(0)); |
9621 __ j(equal, &skip); | 10455 __ j(equal, &skip); |
9622 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); | 10456 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
9623 __ bind(&skip); | 10457 __ bind(&skip); |
9624 __ ret(0); | 10458 __ ret(0); |
9625 } | 10459 } |
9626 | 10460 |
9627 | 10461 |
| 10462 void ApiGetterEntryStub::Generate(MacroAssembler* masm) { |
| 10463 UNREACHABLE(); |
| 10464 } |
| 10465 |
| 10466 |
9628 void CEntryStub::GenerateCore(MacroAssembler* masm, | 10467 void CEntryStub::GenerateCore(MacroAssembler* masm, |
9629 Label* throw_normal_exception, | 10468 Label* throw_normal_exception, |
9630 Label* throw_termination_exception, | 10469 Label* throw_termination_exception, |
9631 Label* throw_out_of_memory_exception, | 10470 Label* throw_out_of_memory_exception, |
9632 bool do_gc, | 10471 bool do_gc, |
9633 bool always_allocate_scope, | 10472 bool always_allocate_scope, |
9634 int /* alignment_skew */) { | 10473 int /* alignment_skew */) { |
9635 // rax: result parameter for PerformGC, if any. | 10474 // rax: result parameter for PerformGC, if any. |
9636 // rbx: pointer to C function (C callee-saved). | 10475 // rbx: pointer to C function (C callee-saved). |
9637 // rbp: frame pointer (restored after C call). | 10476 // rbp: frame pointer (restored after C call). |
(...skipping 170 matching lines...)
9808 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize, | 10647 ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize, |
9809 StackHandlerConstants::kStateOffset); | 10648 StackHandlerConstants::kStateOffset); |
9810 __ pop(rdx); // State | 10649 __ pop(rdx); // State |
9811 | 10650 |
9812 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize, | 10651 ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize, |
9813 StackHandlerConstants::kPCOffset); | 10652 StackHandlerConstants::kPCOffset); |
9814 __ ret(0); | 10653 __ ret(0); |
9815 } | 10654 } |
9816 | 10655 |
9817 | 10656 |
9818 void CallFunctionStub::Generate(MacroAssembler* masm) { | |
9819 Label slow; | |
9820 | |
9821 // If the receiver might be a value (string, number or boolean) check for this | |
9822 // and box it if it is. | |
9823 if (ReceiverMightBeValue()) { | |
9824 // Get the receiver from the stack. | |
9825 // +1 ~ return address | |
9826 Label receiver_is_value, receiver_is_js_object; | |
9827 __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize)); | |
9828 | |
9829 // Check if receiver is a smi (which is a number value). | |
9830 __ JumpIfSmi(rax, &receiver_is_value); | |
9831 | |
9832 // Check if the receiver is a valid JS object. | |
9833 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi); | |
9834 __ j(above_equal, &receiver_is_js_object); | |
9835 | |
9836 // Call the runtime to box the value. | |
9837 __ bind(&receiver_is_value); | |
9838 __ EnterInternalFrame(); | |
9839 __ push(rax); | |
9840 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); | |
9841 __ LeaveInternalFrame(); | |
9842 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax); | |
9843 | |
9844 __ bind(&receiver_is_js_object); | |
9845 } | |
9846 | |
9847 // Get the function to call from the stack. | |
9848 // +2 ~ receiver, return address | |
9849 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize)); | |
9850 | |
9851 // Check that the function really is a JavaScript function. | |
9852 __ JumpIfSmi(rdi, &slow); | |
9853 // Go to the slow case if we do not have a function. | |
9854 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); | |
9855 __ j(not_equal, &slow); | |
9856 | |
9857 // Fast-case: Just invoke the function. | |
9858 ParameterCount actual(argc_); | |
9859 __ InvokeFunction(rdi, actual, JUMP_FUNCTION); | |
9860 | |
9861 // Slow-case: Non-function called. | |
9862 __ bind(&slow); | |
9863 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | |
9864 // of the original receiver from the call site). | |
9865 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi); | |
9866 __ Set(rax, argc_); | |
9867 __ Set(rbx, 0); | |
9868 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); | |
9869 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); | |
9870 __ Jump(adaptor, RelocInfo::CODE_TARGET); | |
9871 } | |
9872 | |
9873 | |
9874 void CEntryStub::Generate(MacroAssembler* masm) { | 10657 void CEntryStub::Generate(MacroAssembler* masm) { |
9875 // rax: number of arguments including receiver | 10658 // rax: number of arguments including receiver |
9876 // rbx: pointer to C function (C callee-saved) | 10659 // rbx: pointer to C function (C callee-saved) |
9877 // rbp: frame pointer of calling JS frame (restored after C call) | 10660 // rbp: frame pointer of calling JS frame (restored after C call) |
9878 // rsp: stack pointer (restored after C call) | 10661 // rsp: stack pointer (restored after C call) |
9879 // rsi: current context (restored) | 10662 // rsi: current context (restored) |
9880 | 10663 |
9881 // NOTE: Invocations of builtins may return failure objects | 10664 // NOTE: Invocations of builtins may return failure objects |
9882 // instead of a proper result. The builtin entry handles | 10665 // instead of a proper result. The builtin entry handles |
9883 // this by performing a garbage collection and retrying the | 10666 // this by performing a garbage collection and retrying the |
(...skipping 48 matching lines...)
9932 GenerateThrowUncatchable(masm, OUT_OF_MEMORY); | 10715 GenerateThrowUncatchable(masm, OUT_OF_MEMORY); |
9933 | 10716 |
9934 __ bind(&throw_termination_exception); | 10717 __ bind(&throw_termination_exception); |
9935 GenerateThrowUncatchable(masm, TERMINATION); | 10718 GenerateThrowUncatchable(masm, TERMINATION); |
9936 | 10719 |
9937 __ bind(&throw_normal_exception); | 10720 __ bind(&throw_normal_exception); |
9938 GenerateThrowTOS(masm); | 10721 GenerateThrowTOS(masm); |
9939 } | 10722 } |
9940 | 10723 |
9941 | 10724 |
9942 void ApiGetterEntryStub::Generate(MacroAssembler* masm) { | |
9943 UNREACHABLE(); | |
9944 } | |
9945 | |
9946 | |
9947 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 10725 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
9948 Label invoke, exit; | 10726 Label invoke, exit; |
9949 #ifdef ENABLE_LOGGING_AND_PROFILING | 10727 #ifdef ENABLE_LOGGING_AND_PROFILING |
9950 Label not_outermost_js, not_outermost_js_2; | 10728 Label not_outermost_js, not_outermost_js_2; |
9951 #endif | 10729 #endif |
9952 | 10730 |
9953 // Setup frame. | 10731 // Setup frame. |
9954 __ push(rbp); | 10732 __ push(rbp); |
9955 __ movq(rbp, rsp); | 10733 __ movq(rbp, rsp); |
9956 | 10734 |
(...skipping 111 matching lines...)
10068 __ pop(r13); | 10846 __ pop(r13); |
10069 __ pop(r12); | 10847 __ pop(r12); |
10070 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers | 10848 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers |
10071 | 10849 |
10072 // Restore frame pointer and return. | 10850 // Restore frame pointer and return. |
10073 __ pop(rbp); | 10851 __ pop(rbp); |
10074 __ ret(0); | 10852 __ ret(0); |
10075 } | 10853 } |
10076 | 10854 |
10077 | 10855 |
10078 // ----------------------------------------------------------------------------- | 10856 void InstanceofStub::Generate(MacroAssembler* masm) { |
10079 // Implementation of stubs. | 10857 // Implements "value instanceof function" operator. |
| 10858 // Expected input state: |
| 10859 // rsp[0] : return address |
| 10860 // rsp[8] : function pointer |
| 10861 // rsp[16] : value |
| 10862 // Returns a bitwise zero to indicate that the value |
| 10863 // is an instance of the function and anything else to |
| 10864 // indicate that the value is not an instance. |
10080 | 10865 |
10081 // Stub classes have public member named masm, not masm_. | 10866 // Get the object - go slow case if it's a smi. |
| 10867 Label slow; |
| 10868 __ movq(rax, Operand(rsp, 2 * kPointerSize)); |
| 10869 __ JumpIfSmi(rax, &slow); |
10082 | 10870 |
10083 void StackCheckStub::Generate(MacroAssembler* masm) { | 10871 // Check that the left hand is a JS object. Leave its map in rax. |
10084 // Because builtins always remove the receiver from the stack, we | 10872 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); |
10085 // have to fake one to avoid underflowing the stack. The receiver | 10873 __ j(below, &slow); |
10086 // must be inserted below the return address on the stack so we | 10874 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE); |
10087 // temporarily store that in a register. | 10875 __ j(above, &slow); |
10088 __ pop(rax); | |
10089 __ Push(Smi::FromInt(0)); | |
10090 __ push(rax); | |
10091 | 10876 |
10092 // Do tail-call to runtime routine. | 10877 // Get the prototype of the function. |
10093 __ TailCallRuntime(Runtime::kStackGuard, 1, 1); | 10878 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); |
10094 } | 10879 // rdx is function, rax is map. |
10095 | 10880 |
| 10881 // Look up the function and the map in the instanceof cache. |
| 10882 Label miss; |
| 10883 __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); |
| 10884 __ j(not_equal, &miss); |
| 10885 __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex); |
| 10886 __ j(not_equal, &miss); |
| 10887 __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); |
| 10888 __ ret(2 * kPointerSize); |
10096 | 10889 |
10097 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { | 10890 __ bind(&miss); |
10098 __ SmiToInteger32(kScratchRegister, rdx); | 10891 __ TryGetFunctionPrototype(rdx, rbx, &slow); |
10099 __ cvtlsi2sd(xmm0, kScratchRegister); | |
10100 __ SmiToInteger32(kScratchRegister, rax); | |
10101 __ cvtlsi2sd(xmm1, kScratchRegister); | |
10102 } | |
10103 | 10892 |
| 10893 // Check that the function prototype is a JS object. |
| 10894 __ JumpIfSmi(rbx, &slow); |
| 10895 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister); |
| 10896 __ j(below, &slow); |
| 10897 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); |
| 10898 __ j(above, &slow); |
10104 | 10899 |
10105 void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) { | 10900 // Register mapping: |
10106 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done; | 10901 // rax is object map. |
10107 // Load operand in rdx into xmm0. | 10902 // rdx is function. |
10108 __ JumpIfSmi(rdx, &load_smi_rdx); | 10903 // rbx is function prototype. |
10109 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); | 10904 __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); |
10110 // Load operand in rax into xmm1. | 10905 __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); |
10111 __ JumpIfSmi(rax, &load_smi_rax); | |
10112 __ bind(&load_nonsmi_rax); | |
10113 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); | |
10114 __ jmp(&done); | |
10115 | 10906 |
10116 __ bind(&load_smi_rdx); | 10907 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset)); |
10117 __ SmiToInteger32(kScratchRegister, rdx); | |
10118 __ cvtlsi2sd(xmm0, kScratchRegister); | |
10119 __ JumpIfNotSmi(rax, &load_nonsmi_rax); | |
10120 | 10908 |
10121 __ bind(&load_smi_rax); | 10909 // Loop through the prototype chain looking for the function prototype. |
10122 __ SmiToInteger32(kScratchRegister, rax); | 10910 Label loop, is_instance, is_not_instance; |
10123 __ cvtlsi2sd(xmm1, kScratchRegister); | 10911 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex); |
| 10912 __ bind(&loop); |
| 10913 __ cmpq(rcx, rbx); |
| 10914 __ j(equal, &is_instance); |
| 10915 __ cmpq(rcx, kScratchRegister); |
| 10916 // The code at is_not_instance assumes that kScratchRegister contains a |
| 10917 // non-zero GCable value (the null object in this case). |
| 10918 __ j(equal, &is_not_instance); |
| 10919 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset)); |
| 10920 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset)); |
| 10921 __ jmp(&loop); |
10124 | 10922 |
10125 __ bind(&done); | 10923 __ bind(&is_instance); |
10126 } | 10924 __ xorl(rax, rax); |
| 10925 // Store bitwise zero in the cache. This is a Smi in GC terms. |
| 10926 ASSERT_EQ(0, kSmiTag); |
| 10927 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); |
| 10928 __ ret(2 * kPointerSize); |
10127 | 10929 |
| 10930 __ bind(&is_not_instance); |
| 10931 // We have to store a non-zero value in the cache. |
| 10932 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); |
| 10933 __ ret(2 * kPointerSize); |
10128 | 10934 |
10129 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, | 10935 // Slow-case: Go through the JavaScript implementation. |
10130 Label* not_numbers) { | 10936 __ bind(&slow); |
10131 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; | 10937 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
10132 // Load operand in rdx into xmm0, or branch to not_numbers. | |
10133 __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex); | |
10134 __ JumpIfSmi(rdx, &load_smi_rdx); | |
10135 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx); | |
10136 __ j(not_equal, not_numbers); // Argument in rdx is not a number. | |
10137 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); | |
10138 // Load operand in rax into xmm1, or branch to not_numbers. | |
10139 __ JumpIfSmi(rax, &load_smi_rax); | |
10140 | |
10141 __ bind(&load_nonsmi_rax); | |
10142 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx); | |
10143 __ j(not_equal, not_numbers); | |
10144 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); | |
10145 __ jmp(&done); | |
10146 | |
10147 __ bind(&load_smi_rdx); | |
10148 __ SmiToInteger32(kScratchRegister, rdx); | |
10149 __ cvtlsi2sd(xmm0, kScratchRegister); | |
10150 __ JumpIfNotSmi(rax, &load_nonsmi_rax); | |
10151 | |
10152 __ bind(&load_smi_rax); | |
10153 __ SmiToInteger32(kScratchRegister, rax); | |
10154 __ cvtlsi2sd(xmm1, kScratchRegister); | |
10155 __ bind(&done); | |
10156 } | |
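
The three LoadSSE2* helpers above untag smi operands with SmiToInteger32 and convert them with cvtlsi2sd. A minimal standalone sketch of that conversion, assuming the upper-half smi encoding V8 uses on x64 (a 32-bit payload in the high half of the tagged word, consistent with the STATIC_ASSERT(kSmiValueSize == 32) later in this file); this is an illustration, not V8 code:

  #include <cstdint>

  // Untag an x64 smi (payload in the upper 32 bits) and widen it to double,
  // mirroring the SmiToInteger32 + cvtlsi2sd pair emitted by the helpers.
  static double SmiToDouble(int64_t tagged_smi) {
    int32_t untagged = static_cast<int32_t>(tagged_smi >> 32);
    return static_cast<double>(untagged);
  }
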
10157 | |
10158 | |
10159 // Input: rdx, rax are the left and right objects of a bit op. | |
10160 // Output: rax, rcx are left and right integers for a bit op. | |
10161 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, | |
10162 Label* conversion_failure, | |
10163 Register heap_number_map) { | |
10164 // Check float operands. | |
10165 Label arg1_is_object, check_undefined_arg1; | |
10166 Label arg2_is_object, check_undefined_arg2; | |
10167 Label load_arg2, done; | |
10168 | |
10169 __ JumpIfNotSmi(rdx, &arg1_is_object); | |
10170 __ SmiToInteger32(rdx, rdx); | |
10171 __ jmp(&load_arg2); | |
10172 | |
10173 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | |
10174 __ bind(&check_undefined_arg1); | |
10175 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); | |
10176 __ j(not_equal, conversion_failure); | |
10177 __ movl(rdx, Immediate(0)); | |
10178 __ jmp(&load_arg2); | |
10179 | |
10180 __ bind(&arg1_is_object); | |
10181 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); | |
10182 __ j(not_equal, &check_undefined_arg1); | |
10183 // Get the untagged integer version of the rdx heap number in rdx. |
10184 IntegerConvert(masm, rdx, rdx); | |
10185 | |
10186 // Here rdx has the untagged integer, rax has a Smi or a heap number. | |
10187 __ bind(&load_arg2); | |
10188 // Test if arg2 is a Smi. | |
10189 __ JumpIfNotSmi(rax, &arg2_is_object); | |
10190 __ SmiToInteger32(rax, rax); | |
10191 __ movl(rcx, rax); | |
10192 __ jmp(&done); | |
10193 | |
10194 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | |
10195 __ bind(&check_undefined_arg2); | |
10196 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); | |
10197 __ j(not_equal, conversion_failure); | |
10198 __ movl(rcx, Immediate(0)); | |
10199 __ jmp(&done); | |
10200 | |
10201 __ bind(&arg2_is_object); | |
10202 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); | |
10203 __ j(not_equal, &check_undefined_arg2); | |
10204 // Get the untagged integer version of the rax heap number in rcx. |
10205 IntegerConvert(masm, rcx, rax); | |
10206 __ bind(&done); | |
10207 __ movl(rax, rdx); | |
10208 } | |
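
LoadAsIntegers feeds the bit-op path: smis are untagged directly, undefined converts to zero per ECMA-262 section 9.5, and heap numbers go through IntegerConvert, which applies the ToInt32 truncation. A rough standalone model of that truncation for a double operand (illustration only, not the stub's code path):

  #include <cmath>
  #include <cstdint>

  // ECMA-262 ToInt32 on a double: non-finite values become 0, otherwise
  // truncate toward zero, reduce modulo 2^32, and reinterpret the low 32 bits
  // as a signed value.
  static int32_t ToInt32(double value) {
    if (!std::isfinite(value) || value == 0.0) return 0;
    double truncated = std::trunc(value);
    double modulo = std::fmod(truncated, 4294967296.0);  // in (-2^32, 2^32)
    if (modulo < 0) modulo += 4294967296.0;               // map into [0, 2^32)
    return static_cast<int32_t>(static_cast<uint32_t>(modulo));
  }
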
10209 | |
10210 | |
10211 // Input: rdx, rax are the left and right objects of a bit op. | |
10212 // Output: rax, rcx are left and right integers for a bit op. | |
10213 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) { | |
10214 // Check float operands. | |
10215 Label done; | |
10216 Label rax_is_smi; | |
10217 Label rax_is_object; | |
10218 Label rdx_is_object; | |
10219 | |
10220 __ JumpIfNotSmi(rdx, &rdx_is_object); | |
10221 __ SmiToInteger32(rdx, rdx); | |
10222 __ JumpIfSmi(rax, &rax_is_smi); | |
10223 | |
10224 __ bind(&rax_is_object); | |
10225 IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx. | |
10226 __ jmp(&done); | |
10227 | |
10228 __ bind(&rdx_is_object); | |
10229 IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx. | |
10230 __ JumpIfNotSmi(rax, &rax_is_object); | |
10231 __ bind(&rax_is_smi); | |
10232 __ SmiToInteger32(rcx, rax); | |
10233 | |
10234 __ bind(&done); | |
10235 __ movl(rax, rdx); | |
10236 } | |
10237 | |
10238 | |
10239 const char* GenericBinaryOpStub::GetName() { | |
10240 if (name_ != NULL) return name_; | |
10241 const int len = 100; | |
10242 name_ = Bootstrapper::AllocateAutoDeletedArray(len); | |
10243 if (name_ == NULL) return "OOM"; | |
10244 const char* op_name = Token::Name(op_); | |
10245 const char* overwrite_name; | |
10246 switch (mode_) { | |
10247 case NO_OVERWRITE: overwrite_name = "Alloc"; break; | |
10248 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; | |
10249 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; | |
10250 default: overwrite_name = "UnknownOverwrite"; break; | |
10251 } | |
10252 | |
10253 OS::SNPrintF(Vector<char>(name_, len), | |
10254 "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", | |
10255 op_name, | |
10256 overwrite_name, | |
10257 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", | |
10258 args_in_registers_ ? "RegArgs" : "StackArgs", | |
10259 args_reversed_ ? "_R" : "", | |
10260 static_operands_type_.ToString(), | |
10261 BinaryOpIC::GetName(runtime_operands_type_)); | |
10262 return name_; | |
10263 } | |
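
GetName composes a descriptive stub name into a fixed 100-byte auto-deleted buffer, falling back to "OOM" when allocation fails, so every distinct MinorKey gets a distinct name in profiles and disassembly listings. A plain C++ sketch of the same pattern (the helper and its parameters are illustrative, not the stub's real accessors):

  #include <cstddef>
  #include <cstdio>

  // Build a stub name of the form "GenericBinaryOpStub_<op>_<overwrite>_<args>"
  // into a caller-provided fixed-size buffer, as GetName does above.
  static void MakeStubName(char* buffer, std::size_t size, const char* op_name,
                           const char* overwrite_name, bool args_in_registers) {
    std::snprintf(buffer, size, "GenericBinaryOpStub_%s_%s_%s", op_name,
                  overwrite_name, args_in_registers ? "RegArgs" : "StackArgs");
  }
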
10264 | |
10265 | |
10266 void GenericBinaryOpStub::GenerateCall( | |
10267 MacroAssembler* masm, | |
10268 Register left, | |
10269 Register right) { | |
10270 if (!ArgsInRegistersSupported()) { | |
10271 // Pass arguments on the stack. | |
10272 __ push(left); | |
10273 __ push(right); | |
10274 } else { | |
10275 // The calling convention with registers is left in rdx and right in rax. | |
10276 Register left_arg = rdx; | |
10277 Register right_arg = rax; | |
10278 if (!(left.is(left_arg) && right.is(right_arg))) { | |
10279 if (left.is(right_arg) && right.is(left_arg)) { | |
10280 if (IsOperationCommutative()) { | |
10281 SetArgsReversed(); | |
10282 } else { | |
10283 __ xchg(left, right); | |
10284 } | |
10285 } else if (left.is(left_arg)) { | |
10286 __ movq(right_arg, right); | |
10287 } else if (right.is(right_arg)) { | |
10288 __ movq(left_arg, left); | |
10289 } else if (left.is(right_arg)) { | |
10290 if (IsOperationCommutative()) { | |
10291 __ movq(left_arg, right); | |
10292 SetArgsReversed(); | |
10293 } else { | |
10294 // Order of moves is important to avoid destroying the left argument. |
10295 __ movq(left_arg, left); | |
10296 __ movq(right_arg, right); | |
10297 } | |
10298 } else if (right.is(left_arg)) { | |
10299 if (IsOperationCommutative()) { | |
10300 __ movq(right_arg, left); | |
10301 SetArgsReversed(); | |
10302 } else { | |
10303 // Order of moves is important to avoid destroying the right argument. |
10304 __ movq(right_arg, right); | |
10305 __ movq(left_arg, left); | |
10306 } | |
10307 } else { | |
10308 // Order of moves is not important. | |
10309 __ movq(left_arg, left); | |
10310 __ movq(right_arg, right); | |
10311 } | |
10312 } | |
10313 | |
10314 // Update flags to indicate that arguments are in registers. | |
10315 SetArgsInRegisters(); | |
10316 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); | |
10317 } | |
10318 | |
10319 // Call the stub. | |
10320 __ CallStub(this); | |
10321 } | |
10322 | |
10323 | |
10324 void GenericBinaryOpStub::GenerateCall( | |
10325 MacroAssembler* masm, | |
10326 Register left, | |
10327 Smi* right) { | |
10328 if (!ArgsInRegistersSupported()) { | |
10329 // Pass arguments on the stack. | |
10330 __ push(left); | |
10331 __ Push(right); | |
10332 } else { | |
10333 // The calling convention with registers is left in rdx and right in rax. | |
10334 Register left_arg = rdx; | |
10335 Register right_arg = rax; | |
10336 if (left.is(left_arg)) { | |
10337 __ Move(right_arg, right); | |
10338 } else if (left.is(right_arg) && IsOperationCommutative()) { | |
10339 __ Move(left_arg, right); | |
10340 SetArgsReversed(); | |
10341 } else { | |
10342 // For non-commutative operations, left and right_arg might be | |
10343 // the same register. Therefore, the order of the moves is | |
10344 // important here in order to not overwrite left before moving | |
10345 // it to left_arg. | |
10346 __ movq(left_arg, left); | |
10347 __ Move(right_arg, right); | |
10348 } | |
10349 | |
10350 // Update flags to indicate that arguments are in registers. | |
10351 SetArgsInRegisters(); | |
10352 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); | |
10353 } | |
10354 | |
10355 // Call the stub. | |
10356 __ CallStub(this); | |
10357 } | |
10358 | |
10359 | |
10360 void GenericBinaryOpStub::GenerateCall( | |
10361 MacroAssembler* masm, | |
10362 Smi* left, | |
10363 Register right) { | |
10364 if (!ArgsInRegistersSupported()) { | |
10365 // Pass arguments on the stack. | |
10366 __ Push(left); | |
10367 __ push(right); | |
10368 } else { | |
10369 // The calling convention with registers is left in rdx and right in rax. | |
10370 Register left_arg = rdx; | |
10371 Register right_arg = rax; | |
10372 if (right.is(right_arg)) { | |
10373 __ Move(left_arg, left); | |
10374 } else if (right.is(left_arg) && IsOperationCommutative()) { | |
10375 __ Move(right_arg, left); | |
10376 SetArgsReversed(); | |
10377 } else { | |
10378 // For non-commutative operations, right and left_arg might be | |
10379 // the same register. Therefore, the order of the moves is | |
10380 // important here in order to not overwrite right before moving | |
10381 // it to right_arg. | |
10382 __ movq(right_arg, right); | |
10383 __ Move(left_arg, left); | |
10384 } | |
10385 // Update flags to indicate that arguments are in registers. | |
10386 SetArgsInRegisters(); | |
10387 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); | |
10388 } | |
10389 | |
10390 // Call the stub. | |
10391 __ CallStub(this); | |
10392 } | |
10393 | |
10394 | |
10395 Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, | |
10396 VirtualFrame* frame, | |
10397 Result* left, | |
10398 Result* right) { | |
10399 if (ArgsInRegistersSupported()) { | |
10400 SetArgsInRegisters(); | |
10401 return frame->CallStub(this, left, right); | |
10402 } else { | |
10403 frame->Push(left); | |
10404 frame->Push(right); | |
10405 return frame->CallStub(this, 2); | |
10406 } | |
10407 } | |
10408 | |
10409 | |
10410 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | |
10411 // 1. Move arguments into rdx, rax except for DIV and MOD, which need the | |
10412 // dividend in rax and rdx free for the division. Use rax, rbx for those. | |
10413 Comment load_comment(masm, "-- Load arguments"); | |
10414 Register left = rdx; | |
10415 Register right = rax; | |
10416 if (op_ == Token::DIV || op_ == Token::MOD) { | |
10417 left = rax; | |
10418 right = rbx; | |
10419 if (HasArgsInRegisters()) { | |
10420 __ movq(rbx, rax); | |
10421 __ movq(rax, rdx); | |
10422 } | |
10423 } | |
10424 if (!HasArgsInRegisters()) { | |
10425 __ movq(right, Operand(rsp, 1 * kPointerSize)); | |
10426 __ movq(left, Operand(rsp, 2 * kPointerSize)); | |
10427 } | |
10428 | |
10429 Label not_smis; | |
10430 // 2. Smi check both operands. | |
10431 if (static_operands_type_.IsSmi()) { | |
10432 // Skip smi check if we know that both arguments are smis. | |
10433 if (FLAG_debug_code) { | |
10434 __ AbortIfNotSmi(left); | |
10435 __ AbortIfNotSmi(right); | |
10436 } | |
10437 if (op_ == Token::BIT_OR) { | |
10438 // Handle OR here, since we do extra smi-checking in the or code below. | |
10439 __ SmiOr(right, right, left); | |
10440 GenerateReturn(masm); | |
10441 return; | |
10442 } | |
10443 } else { | |
10444 if (op_ != Token::BIT_OR) { | |
10445 // Skip the check for OR as it is better combined with the | |
10446 // actual operation. | |
10447 Comment smi_check_comment(masm, "-- Smi check arguments"); | |
10448 __ JumpIfNotBothSmi(left, right, ¬_smis); | |
10449 } | |
10450 } | |
10451 | |
10452 // 3. Operands are both smis (except for OR), perform the operation leaving | |
10453 // the result in rax and check the result if necessary. | |
10454 Comment perform_smi(masm, "-- Perform smi operation"); | |
10455 Label use_fp_on_smis; | |
10456 switch (op_) { | |
10457 case Token::ADD: { | |
10458 ASSERT(right.is(rax)); | |
10459 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. | |
10460 break; | |
10461 } | |
10462 | |
10463 case Token::SUB: { | |
10464 __ SmiSub(left, left, right, &use_fp_on_smis); | |
10465 __ movq(rax, left); | |
10466 break; | |
10467 } | |
10468 | |
10469 case Token::MUL: | |
10470 ASSERT(right.is(rax)); | |
10471 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. | |
10472 break; | |
10473 | |
10474 case Token::DIV: | |
10475 ASSERT(left.is(rax)); | |
10476 __ SmiDiv(left, left, right, &use_fp_on_smis); | |
10477 break; | |
10478 | |
10479 case Token::MOD: | |
10480 ASSERT(left.is(rax)); | |
10481 __ SmiMod(left, left, right, slow); | |
10482 break; | |
10483 | |
10484 case Token::BIT_OR: | |
10485 ASSERT(right.is(rax)); | |
10486 __ movq(rcx, right); // Save the right operand. | |
10487 __ SmiOr(right, right, left); // BIT_OR is commutative. | |
10488 __ testb(right, Immediate(kSmiTagMask)); | |
10489 __ j(not_zero, ¬_smis); | |
10490 break; | |
10491 | |
10492 case Token::BIT_AND: | |
10493 ASSERT(right.is(rax)); | |
10494 __ SmiAnd(right, right, left); // BIT_AND is commutative. | |
10495 break; | |
10496 | |
10497 case Token::BIT_XOR: | |
10498 ASSERT(right.is(rax)); | |
10499 __ SmiXor(right, right, left); // BIT_XOR is commutative. | |
10500 break; | |
10501 | |
10502 case Token::SHL: | |
10503 case Token::SHR: | |
10504 case Token::SAR: | |
10505 switch (op_) { | |
10506 case Token::SAR: | |
10507 __ SmiShiftArithmeticRight(left, left, right); | |
10508 break; | |
10509 case Token::SHR: | |
10510 __ SmiShiftLogicalRight(left, left, right, slow); | |
10511 break; | |
10512 case Token::SHL: | |
10513 __ SmiShiftLeft(left, left, right); | |
10514 break; | |
10515 default: | |
10516 UNREACHABLE(); | |
10517 } | |
10518 __ movq(rax, left); | |
10519 break; | |
10520 | |
10521 default: | |
10522 UNREACHABLE(); | |
10523 break; | |
10524 } | |
10525 | |
10526 // 4. Emit return of result in rax. | |
10527 GenerateReturn(masm); | |
10528 | |
10529 // 5. For some operations emit inline code to perform floating point | |
10530 // operations on known smis (e.g., if the result of the operation | |
10531 // overflowed the smi range). | |
10532 switch (op_) { | |
10533 case Token::ADD: | |
10534 case Token::SUB: | |
10535 case Token::MUL: | |
10536 case Token::DIV: { | |
10537 ASSERT(use_fp_on_smis.is_linked()); | |
10538 __ bind(&use_fp_on_smis); | |
10539 if (op_ == Token::DIV) { | |
10540 __ movq(rdx, rax); | |
10541 __ movq(rax, rbx); | |
10542 } | |
10543 // left is rdx, right is rax. | |
10544 __ AllocateHeapNumber(rbx, rcx, slow); | |
10545 FloatingPointHelper::LoadSSE2SmiOperands(masm); | |
10546 switch (op_) { | |
10547 case Token::ADD: __ addsd(xmm0, xmm1); break; | |
10548 case Token::SUB: __ subsd(xmm0, xmm1); break; | |
10549 case Token::MUL: __ mulsd(xmm0, xmm1); break; | |
10550 case Token::DIV: __ divsd(xmm0, xmm1); break; | |
10551 default: UNREACHABLE(); | |
10552 } | |
10553 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); | |
10554 __ movq(rax, rbx); | |
10555 GenerateReturn(masm); | |
10556 } | |
10557 default: | |
10558 break; | |
10559 } | |
10560 | |
10561 // 6. Non-smi operands, fall out to the non-smi code with the operands in | |
10562 // rdx and rax. | |
10563 Comment done_comment(masm, "-- Enter non-smi code"); | |
10564 __ bind(¬_smis); | |
10565 | |
10566 switch (op_) { | |
10567 case Token::DIV: | |
10568 case Token::MOD: | |
10569 // Operands are in rax, rbx at this point. | |
10570 __ movq(rdx, rax); | |
10571 __ movq(rax, rbx); | |
10572 break; | |
10573 | |
10574 case Token::BIT_OR: | |
10575 // Right operand is saved in rcx and rax was destroyed by the smi | |
10576 // operation. | |
10577 __ movq(rax, rcx); | |
10578 break; | |
10579 | |
10580 default: | |
10581 break; | |
10582 } | |
10583 } | |
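
Step 5 above branches to use_fp_on_smis when an arithmetic result leaves the smi range. A small standalone model of that fast path for ADD, assuming a 32-bit smi payload as asserted later in this file (illustration only):

  #include <cstdint>

  // Try the integer addition first; on overflow report the exact result as a
  // double so the caller can box it in a heap number, which is what the
  // use_fp_on_smis branch above does.
  static bool TryAddSmis(int32_t left, int32_t right,
                         int32_t* smi_result, double* heap_number_result) {
    int64_t sum = static_cast<int64_t>(left) + static_cast<int64_t>(right);
    if (sum >= INT32_MIN && sum <= INT32_MAX) {
      *smi_result = static_cast<int32_t>(sum);       // still fits a smi
      return true;
    }
    *heap_number_result = static_cast<double>(sum);  // exact, |sum| < 2^33
    return false;
  }
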
10584 | |
10585 | |
10586 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | |
10587 Label call_runtime; | |
10588 | |
10589 if (ShouldGenerateSmiCode()) { | |
10590 GenerateSmiCode(masm, &call_runtime); | |
10591 } else if (op_ != Token::MOD) { | |
10592 if (!HasArgsInRegisters()) { | |
10593 GenerateLoadArguments(masm); | |
10594 } | |
10595 } | |
10596 // Floating point case. | |
10597 if (ShouldGenerateFPCode()) { | |
10598 switch (op_) { | |
10599 case Token::ADD: | |
10600 case Token::SUB: | |
10601 case Token::MUL: | |
10602 case Token::DIV: { | |
10603 if (runtime_operands_type_ == BinaryOpIC::DEFAULT && | |
10604 HasSmiCodeInStub()) { | |
10605 // Execution reaches this point when the first non-smi argument occurs | |
10606 // (and only if smi code is generated). This is the right moment to | |
10607 // patch to HEAP_NUMBERS state. The transition is attempted only for | |
10608 // the four basic operations. The stub stays in the DEFAULT state | |
10609 // forever for all other operations (also if smi code is skipped). | |
10610 GenerateTypeTransition(masm); | |
10611 break; | |
10612 } | |
10613 | |
10614 Label not_floats; | |
10615 // rax: y | |
10616 // rdx: x | |
10617 if (static_operands_type_.IsNumber()) { | |
10618 if (FLAG_debug_code) { | |
10619 // Assert at runtime that inputs are only numbers. | |
10620 __ AbortIfNotNumber(rdx); | |
10621 __ AbortIfNotNumber(rax); | |
10622 } | |
10623 FloatingPointHelper::LoadSSE2NumberOperands(masm); | |
10624 } else { | |
10625 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime); | |
10626 } | |
10627 | |
10628 switch (op_) { | |
10629 case Token::ADD: __ addsd(xmm0, xmm1); break; | |
10630 case Token::SUB: __ subsd(xmm0, xmm1); break; | |
10631 case Token::MUL: __ mulsd(xmm0, xmm1); break; | |
10632 case Token::DIV: __ divsd(xmm0, xmm1); break; | |
10633 default: UNREACHABLE(); | |
10634 } | |
10635 // Allocate a heap number, if needed. | |
10636 Label skip_allocation; | |
10637 OverwriteMode mode = mode_; | |
10638 if (HasArgsReversed()) { | |
10639 if (mode == OVERWRITE_RIGHT) { | |
10640 mode = OVERWRITE_LEFT; | |
10641 } else if (mode == OVERWRITE_LEFT) { | |
10642 mode = OVERWRITE_RIGHT; | |
10643 } | |
10644 } | |
10645 switch (mode) { | |
10646 case OVERWRITE_LEFT: | |
10647 __ JumpIfNotSmi(rdx, &skip_allocation); | |
10648 __ AllocateHeapNumber(rbx, rcx, &call_runtime); | |
10649 __ movq(rdx, rbx); | |
10650 __ bind(&skip_allocation); | |
10651 __ movq(rax, rdx); | |
10652 break; | |
10653 case OVERWRITE_RIGHT: | |
10654 // If the argument in rax is already an object, we skip the | |
10655 // allocation of a heap number. | |
10656 __ JumpIfNotSmi(rax, &skip_allocation); | |
10657 // Fall through! | |
10658 case NO_OVERWRITE: | |
10659 // Allocate a heap number for the result. Keep rax and rdx intact | |
10660 // for the possible runtime call. | |
10661 __ AllocateHeapNumber(rbx, rcx, &call_runtime); | |
10662 __ movq(rax, rbx); | |
10663 __ bind(&skip_allocation); | |
10664 break; | |
10665 default: UNREACHABLE(); | |
10666 } | |
10667 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | |
10668 GenerateReturn(masm); | |
10669 __ bind(¬_floats); | |
10670 if (runtime_operands_type_ == BinaryOpIC::DEFAULT && | |
10671 !HasSmiCodeInStub()) { | |
10672 // Execution reaches this point when the first non-number argument | |
10673 // occurs (and only if smi code is skipped from the stub, otherwise | |
10674 // the patching has already been done earlier in this case branch). | |
10675 // A perfect moment to try patching to STRINGS for ADD operation. | |
10676 if (op_ == Token::ADD) { | |
10677 GenerateTypeTransition(masm); | |
10678 } | |
10679 } | |
10680 break; | |
10681 } | |
10682 case Token::MOD: { | |
10683 // For MOD we go directly to runtime in the non-smi case. | |
10684 break; | |
10685 } | |
10686 case Token::BIT_OR: | |
10687 case Token::BIT_AND: | |
10688 case Token::BIT_XOR: | |
10689 case Token::SAR: | |
10690 case Token::SHL: | |
10691 case Token::SHR: { | |
10692 Label skip_allocation, non_smi_shr_result; | |
10693 Register heap_number_map = r9; | |
10694 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
10695 if (static_operands_type_.IsNumber()) { | |
10696 if (FLAG_debug_code) { | |
10697 // Assert at runtime that inputs are only numbers. | |
10698 __ AbortIfNotNumber(rdx); | |
10699 __ AbortIfNotNumber(rax); | |
10700 } | |
10701 FloatingPointHelper::LoadNumbersAsIntegers(masm); | |
10702 } else { | |
10703 FloatingPointHelper::LoadAsIntegers(masm, | |
10704 &call_runtime, | |
10705 heap_number_map); | |
10706 } | |
10707 switch (op_) { | |
10708 case Token::BIT_OR: __ orl(rax, rcx); break; | |
10709 case Token::BIT_AND: __ andl(rax, rcx); break; | |
10710 case Token::BIT_XOR: __ xorl(rax, rcx); break; | |
10711 case Token::SAR: __ sarl_cl(rax); break; | |
10712 case Token::SHL: __ shll_cl(rax); break; | |
10713 case Token::SHR: { | |
10714 __ shrl_cl(rax); | |
10715 // Check if result is negative. This can only happen for a shift | |
10716 // by zero. | |
10717 __ testl(rax, rax); | |
10718 __ j(negative, &non_smi_shr_result); | |
10719 break; | |
10720 } | |
10721 default: UNREACHABLE(); | |
10722 } | |
10723 | |
10724 STATIC_ASSERT(kSmiValueSize == 32); | |
10725 // Tag smi result and return. | |
10726 __ Integer32ToSmi(rax, rax); | |
10727 GenerateReturn(masm); | |
10728 | |
10729 // All bit-ops except SHR return a signed int32 that can be | |
10730 // returned immediately as a smi. | |
10731 // We might need to allocate a HeapNumber if we shift a negative | |
10732 // number right by zero (i.e., convert to UInt32). | |
10733 if (op_ == Token::SHR) { | |
10734 ASSERT(non_smi_shr_result.is_linked()); | |
10735 __ bind(&non_smi_shr_result); | |
10736 // Allocate a heap number if needed. | |
10737 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). | |
10738 switch (mode_) { | |
10739 case OVERWRITE_LEFT: | |
10740 case OVERWRITE_RIGHT: | |
10741 // If the operand was an object, we skip the | |
10742 // allocation of a heap number. | |
10743 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ? | |
10744 1 * kPointerSize : 2 * kPointerSize)); | |
10745 __ JumpIfNotSmi(rax, &skip_allocation); | |
10746 // Fall through! | |
10747 case NO_OVERWRITE: | |
10748 // Allocate heap number in new space. | |
10749 // Not using AllocateHeapNumber macro in order to reuse | |
10750 // already loaded heap_number_map. | |
10751 __ AllocateInNewSpace(HeapNumber::kSize, | |
10752 rax, | |
10753 rcx, | |
10754 no_reg, | |
10755 &call_runtime, | |
10756 TAG_OBJECT); | |
10757 // Set the map. | |
10758 if (FLAG_debug_code) { | |
10759 __ AbortIfNotRootValue(heap_number_map, | |
10760 Heap::kHeapNumberMapRootIndex, | |
10761 "HeapNumberMap register clobbered."); | |
10762 } | |
10763 __ movq(FieldOperand(rax, HeapObject::kMapOffset), | |
10764 heap_number_map); | |
10765 __ bind(&skip_allocation); | |
10766 break; | |
10767 default: UNREACHABLE(); | |
10768 } | |
10769 // Store the result in the HeapNumber and return. | |
10770 __ cvtqsi2sd(xmm0, rbx); | |
10771 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | |
10772 GenerateReturn(masm); | |
10773 } | |
10774 | |
10775 break; | |
10776 } | |
10777 default: UNREACHABLE(); break; | |
10778 } | |
10779 } | |
10780 | |
10781 // If all else fails, use the runtime system to get the correct | |
10782 // result. If the arguments were passed in registers, place them on the |
10783 // stack in the correct order below the return address. | |
10784 __ bind(&call_runtime); | |
10785 | |
10786 if (HasArgsInRegisters()) { | |
10787 GenerateRegisterArgsPush(masm); | |
10788 } | |
10789 | |
10790 switch (op_) { | |
10791 case Token::ADD: { | |
10792 // Registers containing left and right operands respectively. | |
10793 Register lhs, rhs; | |
10794 | |
10795 if (HasArgsReversed()) { | |
10796 lhs = rax; | |
10797 rhs = rdx; | |
10798 } else { | |
10799 lhs = rdx; | |
10800 rhs = rax; | |
10801 } | |
10802 | |
10803 // Test for string arguments before calling runtime. | |
10804 Label not_strings, both_strings, not_string1, string1, string1_smi2; | |
10805 | |
10806 // If this stub has already generated FP-specific code then the arguments | |
10807 // are already in rdx and rax. | |
10808 if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { | |
10809 GenerateLoadArguments(masm); | |
10810 } | |
10811 | |
10812 Condition is_smi; | |
10813 is_smi = masm->CheckSmi(lhs); | |
10814 __ j(is_smi, ¬_string1); | |
10815 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8); | |
10816 __ j(above_equal, ¬_string1); | |
10817 | |
10818 // First argument is a string, test second. |
10819 is_smi = masm->CheckSmi(rhs); | |
10820 __ j(is_smi, &string1_smi2); | |
10821 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); | |
10822 __ j(above_equal, &string1); | |
10823 | |
10824 // First and second argument are strings. | |
10825 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); | |
10826 __ TailCallStub(&string_add_stub); | |
10827 | |
10828 __ bind(&string1_smi2); | |
10829 // First argument is a string, second is a smi. Try to lookup the number | |
10830 // string for the smi in the number string cache. | |
10831 NumberToStringStub::GenerateLookupNumberStringCache( | |
10832 masm, rhs, rbx, rcx, r8, true, &string1); | |
10833 | |
10834 // Replace second argument on stack and tailcall string add stub to make | |
10835 // the result. | |
10836 __ movq(Operand(rsp, 1 * kPointerSize), rbx); | |
10837 __ TailCallStub(&string_add_stub); | |
10838 | |
10839 // Only first argument is a string. | |
10840 __ bind(&string1); | |
10841 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); | |
10842 | |
10843 // First argument was not a string, test second. | |
10844 __ bind(¬_string1); | |
10845 is_smi = masm->CheckSmi(rhs); | |
10846 __ j(is_smi, ¬_strings); | |
10847 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs); | |
10848 __ j(above_equal, ¬_strings); | |
10849 | |
10850 // Only second argument is a string. | |
10851 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); | |
10852 | |
10853 __ bind(¬_strings); | |
10854 // Neither argument is a string. | |
10855 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); | |
10856 break; | |
10857 } | |
10858 case Token::SUB: | |
10859 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); | |
10860 break; | |
10861 case Token::MUL: | |
10862 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); | |
10863 break; | |
10864 case Token::DIV: | |
10865 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); | |
10866 break; | |
10867 case Token::MOD: | |
10868 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); | |
10869 break; | |
10870 case Token::BIT_OR: | |
10871 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); | |
10872 break; | |
10873 case Token::BIT_AND: | |
10874 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); | |
10875 break; | |
10876 case Token::BIT_XOR: | |
10877 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); | |
10878 break; | |
10879 case Token::SAR: | |
10880 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); | |
10881 break; | |
10882 case Token::SHL: | |
10883 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); | |
10884 break; | |
10885 case Token::SHR: | |
10886 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); | |
10887 break; | |
10888 default: | |
10889 UNREACHABLE(); | |
10890 } | |
10891 } | |
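
In the bit-op branch of Generate above, SHR is the one operation whose result may not fit a smi: JavaScript's unsigned shift yields a uint32, so shifting a negative int32 by zero keeps the sign bit set and the value has to be boxed as a heap number. A minimal illustration of that check (not the stub's code):

  #include <cstdint>

  // The non_smi_shr_result case: an unsigned (>>>) shift result above
  // INT32_MAX cannot be stored as a non-negative 32-bit smi payload.
  static bool ShrResultFitsInSmi(int32_t value, uint32_t shift_amount,
                                 uint32_t* result) {
    *result = static_cast<uint32_t>(value) >> (shift_amount & 31);
    return *result <= static_cast<uint32_t>(INT32_MAX);
  }
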
10892 | |
10893 | |
10894 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { | |
10895 ASSERT(!HasArgsInRegisters()); | |
10896 __ movq(rax, Operand(rsp, 1 * kPointerSize)); | |
10897 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | |
10898 } | |
10899 | |
10900 | |
10901 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { | |
10902 // If arguments are not passed in registers, remove them from the stack before |
10903 // returning. | |
10904 if (!HasArgsInRegisters()) { | |
10905 __ ret(2 * kPointerSize); // Remove both operands | |
10906 } else { | |
10907 __ ret(0); | |
10908 } | |
10909 } | |
10910 | |
10911 | |
10912 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | |
10913 ASSERT(HasArgsInRegisters()); | |
10914 __ pop(rcx); | |
10915 if (HasArgsReversed()) { | |
10916 __ push(rax); | |
10917 __ push(rdx); | |
10918 } else { | |
10919 __ push(rdx); | |
10920 __ push(rax); | |
10921 } | |
10922 __ push(rcx); | |
10923 } | |
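
GenerateRegisterArgsPush has to leave the operands on the stack underneath the return address, so it pops the return address into rcx, pushes the operands in stack-argument order, and pushes the return address back. A toy value-level model of that reshuffle (illustration only; the vector's back() plays the role of the stack top):

  #include <cstdint>
  #include <vector>

  // Slip the two register arguments underneath the return address: left ends
  // up deeper than right, matching the layout GenerateLoadArguments expects.
  static void PushArgsBelowReturnAddress(std::vector<uint64_t>* stack,
                                         uint64_t left, uint64_t right) {
    uint64_t return_address = stack->back();  // __ pop(rcx)
    stack->pop_back();
    stack->push_back(left);                   // __ push(rdx)
    stack->push_back(right);                  // __ push(rax)
    stack->push_back(return_address);         // __ push(rcx)
  }
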
10924 | |
10925 | |
10926 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | |
10927 Label get_result; | |
10928 | |
10929 // Ensure the operands are on the stack. | |
10930 if (HasArgsInRegisters()) { | |
10931 GenerateRegisterArgsPush(masm); | |
10932 } | |
10933 | |
10934 // Left and right arguments are already on stack. | |
10935 __ pop(rcx); // Save the return address. | |
10936 | |
10937 // Push this stub's key. | |
10938 __ Push(Smi::FromInt(MinorKey())); | |
10939 | |
10940 // Although the operation and the type info are encoded into the key, | |
10941 // the encoding is opaque, so push them too. | |
10942 __ Push(Smi::FromInt(op_)); | |
10943 | |
10944 __ Push(Smi::FromInt(runtime_operands_type_)); | |
10945 | |
10946 __ push(rcx); // The return address. | |
10947 | |
10948 // Perform patching to an appropriate fast case and return the result. | |
10949 __ TailCallExternalReference( | |
10950 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), | |
10951 5, | |
10952 1); | |
10953 } | |
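
GenerateTypeTransition passes five arguments to the kBinaryOp_Patch runtime helper: the two operands already on the stack plus the stub's MinorKey, the operation token, and the current operand type info, each pushed as a smi. A sketch of the tagging that Push(Smi::FromInt(x)) amounts to, under the same upper-half x64 smi encoding assumed earlier (illustration only):

  #include <cstdint>

  // Tag a 32-bit integer as an x64 smi by placing it in the upper half of the
  // word; the shift goes through uint64_t to stay well defined for negatives.
  static int64_t SmiFromInt(int32_t value) {
    return static_cast<int64_t>(
        static_cast<uint64_t>(static_cast<int64_t>(value)) << 32);
  }
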
10954 | |
10955 | |
10956 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { | |
10957 GenericBinaryOpStub stub(key, type_info); | |
10958 return stub.GetCode(); | |
10959 } | 10938 } |
10960 | 10939 |
10961 | 10940 |
10962 int CompareStub::MinorKey() { | 10941 int CompareStub::MinorKey() { |
10963 // Encode the three parameters in a unique 16 bit value. To avoid duplicate | 10942 // Encode the three parameters in a unique 16 bit value. To avoid duplicate |
10964 // stubs, the never NaN NaN condition is only taken into account if the | 10943 // stubs, the never NaN NaN condition is only taken into account if the |
10965 // condition is equals. | 10944 // condition is equals. |
10966 ASSERT(static_cast<unsigned>(cc_) < (1 << 13)); | 10945 ASSERT(static_cast<unsigned>(cc_) < (1 << 13)); |
10967 return ConditionField::encode(static_cast<unsigned>(cc_)) | 10946 return ConditionField::encode(static_cast<unsigned>(cc_)) |
10968 | StrictField::encode(strict_) | 10947 | StrictField::encode(strict_) |
(...skipping 1088 matching lines...) |
12057 masm.GetCode(&desc); | 12036 masm.GetCode(&desc); |
12058 // Call the function from C++. | 12037 // Call the function from C++. |
12059 return FUNCTION_CAST<ModuloFunction>(buffer); | 12038 return FUNCTION_CAST<ModuloFunction>(buffer); |
12060 } | 12039 } |
12061 | 12040 |
12062 #endif | 12041 #endif |
12063 | 12042 |
12064 | 12043 |
12065 #undef __ | 12044 #undef __ |
12066 | 12045 |
| 12046 void RecordWriteStub::Generate(MacroAssembler* masm) { |
| 12047 masm->RecordWriteHelper(object_, addr_, scratch_); |
| 12048 masm->ret(0); |
| 12049 } |
| 12050 |
12067 } } // namespace v8::internal | 12051 } } // namespace v8::internal |
12068 | 12052 |
12069 #endif // V8_TARGET_ARCH_X64 | 12053 #endif // V8_TARGET_ARCH_X64 |