Chromium Code Reviews

Side by Side Diff: src/ia32/codegen-ia32.cc

Issue 6685088: Merge isolates to bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 9 months ago
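
A note on the pattern that repeats throughout this diff: the isolates merge replaces V8's process-global statics (Factory::*, Heap::*, Top::*, CpuFeatures::*) with per-isolate state reached through the thread's current isolate. A minimal sketch of the mechanism, with simplified names and under the assumption of a thread-local slot (illustrative only, not the actual V8 headers):

    // Sketch: per-isolate state found through thread-local storage.
    class Isolate {
     public:
      // Each thread designates at most one isolate as "current".
      static Isolate* Current() {
        return static_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
      }
      Factory* factory() { return &factory_; }
      Heap* heap() { return &heap_; }
     private:
      static Thread::LocalStorageKey isolate_key_;
      Factory factory_;
      Heap heap_;
    };

    // The FACTORY and HEAP spellings in the new column expand to lookups
    // of roughly this shape.
    #define FACTORY (Isolate::Current()->factory())
    #define HEAP (Isolate::Current()->heap())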
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 136 matching lines...)
147 masm_(masm), 147 masm_(masm),
148 info_(NULL), 148 info_(NULL),
149 frame_(NULL), 149 frame_(NULL),
150 allocator_(NULL), 150 allocator_(NULL),
151 state_(NULL), 151 state_(NULL),
152 loop_nesting_(0), 152 loop_nesting_(0),
153 in_safe_int32_mode_(false), 153 in_safe_int32_mode_(false),
154 safe_int32_mode_enabled_(true), 154 safe_int32_mode_enabled_(true),
155 function_return_is_shadowed_(false), 155 function_return_is_shadowed_(false),
156 in_spilled_code_(false), 156 in_spilled_code_(false),
157 jit_cookie_((FLAG_mask_constants_with_cookie) ? V8::RandomPrivate() : 0) { 157 jit_cookie_((FLAG_mask_constants_with_cookie) ?
158 V8::RandomPrivate(Isolate::Current()) : 0) {
158 } 159 }
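
Context for the jit_cookie_ change above: the cookie is a random per-code-generator value used to mask attacker-influenced immediates in generated code (a JIT-spraying mitigation); after the merge the randomness comes from the isolate rather than process-global state. A hedged sketch of how such a cookie gets used, with a hypothetical helper name:

    // Sketch: never emit an untrusted constant verbatim. Store it XOR-masked
    // with the random cookie and unmask it at run time.
    void EmitMaskedConstant(MacroAssembler* masm, Register dst,
                            int32_t value, int32_t jit_cookie) {
      masm->mov(dst, Immediate(value ^ jit_cookie));  // masked bytes in code
      masm->xor_(dst, jit_cookie);                    // recover real value
    }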
159 160
160 161
161 // Calling conventions: 162 // Calling conventions:
162 // ebp: caller's frame pointer 163 // ebp: caller's frame pointer
163 // esp: stack pointer 164 // esp: stack pointer
164 // edi: called JS function 165 // edi: called JS function
165 // esi: callee's context 166 // esi: callee's context
166 167
167 void CodeGenerator::Generate(CompilationInfo* info) { 168 void CodeGenerator::Generate(CompilationInfo* info) {
168 // Record the position for debugging purposes. 169 // Record the position for debugging purposes.
169 CodeForFunctionPosition(info->function()); 170 CodeForFunctionPosition(info->function());
170 Comment cmnt(masm_, "[ function compiled by virtual frame code generator"); 171 Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
171 172
172 // Initialize state. 173 // Initialize state.
173 info_ = info; 174 info_ = info;
174 ASSERT(allocator_ == NULL); 175 ASSERT(allocator_ == NULL);
175 RegisterAllocator register_allocator(this); 176 RegisterAllocator register_allocator(this);
176 allocator_ = &register_allocator; 177 allocator_ = &register_allocator;
177 ASSERT(frame_ == NULL); 178 ASSERT(frame_ == NULL);
178 frame_ = new VirtualFrame(); 179 frame_ = new VirtualFrame();
179 set_in_spilled_code(false); 180 set_in_spilled_code(false);
180 181
181 // Adjust for function-level loop nesting. 182 // Adjust for function-level loop nesting.
182 ASSERT_EQ(0, loop_nesting_); 183 ASSERT_EQ(0, loop_nesting_);
183 loop_nesting_ = info->is_in_loop() ? 1 : 0; 184 loop_nesting_ = info->is_in_loop() ? 1 : 0;
184 185
185 JumpTarget::set_compiling_deferred_code(false); 186 Isolate::Current()->set_jump_target_compiling_deferred_code(false);
186 187
187 { 188 {
188 CodeGenState state(this); 189 CodeGenState state(this);
189 190
190 // Entry: 191 // Entry:
191 // Stack: receiver, arguments, return address. 192 // Stack: receiver, arguments, return address.
192 // ebp: caller's frame pointer 193 // ebp: caller's frame pointer
193 // esp: stack pointer 194 // esp: stack pointer
194 // edi: called JS function 195 // edi: called JS function
195 // esi: callee's context 196 // esi: callee's context
(...skipping 81 matching lines...)
277 278
278 // Store the arguments object. This must happen after context 279 // Store the arguments object. This must happen after context
279 // initialization because the arguments object may be stored in 280 // initialization because the arguments object may be stored in
280 // the context. 281 // the context.
281 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) { 282 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
282 StoreArgumentsObject(true); 283 StoreArgumentsObject(true);
283 } 284 }
284 285
285 // Initialize ThisFunction reference if present. 286 // Initialize ThisFunction reference if present.
286 if (scope()->is_function_scope() && scope()->function() != NULL) { 287 if (scope()->is_function_scope() && scope()->function() != NULL) {
287 frame_->Push(Factory::the_hole_value()); 288 frame_->Push(FACTORY->the_hole_value());
288 StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT); 289 StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
289 } 290 }
290 291
291 292
292 // Initialize the function return target after the locals are set 293 // Initialize the function return target after the locals are set
293 // up, because it needs the expected frame height from the frame. 294 // up, because it needs the expected frame height from the frame.
294 function_return_.set_direction(JumpTarget::BIDIRECTIONAL); 295 function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
295 function_return_is_shadowed_ = false; 296 function_return_is_shadowed_ = false;
296 297
297 // Generate code to 'execute' declarations and initialize functions 298 // Generate code to 'execute' declarations and initialize functions
(...skipping 15 matching lines...)
313 // Ignore the return value. 314 // Ignore the return value.
314 } 315 }
315 CheckStack(); 316 CheckStack();
316 317
317 // Compile the body of the function in a vanilla state. Don't 318 // Compile the body of the function in a vanilla state. Don't
318 // bother compiling all the code if the scope has an illegal 319 // bother compiling all the code if the scope has an illegal
319 // redeclaration. 320 // redeclaration.
320 if (!scope()->HasIllegalRedeclaration()) { 321 if (!scope()->HasIllegalRedeclaration()) {
321 Comment cmnt(masm_, "[ function body"); 322 Comment cmnt(masm_, "[ function body");
322 #ifdef DEBUG 323 #ifdef DEBUG
323 bool is_builtin = Bootstrapper::IsActive(); 324 bool is_builtin = info->isolate()->bootstrapper()->IsActive();
324 bool should_trace = 325 bool should_trace =
325 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls; 326 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
326 if (should_trace) { 327 if (should_trace) {
327 frame_->CallRuntime(Runtime::kDebugTrace, 0); 328 frame_->CallRuntime(Runtime::kDebugTrace, 0);
328 // Ignore the return value. 329 // Ignore the return value.
329 } 330 }
330 #endif 331 #endif
331 VisitStatements(info->function()->body()); 332 VisitStatements(info->function()->body());
332 333
333 // Handle the return from the function. 334 // Handle the return from the function.
334 if (has_valid_frame()) { 335 if (has_valid_frame()) {
335 // If there is a valid frame, control flow can fall off the end of 336 // If there is a valid frame, control flow can fall off the end of
336 // the body. In that case there is an implicit return statement. 337 // the body. In that case there is an implicit return statement.
337 ASSERT(!function_return_is_shadowed_); 338 ASSERT(!function_return_is_shadowed_);
338 CodeForReturnPosition(info->function()); 339 CodeForReturnPosition(info->function());
339 frame_->PrepareForReturn(); 340 frame_->PrepareForReturn();
340 Result undefined(Factory::undefined_value()); 341 Result undefined(FACTORY->undefined_value());
341 if (function_return_.is_bound()) { 342 if (function_return_.is_bound()) {
342 function_return_.Jump(&undefined); 343 function_return_.Jump(&undefined);
343 } else { 344 } else {
344 function_return_.Bind(&undefined); 345 function_return_.Bind(&undefined);
345 GenerateReturnSequence(&undefined); 346 GenerateReturnSequence(&undefined);
346 } 347 }
347 } else if (function_return_.is_linked()) { 348 } else if (function_return_.is_linked()) {
348 // If the return target has dangling jumps to it, then we have not 349 // If the return target has dangling jumps to it, then we have not
349 // yet generated the return sequence. This can happen when (a) 350 // yet generated the return sequence. This can happen when (a)
350 // control does not flow off the end of the body so we did not 351 // control does not flow off the end of the body so we did not
(...skipping 11 matching lines...)
362 loop_nesting_ = 0; 363 loop_nesting_ = 0;
363 364
364 // Code generation state must be reset. 365 // Code generation state must be reset.
365 ASSERT(state_ == NULL); 366 ASSERT(state_ == NULL);
366 ASSERT(!function_return_is_shadowed_); 367 ASSERT(!function_return_is_shadowed_);
367 function_return_.Unuse(); 368 function_return_.Unuse();
368 DeleteFrame(); 369 DeleteFrame();
369 370
370 // Process any deferred code using the register allocator. 371 // Process any deferred code using the register allocator.
371 if (!HasStackOverflow()) { 372 if (!HasStackOverflow()) {
372 JumpTarget::set_compiling_deferred_code(true); 373 info->isolate()->set_jump_target_compiling_deferred_code(true);
373 ProcessDeferred(); 374 ProcessDeferred();
374 JumpTarget::set_compiling_deferred_code(false); 375 info->isolate()->set_jump_target_compiling_deferred_code(false);
375 } 376 }
376 377
377 // There is no need to delete the register allocator, it is a 378 // There is no need to delete the register allocator, it is a
378 // stack-allocated local. 379 // stack-allocated local.
379 allocator_ = NULL; 380 allocator_ = NULL;
380 } 381 }
381 382
382 383
383 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { 384 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
384 // Currently, this assertion will fail if we try to assign to 385 // Currently, this assertion will fail if we try to assign to
(...skipping 163 matching lines...)
548 void CodeGenerator::ConvertInt32ResultToNumber(Result* value) { 549 void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
549 ASSERT(value->is_untagged_int32()); 550 ASSERT(value->is_untagged_int32());
550 if (value->is_register()) { 551 if (value->is_register()) {
551 Register val = value->reg(); 552 Register val = value->reg();
552 JumpTarget done; 553 JumpTarget done;
553 __ add(val, Operand(val)); 554 __ add(val, Operand(val));
554 done.Branch(no_overflow, value); 555 done.Branch(no_overflow, value);
555 __ sar(val, 1); 556 __ sar(val, 1);
556 // If there was an overflow, bits 30 and 31 of the original number disagree. 557 // If there was an overflow, bits 30 and 31 of the original number disagree.
557 __ xor_(val, 0x80000000u); 558 __ xor_(val, 0x80000000u);
558 if (CpuFeatures::IsSupported(SSE2)) { 559 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
559 CpuFeatures::Scope fscope(SSE2); 560 CpuFeatures::Scope fscope(SSE2);
560 __ cvtsi2sd(xmm0, Operand(val)); 561 __ cvtsi2sd(xmm0, Operand(val));
561 } else { 562 } else {
562 // Move val to ST[0] in the FPU 563 // Move val to ST[0] in the FPU
563 // Push and pop are safe with respect to the virtual frame because 564 // Push and pop are safe with respect to the virtual frame because
564 // all synced elements are below the actual stack pointer. 565 // all synced elements are below the actual stack pointer.
565 __ push(val); 566 __ push(val);
566 __ fild_s(Operand(esp, 0)); 567 __ fild_s(Operand(esp, 0));
567 __ pop(val); 568 __ pop(val);
568 } 569 }
569 Result scratch = allocator_->Allocate(); 570 Result scratch = allocator_->Allocate();
570 ASSERT(scratch.is_register()); 571 ASSERT(scratch.is_register());
571 Label allocation_failed; 572 Label allocation_failed;
572 __ AllocateHeapNumber(val, scratch.reg(), 573 __ AllocateHeapNumber(val, scratch.reg(),
573 no_reg, &allocation_failed); 574 no_reg, &allocation_failed);
574 VirtualFrame* clone = new VirtualFrame(frame_); 575 VirtualFrame* clone = new VirtualFrame(frame_);
575 scratch.Unuse(); 576 scratch.Unuse();
576 if (CpuFeatures::IsSupported(SSE2)) { 577 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
577 CpuFeatures::Scope fscope(SSE2); 578 CpuFeatures::Scope fscope(SSE2);
578 __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0); 579 __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
579 } else { 580 } else {
580 __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset)); 581 __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
581 } 582 }
582 done.Jump(value); 583 done.Jump(value);
583 584
584 // Establish the virtual frame, cloned from where AllocateHeapNumber 585 // Establish the virtual frame, cloned from where AllocateHeapNumber
585 // jumped to allocation_failed. 586 // jumped to allocation_failed.
586 RegisterFile empty_regs; 587 RegisterFile empty_regs;
587 SetFrame(clone, &empty_regs); 588 SetFrame(clone, &empty_regs);
588 __ bind(&allocation_failed); 589 __ bind(&allocation_failed);
589 if (!CpuFeatures::IsSupported(SSE2)) { 590 if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
590 // Pop the value from the floating point stack. 591 // Pop the value from the floating point stack.
591 __ fstp(0); 592 __ fstp(0);
592 } 593 }
593 unsafe_bailout_->Jump(); 594 unsafe_bailout_->Jump();
594 595
595 done.Bind(value); 596 done.Bind(value);
596 } else { 597 } else {
597 ASSERT(value->is_constant()); 598 ASSERT(value->is_constant());
598 } 599 }
599 value->set_untagged_int32(false); 600 value->set_untagged_int32(false);
600 value->set_type_info(TypeInfo::Integer32()); 601 value->set_type_info(TypeInfo::Integer32());
601 } 602 }
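
The doubling trick in ConvertInt32ResultToNumber is compact enough to miss: add val, val smi-tags the value and sets the overflow flag exactly when the integer does not fit in 31 bits, and on the overflow path sar plus xor reconstruct the original. The same bit logic as stand-alone C++ (a sketch, not V8 code; assumes arithmetic right shift of int32_t):

    #include <cstdint>

    int32_t UntagOrRecover(int32_t original) {
      // 'add val, val': doubling overflows exactly when bits 30 and 31 of
      // the original disagree.
      int32_t doubled = static_cast<int32_t>(
          static_cast<uint32_t>(original) << 1);
      bool overflowed = (doubled >> 1) != original;
      if (!overflowed) return doubled >> 1;  // valid smi: plain untag
      // 'sar val, 1' restores every bit except the sign bit, and
      // 'xor val, 0x80000000' flips the sign bit back.
      return static_cast<int32_t>((doubled >> 1) ^ 0x80000000u);
    }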
602 603
603 604
604 void CodeGenerator::Load(Expression* expr) { 605 void CodeGenerator::Load(Expression* expr) {
605 #ifdef DEBUG 606 #ifdef DEBUG
606 int original_height = frame_->height(); 607 int original_height = frame_->height();
607 #endif 608 #endif
608 ASSERT(!in_spilled_code()); 609 ASSERT(!in_spilled_code());
609 610
610 // If the expression should be a side-effect-free 32-bit int computation, 611 // If the expression should be a side-effect-free 32-bit int computation,
611 // compile that SafeInt32 path, and a bailout path. 612 // compile that SafeInt32 path, and a bailout path.
612 if (!in_safe_int32_mode() && 613 if (!in_safe_int32_mode() &&
613 safe_int32_mode_enabled() && 614 safe_int32_mode_enabled() &&
614 expr->side_effect_free() && 615 expr->side_effect_free() &&
615 expr->num_bit_ops() > 2 && 616 expr->num_bit_ops() > 2 &&
616 CpuFeatures::IsSupported(SSE2)) { 617 Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
617 BreakTarget unsafe_bailout; 618 BreakTarget unsafe_bailout;
618 JumpTarget done; 619 JumpTarget done;
619 unsafe_bailout.set_expected_height(frame_->height()); 620 unsafe_bailout.set_expected_height(frame_->height());
620 LoadInSafeInt32Mode(expr, &unsafe_bailout); 621 LoadInSafeInt32Mode(expr, &unsafe_bailout);
621 done.Jump(); 622 done.Jump();
622 623
623 if (unsafe_bailout.is_linked()) { 624 if (unsafe_bailout.is_linked()) {
624 unsafe_bailout.Bind(); 625 unsafe_bailout.Bind();
625 LoadWithSafeInt32ModeDisabled(expr); 626 LoadWithSafeInt32ModeDisabled(expr);
626 } 627 }
627 done.Bind(); 628 done.Bind();
628 } else { 629 } else {
629 JumpTarget true_target; 630 JumpTarget true_target;
630 JumpTarget false_target; 631 JumpTarget false_target;
631 ControlDestination dest(&true_target, &false_target, true); 632 ControlDestination dest(&true_target, &false_target, true);
632 LoadCondition(expr, &dest, false); 633 LoadCondition(expr, &dest, false);
633 634
634 if (dest.false_was_fall_through()) { 635 if (dest.false_was_fall_through()) {
635 // The false target was just bound. 636 // The false target was just bound.
636 JumpTarget loaded; 637 JumpTarget loaded;
637 frame_->Push(Factory::false_value()); 638 frame_->Push(FACTORY->false_value());
638 // There may be dangling jumps to the true target. 639 // There may be dangling jumps to the true target.
639 if (true_target.is_linked()) { 640 if (true_target.is_linked()) {
640 loaded.Jump(); 641 loaded.Jump();
641 true_target.Bind(); 642 true_target.Bind();
642 frame_->Push(Factory::true_value()); 643 frame_->Push(FACTORY->true_value());
643 loaded.Bind(); 644 loaded.Bind();
644 } 645 }
645 646
646 } else if (dest.is_used()) { 647 } else if (dest.is_used()) {
647 // There is true, and possibly false, control flow (with true as 648 // There is true, and possibly false, control flow (with true as
648 // the fall through). 649 // the fall through).
649 JumpTarget loaded; 650 JumpTarget loaded;
650 frame_->Push(Factory::true_value()); 651 frame_->Push(FACTORY->true_value());
651 if (false_target.is_linked()) { 652 if (false_target.is_linked()) {
652 loaded.Jump(); 653 loaded.Jump();
653 false_target.Bind(); 654 false_target.Bind();
654 frame_->Push(Factory::false_value()); 655 frame_->Push(FACTORY->false_value());
655 loaded.Bind(); 656 loaded.Bind();
656 } 657 }
657 658
658 } else { 659 } else {
659 // We have a valid value on top of the frame, but we still may 660 // We have a valid value on top of the frame, but we still may
660 // have dangling jumps to the true and false targets from nested 661 // have dangling jumps to the true and false targets from nested
661 // subexpressions (eg, the left subexpressions of the 662 // subexpressions (eg, the left subexpressions of the
662 // short-circuited boolean operators). 663 // short-circuited boolean operators).
663 ASSERT(has_valid_frame()); 664 ASSERT(has_valid_frame());
664 if (true_target.is_linked() || false_target.is_linked()) { 665 if (true_target.is_linked() || false_target.is_linked()) {
665 JumpTarget loaded; 666 JumpTarget loaded;
666 loaded.Jump(); // Don't lose the current TOS. 667 loaded.Jump(); // Don't lose the current TOS.
667 if (true_target.is_linked()) { 668 if (true_target.is_linked()) {
668 true_target.Bind(); 669 true_target.Bind();
669 frame_->Push(Factory::true_value()); 670 frame_->Push(FACTORY->true_value());
670 if (false_target.is_linked()) { 671 if (false_target.is_linked()) {
671 loaded.Jump(); 672 loaded.Jump();
672 } 673 }
673 } 674 }
674 if (false_target.is_linked()) { 675 if (false_target.is_linked()) {
675 false_target.Bind(); 676 false_target.Bind();
676 frame_->Push(Factory::false_value()); 677 frame_->Push(FACTORY->false_value());
677 } 678 }
678 loaded.Bind(); 679 loaded.Bind();
679 } 680 }
680 } 681 }
681 } 682 }
682 ASSERT(has_valid_frame()); 683 ASSERT(has_valid_frame());
683 ASSERT(frame_->height() == original_height + 1); 684 ASSERT(frame_->height() == original_height + 1);
684 } 685 }
685 686
686 687
(...skipping 57 matching lines...)
744 745
745 Result CodeGenerator::StoreArgumentsObject(bool initial) { 746 Result CodeGenerator::StoreArgumentsObject(bool initial) {
746 ArgumentsAllocationMode mode = ArgumentsMode(); 747 ArgumentsAllocationMode mode = ArgumentsMode();
747 ASSERT(mode != NO_ARGUMENTS_ALLOCATION); 748 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
748 749
749 Comment cmnt(masm_, "[ store arguments object"); 750 Comment cmnt(masm_, "[ store arguments object");
750 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) { 751 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
751 // When using lazy arguments allocation, we store the arguments marker value 752 // When using lazy arguments allocation, we store the arguments marker value
752 // as a sentinel indicating that the arguments object hasn't been 753 // as a sentinel indicating that the arguments object hasn't been
753 // allocated yet. 754 // allocated yet.
754 frame_->Push(Factory::arguments_marker()); 755 frame_->Push(FACTORY->arguments_marker());
755 } else { 756 } else {
756 ArgumentsAccessStub stub(is_strict_mode() 757 ArgumentsAccessStub stub(is_strict_mode()
757 ? ArgumentsAccessStub::NEW_STRICT 758 ? ArgumentsAccessStub::NEW_STRICT
758 : ArgumentsAccessStub::NEW_NON_STRICT); 759 : ArgumentsAccessStub::NEW_NON_STRICT);
759 frame_->PushFunction(); 760 frame_->PushFunction();
760 frame_->PushReceiverSlotAddress(); 761 frame_->PushReceiverSlotAddress();
761 frame_->Push(Smi::FromInt(scope()->num_parameters())); 762 frame_->Push(Smi::FromInt(scope()->num_parameters()));
762 Result result = frame_->CallStub(&stub, 3); 763 Result result = frame_->CallStub(&stub, 3);
763 frame_->Push(&result); 764 frame_->Push(&result);
764 } 765 }
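
To spell out the lazy scheme used above: under LAZY_ARGUMENTS_ALLOCATION the slot initially receives only the arguments-marker sentinel, and the comparatively expensive arguments object is materialized later, only if the sentinel is still in place when 'arguments' is actually used. Schematically, with hypothetical helper names:

    // Sketch of the sentinel protocol behind StoreArgumentsObject.
    void StoreArgumentsLazily(bool initial, Slot* slot) {
      if (initial) {
        slot->Set(ArgumentsMarker());          // defer the allocation
      } else if (slot->Get() == ArgumentsMarker()) {
        slot->Set(AllocateArgumentsObject());  // first real use: build it now
      }
      // Any other slot contents mean a local named 'arguments' was assigned
      // a proper value, which must not be clobbered.
    }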
(...skipping 11 matching lines...)
776 // We have to skip storing into the arguments slot if it has 777 // We have to skip storing into the arguments slot if it has
777 // already been written to. This can happen if a function 778 // already been written to. This can happen if a function
778 // has a local variable named 'arguments'. 779 // has a local variable named 'arguments'.
779 LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF); 780 LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
780 Result probe = frame_->Pop(); 781 Result probe = frame_->Pop();
781 if (probe.is_constant()) { 782 if (probe.is_constant()) {
782 // We have to skip updating the arguments object if it has 783 // We have to skip updating the arguments object if it has
783 // been assigned a proper value. 784 // been assigned a proper value.
784 skip_arguments = !probe.handle()->IsArgumentsMarker(); 785 skip_arguments = !probe.handle()->IsArgumentsMarker();
785 } else { 786 } else {
786 __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker())); 787 __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
787 probe.Unuse(); 788 probe.Unuse();
788 done.Branch(not_equal); 789 done.Branch(not_equal);
789 } 790 }
790 } 791 }
791 if (!skip_arguments) { 792 if (!skip_arguments) {
792 StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT); 793 StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
793 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); 794 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
794 } 795 }
795 if (shadow != NULL) { 796 if (shadow != NULL) {
796 StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT); 797 StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
(...skipping 110 matching lines...)
907 __ test(value.reg(), Immediate(kSmiTagMask)); 908 __ test(value.reg(), Immediate(kSmiTagMask));
908 dest->true_target()->Branch(zero); 909 dest->true_target()->Branch(zero);
909 __ fldz(); 910 __ fldz();
910 __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset)); 911 __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
911 __ FCmp(); 912 __ FCmp();
912 value.Unuse(); 913 value.Unuse();
913 dest->Split(not_zero); 914 dest->Split(not_zero);
914 } else { 915 } else {
915 // Fast case checks. 916 // Fast case checks.
916 // 'false' => false. 917 // 'false' => false.
917 __ cmp(value.reg(), Factory::false_value()); 918 __ cmp(value.reg(), FACTORY->false_value());
918 dest->false_target()->Branch(equal); 919 dest->false_target()->Branch(equal);
919 920
920 // 'true' => true. 921 // 'true' => true.
921 __ cmp(value.reg(), Factory::true_value()); 922 __ cmp(value.reg(), FACTORY->true_value());
922 dest->true_target()->Branch(equal); 923 dest->true_target()->Branch(equal);
923 924
924 // 'undefined' => false. 925 // 'undefined' => false.
925 __ cmp(value.reg(), Factory::undefined_value()); 926 __ cmp(value.reg(), FACTORY->undefined_value());
926 dest->false_target()->Branch(equal); 927 dest->false_target()->Branch(equal);
927 928
928 // Smi => false iff zero. 929 // Smi => false iff zero.
929 STATIC_ASSERT(kSmiTag == 0); 930 STATIC_ASSERT(kSmiTag == 0);
930 __ test(value.reg(), Operand(value.reg())); 931 __ test(value.reg(), Operand(value.reg()));
931 dest->false_target()->Branch(zero); 932 dest->false_target()->Branch(zero);
932 __ test(value.reg(), Immediate(kSmiTagMask)); 933 __ test(value.reg(), Immediate(kSmiTagMask));
933 dest->true_target()->Branch(zero); 934 dest->true_target()->Branch(zero);
934 935
935 // Call the stub for all other cases. 936 // Call the stub for all other cases.
(...skipping 50 matching lines...)
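
The fast cases above cover most of ECMAScript's ToBoolean before the generic stub is called. The same decision table in plain C++ (a sketch with hypothetical handle accessors, not V8 source):

    // Returns true when the fast path decided; false means run the stub.
    bool ToBooleanFastPath(Handle<Object> value, bool* result) {
      if (value->IsFalse() || value->IsUndefined()) {
        *result = false;                              // both are falsy
        return true;
      }
      if (value->IsTrue()) {
        *result = true;
        return true;
      }
      if (value->IsSmi()) {
        *result = Smi::cast(*value)->value() != 0;    // smi: false iff zero
        return true;
      }
      return false;  // strings, heap numbers, objects: defer to the stub
    }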
986 TypeInfo right_info_; 987 TypeInfo right_info_;
987 OverwriteMode mode_; 988 OverwriteMode mode_;
988 Label answer_out_of_range_; 989 Label answer_out_of_range_;
989 Label non_smi_input_; 990 Label non_smi_input_;
990 Label constant_rhs_; 991 Label constant_rhs_;
991 Smi* smi_value_; 992 Smi* smi_value_;
992 }; 993 };
993 994
994 995
995 Label* DeferredInlineBinaryOperation::NonSmiInputLabel() { 996 Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
996 if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) { 997 if (Token::IsBitOp(op_) &&
998 Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
997 return &non_smi_input_; 999 return &non_smi_input_;
998 } else { 1000 } else {
999 return entry_label(); 1001 return entry_label();
1000 } 1002 }
1001 } 1003 }
1002 1004
1003 1005
1004 void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) { 1006 void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
1005 __ j(cond, &answer_out_of_range_); 1007 __ j(cond, &answer_out_of_range_);
1006 } 1008 }
1007 1009
1008 1010
1009 void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond, 1011 void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
1010 Smi* smi_value) { 1012 Smi* smi_value) {
1011 smi_value_ = smi_value; 1013 smi_value_ = smi_value;
1012 __ j(cond, &constant_rhs_); 1014 __ j(cond, &constant_rhs_);
1013 } 1015 }
1014 1016
1015 1017
1016 void DeferredInlineBinaryOperation::Generate() { 1018 void DeferredInlineBinaryOperation::Generate() {
1017 // Registers are not saved implicitly for this stub, so we should not 1019 // Registers are not saved implicitly for this stub, so we should not
1018 // tread on the registers that were not passed to us. 1020 // tread on the registers that were not passed to us.
1019 if (CpuFeatures::IsSupported(SSE2) && 1021 if (Isolate::Current()->cpu_features()->IsSupported(SSE2) &&
1020 ((op_ == Token::ADD) || 1022 ((op_ == Token::ADD) ||
1021 (op_ == Token::SUB) || 1023 (op_ == Token::SUB) ||
1022 (op_ == Token::MUL) || 1024 (op_ == Token::MUL) ||
1023 (op_ == Token::DIV))) { 1025 (op_ == Token::DIV))) {
1024 CpuFeatures::Scope use_sse2(SSE2); 1026 CpuFeatures::Scope use_sse2(SSE2);
1025 Label call_runtime, after_alloc_failure; 1027 Label call_runtime, after_alloc_failure;
1026 Label left_smi, right_smi, load_right, do_op; 1028 Label left_smi, right_smi, load_right, do_op;
1027 if (!left_info_.IsSmi()) { 1029 if (!left_info_.IsSmi()) {
1028 __ test(left_, Immediate(kSmiTagMask)); 1030 __ test(left_, Immediate(kSmiTagMask));
1029 __ j(zero, &left_smi); 1031 __ j(zero, &left_smi);
1030 if (!left_info_.IsNumber()) { 1032 if (!left_info_.IsNumber()) {
1031 __ cmp(FieldOperand(left_, HeapObject::kMapOffset), 1033 __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
1032 Factory::heap_number_map()); 1034 FACTORY->heap_number_map());
1033 __ j(not_equal, &call_runtime); 1035 __ j(not_equal, &call_runtime);
1034 } 1036 }
1035 __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset)); 1037 __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
1036 if (mode_ == OVERWRITE_LEFT) { 1038 if (mode_ == OVERWRITE_LEFT) {
1037 __ mov(dst_, left_); 1039 __ mov(dst_, left_);
1038 } 1040 }
1039 __ jmp(&load_right); 1041 __ jmp(&load_right);
1040 1042
1041 __ bind(&left_smi); 1043 __ bind(&left_smi);
1042 } else { 1044 } else {
1043 if (FLAG_debug_code) __ AbortIfNotSmi(left_); 1045 if (FLAG_debug_code) __ AbortIfNotSmi(left_);
1044 } 1046 }
1045 __ SmiUntag(left_); 1047 __ SmiUntag(left_);
1046 __ cvtsi2sd(xmm0, Operand(left_)); 1048 __ cvtsi2sd(xmm0, Operand(left_));
1047 __ SmiTag(left_); 1049 __ SmiTag(left_);
1048 if (mode_ == OVERWRITE_LEFT) { 1050 if (mode_ == OVERWRITE_LEFT) {
1049 Label alloc_failure; 1051 Label alloc_failure;
1050 __ push(left_); 1052 __ push(left_);
1051 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure); 1053 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
1052 __ pop(left_); 1054 __ pop(left_);
1053 } 1055 }
1054 1056
1055 __ bind(&load_right); 1057 __ bind(&load_right);
1056 if (!right_info_.IsSmi()) { 1058 if (!right_info_.IsSmi()) {
1057 __ test(right_, Immediate(kSmiTagMask)); 1059 __ test(right_, Immediate(kSmiTagMask));
1058 __ j(zero, &right_smi); 1060 __ j(zero, &right_smi);
1059 if (!right_info_.IsNumber()) { 1061 if (!right_info_.IsNumber()) {
1060 __ cmp(FieldOperand(right_, HeapObject::kMapOffset), 1062 __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
1061 Factory::heap_number_map()); 1063 FACTORY->heap_number_map());
1062 __ j(not_equal, &call_runtime); 1064 __ j(not_equal, &call_runtime);
1063 } 1065 }
1064 __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset)); 1066 __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
1065 if (mode_ == OVERWRITE_RIGHT) { 1067 if (mode_ == OVERWRITE_RIGHT) {
1066 __ mov(dst_, right_); 1068 __ mov(dst_, right_);
1067 } else if (mode_ == NO_OVERWRITE) { 1069 } else if (mode_ == NO_OVERWRITE) {
1068 Label alloc_failure; 1070 Label alloc_failure;
1069 __ push(left_); 1071 __ push(left_);
1070 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure); 1072 __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
1071 __ pop(left_); 1073 __ pop(left_);
(...skipping 73 matching lines...)
1145 } 1147 }
1146 1148
1147 __ bind(&non_smi_input_); 1149 __ bind(&non_smi_input_);
1148 1150
1149 if (rhs_is_constant) { 1151 if (rhs_is_constant) {
1150 __ bind(&constant_rhs_); 1152 __ bind(&constant_rhs_);
1151 // In this case the input is a heap object and it is in the dst_ register. 1153 // In this case the input is a heap object and it is in the dst_ register.
1152 // The left_ and right_ registers have not been initialized yet. 1154 // The left_ and right_ registers have not been initialized yet.
1153 __ mov(right_, Immediate(smi_value_)); 1155 __ mov(right_, Immediate(smi_value_));
1154 __ mov(left_, Operand(dst_)); 1156 __ mov(left_, Operand(dst_));
1155 if (!CpuFeatures::IsSupported(SSE2)) { 1157 if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
1156 __ jmp(entry_label()); 1158 __ jmp(entry_label());
1157 return; 1159 return;
1158 } else { 1160 } else {
1159 CpuFeatures::Scope use_sse2(SSE2); 1161 CpuFeatures::Scope use_sse2(SSE2);
1160 __ JumpIfNotNumber(dst_, left_info_, entry_label()); 1162 __ JumpIfNotNumber(dst_, left_info_, entry_label());
1161 __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label()); 1163 __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
1162 __ SmiUntag(right_); 1164 __ SmiUntag(right_);
1163 } 1165 }
1164 } else { 1166 } else {
1165 // We know we have SSE2 here because otherwise the label is not linked (see 1167 // We know we have SSE2 here because otherwise the label is not linked (see
(...skipping 92 matching lines...)
1258 // Put a heap number pointer in left_. 1260 // Put a heap number pointer in left_.
1259 __ bind(&answer_out_of_range_); 1261 __ bind(&answer_out_of_range_);
1260 SaveRegisters(); 1262 SaveRegisters();
1261 if (mode_ == OVERWRITE_LEFT) { 1263 if (mode_ == OVERWRITE_LEFT) {
1262 __ test(left_, Immediate(kSmiTagMask)); 1264 __ test(left_, Immediate(kSmiTagMask));
1263 __ j(not_zero, &allocation_ok); 1265 __ j(not_zero, &allocation_ok);
1264 } 1266 }
1265 // This trashes right_. 1267 // This trashes right_.
1266 __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2); 1268 __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
1267 __ bind(&allocation_ok); 1269 __ bind(&allocation_ok);
1268 if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) { 1270 if (Isolate::Current()->cpu_features()->IsSupported(SSE2) &&
1271 op_ != Token::SHR) {
1269 CpuFeatures::Scope use_sse2(SSE2); 1272 CpuFeatures::Scope use_sse2(SSE2);
1270 ASSERT(Token::IsBitOp(op_)); 1273 ASSERT(Token::IsBitOp(op_));
1271 // Signed conversion. 1274 // Signed conversion.
1272 __ cvtsi2sd(xmm0, Operand(dst_)); 1275 __ cvtsi2sd(xmm0, Operand(dst_));
1273 __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0); 1276 __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
1274 } else { 1277 } else {
1275 if (op_ == Token::SHR) { 1278 if (op_ == Token::SHR) {
1276 __ push(Immediate(0)); // High word of unsigned value. 1279 __ push(Immediate(0)); // High word of unsigned value.
1277 __ push(dst_); 1280 __ push(dst_);
1278 __ fild_d(Operand(esp, 0)); 1281 __ fild_d(Operand(esp, 0));
(...skipping 221 matching lines...)
1500 return frame_->CallStub(stub, left, right); 1503 return frame_->CallStub(stub, left, right);
1501 } else { 1504 } else {
1502 frame_->Push(left); 1505 frame_->Push(left);
1503 frame_->Push(right); 1506 frame_->Push(right);
1504 return frame_->CallStub(stub, 2); 1507 return frame_->CallStub(stub, 2);
1505 } 1508 }
1506 } 1509 }
1507 1510
1508 1511
1509 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { 1512 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
1510 Object* answer_object = Heap::undefined_value(); 1513 Object* answer_object = HEAP->undefined_value();
1511 switch (op) { 1514 switch (op) {
1512 case Token::ADD: 1515 case Token::ADD:
1513 if (Smi::IsValid(left + right)) { 1516 if (Smi::IsValid(left + right)) {
1514 answer_object = Smi::FromInt(left + right); 1517 answer_object = Smi::FromInt(left + right);
1515 } 1518 }
1516 break; 1519 break;
1517 case Token::SUB: 1520 case Token::SUB:
1518 if (Smi::IsValid(left - right)) { 1521 if (Smi::IsValid(left - right)) {
1519 answer_object = Smi::FromInt(left - right); 1522 answer_object = Smi::FromInt(left - right);
1520 } 1523 }
(...skipping 51 matching lines...)
1572 unsigned_left >>= shift_amount; 1575 unsigned_left >>= shift_amount;
1573 } 1576 }
1574 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left))); 1577 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
1575 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left)); 1578 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
1576 break; 1579 break;
1577 } 1580 }
1578 default: 1581 default:
1579 UNREACHABLE(); 1582 UNREACHABLE();
1580 break; 1583 break;
1581 } 1584 }
1582 if (answer_object == Heap::undefined_value()) { 1585 if (answer_object->IsUndefined()) {
1583 return false; 1586 return false;
1584 } 1587 }
1585 frame_->Push(Handle<Object>(answer_object)); 1588 frame_->Push(Handle<Object>(answer_object));
1586 return true; 1589 return true;
1587 } 1590 }
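
FoldConstantSmis only folds when the result still fits in a smi, which on ia32 carries 31 signed bits (one bit is the tag); anything out of range is left for the runtime. The guard for the ADD case, as a stand-alone sketch:

    #include <cstdint>

    // Sketch: fold left + right only when it fits the 31-bit smi payload.
    bool FoldSmiAdd(int32_t left, int32_t right, int32_t* folded) {
      const int32_t kSmiMin = -(1 << 30);     // lowest representable smi
      const int32_t kSmiMax = (1 << 30) - 1;  // highest representable smi
      int64_t sum = static_cast<int64_t>(left) + right;
      if (sum < kSmiMin || sum > kSmiMax) return false;  // Smi::IsValid fails
      *folded = static_cast<int32_t>(sum);
      return true;
    }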
1588 1591
1589 1592
1590 void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left, 1593 void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
1591 Result* right, 1594 Result* right,
1592 JumpTarget* both_smi) { 1595 JumpTarget* both_smi) {
(...skipping 1428 matching lines...)
3021 dest->false_target()->Branch(zero); 3024 dest->false_target()->Branch(zero);
3022 } else { 3025 } else {
3023 // Do the smi check, then the comparison. 3026 // Do the smi check, then the comparison.
3024 __ test(left_reg, Immediate(kSmiTagMask)); 3027 __ test(left_reg, Immediate(kSmiTagMask));
3025 is_smi.Branch(zero, left_side, right_side); 3028 is_smi.Branch(zero, left_side, right_side);
3026 } 3029 }
3027 3030
3028 // Jump or fall through to here if we are comparing a non-smi to a 3031 // Jump or fall through to here if we are comparing a non-smi to a
3029 // constant smi. If the non-smi is a heap number and this is not 3032 // constant smi. If the non-smi is a heap number and this is not
3030 // a loop condition, inline the floating point code. 3033 // a loop condition, inline the floating point code.
3031 if (!is_loop_condition && CpuFeatures::IsSupported(SSE2)) { 3034 if (!is_loop_condition &&
3035 Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
3032 // Right side is a constant smi and left side has been checked 3036 // Right side is a constant smi and left side has been checked
3033 // not to be a smi. 3037 // not to be a smi.
3034 CpuFeatures::Scope use_sse2(SSE2); 3038 CpuFeatures::Scope use_sse2(SSE2);
3035 JumpTarget not_number; 3039 JumpTarget not_number;
3036 __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset), 3040 __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
3037 Immediate(Factory::heap_number_map())); 3041 Immediate(FACTORY->heap_number_map()));
3038 not_number.Branch(not_equal, left_side); 3042 not_number.Branch(not_equal, left_side);
3039 __ movdbl(xmm1, 3043 __ movdbl(xmm1,
3040 FieldOperand(left_reg, HeapNumber::kValueOffset)); 3044 FieldOperand(left_reg, HeapNumber::kValueOffset));
3041 int value = Smi::cast(*right_val)->value(); 3045 int value = Smi::cast(*right_val)->value();
3042 if (value == 0) { 3046 if (value == 0) {
3043 __ xorpd(xmm0, xmm0); 3047 __ xorpd(xmm0, xmm0);
3044 } else { 3048 } else {
3045 Result temp = allocator()->Allocate(); 3049 Result temp = allocator()->Allocate();
3046 __ mov(temp.reg(), Immediate(value)); 3050 __ mov(temp.reg(), Immediate(value));
3047 __ cvtsi2sd(xmm0, Operand(temp.reg())); 3051 __ cvtsi2sd(xmm0, Operand(temp.reg()));
(...skipping 45 matching lines...)
3093 Result* operand, 3097 Result* operand,
3094 Result* left_side, 3098 Result* left_side,
3095 Result* right_side, 3099 Result* right_side,
3096 JumpTarget* not_numbers) { 3100 JumpTarget* not_numbers) {
3097 // Perform check if operand is not known to be a number. 3101 // Perform check if operand is not known to be a number.
3098 if (!operand->type_info().IsNumber()) { 3102 if (!operand->type_info().IsNumber()) {
3099 Label done; 3103 Label done;
3100 __ test(operand->reg(), Immediate(kSmiTagMask)); 3104 __ test(operand->reg(), Immediate(kSmiTagMask));
3101 __ j(zero, &done); 3105 __ j(zero, &done);
3102 __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset), 3106 __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
3103 Immediate(Factory::heap_number_map())); 3107 Immediate(FACTORY->heap_number_map()));
3104 not_numbers->Branch(not_equal, left_side, right_side, not_taken); 3108 not_numbers->Branch(not_equal, left_side, right_side, not_taken);
3105 __ bind(&done); 3109 __ bind(&done);
3106 } 3110 }
3107 } 3111 }
3108 3112
3109 3113
3110 // Load a comparison operand to the FPU stack. This assumes that the operand has 3114 // Load a comparison operand to the FPU stack. This assumes that the operand has
3111 // already been checked and is a number. 3115 // already been checked and is a number.
3112 static void LoadComparisonOperand(MacroAssembler* masm_, 3116 static void LoadComparisonOperand(MacroAssembler* masm_,
3113 Result* operand) { 3117 Result* operand) {
(...skipping 46 matching lines...)
3160 __ SmiUntag(operand->reg()); 3164 __ SmiUntag(operand->reg());
3161 __ cvtsi2sd(xmm_reg, Operand(operand->reg())); 3165 __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
3162 __ SmiTag(operand->reg()); 3166 __ SmiTag(operand->reg());
3163 } else { 3167 } else {
3164 // Operand type not known, check for smi or heap number. 3168 // Operand type not known, check for smi or heap number.
3165 Label smi; 3169 Label smi;
3166 __ test(operand->reg(), Immediate(kSmiTagMask)); 3170 __ test(operand->reg(), Immediate(kSmiTagMask));
3167 __ j(zero, &smi); 3171 __ j(zero, &smi);
3168 if (!operand->type_info().IsNumber()) { 3172 if (!operand->type_info().IsNumber()) {
3169 __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset), 3173 __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
3170 Immediate(Factory::heap_number_map())); 3174 Immediate(FACTORY->heap_number_map()));
3171 not_numbers->Branch(not_equal, left_side, right_side, taken); 3175 not_numbers->Branch(not_equal, left_side, right_side, taken);
3172 } 3176 }
3173 __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); 3177 __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
3174 __ jmp(&done); 3178 __ jmp(&done);
3175 3179
3176 __ bind(&smi); 3180 __ bind(&smi);
3177 // Convert smi to float and keep the original smi. 3181 // Convert smi to float and keep the original smi.
3178 __ SmiUntag(operand->reg()); 3182 __ SmiUntag(operand->reg());
3179 __ cvtsi2sd(xmm_reg, Operand(operand->reg())); 3183 __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
3180 __ SmiTag(operand->reg()); 3184 __ SmiTag(operand->reg());
3181 __ jmp(&done); 3185 __ jmp(&done);
3182 } 3186 }
3183 __ bind(&done); 3187 __ bind(&done);
3184 } 3188 }
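
Restating LoadComparisonOperandSSE2's contract: a value known to be smi-or-heap-number lands as a double in the requested XMM register, and anything else branches to not_numbers. Its semantic equivalent in plain C++ (a sketch, hypothetical accessors):

    // Sketch: the three outcomes of LoadComparisonOperandSSE2.
    bool LoadAsDouble(Handle<Object> v, double* out) {
      if (v->IsSmi()) {
        *out = Smi::cast(*v)->value();         // the cvtsi2sd path
        return true;
      }
      if (v->IsHeapNumber()) {
        *out = HeapNumber::cast(*v)->value();  // the movdbl path
        return true;
      }
      return false;                            // the not_numbers bailout
    }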
3185 3189
3186 3190
3187 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, 3191 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
3188 Result* right_side, 3192 Result* right_side,
3189 Condition cc, 3193 Condition cc,
3190 ControlDestination* dest) { 3194 ControlDestination* dest) {
3191 ASSERT(left_side->is_register()); 3195 ASSERT(left_side->is_register());
3192 ASSERT(right_side->is_register()); 3196 ASSERT(right_side->is_register());
3193 3197
3194 JumpTarget not_numbers; 3198 JumpTarget not_numbers;
3195 if (CpuFeatures::IsSupported(SSE2)) { 3199 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
3196 CpuFeatures::Scope use_sse2(SSE2); 3200 CpuFeatures::Scope use_sse2(SSE2);
3197 3201
3198 // Load left and right operand into registers xmm0 and xmm1 and compare. 3202 // Load left and right operand into registers xmm0 and xmm1 and compare.
3199 LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side, 3203 LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
3200 &not_numbers); 3204 &not_numbers);
3201 LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side, 3205 LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
3202 &not_numbers); 3206 &not_numbers);
3203 __ ucomisd(xmm0, xmm1); 3207 __ ucomisd(xmm0, xmm1);
3204 } else { 3208 } else {
3205 Label check_right, compare; 3209 Label check_right, compare;
(...skipping 61 matching lines...)
3267 // stack, as receiver and arguments, and calls x. 3271 // stack, as receiver and arguments, and calls x.
3268 // In the implementation comments, we call x the applicand 3272 // In the implementation comments, we call x the applicand
3269 // and y the receiver. 3273 // and y the receiver.
3270 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION); 3274 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
3271 ASSERT(arguments->IsArguments()); 3275 ASSERT(arguments->IsArguments());
3272 3276
3273 // Load applicand.apply onto the stack. This will usually 3277 // Load applicand.apply onto the stack. This will usually
3274 // give us a megamorphic load site. Not super, but it works. 3278 // give us a megamorphic load site. Not super, but it works.
3275 Load(applicand); 3279 Load(applicand);
3276 frame()->Dup(); 3280 frame()->Dup();
3277 Handle<String> name = Factory::LookupAsciiSymbol("apply"); 3281 Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
3278 frame()->Push(name); 3282 frame()->Push(name);
3279 Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET); 3283 Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
3280 __ nop(); 3284 __ nop();
3281 frame()->Push(&answer); 3285 frame()->Push(&answer);
3282 3286
3283 // Load the receiver and the existing arguments object onto the 3287 // Load the receiver and the existing arguments object onto the
3284 // expression stack. Avoid allocating the arguments object here. 3288 // expression stack. Avoid allocating the arguments object here.
3285 Load(receiver); 3289 Load(receiver);
3286 LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF); 3290 LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
3287 3291
(...skipping 11 matching lines...)
3299 // from the stack. This also deals with cases where a local variable 3303 // from the stack. This also deals with cases where a local variable
3300 // named 'arguments' has been introduced. 3304 // named 'arguments' has been introduced.
3301 frame_->Dup(); 3305 frame_->Dup();
3302 Result probe = frame_->Pop(); 3306 Result probe = frame_->Pop();
3303 { VirtualFrame::SpilledScope spilled_scope; 3307 { VirtualFrame::SpilledScope spilled_scope;
3304 Label slow, done; 3308 Label slow, done;
3305 bool try_lazy = true; 3309 bool try_lazy = true;
3306 if (probe.is_constant()) { 3310 if (probe.is_constant()) {
3307 try_lazy = probe.handle()->IsArgumentsMarker(); 3311 try_lazy = probe.handle()->IsArgumentsMarker();
3308 } else { 3312 } else {
3309 __ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker())); 3313 __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
3310 probe.Unuse(); 3314 probe.Unuse();
3311 __ j(not_equal, &slow); 3315 __ j(not_equal, &slow);
3312 } 3316 }
3313 3317
3314 if (try_lazy) { 3318 if (try_lazy) {
3315 Label build_args; 3319 Label build_args;
3316 // Get rid of the arguments object probe. 3320 // Get rid of the arguments object probe.
3317 frame_->Drop(); // Can be called on a spilled frame. 3321 frame_->Drop(); // Can be called on a spilled frame.
3318 // Stack now has 3 elements on it. 3322 // Stack now has 3 elements on it.
3319 // Contents of stack at this point: 3323 // Contents of stack at this point:
(...skipping 15 matching lines...)
3335 __ j(below, &build_args); 3339 __ j(below, &build_args);
3336 3340
3337 // Check that applicand.apply is Function.prototype.apply. 3341 // Check that applicand.apply is Function.prototype.apply.
3338 __ mov(eax, Operand(esp, kPointerSize)); 3342 __ mov(eax, Operand(esp, kPointerSize));
3339 __ test(eax, Immediate(kSmiTagMask)); 3343 __ test(eax, Immediate(kSmiTagMask));
3340 __ j(zero, &build_args); 3344 __ j(zero, &build_args);
3341 __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx); 3345 __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
3342 __ j(not_equal, &build_args); 3346 __ j(not_equal, &build_args);
3343 __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset)); 3347 __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
3344 __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag)); 3348 __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
3345 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply)); 3349 Handle<Code> apply_code(Isolate::Current()->builtins()->builtin(
3350 Builtins::FunctionApply));
3346 __ cmp(Operand(ecx), Immediate(apply_code)); 3351 __ cmp(Operand(ecx), Immediate(apply_code));
3347 __ j(not_equal, &build_args); 3352 __ j(not_equal, &build_args);
3348 3353
3349 // Check that applicand is a function. 3354 // Check that applicand is a function.
3350 __ mov(edi, Operand(esp, 2 * kPointerSize)); 3355 __ mov(edi, Operand(esp, 2 * kPointerSize));
3351 __ test(edi, Immediate(kSmiTagMask)); 3356 __ test(edi, Immediate(kSmiTagMask));
3352 __ j(zero, &build_args); 3357 __ j(zero, &build_args);
3353 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); 3358 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
3354 __ j(not_equal, &build_args); 3359 __ j(not_equal, &build_args);
3355 3360
(...skipping 206 matching lines...)
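
The chain of build_args checks above gates the lazy x.apply(y, arguments) optimization: it is only taken when the loaded 'apply' really is the Function.prototype.apply builtin and the applicand is an ordinary JSFunction; any mismatch falls back to building the arguments object. Condensed into a predicate (a sketch with simplified types, not V8 source):

    // Sketch: when may the arguments-object allocation be skipped?
    bool CanUseLazyApply(Object* applicand, Object* apply_property) {
      return apply_property->IsJSFunction() &&
             // Must be the genuine builtin, not a monkey-patched 'apply'.
             JSFunction::cast(apply_property)->code() ==
                 Isolate::Current()->builtins()->builtin(
                     Builtins::FunctionApply) &&
             applicand->IsJSFunction();
    }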
3562 frame_->EmitPush(Immediate(var->name())); 3567 frame_->EmitPush(Immediate(var->name()));
3563 // Declaration nodes are always introduced in one of two modes. 3568 // Declaration nodes are always introduced in one of two modes.
3564 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST); 3569 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
3565 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY; 3570 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
3566 frame_->EmitPush(Immediate(Smi::FromInt(attr))); 3571 frame_->EmitPush(Immediate(Smi::FromInt(attr)));
3567 // Push initial value, if any. 3572 // Push initial value, if any.
3568 // Note: For variables we must not push an initial value (such as 3573 // Note: For variables we must not push an initial value (such as
3569 // 'undefined') because we may have a (legal) redeclaration and we 3574 // 'undefined') because we may have a (legal) redeclaration and we
3570 // must not destroy the current value. 3575 // must not destroy the current value.
3571 if (node->mode() == Variable::CONST) { 3576 if (node->mode() == Variable::CONST) {
3572 frame_->EmitPush(Immediate(Factory::the_hole_value())); 3577 frame_->EmitPush(Immediate(FACTORY->the_hole_value()));
3573 } else if (node->fun() != NULL) { 3578 } else if (node->fun() != NULL) {
3574 Load(node->fun()); 3579 Load(node->fun());
3575 } else { 3580 } else {
3576 frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value! 3581 frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
3577 } 3582 }
3578 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4); 3583 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
3579 // Ignore the return value (declarations are statements). 3584 // Ignore the return value (declarations are statements).
3580 return; 3585 return;
3581 } 3586 }
3582 3587
3583 ASSERT(!var->is_global()); 3588 ASSERT(!var->is_global());
3584 3589
3585 // If we have a function or a constant, we need to initialize the variable. 3590 // If we have a function or a constant, we need to initialize the variable.
3586 Expression* val = NULL; 3591 Expression* val = NULL;
3587 if (node->mode() == Variable::CONST) { 3592 if (node->mode() == Variable::CONST) {
3588 val = new Literal(Factory::the_hole_value()); 3593 val = new Literal(FACTORY->the_hole_value());
3589 } else { 3594 } else {
3590 val = node->fun(); // NULL if we don't have a function 3595 val = node->fun(); // NULL if we don't have a function
3591 } 3596 }
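
On the hole-value pushes above: const bindings are deliberately pre-initialized with the dedicated 'hole' sentinel rather than undefined, so the runtime can distinguish 'declared but not yet initialized' from 'initialized to undefined'. A read-side sketch of what the sentinel enables (an illustrative assumption about the lookup path, not V8 source):

    // Sketch: reads of a const slot filter out the hole sentinel.
    Handle<Object> ReadConstBinding(Handle<Object> slot_value) {
      if (slot_value->IsTheHole()) {
        // The initializer has not run yet; the read observes undefined.
        return FACTORY->undefined_value();
      }
      return slot_value;
    }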
3592 3597
3593 if (val != NULL) { 3598 if (val != NULL) {
3594 { 3599 {
3595 // Set the initial value. 3600 // Set the initial value.
3596 Reference target(this, node->proxy()); 3601 Reference target(this, node->proxy());
3597 Load(val); 3602 Load(val);
3598 target.SetValue(NOT_CONST_INIT); 3603 target.SetValue(NOT_CONST_INIT);
(...skipping 760 matching lines...)
4359 JumpTarget exit; 4364 JumpTarget exit;
4360 4365
4361 // Get the object to enumerate over (converted to JSObject). 4366 // Get the object to enumerate over (converted to JSObject).
4362 LoadAndSpill(node->enumerable()); 4367 LoadAndSpill(node->enumerable());
4363 4368
4364 // Both SpiderMonkey and kjs ignore null and undefined in contrast 4369 // Both SpiderMonkey and kjs ignore null and undefined in contrast
4365 // to the specification. 12.6.4 mandates a call to ToObject. 4370 // to the specification. 12.6.4 mandates a call to ToObject.
4366 frame_->EmitPop(eax); 4371 frame_->EmitPop(eax);
4367 4372
4368 // eax: value to be iterated over 4373 // eax: value to be iterated over
4369 __ cmp(eax, Factory::undefined_value()); 4374 __ cmp(eax, FACTORY->undefined_value());
4370 exit.Branch(equal); 4375 exit.Branch(equal);
4371 __ cmp(eax, Factory::null_value()); 4376 __ cmp(eax, FACTORY->null_value());
4372 exit.Branch(equal); 4377 exit.Branch(equal);
4373 4378
4374 // Stack layout in body: 4379 // Stack layout in body:
4375 // [iteration counter (smi)] <- slot 0 4380 // [iteration counter (smi)] <- slot 0
4376 // [length of array] <- slot 1 4381 // [length of array] <- slot 1
4377 // [FixedArray] <- slot 2 4382 // [FixedArray] <- slot 2
4378 // [Map or 0] <- slot 3 4383 // [Map or 0] <- slot 3
4379 // [Object] <- slot 4 4384 // [Object] <- slot 4
4380 4385
4381 // Check if enumerable is already a JSObject 4386 // Check if enumerable is already a JSObject
(...skipping 18 matching lines...)
4400 // guarantee cache validity, call the runtime system to check cache 4405 // guarantee cache validity, call the runtime system to check cache
4401 // validity or get the property names in a fixed array. 4406 // validity or get the property names in a fixed array.
4402 JumpTarget call_runtime; 4407 JumpTarget call_runtime;
4403 JumpTarget loop(JumpTarget::BIDIRECTIONAL); 4408 JumpTarget loop(JumpTarget::BIDIRECTIONAL);
4404 JumpTarget check_prototype; 4409 JumpTarget check_prototype;
4405 JumpTarget use_cache; 4410 JumpTarget use_cache;
4406 __ mov(ecx, eax); 4411 __ mov(ecx, eax);
4407 loop.Bind(); 4412 loop.Bind();
4408 // Check that there are no elements. 4413 // Check that there are no elements.
4409 __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset)); 4414 __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
4410 __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array())); 4415 __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
4411 call_runtime.Branch(not_equal); 4416 call_runtime.Branch(not_equal);
4412 // Check that instance descriptors are not empty so that we can 4417 // Check that instance descriptors are not empty so that we can
4413 // check for an enum cache. Leave the map in ebx for the subsequent 4418 // check for an enum cache. Leave the map in ebx for the subsequent
4414 // prototype load. 4419 // prototype load.
4415 __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset)); 4420 __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
4416 __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset)); 4421 __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
4417 __ cmp(Operand(edx), Immediate(Factory::empty_descriptor_array())); 4422 __ cmp(Operand(edx), Immediate(FACTORY->empty_descriptor_array()));
4418 call_runtime.Branch(equal); 4423 call_runtime.Branch(equal);
4419 // Check that there is an enum cache in the non-empty instance 4424 // Check that there is an enum cache in the non-empty instance
4420 // descriptors. This is the case if the next enumeration index 4425 // descriptors. This is the case if the next enumeration index
4421 // field does not contain a smi. 4426 // field does not contain a smi.
4422 __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset)); 4427 __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
4423 __ test(edx, Immediate(kSmiTagMask)); 4428 __ test(edx, Immediate(kSmiTagMask));
4424 call_runtime.Branch(zero); 4429 call_runtime.Branch(zero);
4425 // For all objects but the receiver, check that the cache is empty. 4430 // For all objects but the receiver, check that the cache is empty.
4426 __ cmp(ecx, Operand(eax)); 4431 __ cmp(ecx, Operand(eax));
4427 check_prototype.Branch(equal); 4432 check_prototype.Branch(equal);
4428 __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset)); 4433 __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
4429 __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array())); 4434 __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
4430 call_runtime.Branch(not_equal); 4435 call_runtime.Branch(not_equal);
4431 check_prototype.Bind(); 4436 check_prototype.Bind();
4432 // Load the prototype from the map and loop if non-null. 4437 // Load the prototype from the map and loop if non-null.
4433 __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset)); 4438 __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
4434 __ cmp(Operand(ecx), Immediate(Factory::null_value())); 4439 __ cmp(Operand(ecx), Immediate(FACTORY->null_value()));
4435 loop.Branch(not_equal); 4440 loop.Branch(not_equal);
4436 // The enum cache is valid. Load the map of the object being 4441 // The enum cache is valid. Load the map of the object being
4437 // iterated over and use the cache for the iteration. 4442 // iterated over and use the cache for the iteration.
4438 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); 4443 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
4439 use_cache.Jump(); 4444 use_cache.Jump();
4440 4445
4441 call_runtime.Bind(); 4446 call_runtime.Bind();
4442 // Call the runtime to get the property names for the object. 4447 // Call the runtime to get the property names for the object.
4443 frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call 4448 frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
4444 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1); 4449 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
4445 4450
4446 // If we got a map from the runtime call, we can do a fast 4451 // If we got a map from the runtime call, we can do a fast
4447 // modification check. Otherwise, we got a fixed array, and we have 4452 // modification check. Otherwise, we got a fixed array, and we have
4448 // to do a slow check. 4453 // to do a slow check.
4449 // eax: map or fixed array (result from call to 4454 // eax: map or fixed array (result from call to
4450 // Runtime::kGetPropertyNamesFast) 4455 // Runtime::kGetPropertyNamesFast)
4451 __ mov(edx, Operand(eax)); 4456 __ mov(edx, Operand(eax));
4452 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); 4457 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
4453 __ cmp(ecx, Factory::meta_map()); 4458 __ cmp(ecx, FACTORY->meta_map());
4454 fixed_array.Branch(not_equal); 4459 fixed_array.Branch(not_equal);
4455 4460
4456 use_cache.Bind(); 4461 use_cache.Bind();
4457 // Get enum cache 4462 // Get enum cache
4458 // eax: map (either the result of the call to 4463 // eax: map (either the result of the call to
4459 // Runtime::kGetPropertyNamesFast, or fetched directly from 4464 // Runtime::kGetPropertyNamesFast, or fetched directly from
4460 // the object) 4465 // the object)
4461 __ mov(ecx, Operand(eax)); 4466 __ mov(ecx, Operand(eax));
4462 4467
4463 __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset)); 4468 __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
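Note: the mechanical change running through this chunk (and the rest of the patch) is the isolates merge itself: process-global statics such as Factory::empty_fixed_array() become per-isolate state reached through the current isolate, spelled FACTORY->, HEAP-> and COUNTERS-> below. A minimal sketch of what these macros plausibly expand to (the real definitions live in src/isolate.h; treat the expansions here as assumptions for illustration):

    // Assumed expansions - per-isolate accessors routed through the
    // current (thread-local) isolate:
    #define FACTORY  (v8::internal::Isolate::Current()->factory())
    #define HEAP     (v8::internal::Isolate::Current()->heap())
    #define COUNTERS (v8::internal::Isolate::Current()->counters())

    // A root comparison like the one above then reads the root out of
    // the compiling isolate's heap instead of a process-wide static:
    //   __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));

The same pattern turns Top::k_handler_address into Isolate::k_handler_address further down.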
(...skipping 171 matching lines...)
4635 // After shadowing stops, the original targets are unshadowed and the 4640 // After shadowing stops, the original targets are unshadowed and the
4636 // ShadowTargets represent the formerly shadowing targets. 4641 // ShadowTargets represent the formerly shadowing targets.
4637 bool has_unlinks = false; 4642 bool has_unlinks = false;
4638 for (int i = 0; i < shadows.length(); i++) { 4643 for (int i = 0; i < shadows.length(); i++) {
4639 shadows[i]->StopShadowing(); 4644 shadows[i]->StopShadowing();
4640 has_unlinks = has_unlinks || shadows[i]->is_linked(); 4645 has_unlinks = has_unlinks || shadows[i]->is_linked();
4641 } 4646 }
4642 function_return_is_shadowed_ = function_return_was_shadowed; 4647 function_return_is_shadowed_ = function_return_was_shadowed;
4643 4648
4644 // Get an external reference to the handler address. 4649 // Get an external reference to the handler address.
4645 ExternalReference handler_address(Top::k_handler_address); 4650 ExternalReference handler_address(Isolate::k_handler_address);
4646 4651
4647 // Make sure that there's nothing left on the stack above the 4652 // Make sure that there's nothing left on the stack above the
4648 // handler structure. 4653 // handler structure.
4649 if (FLAG_debug_code) { 4654 if (FLAG_debug_code) {
4650 __ mov(eax, Operand::StaticVariable(handler_address)); 4655 __ mov(eax, Operand::StaticVariable(handler_address));
4651 __ cmp(esp, Operand(eax)); 4656 __ cmp(esp, Operand(eax));
4652 __ Assert(equal, "stack pointer should point to top handler"); 4657 __ Assert(equal, "stack pointer should point to top handler");
4653 } 4658 }
4654 4659
4655 // If we can fall off the end of the try block, unlink from try chain. 4660 // If we can fall off the end of the try block, unlink from try chain.
(...skipping 105 matching lines...)
4761 // After shadowing stops, the original targets are unshadowed and the 4766 // After shadowing stops, the original targets are unshadowed and the
4762 // ShadowTargets represent the formerly shadowing targets. 4767 // ShadowTargets represent the formerly shadowing targets.
4763 int nof_unlinks = 0; 4768 int nof_unlinks = 0;
4764 for (int i = 0; i < shadows.length(); i++) { 4769 for (int i = 0; i < shadows.length(); i++) {
4765 shadows[i]->StopShadowing(); 4770 shadows[i]->StopShadowing();
4766 if (shadows[i]->is_linked()) nof_unlinks++; 4771 if (shadows[i]->is_linked()) nof_unlinks++;
4767 } 4772 }
4768 function_return_is_shadowed_ = function_return_was_shadowed; 4773 function_return_is_shadowed_ = function_return_was_shadowed;
4769 4774
4770 // Get an external reference to the handler address. 4775 // Get an external reference to the handler address.
4771 ExternalReference handler_address(Top::k_handler_address); 4776 ExternalReference handler_address(Isolate::k_handler_address);
4772 4777
4773 // If we can fall off the end of the try block, unlink from the try 4778 // If we can fall off the end of the try block, unlink from the try
4774 // chain and set the state on the frame to FALLING. 4779 // chain and set the state on the frame to FALLING.
4775 if (has_valid_frame()) { 4780 if (has_valid_frame()) {
4776 // The next handler address is on top of the frame. 4781 // The next handler address is on top of the frame.
4777 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); 4782 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4778 frame_->EmitPop(Operand::StaticVariable(handler_address)); 4783 frame_->EmitPop(Operand::StaticVariable(handler_address));
4779 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); 4784 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4780 4785
4781 // Fake a top of stack value (unneeded when FALLING) and set the 4786 // Fake a top of stack value (unneeded when FALLING) and set the
4782 // state in ecx, then jump around the unlink blocks if any. 4787 // state in ecx, then jump around the unlink blocks if any.
4783 frame_->EmitPush(Immediate(Factory::undefined_value())); 4788 frame_->EmitPush(Immediate(FACTORY->undefined_value()));
4784 __ Set(ecx, Immediate(Smi::FromInt(FALLING))); 4789 __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
4785 if (nof_unlinks > 0) { 4790 if (nof_unlinks > 0) {
4786 finally_block.Jump(); 4791 finally_block.Jump();
4787 } 4792 }
4788 } 4793 }
4789 4794
4790 // Generate code to unlink and set the state for the (formerly) 4795 // Generate code to unlink and set the state for the (formerly)
4791 // shadowing targets that have been jumped to. 4796 // shadowing targets that have been jumped to.
4792 for (int i = 0; i < shadows.length(); i++) { 4797 for (int i = 0; i < shadows.length(); i++) {
4793 if (shadows[i]->is_linked()) { 4798 if (shadows[i]->is_linked()) {
(...skipping 22 matching lines...)
4816 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); 4821 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4817 frame_->EmitPop(Operand::StaticVariable(handler_address)); 4822 frame_->EmitPop(Operand::StaticVariable(handler_address));
4818 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); 4823 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4819 4824
4820 if (i == kReturnShadowIndex) { 4825 if (i == kReturnShadowIndex) {
4821 // If this target shadowed the function return, materialize 4826 // If this target shadowed the function return, materialize
4822 // the return value on the stack. 4827 // the return value on the stack.
4823 frame_->EmitPush(eax); 4828 frame_->EmitPush(eax);
4824 } else { 4829 } else {
4825 // Fake TOS for targets that shadowed breaks and continues. 4830 // Fake TOS for targets that shadowed breaks and continues.
4826 frame_->EmitPush(Immediate(Factory::undefined_value())); 4831 frame_->EmitPush(Immediate(FACTORY->undefined_value()));
4827 } 4832 }
4828 __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i))); 4833 __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
4829 if (--nof_unlinks > 0) { 4834 if (--nof_unlinks > 0) {
4830 // If this is not the last unlink block, jump around the next. 4835 // If this is not the last unlink block, jump around the next.
4831 finally_block.Jump(); 4836 finally_block.Jump();
4832 } 4837 }
4833 } 4838 }
4834 } 4839 }
4835 4840
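The unlink blocks above encode how control left the try block as a small integer in ecx, pushed alongside a real or fake top-of-stack value, so the finally block can re-dispatch afterwards. A sketch of the encoding, inferred from the constants visible in this code (FALLING and JUMPING appear above; the exact enum shape is an assumption):

    // Illustrative state encoding for try/finally exits:
    enum TryFinallyState {
      FALLING,   // fell off the end of the try block; TOS is a fake
                 // undefined placeholder
      JUMPING    // JUMPING + i: left via shadowed jump target i, where
                 // i == kReturnShadowIndex means 'return' and TOS holds
                 // the materialized return value (eax)
    };
    // Materialized as a smi before entering the finally code:
    //   __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));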
4836 // --- Finally block --- 4841 // --- Finally block ---
(...skipping 87 matching lines...)
4924 FastNewClosureStub stub( 4929 FastNewClosureStub stub(
4925 function_info->strict_mode() ? kStrictMode : kNonStrictMode); 4930 function_info->strict_mode() ? kStrictMode : kNonStrictMode);
4926 frame()->EmitPush(Immediate(function_info)); 4931 frame()->EmitPush(Immediate(function_info));
4927 return frame()->CallStub(&stub, 1); 4932 return frame()->CallStub(&stub, 1);
4928 } else { 4933 } else {
4929 // Call the runtime to instantiate the function based on the 4934 // Call the runtime to instantiate the function based on the
4930 // shared function info. 4935 // shared function info.
4931 frame()->EmitPush(esi); 4936 frame()->EmitPush(esi);
4932 frame()->EmitPush(Immediate(function_info)); 4937 frame()->EmitPush(Immediate(function_info));
4933 frame()->EmitPush(Immediate(pretenure 4938 frame()->EmitPush(Immediate(pretenure
4934 ? Factory::true_value() 4939 ? FACTORY->true_value()
4935 : Factory::false_value())); 4940 : FACTORY->false_value()));
4936 return frame()->CallRuntime(Runtime::kNewClosure, 3); 4941 return frame()->CallRuntime(Runtime::kNewClosure, 3);
4937 } 4942 }
4938 } 4943 }
4939 4944
4940 4945
4941 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { 4946 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
4942 Comment cmnt(masm_, "[ FunctionLiteral"); 4947 Comment cmnt(masm_, "[ FunctionLiteral");
4943 ASSERT(!in_safe_int32_mode()); 4948 ASSERT(!in_safe_int32_mode());
4944 // Build the function info and instantiate it. 4949 // Build the function info and instantiate it.
4945 Handle<SharedFunctionInfo> function_info = 4950 Handle<SharedFunctionInfo> function_info =
(...skipping 87 matching lines...)
5033 // Const slots may contain 'the hole' value (the constant hasn't been 5038 // Const slots may contain 'the hole' value (the constant hasn't been
5034 // initialized yet) which needs to be converted into the 'undefined' 5039 // initialized yet) which needs to be converted into the 'undefined'
5035 // value. 5040 // value.
5036 // 5041 //
5037 // We currently spill the virtual frame because constants use the 5042 // We currently spill the virtual frame because constants use the
5038 // potentially unsafe direct-frame access of SlotOperand. 5043 // potentially unsafe direct-frame access of SlotOperand.
5039 VirtualFrame::SpilledScope spilled_scope; 5044 VirtualFrame::SpilledScope spilled_scope;
5040 Comment cmnt(masm_, "[ Load const"); 5045 Comment cmnt(masm_, "[ Load const");
5041 Label exit; 5046 Label exit;
5042 __ mov(ecx, SlotOperand(slot, ecx)); 5047 __ mov(ecx, SlotOperand(slot, ecx));
5043 __ cmp(ecx, Factory::the_hole_value()); 5048 __ cmp(ecx, FACTORY->the_hole_value());
5044 __ j(not_equal, &exit); 5049 __ j(not_equal, &exit);
5045 __ mov(ecx, Factory::undefined_value()); 5050 __ mov(ecx, FACTORY->undefined_value());
5046 __ bind(&exit); 5051 __ bind(&exit);
5047 frame()->EmitPush(ecx); 5052 frame()->EmitPush(ecx);
5048 5053
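Const slots are pre-initialized with the hole sentinel, so a read that runs before the initializer must translate the hole into undefined - that is what the cmp/mov pair above does. The semantics, as a hedged C++ sketch (handle helpers assumed as in the rest of this patch):

    // Sketch of the load-const semantics implemented above:
    Handle<Object> LoadConstSlot(Handle<Object> value) {
      if (value.is_identical_to(FACTORY->the_hole_value())) {
        // Constant not yet initialized: reads observe undefined.
        return FACTORY->undefined_value();
      }
      return value;
    }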
5049 } else if (slot->type() == Slot::PARAMETER) { 5054 } else if (slot->type() == Slot::PARAMETER) {
5050 frame()->PushParameterAt(slot->index()); 5055 frame()->PushParameterAt(slot->index());
5051 5056
5052 } else if (slot->type() == Slot::LOCAL) { 5057 } else if (slot->type() == Slot::LOCAL) {
5053 frame()->PushLocalAt(slot->index()); 5058 frame()->PushLocalAt(slot->index());
5054 5059
5055 } else { 5060 } else {
(...skipping 29 matching lines...)
5085 result = StoreArgumentsObject(false); 5090 result = StoreArgumentsObject(false);
5086 } 5091 }
5087 frame()->Push(&result); 5092 frame()->Push(&result);
5088 return; 5093 return;
5089 } 5094 }
5090 ASSERT(result.is_register()); 5095 ASSERT(result.is_register());
5091 // The loaded value is in a register. If it is the sentinel that 5096 // The loaded value is in a register. If it is the sentinel that
5092 // indicates that we haven't loaded the arguments object yet, we 5097 // indicates that we haven't loaded the arguments object yet, we
5093 // need to do it now. 5098 // need to do it now.
5094 JumpTarget exit; 5099 JumpTarget exit;
5095 __ cmp(Operand(result.reg()), Immediate(Factory::arguments_marker())); 5100 __ cmp(Operand(result.reg()), Immediate(FACTORY->arguments_marker()));
5096 frame()->Push(&result); 5101 frame()->Push(&result);
5097 exit.Branch(not_equal); 5102 exit.Branch(not_equal);
5098 5103
5099 result = StoreArgumentsObject(false); 5104 result = StoreArgumentsObject(false);
5100 frame()->SetElementAt(0, &result); 5105 frame()->SetElementAt(0, &result);
5101 result.Unuse(); 5106 result.Unuse();
5102 exit.Bind(); 5107 exit.Bind();
5103 return; 5108 return;
5104 } 5109 }
5105 5110
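The arguments_marker comparison above is lazy allocation: the arguments slot initially holds a dedicated sentinel, and the real arguments object is only built on first observed use. The shape of the logic, as an illustrative sketch (StoreArgumentsObject stands in loosely for the CodeGenerator method of the same name):

    // Illustrative lazy-initialization shape of the code above:
    Handle<Object> LoadArgumentsObject(Handle<Object> slot_value) {
      if (!slot_value.is_identical_to(FACTORY->arguments_marker())) {
        return slot_value;              // already materialized
      }
      return StoreArgumentsObject();    // build it on first use
    }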
(...skipping 33 matching lines...)
5139 if (s != NULL && s->is_eval_scope()) { 5144 if (s != NULL && s->is_eval_scope()) {
5140 // Walk up the context chain. There is no frame effect so it is 5145 // Walk up the context chain. There is no frame effect so it is
5141 // safe to use raw labels here. 5146 // safe to use raw labels here.
5142 Label next, fast; 5147 Label next, fast;
5143 if (!context.is(tmp.reg())) { 5148 if (!context.is(tmp.reg())) {
5144 __ mov(tmp.reg(), context); 5149 __ mov(tmp.reg(), context);
5145 } 5150 }
5146 __ bind(&next); 5151 __ bind(&next);
5147 // Terminate at global context. 5152 // Terminate at global context.
5148 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset), 5153 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
5149 Immediate(Factory::global_context_map())); 5154 Immediate(FACTORY->global_context_map()));
5150 __ j(equal, &fast); 5155 __ j(equal, &fast);
5151 // Check that extension is NULL. 5156 // Check that extension is NULL.
5152 __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0)); 5157 __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
5153 slow->Branch(not_equal, not_taken); 5158 slow->Branch(not_equal, not_taken);
5154 // Load next context in chain. 5159 // Load next context in chain.
5155 __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX)); 5160 __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
5156 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); 5161 __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
5157 __ jmp(&next); 5162 __ jmp(&next);
5158 __ bind(&fast); 5163 __ bind(&fast);
5159 } 5164 }
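The raw-label loop above is the fast path for names in eval scopes: hop outward through the context chain, bail to the slow path as soon as a context carries an extension object (which could dynamically shadow the name), and stop once the global context is reached. In sketch form (the field accessors are illustrative, not the emitted code):

    // Sketch of the emitted loop:
    Context* ctx = current_context;
    while (ctx->map() != global_context_map) {   // terminate at global
      if (ctx->extension() != NULL) goto slow;   // possible shadowing
      ctx = ctx->closure()->context();           // next outer context
    }
    // fast: the name can be looked up on the global object directly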
(...skipping 39 matching lines...)
5199 if (potential_slot != NULL) { 5204 if (potential_slot != NULL) {
5200 // Generate fast case for locals that rewrite to slots. 5205 // Generate fast case for locals that rewrite to slots.
5201 // Allocate a fresh register to use as a temp in 5206 // Allocate a fresh register to use as a temp in
5202 // ContextSlotOperandCheckExtensions and to hold the result 5207 // ContextSlotOperandCheckExtensions and to hold the result
5203 // value. 5208 // value.
5204 *result = allocator()->Allocate(); 5209 *result = allocator()->Allocate();
5205 ASSERT(result->is_valid()); 5210 ASSERT(result->is_valid());
5206 __ mov(result->reg(), 5211 __ mov(result->reg(),
5207 ContextSlotOperandCheckExtensions(potential_slot, *result, slow)); 5212 ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
5208 if (potential_slot->var()->mode() == Variable::CONST) { 5213 if (potential_slot->var()->mode() == Variable::CONST) {
5209 __ cmp(result->reg(), Factory::the_hole_value()); 5214 __ cmp(result->reg(), FACTORY->the_hole_value());
5210 done->Branch(not_equal, result); 5215 done->Branch(not_equal, result);
5211 __ mov(result->reg(), Factory::undefined_value()); 5216 __ mov(result->reg(), FACTORY->undefined_value());
5212 } 5217 }
5213 done->Jump(result); 5218 done->Jump(result);
5214 } else if (rewrite != NULL) { 5219 } else if (rewrite != NULL) {
5215 // Generate fast case for calls of an argument function. 5220 // Generate fast case for calls of an argument function.
5216 Property* property = rewrite->AsProperty(); 5221 Property* property = rewrite->AsProperty();
5217 if (property != NULL) { 5222 if (property != NULL) {
5218 VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); 5223 VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
5219 Literal* key_literal = property->key()->AsLiteral(); 5224 Literal* key_literal = property->key()->AsLiteral();
5220 if (obj_proxy != NULL && 5225 if (obj_proxy != NULL &&
5221 key_literal != NULL && 5226 key_literal != NULL &&
(...skipping 66 matching lines...)
5288 // Only the first const initialization must be executed (the slot 5293 // Only the first const initialization must be executed (the slot
5289 // still contains 'the hole' value). When the assignment is executed, 5294 // still contains 'the hole' value). When the assignment is executed,
5290 // the code is identical to a normal store (see below). 5295 // the code is identical to a normal store (see below).
5291 // 5296 //
5292 // We spill the frame in the code below because the direct-frame 5297 // We spill the frame in the code below because the direct-frame
5293 // access of SlotOperand is potentially unsafe with an unspilled 5298 // access of SlotOperand is potentially unsafe with an unspilled
5294 // frame. 5299 // frame.
5295 VirtualFrame::SpilledScope spilled_scope; 5300 VirtualFrame::SpilledScope spilled_scope;
5296 Comment cmnt(masm_, "[ Init const"); 5301 Comment cmnt(masm_, "[ Init const");
5297 __ mov(ecx, SlotOperand(slot, ecx)); 5302 __ mov(ecx, SlotOperand(slot, ecx));
5298 __ cmp(ecx, Factory::the_hole_value()); 5303 __ cmp(ecx, FACTORY->the_hole_value());
5299 exit.Branch(not_equal); 5304 exit.Branch(not_equal);
5300 } 5305 }
5301 5306
5302 // We must execute the store. Storing a variable must keep the (new) 5307 // We must execute the store. Storing a variable must keep the (new)
5303 // value on the stack. This is necessary for compiling assignment 5308 // value on the stack. This is necessary for compiling assignment
5304 // expressions. 5309 // expressions.
5305 // 5310 //
5306 // Note: We will reach here even with slot->var()->mode() == 5311 // Note: We will reach here even with slot->var()->mode() ==
5307 // Variable::CONST because of const declarations which will initialize 5312 // Variable::CONST because of const declarations which will initialize
5308 // consts to 'the hole' value and by doing so, end up calling this code. 5313 // consts to 'the hole' value and by doing so, end up calling this code.
(...skipping 150 matching lines...)
5459 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax); 5464 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
5460 } 5465 }
5461 5466
5462 5467
5463 class DeferredAllocateInNewSpace: public DeferredCode { 5468 class DeferredAllocateInNewSpace: public DeferredCode {
5464 public: 5469 public:
5465 DeferredAllocateInNewSpace(int size, 5470 DeferredAllocateInNewSpace(int size,
5466 Register target, 5471 Register target,
5467 int registers_to_save = 0) 5472 int registers_to_save = 0)
5468 : size_(size), target_(target), registers_to_save_(registers_to_save) { 5473 : size_(size), target_(target), registers_to_save_(registers_to_save) {
5469 ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace()); 5474 ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
5470 ASSERT_EQ(0, registers_to_save & target.bit()); 5475 ASSERT_EQ(0, registers_to_save & target.bit());
5471 set_comment("[ DeferredAllocateInNewSpace"); 5476 set_comment("[ DeferredAllocateInNewSpace");
5472 } 5477 }
5473 void Generate(); 5478 void Generate();
5474 5479
5475 private: 5480 private:
5476 int size_; 5481 int size_;
5477 Register target_; 5482 Register target_;
5478 int registers_to_save_; 5483 int registers_to_save_;
5479 }; 5484 };
(...skipping 40 matching lines...)
5520 Result boilerplate = allocator_->Allocate(); 5525 Result boilerplate = allocator_->Allocate();
5521 ASSERT(boilerplate.is_valid()); 5526 ASSERT(boilerplate.is_valid());
5522 int literal_offset = 5527 int literal_offset =
5523 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; 5528 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
5524 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset)); 5529 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
5525 5530
5526 // Check whether we need to materialize the RegExp object. If so, 5531 // Check whether we need to materialize the RegExp object. If so,
5527 // jump to the deferred code passing the literals array. 5532 // jump to the deferred code passing the literals array.
5528 DeferredRegExpLiteral* deferred = 5533 DeferredRegExpLiteral* deferred =
5529 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node); 5534 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
5530 __ cmp(boilerplate.reg(), Factory::undefined_value()); 5535 __ cmp(boilerplate.reg(), FACTORY->undefined_value());
5531 deferred->Branch(equal); 5536 deferred->Branch(equal);
5532 deferred->BindExit(); 5537 deferred->BindExit();
5533 5538
5534 // Register of boilerplate contains RegExp object. 5539 // Register of boilerplate contains RegExp object.
5535 5540
5536 Result tmp = allocator()->Allocate(); 5541 Result tmp = allocator()->Allocate();
5537 ASSERT(tmp.is_valid()); 5542 ASSERT(tmp.is_valid());
5538 5543
5539 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; 5544 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5540 5545
(...skipping 137 matching lines...)
5678 5683
5679 // Load the literals array of the function. 5684 // Load the literals array of the function.
5680 __ mov(literals.reg(), 5685 __ mov(literals.reg(),
5681 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); 5686 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
5682 5687
5683 frame_->Push(&literals); 5688 frame_->Push(&literals);
5684 frame_->Push(Smi::FromInt(node->literal_index())); 5689 frame_->Push(Smi::FromInt(node->literal_index()));
5685 frame_->Push(node->constant_elements()); 5690 frame_->Push(node->constant_elements());
5686 int length = node->values()->length(); 5691 int length = node->values()->length();
5687 Result clone; 5692 Result clone;
5688 if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) { 5693 if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
5689 FastCloneShallowArrayStub stub( 5694 FastCloneShallowArrayStub stub(
5690 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); 5695 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
5691 clone = frame_->CallStub(&stub, 3); 5696 clone = frame_->CallStub(&stub, 3);
5692 __ IncrementCounter(&Counters::cow_arrays_created_stub, 1); 5697 __ IncrementCounter(COUNTERS->cow_arrays_created_stub(), 1);
5693 } else if (node->depth() > 1) { 5698 } else if (node->depth() > 1) {
5694 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3); 5699 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
5695 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { 5700 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
5696 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); 5701 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
5697 } else { 5702 } else {
5698 FastCloneShallowArrayStub stub( 5703 FastCloneShallowArrayStub stub(
5699 FastCloneShallowArrayStub::CLONE_ELEMENTS, length); 5704 FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
5700 clone = frame_->CallStub(&stub, 3); 5705 clone = frame_->CallStub(&stub, 3);
5701 } 5706 }
5702 frame_->Push(&clone); 5707 frame_->Push(&clone);
(...skipping 382 matching lines...)
6085 6090
6086 // In a call to eval, we first call %ResolvePossiblyDirectEval to 6091 // In a call to eval, we first call %ResolvePossiblyDirectEval to
6087 // resolve the function we need to call and the receiver of the 6092 // resolve the function we need to call and the receiver of the
6088 // call. Then we call the resolved function using the given 6093 // call. Then we call the resolved function using the given
6089 // arguments. 6094 // arguments.
6090 6095
6091 // Prepare the stack for the call to the resolved function. 6096 // Prepare the stack for the call to the resolved function.
6092 Load(function); 6097 Load(function);
6093 6098
6094 // Allocate a frame slot for the receiver. 6099 // Allocate a frame slot for the receiver.
6095 frame_->Push(Factory::undefined_value()); 6100 frame_->Push(FACTORY->undefined_value());
6096 6101
6097 // Load the arguments. 6102 // Load the arguments.
6098 int arg_count = args->length(); 6103 int arg_count = args->length();
6099 for (int i = 0; i < arg_count; i++) { 6104 for (int i = 0; i < arg_count; i++) {
6100 Load(args->at(i)); 6105 Load(args->at(i));
6101 frame_->SpillTop(); 6106 frame_->SpillTop();
6102 } 6107 }
6103 6108
6104 // Result to hold the result of the function resolution and the 6109 // Result to hold the result of the function resolution and the
6105 // final result of the eval call. 6110 // final result of the eval call.
(...skipping 11 matching lines...)
6117 // ResolvePossiblyDirectEvalNoLookup by pushing the loaded 6122 // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
6118 // function, the first argument to the eval call and the 6123 // function, the first argument to the eval call and the
6119 // receiver. 6124 // receiver.
6120 Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(), 6125 Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
6121 NOT_INSIDE_TYPEOF, 6126 NOT_INSIDE_TYPEOF,
6122 &slow); 6127 &slow);
6123 frame_->Push(&fun); 6128 frame_->Push(&fun);
6124 if (arg_count > 0) { 6129 if (arg_count > 0) {
6125 frame_->PushElementAt(arg_count); 6130 frame_->PushElementAt(arg_count);
6126 } else { 6131 } else {
6127 frame_->Push(Factory::undefined_value()); 6132 frame_->Push(FACTORY->undefined_value());
6128 } 6133 }
6129 frame_->PushParameterAt(-1); 6134 frame_->PushParameterAt(-1);
6130 6135
6131 // Push the strict mode flag. 6136 // Push the strict mode flag.
6132 frame_->Push(Smi::FromInt(strict_mode_flag())); 6137 frame_->Push(Smi::FromInt(strict_mode_flag()));
6133 6138
6134 // Resolve the call. 6139 // Resolve the call.
6135 result = 6140 result =
6136 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4); 6141 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
6137 6142
6138 done.Jump(&result); 6143 done.Jump(&result);
6139 slow.Bind(); 6144 slow.Bind();
6140 } 6145 }
6141 6146
6142 // Prepare the stack for the call to ResolvePossiblyDirectEval by 6147 // Prepare the stack for the call to ResolvePossiblyDirectEval by
6143 // pushing the loaded function, the first argument to the eval 6148 // pushing the loaded function, the first argument to the eval
6144 // call and the receiver. 6149 // call and the receiver.
6145 frame_->PushElementAt(arg_count + 1); 6150 frame_->PushElementAt(arg_count + 1);
6146 if (arg_count > 0) { 6151 if (arg_count > 0) {
6147 frame_->PushElementAt(arg_count); 6152 frame_->PushElementAt(arg_count);
6148 } else { 6153 } else {
6149 frame_->Push(Factory::undefined_value()); 6154 frame_->Push(FACTORY->undefined_value());
6150 } 6155 }
6151 frame_->PushParameterAt(-1); 6156 frame_->PushParameterAt(-1);
6152 6157
6153 // Push the strict mode flag. 6158 // Push the strict mode flag.
6154 frame_->Push(Smi::FromInt(strict_mode_flag())); 6159 frame_->Push(Smi::FromInt(strict_mode_flag()));
6155 6160
6156 // Resolve the call. 6161 // Resolve the call.
6157 result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); 6162 result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
6158 6163
6159 // If we generated fast-case code bind the jump-target where fast 6164 // If we generated fast-case code bind the jump-target where fast
(...skipping 273 matching lines...)
6433 // 2 (array): Arguments to the format string. 6438 // 2 (array): Arguments to the format string.
6434 ASSERT_EQ(args->length(), 3); 6439 ASSERT_EQ(args->length(), 3);
6435 #ifdef ENABLE_LOGGING_AND_PROFILING 6440 #ifdef ENABLE_LOGGING_AND_PROFILING
6436 if (ShouldGenerateLog(args->at(0))) { 6441 if (ShouldGenerateLog(args->at(0))) {
6437 Load(args->at(1)); 6442 Load(args->at(1));
6438 Load(args->at(2)); 6443 Load(args->at(2));
6439 frame_->CallRuntime(Runtime::kLog, 2); 6444 frame_->CallRuntime(Runtime::kLog, 2);
6440 } 6445 }
6441 #endif 6446 #endif
6442 // Finally, we're expected to leave a value on the top of the stack. 6447 // Finally, we're expected to leave a value on the top of the stack.
6443 frame_->Push(Factory::undefined_value()); 6448 frame_->Push(FACTORY->undefined_value());
6444 } 6449 }
6445 6450
6446 6451
6447 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { 6452 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
6448 ASSERT(args->length() == 1); 6453 ASSERT(args->length() == 1);
6449 Load(args->at(0)); 6454 Load(args->at(0));
6450 Result value = frame_->Pop(); 6455 Result value = frame_->Pop();
6451 value.ToRegister(); 6456 value.ToRegister();
6452 ASSERT(value.is_valid()); 6457 ASSERT(value.is_valid());
6453 __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask)); 6458 __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
(...skipping 22 matching lines...)
6476 return &char_code_at_generator_; 6481 return &char_code_at_generator_;
6477 } 6482 }
6478 6483
6479 virtual void Generate() { 6484 virtual void Generate() {
6480 VirtualFrameRuntimeCallHelper call_helper(frame_state()); 6485 VirtualFrameRuntimeCallHelper call_helper(frame_state());
6481 char_code_at_generator_.GenerateSlow(masm(), call_helper); 6486 char_code_at_generator_.GenerateSlow(masm(), call_helper);
6482 6487
6483 __ bind(&need_conversion_); 6488 __ bind(&need_conversion_);
6484 // Move the undefined value into the result register, which will 6489 // Move the undefined value into the result register, which will
6485 // trigger conversion. 6490 // trigger conversion.
6486 __ Set(result_, Immediate(Factory::undefined_value())); 6491 __ Set(result_, Immediate(FACTORY->undefined_value()));
6487 __ jmp(exit_label()); 6492 __ jmp(exit_label());
6488 6493
6489 __ bind(&index_out_of_range_); 6494 __ bind(&index_out_of_range_);
6490 // When the index is out of range, the spec requires us to return 6495 // When the index is out of range, the spec requires us to return
6491 // NaN. 6496 // NaN.
6492 __ Set(result_, Immediate(Factory::nan_value())); 6497 __ Set(result_, Immediate(FACTORY->nan_value()));
6493 __ jmp(exit_label()); 6498 __ jmp(exit_label());
6494 } 6499 }
6495 6500
6496 private: 6501 private:
6497 Register result_; 6502 Register result_;
6498 6503
6499 Label need_conversion_; 6504 Label need_conversion_;
6500 Label index_out_of_range_; 6505 Label index_out_of_range_;
6501 6506
6502 StringCharCodeAtGenerator char_code_at_generator_; 6507 StringCharCodeAtGenerator char_code_at_generator_;
(...skipping 102 matching lines...)
6605 6610
6606 __ bind(&need_conversion_); 6611 __ bind(&need_conversion_);
6607 // Move smi zero into the result register, which will trigger 6612 // Move smi zero into the result register, which will trigger
6608 // conversion. 6613 // conversion.
6609 __ Set(result_, Immediate(Smi::FromInt(0))); 6614 __ Set(result_, Immediate(Smi::FromInt(0)));
6610 __ jmp(exit_label()); 6615 __ jmp(exit_label());
6611 6616
6612 __ bind(&index_out_of_range_); 6617 __ bind(&index_out_of_range_);
6613 // When the index is out of range, the spec requires us to return 6618 // When the index is out of range, the spec requires us to return
6614 // the empty string. 6619 // the empty string.
6615 __ Set(result_, Immediate(Factory::empty_string())); 6620 __ Set(result_, Immediate(FACTORY->empty_string()));
6616 __ jmp(exit_label()); 6621 __ jmp(exit_label());
6617 } 6622 }
6618 6623
6619 private: 6624 private:
6620 Register result_; 6625 Register result_;
6621 6626
6622 Label need_conversion_; 6627 Label need_conversion_;
6623 Label index_out_of_range_; 6628 Label index_out_of_range_;
6624 6629
6625 StringCharAtGenerator char_at_generator_; 6630 StringCharAtGenerator char_at_generator_;
(...skipping 97 matching lines...)
6723 6728
6724 // Check that the array has fast elements. 6729 // Check that the array has fast elements.
6725 __ test_b(FieldOperand(scratch, Map::kBitField2Offset), 6730 __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
6726 1 << Map::kHasFastElements); 6731 1 << Map::kHasFastElements);
6727 __ j(zero, &bailout); 6732 __ j(zero, &bailout);
6728 6733
6729 // If the array has length zero, return the empty string. 6734 // If the array has length zero, return the empty string.
6730 __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset)); 6735 __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
6731 __ sar(array_length, 1); 6736 __ sar(array_length, 1);
6732 __ j(not_zero, &non_trivial_array); 6737 __ j(not_zero, &non_trivial_array);
6733 __ mov(result_operand, Factory::empty_string()); 6738 __ mov(result_operand, FACTORY->empty_string());
6734 __ jmp(&done); 6739 __ jmp(&done);
6735 6740
6736 // Save the array length. 6741 // Save the array length.
6737 __ bind(&non_trivial_array); 6742 __ bind(&non_trivial_array);
6738 __ mov(array_length_operand, array_length); 6743 __ mov(array_length_operand, array_length);
6739 6744
6740 // Save the FixedArray containing array's elements. 6745 // Save the FixedArray containing array's elements.
6741 // End of array's live range. 6746 // End of array's live range.
6742 elements = array; 6747 elements = array;
6743 __ mov(elements, FieldOperand(array, JSArray::kElementsOffset)); 6748 __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
(...skipping 190 matching lines...)
6934 FieldOperand(string, SeqAsciiString::kHeaderSize)); 6939 FieldOperand(string, SeqAsciiString::kHeaderSize));
6935 __ CopyBytes(string, result_pos, string_length, scratch); 6940 __ CopyBytes(string, result_pos, string_length, scratch);
6936 __ add(Operand(index), Immediate(1)); 6941 __ add(Operand(index), Immediate(1));
6937 6942
6938 __ cmp(index, array_length_operand); 6943 __ cmp(index, array_length_operand);
6939 __ j(less, &loop_3); // End while (index < length). 6944 __ j(less, &loop_3); // End while (index < length).
6940 __ jmp(&done); 6945 __ jmp(&done);
6941 6946
6942 6947
6943 __ bind(&bailout); 6948 __ bind(&bailout);
6944 __ mov(result_operand, Factory::undefined_value()); 6949 __ mov(result_operand, FACTORY->undefined_value());
6945 __ bind(&done); 6950 __ bind(&done);
6946 __ mov(eax, result_operand); 6951 __ mov(eax, result_operand);
6947 // Drop temp values from the stack, and restore context register. 6952 // Drop temp values from the stack, and restore context register.
6948 __ add(Operand(esp), Immediate(2 * kPointerSize)); 6953 __ add(Operand(esp), Immediate(2 * kPointerSize));
6949 6954
6950 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 6955 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
6951 frame_->Drop(1); 6956 frame_->Drop(1);
6952 frame_->Push(&array_result); 6957 frame_->Push(&array_result);
6953 } 6958 }
6954 6959
(...skipping 20 matching lines...)
6975 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { 6980 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
6976 // This generates a fast version of: 6981 // This generates a fast version of:
6977 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') 6982 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
6978 ASSERT(args->length() == 1); 6983 ASSERT(args->length() == 1);
6979 Load(args->at(0)); 6984 Load(args->at(0));
6980 Result obj = frame_->Pop(); 6985 Result obj = frame_->Pop();
6981 obj.ToRegister(); 6986 obj.ToRegister();
6982 6987
6983 __ test(obj.reg(), Immediate(kSmiTagMask)); 6988 __ test(obj.reg(), Immediate(kSmiTagMask));
6984 destination()->false_target()->Branch(zero); 6989 destination()->false_target()->Branch(zero);
6985 __ cmp(obj.reg(), Factory::null_value()); 6990 __ cmp(obj.reg(), FACTORY->null_value());
6986 destination()->true_target()->Branch(equal); 6991 destination()->true_target()->Branch(equal);
6987 6992
6988 Result map = allocator()->Allocate(); 6993 Result map = allocator()->Allocate();
6989 ASSERT(map.is_valid()); 6994 ASSERT(map.is_valid());
6990 __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset)); 6995 __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
6991 // Undetectable objects behave like undefined when tested with typeof. 6996 // Undetectable objects behave like undefined when tested with typeof.
6992 __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset), 6997 __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
6993 1 << Map::kIsUndetectable); 6998 1 << Map::kIsUndetectable);
6994 destination()->false_target()->Branch(not_zero); 6999 destination()->false_target()->Branch(not_zero);
6995 // Do a range test for JSObject type. We can't use 7000 // Do a range test for JSObject type. We can't use
(...skipping 50 matching lines...)
7046 7051
7047 // Check that map is loaded as expected. 7052 // Check that map is loaded as expected.
7048 if (FLAG_debug_code) { 7053 if (FLAG_debug_code) {
7049 __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset)); 7054 __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
7050 __ Assert(equal, "Map not in expected register"); 7055 __ Assert(equal, "Map not in expected register");
7051 } 7056 }
7052 7057
7053 // Check for fast case object. Generate false result for slow case object. 7058 // Check for fast case object. Generate false result for slow case object.
7054 __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset)); 7059 __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
7055 __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset)); 7060 __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
7056 __ cmp(scratch1_, Factory::hash_table_map()); 7061 __ cmp(scratch1_, FACTORY->hash_table_map());
7057 __ j(equal, &false_result); 7062 __ j(equal, &false_result);
7058 7063
7059 // Look for valueOf symbol in the descriptor array, and indicate false if 7064 // Look for valueOf symbol in the descriptor array, and indicate false if
7060 // found. The type is not checked, so if it is a transition it is a false 7065 // found. The type is not checked, so if it is a transition it is a false
7061 // negative. 7066 // negative.
7062 __ mov(map_result_, 7067 __ mov(map_result_,
7063 FieldOperand(map_result_, Map::kInstanceDescriptorsOffset)); 7068 FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
7064 __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset)); 7069 __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
7065 // map_result_: descriptor array 7070 // map_result_: descriptor array
7066 // scratch1_: length of descriptor array 7071 // scratch1_: length of descriptor array
7067 // Calculate the end of the descriptor array. 7072 // Calculate the end of the descriptor array.
7068 STATIC_ASSERT(kSmiTag == 0); 7073 STATIC_ASSERT(kSmiTag == 0);
7069 STATIC_ASSERT(kSmiTagSize == 1); 7074 STATIC_ASSERT(kSmiTagSize == 1);
7070 STATIC_ASSERT(kPointerSize == 4); 7075 STATIC_ASSERT(kPointerSize == 4);
7071 __ lea(scratch1_, 7076 __ lea(scratch1_,
7072 Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize)); 7077 Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
7073 // Calculate location of the first key name. 7078 // Calculate location of the first key name.
7074 __ add(Operand(map_result_), 7079 __ add(Operand(map_result_),
7075 Immediate(FixedArray::kHeaderSize + 7080 Immediate(FixedArray::kHeaderSize +
7076 DescriptorArray::kFirstIndex * kPointerSize)); 7081 DescriptorArray::kFirstIndex * kPointerSize));
7077 // Loop through all the keys in the descriptor array. If one of these is the 7082 // Loop through all the keys in the descriptor array. If one of these is the
7078 // symbol valueOf the result is false. 7083 // symbol valueOf the result is false.
7079 Label entry, loop; 7084 Label entry, loop;
7080 __ jmp(&entry); 7085 __ jmp(&entry);
7081 __ bind(&loop); 7086 __ bind(&loop);
7082 __ mov(scratch2_, FieldOperand(map_result_, 0)); 7087 __ mov(scratch2_, FieldOperand(map_result_, 0));
7083 __ cmp(scratch2_, Factory::value_of_symbol()); 7088 __ cmp(scratch2_, FACTORY->value_of_symbol());
7084 __ j(equal, &false_result); 7089 __ j(equal, &false_result);
7085 __ add(Operand(map_result_), Immediate(kPointerSize)); 7090 __ add(Operand(map_result_), Immediate(kPointerSize));
7086 __ bind(&entry); 7091 __ bind(&entry);
7087 __ cmp(map_result_, Operand(scratch1_)); 7092 __ cmp(map_result_, Operand(scratch1_));
7088 __ j(not_equal, &loop); 7093 __ j(not_equal, &loop);
7089 7094
7090 // Reload map as register map_result_ was used as temporary above. 7095 // Reload map as register map_result_ was used as temporary above.
7091 __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset)); 7096 __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
7092 7097
7093 // If a valueOf property is not found on the object check that it's 7098 // If a valueOf property is not found on the object check that it's
(...skipping 194 matching lines...)
7288 // instance class name from there. 7293 // instance class name from there.
7289 __ mov(obj.reg(), 7294 __ mov(obj.reg(),
7290 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset)); 7295 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
7291 __ mov(obj.reg(), 7296 __ mov(obj.reg(),
7292 FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset)); 7297 FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
7293 frame_->Push(&obj); 7298 frame_->Push(&obj);
7294 leave.Jump(); 7299 leave.Jump();
7295 7300
7296 // Functions have class 'Function'. 7301 // Functions have class 'Function'.
7297 function.Bind(); 7302 function.Bind();
7298 frame_->Push(Factory::function_class_symbol()); 7303 frame_->Push(FACTORY->function_class_symbol());
7299 leave.Jump(); 7304 leave.Jump();
7300 7305
7301 // Objects with a non-function constructor have class 'Object'. 7306 // Objects with a non-function constructor have class 'Object'.
7302 non_function_constructor.Bind(); 7307 non_function_constructor.Bind();
7303 frame_->Push(Factory::Object_symbol()); 7308 frame_->Push(FACTORY->Object_symbol());
7304 leave.Jump(); 7309 leave.Jump();
7305 7310
7306 // Non-JS objects have class null. 7311 // Non-JS objects have class null.
7307 null.Bind(); 7312 null.Bind();
7308 frame_->Push(Factory::null_value()); 7313 frame_->Push(FACTORY->null_value());
7309 7314
7310 // All done. 7315 // All done.
7311 leave.Bind(); 7316 leave.Bind();
7312 } 7317 }
7313 7318
7314 7319
7315 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { 7320 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
7316 ASSERT(args->length() == 1); 7321 ASSERT(args->length() == 1);
7317 JumpTarget leave; 7322 JumpTarget leave;
7318 Load(args->at(0)); // Load the object. 7323 Load(args->at(0)); // Load the object.
(...skipping 121 matching lines...)
7440 7445
7441 __ bind(&heapnumber_allocated); 7446 __ bind(&heapnumber_allocated);
7442 7447
7443 __ PrepareCallCFunction(0, ebx); 7448 __ PrepareCallCFunction(0, ebx);
7444 __ CallCFunction(ExternalReference::random_uint32_function(), 0); 7449 __ CallCFunction(ExternalReference::random_uint32_function(), 0);
7445 7450
7446 // Convert 32 random bits in eax to 0.(32 random bits) in a double 7451 // Convert 32 random bits in eax to 0.(32 random bits) in a double
7447 // by computing: 7452 // by computing:
7448 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20). 7453 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
7449 // This is implemented on both SSE2 and FPU. 7454 // This is implemented on both SSE2 and FPU.
7450 if (CpuFeatures::IsSupported(SSE2)) { 7455 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
7451 CpuFeatures::Scope fscope(SSE2); 7456 CpuFeatures::Scope fscope(SSE2);
7452 __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. 7457 __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
7453 __ movd(xmm1, Operand(ebx)); 7458 __ movd(xmm1, Operand(ebx));
7454 __ movd(xmm0, Operand(eax)); 7459 __ movd(xmm0, Operand(eax));
7455 __ cvtss2sd(xmm1, xmm1); 7460 __ cvtss2sd(xmm1, xmm1);
7456 __ pxor(xmm0, xmm1); 7461 __ pxor(xmm0, xmm1);
7457 __ subsd(xmm0, xmm1); 7462 __ subsd(xmm0, xmm1);
7458 __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0); 7463 __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
7459 } else { 7464 } else {
7460 // 0x4130000000000000 is 1.0 x 2^20 as a double. 7465 // 0x4130000000000000 is 1.0 x 2^20 as a double.
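The arithmetic behind the comment above: placing 32 random bits in the low mantissa of 1.0 x 2^20 yields (1 + bits/2^52) x 2^20, and subtracting 1.0 x 2^20 leaves bits x 2^(20-52) = bits/2^32, i.e. a uniform double in [0, 1). (The emitted code uses pxor rather than an OR, which is equivalent here because those mantissa bits start out zero.) A host-side illustration of the same trick, assuming IEEE-754 doubles:

    #include <stdint.h>
    #include <string.h>

    double RandomBitsToDouble(uint32_t bits) {
      // 1.0 x 2^20 as a double: biased exponent 0x413, zero mantissa.
      uint64_t one_2_20 = 0x4130000000000000ULL;
      // 1.(20 zero bits)(32 random bits) x 2^20.
      uint64_t patched = one_2_20 | bits;
      double a, b;
      memcpy(&a, &patched, sizeof(a));
      memcpy(&b, &one_2_20, sizeof(b));
      return a - b;  // == bits / 2^32, uniformly in [0, 1)
    }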
(...skipping 196 matching lines...)
7657 } 7662 }
7658 7663
7659 7664
7660 void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) { 7665 void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
7661 ASSERT_EQ(2, args->length()); 7666 ASSERT_EQ(2, args->length());
7662 7667
7663 ASSERT_NE(NULL, args->at(0)->AsLiteral()); 7668 ASSERT_NE(NULL, args->at(0)->AsLiteral());
7664 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value(); 7669 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
7665 7670
7666 Handle<FixedArray> jsfunction_result_caches( 7671 Handle<FixedArray> jsfunction_result_caches(
7667 Top::global_context()->jsfunction_result_caches()); 7672 Isolate::Current()->global_context()->jsfunction_result_caches());
7668 if (jsfunction_result_caches->length() <= cache_id) { 7673 if (jsfunction_result_caches->length() <= cache_id) {
7669 __ Abort("Attempt to use undefined cache."); 7674 __ Abort("Attempt to use undefined cache.");
7670 frame_->Push(Factory::undefined_value()); 7675 frame_->Push(FACTORY->undefined_value());
7671 return; 7676 return;
7672 } 7677 }
7673 7678
7674 Load(args->at(1)); 7679 Load(args->at(1));
7675 Result key = frame_->Pop(); 7680 Result key = frame_->Pop();
7676 key.ToRegister(); 7681 key.ToRegister();
7677 7682
7678 Result cache = allocator()->Allocate(); 7683 Result cache = allocator()->Allocate();
7679 ASSERT(cache.is_valid()); 7684 ASSERT(cache.is_valid());
7680 __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX)); 7685 __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
(...skipping 96 matching lines...)
7777 // has no indexed interceptor. 7782 // has no indexed interceptor.
7778 __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg()); 7783 __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
7779 deferred->Branch(below); 7784 deferred->Branch(below);
7780 __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset), 7785 __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
7781 KeyedLoadIC::kSlowCaseBitFieldMask); 7786 KeyedLoadIC::kSlowCaseBitFieldMask);
7782 deferred->Branch(not_zero); 7787 deferred->Branch(not_zero);
7783 7788
7784 // Check the object's elements are in fast case and writable. 7789 // Check the object's elements are in fast case and writable.
7785 __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset)); 7790 __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
7786 __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset), 7791 __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
7787 Immediate(Factory::fixed_array_map())); 7792 Immediate(FACTORY->fixed_array_map()));
7788 deferred->Branch(not_equal); 7793 deferred->Branch(not_equal);
7789 7794
7790 // Smi-tagging is equivalent to multiplying by 2. 7795 // Smi-tagging is equivalent to multiplying by 2.
7791 STATIC_ASSERT(kSmiTag == 0); 7796 STATIC_ASSERT(kSmiTag == 0);
7792 STATIC_ASSERT(kSmiTagSize == 1); 7797 STATIC_ASSERT(kSmiTagSize == 1);
7793 7798
7794 // Check that both indices are smis. 7799 // Check that both indices are smis.
7795 __ mov(tmp2.reg(), index1.reg()); 7800 __ mov(tmp2.reg(), index1.reg());
7796 __ or_(tmp2.reg(), Operand(index2.reg())); 7801 __ or_(tmp2.reg(), Operand(index2.reg()));
7797 __ test(tmp2.reg(), Immediate(kSmiTagMask)); 7802 __ test(tmp2.reg(), Immediate(kSmiTagMask));
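The or/test pair above checks both indices for smi-ness at once: on ia32 a smi stores its 31-bit payload shifted left by one with a 0 tag bit (hence the STATIC_ASSERTs that smi-tagging equals multiplication by 2), so OR-ing the two words leaves a 0 low bit only if both were smis. As a worked sketch:

    // Smi tagging invariants used above (ia32 encoding):
    const intptr_t kSmiTag = 0;      // low bit of a smi
    const intptr_t kSmiTagMask = 1;  // (1 << kSmiTagSize) - 1

    inline bool BothSmis(intptr_t a, intptr_t b) {
      // Heap pointers carry a 1 tag bit, so a single OR+AND tests both
      // words at once - exactly the emitted or_/test sequence.
      return ((a | b) & kSmiTagMask) == kSmiTag;
    }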
(...skipping 20 matching lines...)
7818 __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done); 7823 __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
7819 // Possible optimization: do a check that both values are Smis 7824 // Possible optimization: do a check that both values are Smis
7820 // (or them and test against Smi mask.) 7825 // (or them and test against Smi mask.)
7821 7826
7822 __ mov(tmp2.reg(), tmp1.reg()); 7827 __ mov(tmp2.reg(), tmp1.reg());
7823 __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg()); 7828 __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
7824 __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg()); 7829 __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
7825 __ bind(&done); 7830 __ bind(&done);
7826 7831
7827 deferred->BindExit(); 7832 deferred->BindExit();
7828 frame_->Push(Factory::undefined_value()); 7833 frame_->Push(FACTORY->undefined_value());
7829 } 7834 }
7830 7835
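The InNewSpace check above short-circuits the write barrier: after swapping two elements in place, the GC only needs to be told about the stores when the elements array lives in old space and may now point at new-space objects; stores into a new-space array need no bookkeeping. Sketched below (illustrative logic, not the MacroAssembler API; RecordWrite stands in loosely for the RecordWriteHelper calls above):

    // Illustrative barrier logic for one of the swapped stores:
    void StoreWithBarrier(FixedArray* elements, int index, Object* value) {
      elements->set(index, value);        // the raw store
      if (!InNewSpace(elements)) {
        // Old-space array: record the slot so an old-to-new pointer
        // is found by the next scavenge.
        RecordWrite(elements, index);     // assumed helper
      }
    }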
7831 7836
7832 void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) { 7837 void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
7833 Comment cmnt(masm_, "[ GenerateCallFunction"); 7838 Comment cmnt(masm_, "[ GenerateCallFunction");
7834 7839
7835 ASSERT(args->length() >= 2); 7840 ASSERT(args->length() >= 2);
7836 7841
7837 int n_args = args->length() - 2; // for receiver and function. 7842 int n_args = args->length() - 2; // for receiver and function.
7838 Load(args->at(0)); // receiver 7843 Load(args->at(0)); // receiver
7839 for (int i = 0; i < n_args; i++) { 7844 for (int i = 0; i < n_args; i++) {
7840 Load(args->at(i + 1)); 7845 Load(args->at(i + 1));
7841 } 7846 }
7842 Load(args->at(n_args + 1)); // function 7847 Load(args->at(n_args + 1)); // function
7843 Result result = frame_->CallJSFunction(n_args); 7848 Result result = frame_->CallJSFunction(n_args);
7844 frame_->Push(&result); 7849 frame_->Push(&result);
7845 } 7850 }
7846 7851
7847 7852
7848 // Generates the Math.pow method. Only handles special cases and 7853 // Generates the Math.pow method. Only handles special cases and
7849 // branches to the runtime system for everything else. Please note 7854 // branches to the runtime system for everything else. Please note
7850 // that this function assumes that the callsite has executed ToNumber 7855 // that this function assumes that the callsite has executed ToNumber
7851 // on both arguments. 7856 // on both arguments.
7852 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { 7857 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
7853 ASSERT(args->length() == 2); 7858 ASSERT(args->length() == 2);
7854 Load(args->at(0)); 7859 Load(args->at(0));
7855 Load(args->at(1)); 7860 Load(args->at(1));
7856 if (!CpuFeatures::IsSupported(SSE2)) { 7861 if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
7857 Result res = frame_->CallRuntime(Runtime::kMath_pow, 2); 7862 Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
7858 frame_->Push(&res); 7863 frame_->Push(&res);
7859 } else { 7864 } else {
7860 CpuFeatures::Scope use_sse2(SSE2); 7865 CpuFeatures::Scope use_sse2(SSE2);
7861 Label allocate_return; 7866 Label allocate_return;
7862 // Load the two operands while leaving the values on the frame. 7867 // Load the two operands while leaving the values on the frame.
7863 frame()->Dup(); 7868 frame()->Dup();
7864 Result exponent = frame()->Pop(); 7869 Result exponent = frame()->Pop();
7865 exponent.ToRegister(); 7870 exponent.ToRegister();
7866 frame()->Spill(exponent.reg()); 7871 frame()->Spill(exponent.reg());
(...skipping 20 matching lines...)
7887 __ j(not_zero, &base_nonsmi); 7892 __ j(not_zero, &base_nonsmi);
7888 7893
7889 // Optimized version when y is an integer. 7894 // Optimized version when y is an integer.
7890 Label powi; 7895 Label powi;
7891 __ SmiUntag(base.reg()); 7896 __ SmiUntag(base.reg());
7892 __ cvtsi2sd(xmm0, Operand(base.reg())); 7897 __ cvtsi2sd(xmm0, Operand(base.reg()));
7893 __ jmp(&powi); 7898 __ jmp(&powi);
7894 // The exponent is a smi and the base is a heap number. 7899 // The exponent is a smi and the base is a heap number.
7895 __ bind(&base_nonsmi); 7900 __ bind(&base_nonsmi);
7896 __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset), 7901 __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
7897 Factory::heap_number_map()); 7902 FACTORY->heap_number_map());
7898 call_runtime.Branch(not_equal); 7903 call_runtime.Branch(not_equal);
7899 7904
7900 __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); 7905 __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
7901 7906
7902 // Optimized version of pow if y is an integer. 7907 // Optimized version of pow if y is an integer.
7903 __ bind(&powi); 7908 __ bind(&powi);
7904 __ SmiUntag(exponent.reg()); 7909 __ SmiUntag(exponent.reg());
7905 7910
7906 // Save exponent in base as we need to check if exponent is negative later. 7911 // Save exponent in base as we need to check if exponent is negative later.
7907 // We know that base and exponent are in different registers. 7912 // We know that base and exponent are in different registers.
(...skipping 30 matching lines...)
7938 __ ucomisd(xmm0, xmm1); 7943 __ ucomisd(xmm0, xmm1);
7939 call_runtime.Branch(equal); 7944 call_runtime.Branch(equal);
7940 __ divsd(xmm3, xmm1); 7945 __ divsd(xmm3, xmm1);
7941 __ movsd(xmm1, xmm3); 7946 __ movsd(xmm1, xmm3);
7942 __ jmp(&allocate_return); 7947 __ jmp(&allocate_return);
7943 7948
7944 // The exponent (or both operands) is a heap number - from here on 7949 // The exponent (or both operands) is a heap number - from here on
7945 // we must work on doubles. 7950 // we must work on doubles.
7946 __ bind(&exponent_nonsmi); 7951 __ bind(&exponent_nonsmi);
7947 __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset), 7952 __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
7948 Factory::heap_number_map()); 7953 FACTORY->heap_number_map());
7949 call_runtime.Branch(not_equal); 7954 call_runtime.Branch(not_equal);
7950 __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset)); 7955 __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
7951 // Test if the exponent is NaN. 7956 // Test if the exponent is NaN.
7952 __ ucomisd(xmm1, xmm1); 7957 __ ucomisd(xmm1, xmm1);
7953 call_runtime.Branch(parity_even); 7958 call_runtime.Branch(parity_even);
7954 7959
7955 Label base_not_smi; 7960 Label base_not_smi;
7956 Label handle_special_cases; 7961 Label handle_special_cases;
7957 __ test(base.reg(), Immediate(kSmiTagMask)); 7962 __ test(base.reg(), Immediate(kSmiTagMask));
7958 __ j(not_zero, &base_not_smi); 7963 __ j(not_zero, &base_not_smi);
7959 __ SmiUntag(base.reg()); 7964 __ SmiUntag(base.reg());
7960 __ cvtsi2sd(xmm0, Operand(base.reg())); 7965 __ cvtsi2sd(xmm0, Operand(base.reg()));
7961 __ jmp(&handle_special_cases); 7966 __ jmp(&handle_special_cases);
7962 __ bind(&base_not_smi); 7967 __ bind(&base_not_smi);
7963 __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset), 7968 __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
7964 Factory::heap_number_map()); 7969 FACTORY->heap_number_map());
7965 call_runtime.Branch(not_equal); 7970 call_runtime.Branch(not_equal);
7966 __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset)); 7971 __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
7967 __ and_(answer.reg(), HeapNumber::kExponentMask); 7972 __ and_(answer.reg(), HeapNumber::kExponentMask);
7968 __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask)); 7973 __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
7969 // base is NaN or +/-Infinity 7974 // base is NaN or +/-Infinity
7970 call_runtime.Branch(greater_equal); 7975 call_runtime.Branch(greater_equal);
7971 __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); 7976 __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
7972 7977
7973 // base is in xmm0 and exponent is in xmm1. 7978 // base is in xmm0 and exponent is in xmm1.
7974 __ bind(&handle_special_cases); 7979 __ bind(&handle_special_cases);
(...skipping 88 matching lines...)
8063 frame_->Push(&result); 8068 frame_->Push(&result);
8064 } 8069 }
8065 8070
8066 8071
8067 // Generates the Math.sqrt method. Please note - this function assumes that 8072 // Generates the Math.sqrt method. Please note - this function assumes that
8068 // the callsite has executed ToNumber on the argument. 8073 // the callsite has executed ToNumber on the argument.
8069 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { 8074 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
8070 ASSERT_EQ(args->length(), 1); 8075 ASSERT_EQ(args->length(), 1);
8071 Load(args->at(0)); 8076 Load(args->at(0));
8072 8077
8073 if (!CpuFeatures::IsSupported(SSE2)) { 8078 if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
8074 Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1); 8079 Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
8075 frame()->Push(&result); 8080 frame()->Push(&result);
8076 } else { 8081 } else {
8077 CpuFeatures::Scope use_sse2(SSE2); 8082 CpuFeatures::Scope use_sse2(SSE2);
8078 // Leave original value on the frame if we need to call runtime. 8083 // Leave original value on the frame if we need to call runtime.
8079 frame()->Dup(); 8084 frame()->Dup();
8080 Result result = frame()->Pop(); 8085 Result result = frame()->Pop();
8081 result.ToRegister(); 8086 result.ToRegister();
8082 frame()->Spill(result.reg()); 8087 frame()->Spill(result.reg());
8083 Label runtime; 8088 Label runtime;
8084 Label non_smi; 8089 Label non_smi;
8085 Label load_done; 8090 Label load_done;
8086 JumpTarget end; 8091 JumpTarget end;
8087 8092
8088 __ test(result.reg(), Immediate(kSmiTagMask)); 8093 __ test(result.reg(), Immediate(kSmiTagMask));
8089 __ j(not_zero, &non_smi); 8094 __ j(not_zero, &non_smi);
8090 __ SmiUntag(result.reg()); 8095 __ SmiUntag(result.reg());
8091 __ cvtsi2sd(xmm0, Operand(result.reg())); 8096 __ cvtsi2sd(xmm0, Operand(result.reg()));
8092 __ jmp(&load_done); 8097 __ jmp(&load_done);
8093 __ bind(&non_smi); 8098 __ bind(&non_smi);
8094 __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset), 8099 __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
8095 Factory::heap_number_map()); 8100 FACTORY->heap_number_map());
8096 __ j(not_equal, &runtime); 8101 __ j(not_equal, &runtime);
8097 __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset)); 8102 __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
8098 8103
8099 __ bind(&load_done); 8104 __ bind(&load_done);
8100 __ sqrtsd(xmm0, xmm0); 8105 __ sqrtsd(xmm0, xmm0);
8101 // A copy of the virtual frame to allow us to go to runtime after the 8106 // A copy of the virtual frame to allow us to go to runtime after the
8102 // JumpTarget jump. 8107 // JumpTarget jump.
8103 Result scratch = allocator()->Allocate(); 8108 Result scratch = allocator()->Allocate();
8104 VirtualFrame* clone = new VirtualFrame(frame()); 8109 VirtualFrame* clone = new VirtualFrame(frame());
8105 __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime); 8110 __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
(...skipping 85 matching lines...)
8191 8196
8192 8197
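The smi test-and-untag pattern above (test against kSmiTagMask, then SmiUntag and cvtsi2sd) relies on ia32's one-bit smi tag with tag value 0, as the STATIC_ASSERT further down in this file states. A self-contained sketch of the encoding and of the sqrt fast-path shape, with hypothetical helper names:

    #include <cmath>
    #include <cstdint>

    // ia32 smi encoding sketch: low bit 0 means smi, payload in the top 31 bits.
    inline bool IsSmiSketch(intptr_t tagged) { return (tagged & 1) == 0; }
    inline int32_t SmiUntagSketch(intptr_t tagged) {
      return static_cast<int32_t>(tagged) >> 1;  // arithmetic shift drops the tag
    }

    // Shape of the GenerateMathSqrt fast path; the heap-number and runtime
    // branches are elided because they need real V8 objects.
    inline bool TrySqrtFastPath(intptr_t tagged, double* out) {
      if (!IsSmiSketch(tagged)) return false;  // heap number or runtime path
      *out = std::sqrt(static_cast<double>(SmiUntagSketch(tagged)));
      return true;                             // cvtsi2sd + sqrtsd above
    }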
8193 void CodeGenerator::VisitCallRuntime(CallRuntime* node) { 8198 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
8194 ASSERT(!in_safe_int32_mode()); 8199 ASSERT(!in_safe_int32_mode());
8195 if (CheckForInlineRuntimeCall(node)) { 8200 if (CheckForInlineRuntimeCall(node)) {
8196 return; 8201 return;
8197 } 8202 }
8198 8203
8199 ZoneList<Expression*>* args = node->arguments(); 8204 ZoneList<Expression*>* args = node->arguments();
8200 Comment cmnt(masm_, "[ CallRuntime"); 8205 Comment cmnt(masm_, "[ CallRuntime");
8201 Runtime::Function* function = node->function(); 8206 const Runtime::Function* function = node->function();
8202 8207
8203 if (function == NULL) { 8208 if (function == NULL) {
8204 // Push the builtins object found in the current global object. 8209 // Push the builtins object found in the current global object.
8205 Result temp = allocator()->Allocate(); 8210 Result temp = allocator()->Allocate();
8206 ASSERT(temp.is_valid()); 8211 ASSERT(temp.is_valid());
8207 __ mov(temp.reg(), GlobalObjectOperand()); 8212 __ mov(temp.reg(), GlobalObjectOperand());
8208 __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset)); 8213 __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
8209 frame_->Push(&temp); 8214 frame_->Push(&temp);
8210 } 8215 }
8211 8216
(...skipping 62 matching lines...)
8274 // variable. Sync the virtual frame eagerly so we can push the 8279 // variable. Sync the virtual frame eagerly so we can push the
8275 // arguments directly into place. 8280 // arguments directly into place.
8276 frame_->SyncRange(0, frame_->element_count() - 1); 8281 frame_->SyncRange(0, frame_->element_count() - 1);
8277 frame_->EmitPush(esi); 8282 frame_->EmitPush(esi);
8278 frame_->EmitPush(Immediate(variable->name())); 8283 frame_->EmitPush(Immediate(variable->name()));
8279 Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2); 8284 Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
8280 frame_->Push(&answer); 8285 frame_->Push(&answer);
8281 } else { 8286 } else {
8282 // Default: Result of deleting non-global, not dynamically 8287 // Default: Result of deleting non-global, not dynamically
8283 // introduced variables is false. 8288 // introduced variables is false.
8284 frame_->Push(Factory::false_value()); 8289 frame_->Push(FACTORY->false_value());
8285 } 8290 }
8286 } else { 8291 } else {
8287 // Default: Result of deleting expressions is true. 8292 // Default: Result of deleting expressions is true.
8288 Load(node->expression()); // may have side-effects 8293 Load(node->expression()); // may have side-effects
8289 frame_->SetElementAt(0, Factory::true_value()); 8294 frame_->SetElementAt(0, FACTORY->true_value());
8290 } 8295 }
8291 8296
8292 } else if (op == Token::TYPEOF) { 8297 } else if (op == Token::TYPEOF) {
8293 // Special case for loading the typeof expression; see comment on 8298 // Special case for loading the typeof expression; see comment on
8294 // LoadTypeofExpression(). 8299 // LoadTypeofExpression().
8295 LoadTypeofExpression(node->expression()); 8300 LoadTypeofExpression(node->expression());
8296 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1); 8301 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
8297 frame_->Push(&answer); 8302 frame_->Push(&answer);
8298 8303
8299 } else if (op == Token::VOID) { 8304 } else if (op == Token::VOID) {
8300 Expression* expression = node->expression(); 8305 Expression* expression = node->expression();
8301 if (expression && expression->AsLiteral() && ( 8306 if (expression && expression->AsLiteral() && (
8302 expression->AsLiteral()->IsTrue() || 8307 expression->AsLiteral()->IsTrue() ||
8303 expression->AsLiteral()->IsFalse() || 8308 expression->AsLiteral()->IsFalse() ||
8304 expression->AsLiteral()->handle()->IsNumber() || 8309 expression->AsLiteral()->handle()->IsNumber() ||
8305 expression->AsLiteral()->handle()->IsString() || 8310 expression->AsLiteral()->handle()->IsString() ||
8306 expression->AsLiteral()->handle()->IsJSRegExp() || 8311 expression->AsLiteral()->handle()->IsJSRegExp() ||
8307 expression->AsLiteral()->IsNull())) { 8312 expression->AsLiteral()->IsNull())) {
8308 // Omit evaluating the value of the primitive literal. 8313 // Omit evaluating the value of the primitive literal.
8309 // It will be discarded anyway, and can have no side effect. 8314 // It will be discarded anyway, and can have no side effect.
8310 frame_->Push(Factory::undefined_value()); 8315 frame_->Push(FACTORY->undefined_value());
8311 } else { 8316 } else {
8312 Load(node->expression()); 8317 Load(node->expression());
8313 frame_->SetElementAt(0, Factory::undefined_value()); 8318 frame_->SetElementAt(0, FACTORY->undefined_value());
8314 } 8319 }
8315 8320
8316 } else { 8321 } else {
8317 if (in_safe_int32_mode()) { 8322 if (in_safe_int32_mode()) {
8318 Visit(node->expression()); 8323 Visit(node->expression());
8319 Result value = frame_->Pop(); 8324 Result value = frame_->Pop();
8320 ASSERT(value.is_untagged_int32()); 8325 ASSERT(value.is_untagged_int32());
8321 // Registers containing an int32 value are not multiply used. 8326 // Registers containing an int32 value are not multiply used.
8322 ASSERT(!value.is_register() || !frame_->is_used(value.reg())); 8327 ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
8323 value.ToRegister(); 8328 value.ToRegister();
(...skipping 781 matching lines...)
9105 (operation != NULL && operation->op() == Token::TYPEOF) && 9110 (operation != NULL && operation->op() == Token::TYPEOF) &&
9106 (right->AsLiteral() != NULL && 9111 (right->AsLiteral() != NULL &&
9107 right->AsLiteral()->handle()->IsString())) { 9112 right->AsLiteral()->handle()->IsString())) {
9108 Handle<String> check(String::cast(*right->AsLiteral()->handle())); 9113 Handle<String> check(String::cast(*right->AsLiteral()->handle()));
9109 9114
9110 // Load the operand and move it to a register. 9115 // Load the operand and move it to a register.
9111 LoadTypeofExpression(operation->expression()); 9116 LoadTypeofExpression(operation->expression());
9112 Result answer = frame_->Pop(); 9117 Result answer = frame_->Pop();
9113 answer.ToRegister(); 9118 answer.ToRegister();
9114 9119
9115 if (check->Equals(Heap::number_symbol())) { 9120 if (check->Equals(HEAP->number_symbol())) {
9116 __ test(answer.reg(), Immediate(kSmiTagMask)); 9121 __ test(answer.reg(), Immediate(kSmiTagMask));
9117 destination()->true_target()->Branch(zero); 9122 destination()->true_target()->Branch(zero);
9118 frame_->Spill(answer.reg()); 9123 frame_->Spill(answer.reg());
9119 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); 9124 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
9120 __ cmp(answer.reg(), Factory::heap_number_map()); 9125 __ cmp(answer.reg(), FACTORY->heap_number_map());
9121 answer.Unuse(); 9126 answer.Unuse();
9122 destination()->Split(equal); 9127 destination()->Split(equal);
9123 9128
9124 } else if (check->Equals(Heap::string_symbol())) { 9129 } else if (check->Equals(HEAP->string_symbol())) {
9125 __ test(answer.reg(), Immediate(kSmiTagMask)); 9130 __ test(answer.reg(), Immediate(kSmiTagMask));
9126 destination()->false_target()->Branch(zero); 9131 destination()->false_target()->Branch(zero);
9127 9132
9128 // It can be an undetectable string object. 9133 // It can be an undetectable string object.
9129 Result temp = allocator()->Allocate(); 9134 Result temp = allocator()->Allocate();
9130 ASSERT(temp.is_valid()); 9135 ASSERT(temp.is_valid());
9131 __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); 9136 __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
9132 __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset), 9137 __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
9133 1 << Map::kIsUndetectable); 9138 1 << Map::kIsUndetectable);
9134 destination()->false_target()->Branch(not_zero); 9139 destination()->false_target()->Branch(not_zero);
9135 __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE); 9140 __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
9136 temp.Unuse(); 9141 temp.Unuse();
9137 answer.Unuse(); 9142 answer.Unuse();
9138 destination()->Split(below); 9143 destination()->Split(below);
9139 9144
9140 } else if (check->Equals(Heap::boolean_symbol())) { 9145 } else if (check->Equals(HEAP->boolean_symbol())) {
9141 __ cmp(answer.reg(), Factory::true_value()); 9146 __ cmp(answer.reg(), FACTORY->true_value());
9142 destination()->true_target()->Branch(equal); 9147 destination()->true_target()->Branch(equal);
9143 __ cmp(answer.reg(), Factory::false_value()); 9148 __ cmp(answer.reg(), FACTORY->false_value());
9144 answer.Unuse(); 9149 answer.Unuse();
9145 destination()->Split(equal); 9150 destination()->Split(equal);
9146 9151
9147 } else if (check->Equals(Heap::undefined_symbol())) { 9152 } else if (check->Equals(HEAP->undefined_symbol())) {
9148 __ cmp(answer.reg(), Factory::undefined_value()); 9153 __ cmp(answer.reg(), FACTORY->undefined_value());
9149 destination()->true_target()->Branch(equal); 9154 destination()->true_target()->Branch(equal);
9150 9155
9151 __ test(answer.reg(), Immediate(kSmiTagMask)); 9156 __ test(answer.reg(), Immediate(kSmiTagMask));
9152 destination()->false_target()->Branch(zero); 9157 destination()->false_target()->Branch(zero);
9153 9158
9154 // It can be an undetectable object. 9159 // It can be an undetectable object.
9155 frame_->Spill(answer.reg()); 9160 frame_->Spill(answer.reg());
9156 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); 9161 __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
9157 __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset), 9162 __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
9158 1 << Map::kIsUndetectable); 9163 1 << Map::kIsUndetectable);
9159 answer.Unuse(); 9164 answer.Unuse();
9160 destination()->Split(not_zero); 9165 destination()->Split(not_zero);
9161 9166
9162 } else if (check->Equals(Heap::function_symbol())) { 9167 } else if (check->Equals(HEAP->function_symbol())) {
9163 __ test(answer.reg(), Immediate(kSmiTagMask)); 9168 __ test(answer.reg(), Immediate(kSmiTagMask));
9164 destination()->false_target()->Branch(zero); 9169 destination()->false_target()->Branch(zero);
9165 frame_->Spill(answer.reg()); 9170 frame_->Spill(answer.reg());
9166 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); 9171 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
9167 destination()->true_target()->Branch(equal); 9172 destination()->true_target()->Branch(equal);
9168 // Regular expressions are callable so typeof == 'function'. 9173 // Regular expressions are callable so typeof == 'function'.
9169 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE); 9174 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
9170 answer.Unuse(); 9175 answer.Unuse();
9171 destination()->Split(equal); 9176 destination()->Split(equal);
9172 } else if (check->Equals(Heap::object_symbol())) { 9177 } else if (check->Equals(HEAP->object_symbol())) {
9173 __ test(answer.reg(), Immediate(kSmiTagMask)); 9178 __ test(answer.reg(), Immediate(kSmiTagMask));
9174 destination()->false_target()->Branch(zero); 9179 destination()->false_target()->Branch(zero);
9175 __ cmp(answer.reg(), Factory::null_value()); 9180 __ cmp(answer.reg(), FACTORY->null_value());
9176 destination()->true_target()->Branch(equal); 9181 destination()->true_target()->Branch(equal);
9177 9182
9178 Result map = allocator()->Allocate(); 9183 Result map = allocator()->Allocate();
9179 ASSERT(map.is_valid()); 9184 ASSERT(map.is_valid());
9180 // Regular expressions are typeof == 'function', not 'object'. 9185 // Regular expressions are typeof == 'function', not 'object'.
9181 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg()); 9186 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
9182 destination()->false_target()->Branch(equal); 9187 destination()->false_target()->Branch(equal);
9183 9188
9184 // It can be an undetectable object. 9189 // It can be an undetectable object.
9185 __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset), 9190 __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
(...skipping 22 matching lines...)
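The typeof comparisons above all follow the same shape: accept or reject smis first, then inspect the map. As one hedged example, the string case restated in C++ (the stand-in types and the two constants are assumptions, not the V8 values):

    // Illustrative stand-ins, not the V8 types.
    struct MapSketch { unsigned bit_field; unsigned instance_type; };
    struct ValueSketch { bool is_smi; const MapSketch* map; };
    const unsigned kIsUndetectableBitSketch = 1u << 2;  // assumed bit position
    const unsigned kFirstNonstringTypeSketch = 0x80;    // assumed type boundary

    bool TypeofIsStringSketch(const ValueSketch& v) {
      if (v.is_smi) return false;                        // smis are numbers
      if (v.map->bit_field & kIsUndetectableBitSketch) return false;
      return v.map->instance_type < kFirstNonstringTypeSketch;  // string range
    }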
9208 if (check->value() == 2147483648.0) { // 0x80000000. 9213 if (check->value() == 2147483648.0) { // 0x80000000.
9209 Load(left); 9214 Load(left);
9210 left_already_loaded = true; 9215 left_already_loaded = true;
9211 Result lhs = frame_->Pop(); 9216 Result lhs = frame_->Pop();
9212 lhs.ToRegister(); 9217 lhs.ToRegister();
9213 __ test(lhs.reg(), Immediate(kSmiTagMask)); 9218 __ test(lhs.reg(), Immediate(kSmiTagMask));
9214 destination()->true_target()->Branch(zero); // All Smis are less. 9219 destination()->true_target()->Branch(zero); // All Smis are less.
9215 Result scratch = allocator()->Allocate(); 9220 Result scratch = allocator()->Allocate();
9216 ASSERT(scratch.is_valid()); 9221 ASSERT(scratch.is_valid());
9217 __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset)); 9222 __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
9218 __ cmp(scratch.reg(), Factory::heap_number_map()); 9223 __ cmp(scratch.reg(), FACTORY->heap_number_map());
9219 JumpTarget not_a_number; 9224 JumpTarget not_a_number;
9220 not_a_number.Branch(not_equal, &lhs); 9225 not_a_number.Branch(not_equal, &lhs);
9221 __ mov(scratch.reg(), 9226 __ mov(scratch.reg(),
9222 FieldOperand(lhs.reg(), HeapNumber::kExponentOffset)); 9227 FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
9223 __ cmp(Operand(scratch.reg()), Immediate(0xfff00000)); 9228 __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
9224 not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf. 9229 not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf.
9225 const uint32_t borderline_exponent = 9230 const uint32_t borderline_exponent =
9226 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; 9231 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
9227 __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent)); 9232 __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
9228 scratch.Unuse(); 9233 scratch.Unuse();
(...skipping 66 matching lines...)
9295 } 9300 }
9296 9301
9297 9302
9298 void CodeGenerator::VisitCompareToNull(CompareToNull* node) { 9303 void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
9299 ASSERT(!in_safe_int32_mode()); 9304 ASSERT(!in_safe_int32_mode());
9300 Comment cmnt(masm_, "[ CompareToNull"); 9305 Comment cmnt(masm_, "[ CompareToNull");
9301 9306
9302 Load(node->expression()); 9307 Load(node->expression());
9303 Result operand = frame_->Pop(); 9308 Result operand = frame_->Pop();
9304 operand.ToRegister(); 9309 operand.ToRegister();
9305 __ cmp(operand.reg(), Factory::null_value()); 9310 __ cmp(operand.reg(), FACTORY->null_value());
9306 if (node->is_strict()) { 9311 if (node->is_strict()) {
9307 operand.Unuse(); 9312 operand.Unuse();
9308 destination()->Split(equal); 9313 destination()->Split(equal);
9309 } else { 9314 } else {
9310 // The 'null' value is only equal to 'undefined' if using non-strict 9315 // The 'null' value is only equal to 'undefined' if using non-strict
9311 // comparisons. 9316 // comparisons.
9312 destination()->true_target()->Branch(equal); 9317 destination()->true_target()->Branch(equal);
9313 __ cmp(operand.reg(), Factory::undefined_value()); 9318 __ cmp(operand.reg(), FACTORY->undefined_value());
9314 destination()->true_target()->Branch(equal); 9319 destination()->true_target()->Branch(equal);
9315 __ test(operand.reg(), Immediate(kSmiTagMask)); 9320 __ test(operand.reg(), Immediate(kSmiTagMask));
9316 destination()->false_target()->Branch(equal); 9321 destination()->false_target()->Branch(equal);
9317 9322
9318 // It can be an undetectable object. 9323 // It can be an undetectable object.
9319 // Use a scratch register in preference to spilling operand.reg(). 9324 // Use a scratch register in preference to spilling operand.reg().
9320 Result temp = allocator()->Allocate(); 9325 Result temp = allocator()->Allocate();
9321 ASSERT(temp.is_valid()); 9326 ASSERT(temp.is_valid());
9322 __ mov(temp.reg(), 9327 __ mov(temp.reg(),
9323 FieldOperand(operand.reg(), HeapObject::kMapOffset)); 9328 FieldOperand(operand.reg(), HeapObject::kMapOffset));
(...skipping 52 matching lines...)
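The non-strict branch above encodes JavaScript's loose null comparison: undefined compares equal to null, and so do undetectable objects. A hedged sketch with an illustrative stand-in type:

    struct NullCompareValue {           // illustrative, not the V8 type
      bool is_null, is_undefined, is_smi, is_undetectable;
    };

    bool CompareToNullSketch(const NullCompareValue& v, bool is_strict) {
      if (v.is_null) return true;
      if (is_strict) return false;      // strict: only null itself matches
      if (v.is_undefined) return true;  // loose: undefined == null
      if (v.is_smi) return false;
      return v.is_undetectable;         // undetectable objects act like null
    }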
9376 bool is_contextual_; 9381 bool is_contextual_;
9377 bool is_dont_delete_; 9382 bool is_dont_delete_;
9378 }; 9383 };
9379 9384
9380 9385
9381 void DeferredReferenceGetNamedValue::Generate() { 9386 void DeferredReferenceGetNamedValue::Generate() {
9382 if (!receiver_.is(eax)) { 9387 if (!receiver_.is(eax)) {
9383 __ mov(eax, receiver_); 9388 __ mov(eax, receiver_);
9384 } 9389 }
9385 __ Set(ecx, Immediate(name_)); 9390 __ Set(ecx, Immediate(name_));
9386 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); 9391 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
9392 Builtins::LoadIC_Initialize));
9387 RelocInfo::Mode mode = is_contextual_ 9393 RelocInfo::Mode mode = is_contextual_
9388 ? RelocInfo::CODE_TARGET_CONTEXT 9394 ? RelocInfo::CODE_TARGET_CONTEXT
9389 : RelocInfo::CODE_TARGET; 9395 : RelocInfo::CODE_TARGET;
9390 __ call(ic, mode); 9396 __ call(ic, mode);
9391 // The call must be followed by: 9397 // The call must be followed by:
9392 // - a test eax instruction to indicate that the inobject property 9398 // - a test eax instruction to indicate that the inobject property
9393 // case was inlined. 9399 // case was inlined.
9394 // - a mov ecx or mov edx instruction to indicate that the 9400 // - a mov ecx or mov edx instruction to indicate that the
9395 // contextual property load was inlined. 9401 // contextual property load was inlined.
9396 // 9402 //
9397 // Store the delta to the map check instruction here in the test 9403 // Store the delta to the map check instruction here in the test
9398 // instruction. Use masm_-> instead of the __ macro since the 9404 // instruction. Use masm_-> instead of the __ macro since the
9399 // latter can't return a value. 9405 // latter can't return a value.
9400 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); 9406 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
9401 // Here we use masm_-> instead of the __ macro because this is the 9407 // Here we use masm_-> instead of the __ macro because this is the
9402 // instruction that gets patched and coverage code gets in the way. 9408 // instruction that gets patched and coverage code gets in the way.
9403 if (is_contextual_) { 9409 if (is_contextual_) {
9404 masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site); 9410 masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
9405 __ IncrementCounter(&Counters::named_load_global_inline_miss, 1); 9411 __ IncrementCounter(COUNTERS->named_load_global_inline_miss(), 1);
9406 if (is_dont_delete_) { 9412 if (is_dont_delete_) {
9407 __ IncrementCounter(&Counters::dont_delete_hint_miss, 1); 9413 __ IncrementCounter(COUNTERS->dont_delete_hint_miss(), 1);
9408 } 9414 }
9409 } else { 9415 } else {
9410 masm_->test(eax, Immediate(-delta_to_patch_site)); 9416 masm_->test(eax, Immediate(-delta_to_patch_site));
9411 __ IncrementCounter(&Counters::named_load_inline_miss, 1); 9417 __ IncrementCounter(COUNTERS->named_load_inline_miss(), 1);
9412 } 9418 }
9413 9419
9414 if (!dst_.is(eax)) __ mov(dst_, eax); 9420 if (!dst_.is(eax)) __ mov(dst_, eax);
9415 } 9421 }
9416 9422
9417 9423
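The comments above describe the patching contract: the test eax instruction that follows the IC call carries -delta_to_patch_site in its 32-bit immediate, so the IC code can walk from the return address back to the map-check cmp. A hedged sketch of that arithmetic, assuming the test directly follows the call as arranged here:

    #include <cstdint>
    #include <cstring>

    // Sketch only: 'test eax, imm32' encodes as opcode 0xA9 plus the immediate.
    uint8_t* MapCheckSiteSketch(uint8_t* test_address) {
      int32_t imm;
      std::memcpy(&imm, test_address + 1, sizeof(imm));  // skip the opcode byte
      return test_address + imm;  // imm is -delta, so this points at the cmp
    }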
9418 class DeferredReferenceGetKeyedValue: public DeferredCode { 9424 class DeferredReferenceGetKeyedValue: public DeferredCode {
9419 public: 9425 public:
9420 explicit DeferredReferenceGetKeyedValue(Register dst, 9426 explicit DeferredReferenceGetKeyedValue(Register dst,
9421 Register receiver, 9427 Register receiver,
(...skipping 33 matching lines...)
9455 } 9461 }
9456 } else { 9462 } else {
9457 __ xchg(edx, eax); 9463 __ xchg(edx, eax);
9458 } 9464 }
9459 // Calculate the delta from the IC call instruction to the map check 9465 // Calculate the delta from the IC call instruction to the map check
9460 // cmp instruction in the inlined version. This delta is stored in 9466 // cmp instruction in the inlined version. This delta is stored in
9461 // a test(eax, delta) instruction after the call so that we can find 9467 // a test(eax, delta) instruction after the call so that we can find
9462 // it in the IC initialization code and patch the cmp instruction. 9468 // it in the IC initialization code and patch the cmp instruction.
9463 // This means that we cannot allow test instructions after calls to 9469 // This means that we cannot allow test instructions after calls to
9464 // KeyedLoadIC stubs in other places. 9470 // KeyedLoadIC stubs in other places.
9465 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); 9471 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
9472 Builtins::KeyedLoadIC_Initialize));
9466 __ call(ic, RelocInfo::CODE_TARGET); 9473 __ call(ic, RelocInfo::CODE_TARGET);
9467 // The delta from the start of the map-compare instruction to the 9474 // The delta from the start of the map-compare instruction to the
9468 // test instruction. We use masm_-> directly here instead of the __ 9475 // test instruction. We use masm_-> directly here instead of the __
9469 // macro because the macro sometimes uses macro expansion to turn 9476 // macro because the macro sometimes uses macro expansion to turn
9470 // into something that can't return a value. This is encountered 9477 // into something that can't return a value. This is encountered
9471 // when doing generated code coverage tests. 9478 // when doing generated code coverage tests.
9472 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); 9479 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
9473 // Here we use masm_-> instead of the __ macro because this is the 9480 // Here we use masm_-> instead of the __ macro because this is the
9474 // instruction that gets patched and coverage code gets in the way. 9481 // instruction that gets patched and coverage code gets in the way.
9475 masm_->test(eax, Immediate(-delta_to_patch_site)); 9482 masm_->test(eax, Immediate(-delta_to_patch_site));
9476 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); 9483 __ IncrementCounter(COUNTERS->keyed_load_inline_miss(), 1);
9477 9484
9478 if (!dst_.is(eax)) __ mov(dst_, eax); 9485 if (!dst_.is(eax)) __ mov(dst_, eax);
9479 } 9486 }
9480 9487
9481 9488
9482 class DeferredReferenceSetKeyedValue: public DeferredCode { 9489 class DeferredReferenceSetKeyedValue: public DeferredCode {
9483 public: 9490 public:
9484 DeferredReferenceSetKeyedValue(Register value, 9491 DeferredReferenceSetKeyedValue(Register value,
9485 Register key, 9492 Register key,
9486 Register receiver, 9493 Register receiver,
(...skipping 15 matching lines...)
9502 Register value_; 9509 Register value_;
9503 Register key_; 9510 Register key_;
9504 Register receiver_; 9511 Register receiver_;
9505 Register scratch_; 9512 Register scratch_;
9506 Label patch_site_; 9513 Label patch_site_;
9507 StrictModeFlag strict_mode_; 9514 StrictModeFlag strict_mode_;
9508 }; 9515 };
9509 9516
9510 9517
9511 void DeferredReferenceSetKeyedValue::Generate() { 9518 void DeferredReferenceSetKeyedValue::Generate() {
9512 __ IncrementCounter(&Counters::keyed_store_inline_miss, 1); 9519 __ IncrementCounter(COUNTERS->keyed_store_inline_miss(), 1);
9513 // Move value_ to eax, key_ to ecx, and receiver_ to edx. 9520 // Move value_ to eax, key_ to ecx, and receiver_ to edx.
9514 Register old_value = value_; 9521 Register old_value = value_;
9515 9522
9516 // First, move value to eax. 9523 // First, move value to eax.
9517 if (!value_.is(eax)) { 9524 if (!value_.is(eax)) {
9518 if (key_.is(eax)) { 9525 if (key_.is(eax)) {
9519 // Move key_ out of eax, preferably to ecx. 9526 // Move key_ out of eax, preferably to ecx.
9520 if (!value_.is(ecx) && !receiver_.is(ecx)) { 9527 if (!value_.is(ecx) && !receiver_.is(ecx)) {
9521 __ mov(ecx, key_); 9528 __ mov(ecx, key_);
9522 key_ = ecx; 9529 key_ = ecx;
(...skipping 33 matching lines...)
9556 } 9563 }
9557 } 9564 }
9558 } else { // Key is not in edx or ecx. 9565 } else { // Key is not in edx or ecx.
9559 if (!receiver_.is(edx)) { 9566 if (!receiver_.is(edx)) {
9560 __ mov(edx, receiver_); 9567 __ mov(edx, receiver_);
9561 } 9568 }
9562 __ mov(ecx, key_); 9569 __ mov(ecx, key_);
9563 } 9570 }
9564 9571
9565 // Call the IC stub. 9572 // Call the IC stub.
9566 Handle<Code> ic(Builtins::builtin( 9573 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
9567 (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict 9574 (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
9568 : Builtins::KeyedStoreIC_Initialize)); 9575 : Builtins::KeyedStoreIC_Initialize));
9569 __ call(ic, RelocInfo::CODE_TARGET); 9576 __ call(ic, RelocInfo::CODE_TARGET);
9570 // The delta from the start of the map-compare instruction to the 9577 // The delta from the start of the map-compare instruction to the
9571 // test instruction. We use masm_-> directly here instead of the 9578 // test instruction. We use masm_-> directly here instead of the
9572 // __ macro because the macro sometimes uses macro expansion to turn 9579 // __ macro because the macro sometimes uses macro expansion to turn
9573 // into something that can't return a value. This is encountered 9580 // into something that can't return a value. This is encountered
9574 // when doing generated code coverage tests. 9581 // when doing generated code coverage tests.
9575 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); 9582 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
9576 // Here we use masm_-> instead of the __ macro because this is the 9583 // Here we use masm_-> instead of the __ macro because this is the
9577 // instruction that gets patched and coverage code gets in the way. 9584 // instruction that gets patched and coverage code gets in the way.
9578 masm_->test(eax, Immediate(-delta_to_patch_site)); 9585 masm_->test(eax, Immediate(-delta_to_patch_site));
9579 // Restore value (returned from store IC) register. 9586 // Restore value (returned from store IC) register.
9580 if (!old_value.is(eax)) __ mov(old_value, eax); 9587 if (!old_value.is(eax)) __ mov(old_value, eax);
9581 } 9588 }
9582 9589
9583 9590
9584 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { 9591 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
9585 #ifdef DEBUG 9592 #ifdef DEBUG
9586 int original_height = frame()->height(); 9593 int original_height = frame()->height();
9587 #endif 9594 #endif
9588 9595
9589 bool contextual_load_in_builtin = 9596 bool contextual_load_in_builtin =
9590 is_contextual && 9597 is_contextual &&
9591 (Bootstrapper::IsActive() || 9598 (Isolate::Current()->bootstrapper()->IsActive() ||
9592 (!info_->closure().is_null() && info_->closure()->IsBuiltin())); 9599 (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
9593 9600
9594 Result result; 9601 Result result;
9595 // Do not inline in the global code or when not in a loop. 9602 // Do not inline in the global code or when not in a loop.
9596 if (scope()->is_global_scope() || 9603 if (scope()->is_global_scope() ||
9597 loop_nesting() == 0 || 9604 loop_nesting() == 0 ||
9598 contextual_load_in_builtin) { 9605 contextual_load_in_builtin) {
9599 Comment cmnt(masm(), "[ Load from named Property"); 9606 Comment cmnt(masm(), "[ Load from named Property");
9600 frame()->Push(name); 9607 frame()->Push(name);
9601 9608
(...skipping 25 matching lines...)
9627 // Check that the receiver is a heap object. 9634 // Check that the receiver is a heap object.
9628 __ test(receiver.reg(), Immediate(kSmiTagMask)); 9635 __ test(receiver.reg(), Immediate(kSmiTagMask));
9629 deferred->Branch(zero); 9636 deferred->Branch(zero);
9630 } 9637 }
9631 9638
9632 __ bind(deferred->patch_site()); 9639 __ bind(deferred->patch_site());
9633 // This is the map check instruction that will be patched (so we can't 9640 // This is the map check instruction that will be patched (so we can't
9634 // use the double underscore macro that may insert instructions). 9641 // use the double underscore macro that may insert instructions).
9635 // Initially use an invalid map to force a failure. 9642 // Initially use an invalid map to force a failure.
9636 masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 9643 masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
9637 Immediate(Factory::null_value())); 9644 Immediate(FACTORY->null_value()));
9638 // This branch is always a forwards branch so it's always a fixed size 9645 // This branch is always a forwards branch so it's always a fixed size
9639 // which allows the assert below to succeed and patching to work. 9646 // which allows the assert below to succeed and patching to work.
9640 deferred->Branch(not_equal); 9647 deferred->Branch(not_equal);
9641 9648
9642 // The delta from the patch label to the actual load must be 9649 // The delta from the patch label to the actual load must be
9643 // statically known. 9650 // statically known.
9644 ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) == 9651 ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
9645 LoadIC::kOffsetToLoadInstruction); 9652 LoadIC::kOffsetToLoadInstruction);
9646 9653
9647 if (is_contextual) { 9654 if (is_contextual) {
9648 // Load the (initially invalid) cell and get its value. 9655 // Load the (initially invalid) cell and get its value.
9649 masm()->mov(result.reg(), Factory::null_value()); 9656 masm()->mov(result.reg(), FACTORY->null_value());
9650 if (FLAG_debug_code) { 9657 if (FLAG_debug_code) {
9651 __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset), 9658 __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
9652 Factory::global_property_cell_map()); 9659 FACTORY->global_property_cell_map());
9653 __ Assert(equal, "Uninitialized inlined contextual load"); 9660 __ Assert(equal, "Uninitialized inlined contextual load");
9654 } 9661 }
9655 __ mov(result.reg(), 9662 __ mov(result.reg(),
9656 FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset)); 9663 FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
9664 __ cmp(result.reg(), FACTORY->the_hole_value());
9665 deferred->Branch(equal);
9657 bool is_dont_delete = false; 9666 bool is_dont_delete = false;
9658 if (!info_->closure().is_null()) { 9667 if (!info_->closure().is_null()) {
9659 // When doing lazy compilation we can check if the global cell 9668 // When doing lazy compilation we can check if the global cell
9660 // already exists and use its "don't delete" status as a hint. 9669 // already exists and use its "don't delete" status as a hint.
9661 AssertNoAllocation no_gc; 9670 AssertNoAllocation no_gc;
9662 v8::internal::GlobalObject* global_object = 9671 v8::internal::GlobalObject* global_object =
9663 info_->closure()->context()->global(); 9672 info_->closure()->context()->global();
9664 LookupResult lookup; 9673 LookupResult lookup;
9665 global_object->LocalLookupRealNamedProperty(*name, &lookup); 9674 global_object->LocalLookupRealNamedProperty(*name, &lookup);
9666 if (lookup.IsProperty() && lookup.type() == NORMAL) { 9675 if (lookup.IsProperty() && lookup.type() == NORMAL) {
9667 ASSERT(lookup.holder() == global_object); 9676 ASSERT(lookup.holder() == global_object);
9668 ASSERT(global_object->property_dictionary()->ValueAt( 9677 ASSERT(global_object->property_dictionary()->ValueAt(
9669 lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell()); 9678 lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
9670 is_dont_delete = lookup.IsDontDelete(); 9679 is_dont_delete = lookup.IsDontDelete();
9671 } 9680 }
9672 } 9681 }
9673 deferred->set_is_dont_delete(is_dont_delete); 9682 deferred->set_is_dont_delete(is_dont_delete);
9674 if (!is_dont_delete) { 9683 if (!is_dont_delete) {
9675 __ cmp(result.reg(), Factory::the_hole_value()); 9684 __ cmp(result.reg(), FACTORY->the_hole_value());
9676 deferred->Branch(equal); 9685 deferred->Branch(equal);
9677 } else if (FLAG_debug_code) { 9686 } else if (FLAG_debug_code) {
9678 __ cmp(result.reg(), Factory::the_hole_value()); 9687 __ cmp(result.reg(), FACTORY->the_hole_value());
9679 __ Check(not_equal, "DontDelete cells can't contain the hole"); 9688 __ Check(not_equal, "DontDelete cells can't contain the hole");
9680 } 9689 }
9681 __ IncrementCounter(&Counters::named_load_global_inline, 1); 9690 __ IncrementCounter(COUNTERS->named_load_global_inline(), 1);
9682 if (is_dont_delete) { 9691 if (is_dont_delete) {
9683 __ IncrementCounter(&Counters::dont_delete_hint_hit, 1); 9692 __ IncrementCounter(COUNTERS->dont_delete_hint_hit(), 1);
9684 } 9693 }
9685 } else { 9694 } else {
9686 // The initial (invalid) offset has to be large enough to force a 32-bit 9695 // The initial (invalid) offset has to be large enough to force a 32-bit
9687 // instruction encoding to allow patching with an arbitrary offset. Use 9696 // instruction encoding to allow patching with an arbitrary offset. Use
9688 // kMaxInt (minus kHeapObjectTag). 9697 // kMaxInt (minus kHeapObjectTag).
9689 int offset = kMaxInt; 9698 int offset = kMaxInt;
9690 masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset)); 9699 masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
9691 __ IncrementCounter(&Counters::named_load_inline, 1); 9700 __ IncrementCounter(COUNTERS->named_load_inline(), 1);
9692 } 9701 }
9693 9702
9694 deferred->BindExit(); 9703 deferred->BindExit();
9695 } 9704 }
9696 ASSERT(frame()->height() == original_height - 1); 9705 ASSERT(frame()->height() == original_height - 1);
9697 return result; 9706 return result;
9698 } 9707 }
9699 9708
9700 9709
9701 Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) { 9710 Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
(...skipping 25 matching lines...)
9727 9736
9728 // Check that the receiver is a heap object. 9737 // Check that the receiver is a heap object.
9729 __ test(receiver.reg(), Immediate(kSmiTagMask)); 9738 __ test(receiver.reg(), Immediate(kSmiTagMask));
9730 slow.Branch(zero, &value, &receiver); 9739 slow.Branch(zero, &value, &receiver);
9731 9740
9732 // This is the map check instruction that will be patched (so we can't 9741 // This is the map check instruction that will be patched (so we can't
9733 // use the double underscore macro that may insert instructions). 9742 // use the double underscore macro that may insert instructions).
9734 // Initially use an invalid map to force a failure. 9743 // Initially use an invalid map to force a failure.
9735 __ bind(&patch_site); 9744 __ bind(&patch_site);
9736 masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 9745 masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
9737 Immediate(Factory::null_value())); 9746 Immediate(FACTORY->null_value()));
9738 // This branch is always a forwards branch so it's always a fixed size 9747 // This branch is always a forwards branch so it's always a fixed size
9739 // which allows the assert below to succeed and patching to work. 9748 // which allows the assert below to succeed and patching to work.
9740 slow.Branch(not_equal, &value, &receiver); 9749 slow.Branch(not_equal, &value, &receiver);
9741 9750
9742 // The delta from the patch label to the store offset must be 9751 // The delta from the patch label to the store offset must be
9743 // statically known. 9752 // statically known.
9744 ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) == 9753 ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
9745 StoreIC::kOffsetToStoreInstruction); 9754 StoreIC::kOffsetToStoreInstruction);
9746 9755
9747 // The initial (invalid) offset has to be large enough to force a 32-bit 9756 // The initial (invalid) offset has to be large enough to force a 32-bit
(...skipping 89 matching lines...)
9837 __ test(receiver.reg(), Immediate(kSmiTagMask)); 9846 __ test(receiver.reg(), Immediate(kSmiTagMask));
9838 deferred->Branch(zero); 9847 deferred->Branch(zero);
9839 9848
9840 // Check that the receiver has the expected map. 9849 // Check that the receiver has the expected map.
9841 // Initially, use an invalid map. The map is patched in the IC 9850 // Initially, use an invalid map. The map is patched in the IC
9842 // initialization code. 9851 // initialization code.
9843 __ bind(deferred->patch_site()); 9852 __ bind(deferred->patch_site());
9844 // Use masm-> here instead of the double underscore macro since extra 9853 // Use masm-> here instead of the double underscore macro since extra
9845 // coverage code can interfere with the patching. 9854 // coverage code can interfere with the patching.
9846 masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 9855 masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
9847 Immediate(Factory::null_value())); 9856 Immediate(FACTORY->null_value()));
9848 deferred->Branch(not_equal); 9857 deferred->Branch(not_equal);
9849 9858
9850 // Check that the key is a smi. 9859 // Check that the key is a smi.
9851 if (!key.is_smi()) { 9860 if (!key.is_smi()) {
9852 __ test(key.reg(), Immediate(kSmiTagMask)); 9861 __ test(key.reg(), Immediate(kSmiTagMask));
9853 deferred->Branch(not_zero); 9862 deferred->Branch(not_zero);
9854 } else { 9863 } else {
9855 if (FLAG_debug_code) __ AbortIfNotSmi(key.reg()); 9864 if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
9856 } 9865 }
9857 9866
9858 // Get the elements array from the receiver. 9867 // Get the elements array from the receiver.
9859 __ mov(elements.reg(), 9868 __ mov(elements.reg(),
9860 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); 9869 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
9861 __ AssertFastElements(elements.reg()); 9870 __ AssertFastElements(elements.reg());
9862 9871
9863 // Check that the key is within bounds. 9872 // Check that the key is within bounds.
9864 __ cmp(key.reg(), 9873 __ cmp(key.reg(),
9865 FieldOperand(elements.reg(), FixedArray::kLengthOffset)); 9874 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
9866 deferred->Branch(above_equal); 9875 deferred->Branch(above_equal);
9867 9876
9868 // Load and check that the result is not the hole. 9877 // Load and check that the result is not the hole.
9869 // Key holds a smi. 9878 // Key holds a smi.
9870 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 9879 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
9871 __ mov(elements.reg(), 9880 __ mov(elements.reg(),
9872 FieldOperand(elements.reg(), 9881 FieldOperand(elements.reg(),
9873 key.reg(), 9882 key.reg(),
9874 times_2, 9883 times_2,
9875 FixedArray::kHeaderSize)); 9884 FixedArray::kHeaderSize));
9876 result = elements; 9885 result = elements;
9877 __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value())); 9886 __ cmp(Operand(result.reg()), Immediate(FACTORY->the_hole_value()));
9878 deferred->Branch(equal); 9887 deferred->Branch(equal);
9879 __ IncrementCounter(&Counters::keyed_load_inline, 1); 9888 __ IncrementCounter(COUNTERS->keyed_load_inline(), 1);
9880 9889
9881 deferred->BindExit(); 9890 deferred->BindExit();
9882 } else { 9891 } else {
9883 Comment cmnt(masm_, "[ Load from keyed Property"); 9892 Comment cmnt(masm_, "[ Load from keyed Property");
9884 result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET); 9893 result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
9885 // Make sure that we do not have a test instruction after the 9894 // Make sure that we do not have a test instruction after the
9886 // call. A test instruction after the call is used to 9895 // call. A test instruction after the call is used to
9887 // indicate that we have generated an inline version of the 9896 // indicate that we have generated an inline version of the
9888 // keyed load. The explicit nop instruction is here because 9897 // keyed load. The explicit nop instruction is here because
9889 // the push that follows might be peep-hole optimized away. 9898 // the push that follows might be peep-hole optimized away.
(...skipping 71 matching lines...)
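In outline, the inlined keyed load above does: map check, smi key check, bounds check against the elements length (unsigned, so negative keys fail), element load, hole check. A hedged sketch with illustrative names, where returning nullptr stands for falling through to the deferred IC call:

    #include <cstdint>

    // Illustrative stand-ins, not the V8 types.
    struct ElementsSketch { const void** slots; uint32_t length; };

    const void* KeyedLoadSketch(const ElementsSketch& elements, uint32_t key,
                                const void* the_hole) {
      if (key >= elements.length) return nullptr;  // deferred->Branch(above_equal)
      const void* value = elements.slots[key];
      if (value == the_hole) return nullptr;       // holes go to the IC stub
      return value;                                // fast-path hit
    }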
9961 deferred->Branch(not_zero); 9970 deferred->Branch(not_zero);
9962 } 9971 }
9963 9972
9964 __ bind(&in_new_space); 9973 __ bind(&in_new_space);
9965 // Bind the deferred code patch site to be able to locate the fixed 9974 // Bind the deferred code patch site to be able to locate the fixed
9966 // array map comparison. When debugging, we patch this comparison to 9975 // array map comparison. When debugging, we patch this comparison to
9967 // always fail so that we will hit the IC call in the deferred code 9976 // always fail so that we will hit the IC call in the deferred code
9968 // which will allow the debugger to break for fast case stores. 9977 // which will allow the debugger to break for fast case stores.
9969 __ bind(deferred->patch_site()); 9978 __ bind(deferred->patch_site());
9970 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset), 9979 __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
9971 Immediate(Factory::fixed_array_map())); 9980 Immediate(FACTORY->fixed_array_map()));
9972 deferred->Branch(not_equal); 9981 deferred->Branch(not_equal);
9973 9982
9974 // Check that the key is within bounds. Both the key and the length of 9983 // Check that the key is within bounds. Both the key and the length of
9975 // the JSArray are smis (because the fixed array check above ensures the 9984 // the JSArray are smis (because the fixed array check above ensures the
9976 // elements are in fast case). Use unsigned comparison to handle negative 9985 // elements are in fast case). Use unsigned comparison to handle negative
9977 // keys. 9986 // keys.
9978 __ cmp(key.reg(), 9987 __ cmp(key.reg(),
9979 FieldOperand(receiver.reg(), JSArray::kLengthOffset)); 9988 FieldOperand(receiver.reg(), JSArray::kLengthOffset));
9980 deferred->Branch(above_equal); 9989 deferred->Branch(above_equal);
9981 9990
9982 // Store the value. 9991 // Store the value.
9983 __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg()); 9992 __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
9984 __ IncrementCounter(&Counters::keyed_store_inline, 1); 9993 __ IncrementCounter(COUNTERS->keyed_store_inline(), 1);
9985 9994
9986 deferred->BindExit(); 9995 deferred->BindExit();
9987 } else { 9996 } else {
9988 result = frame()->CallKeyedStoreIC(strict_mode_flag()); 9997 result = frame()->CallKeyedStoreIC(strict_mode_flag());
9989 // Make sure that we do not have a test instruction after the 9998 // Make sure that we do not have a test instruction after the
9990 // call. A test instruction after the call is used to 9999 // call. A test instruction after the call is used to
9991 // indicate that we have generated an inline version of the 10000 // indicate that we have generated an inline version of the
9992 // keyed store. 10001 // keyed store.
9993 __ nop(); 10002 __ nop();
9994 } 10003 }
(...skipping 182 matching lines...)
10177 int stack_offset = 0; // Update if we change the stack height. 10186 int stack_offset = 0; // Update if we change the stack height.
10178 10187
10179 if (FLAG_debug_code) { 10188 if (FLAG_debug_code) {
10180 __ cmp(Operand(esp, kSizeOffset + stack_offset), 10189 __ cmp(Operand(esp, kSizeOffset + stack_offset),
10181 Immediate(kMinComplexMemCopy)); 10190 Immediate(kMinComplexMemCopy));
10182 Label ok; 10191 Label ok;
10183 __ j(greater_equal, &ok); 10192 __ j(greater_equal, &ok);
10184 __ int3(); 10193 __ int3();
10185 __ bind(&ok); 10194 __ bind(&ok);
10186 } 10195 }
10187 if (CpuFeatures::IsSupported(SSE2)) { 10196 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
10188 CpuFeatures::Scope enable(SSE2); 10197 CpuFeatures::Scope enable(SSE2);
10189 __ push(edi); 10198 __ push(edi);
10190 __ push(esi); 10199 __ push(esi);
10191 stack_offset += 2 * kPointerSize; 10200 stack_offset += 2 * kPointerSize;
10192 Register dst = edi; 10201 Register dst = edi;
10193 Register src = esi; 10202 Register src = esi;
10194 Register count = ecx; 10203 Register count = ecx;
10195 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset)); 10204 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
10196 __ mov(src, Operand(esp, stack_offset + kSourceOffset)); 10205 __ mov(src, Operand(esp, stack_offset + kSourceOffset));
10197 __ mov(count, Operand(esp, stack_offset + kSizeOffset)); 10206 __ mov(count, Operand(esp, stack_offset + kSizeOffset));
10198 10207
10199 10208
10200 __ movdqu(xmm0, Operand(src, 0)); 10209 __ movdqu(xmm0, Operand(src, 0));
10201 __ movdqu(Operand(dst, 0), xmm0); 10210 __ movdqu(Operand(dst, 0), xmm0);
10202 __ mov(edx, dst); 10211 __ mov(edx, dst);
10203 __ and_(edx, 0xF); 10212 __ and_(edx, 0xF);
10204 __ neg(edx); 10213 __ neg(edx);
10205 __ add(Operand(edx), Immediate(16)); 10214 __ add(Operand(edx), Immediate(16));
10206 __ add(dst, Operand(edx)); 10215 __ add(dst, Operand(edx));
10207 __ add(src, Operand(edx)); 10216 __ add(src, Operand(edx));
10208 __ sub(Operand(count), edx); 10217 __ sub(Operand(count), edx);
10209 10218
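The and/neg/add sequence above computes the byte count that advances the destination to the next 16-byte boundary (16 when it is already aligned, which is harmless because the initial movdqu already copied those first bytes). The same arithmetic in plain C++:

    #include <cstddef>
    #include <cstdint>

    // Sketch of the alignment prologue; the caller has already done one
    // unaligned 16-byte copy, so skipping up to 16 bytes here is safe.
    void AlignDestinationSketch(uint8_t*& dst, const uint8_t*& src,
                                size_t& count) {
      size_t adjust = 16 - (reinterpret_cast<uintptr_t>(dst) & 0xF);  // 1..16
      dst += adjust;
      src += adjust;
      count -= adjust;  // dst is now 16-byte aligned for aligned vector stores
    }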
10210 // edi is now aligned. Check if esi is also aligned. 10219 // edi is now aligned. Check if esi is also aligned.
10211 Label unaligned_source; 10220 Label unaligned_source;
10212 __ test(Operand(src), Immediate(0x0F)); 10221 __ test(Operand(src), Immediate(0x0F));
10213 __ j(not_zero, &unaligned_source); 10222 __ j(not_zero, &unaligned_source);
10214 { 10223 {
10215 __ IncrementCounter(&Counters::memcopy_aligned, 1); 10224 __ IncrementCounter(COUNTERS->memcopy_aligned(), 1);
10216 // Copy loop for aligned source and destination. 10225 // Copy loop for aligned source and destination.
10217 __ mov(edx, count); 10226 __ mov(edx, count);
10218 Register loop_count = ecx; 10227 Register loop_count = ecx;
10219 Register count = edx; 10228 Register count = edx;
10220 __ shr(loop_count, 5); 10229 __ shr(loop_count, 5);
10221 { 10230 {
10222 // Main copy loop. 10231 // Main copy loop.
10223 Label loop; 10232 Label loop;
10224 __ bind(&loop); 10233 __ bind(&loop);
10225 __ prefetch(Operand(src, 0x20), 1); 10234 __ prefetch(Operand(src, 0x20), 1);
(...skipping 27 matching lines...)
10253 __ mov(eax, Operand(esp, stack_offset + kDestinationOffset)); 10262 __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
10254 __ pop(esi); 10263 __ pop(esi);
10255 __ pop(edi); 10264 __ pop(edi);
10256 __ ret(0); 10265 __ ret(0);
10257 } 10266 }
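Both copy loops shift the count right by 5, i.e. they move 32 bytes per iteration (two 16-byte vector moves) with a prefetch one cache line ahead, then finish the sub-32-byte tail with overlapping unaligned 16-byte moves. A scalar sketch of that structure, with the overlap trick simplified to a plain tail copy:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopyLoopSketch(uint8_t* dst, const uint8_t* src, size_t count) {
      size_t iterations = count >> 5;     // shr loop_count, 5
      while (iterations-- != 0) {
        // prefetch(src + 0x20) in the generated code
        std::memcpy(dst, src, 32);        // two 16-byte vector moves
        dst += 32;
        src += 32;
      }
      std::memcpy(dst, src, count & 31);  // tail; real code overlaps 16-byte moves
    }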
10258 __ Align(16); 10267 __ Align(16);
10259 { 10268 {
10260 // Copy loop for unaligned source and aligned destination. 10269 // Copy loop for unaligned source and aligned destination.
10261 // If source is not aligned, we can't read it as efficiently. 10270 // If source is not aligned, we can't read it as efficiently.
10262 __ bind(&unaligned_source); 10271 __ bind(&unaligned_source);
10263 __ IncrementCounter(&Counters::memcopy_unaligned, 1); 10272 __ IncrementCounter(COUNTERS->memcopy_unaligned(), 1);
10264 __ mov(edx, ecx); 10273 __ mov(edx, ecx);
10265 Register loop_count = ecx; 10274 Register loop_count = ecx;
10266 Register count = edx; 10275 Register count = edx;
10267 __ shr(loop_count, 5); 10276 __ shr(loop_count, 5);
10268 { 10277 {
10269 // Main copy loop 10278 // Main copy loop
10270 Label loop; 10279 Label loop;
10271 __ bind(&loop); 10280 __ bind(&loop);
10272 __ prefetch(Operand(src, 0x20), 1); 10281 __ prefetch(Operand(src, 0x20), 1);
10273 __ movdqu(xmm0, Operand(src, 0x00)); 10282 __ movdqu(xmm0, Operand(src, 0x00));
(...skipping 23 matching lines...)
10297 __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); 10306 __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
10298 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0); 10307 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
10299 10308
10300 __ mov(eax, Operand(esp, stack_offset + kDestinationOffset)); 10309 __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
10301 __ pop(esi); 10310 __ pop(esi);
10302 __ pop(edi); 10311 __ pop(edi);
10303 __ ret(0); 10312 __ ret(0);
10304 } 10313 }
10305 10314
10306 } else { 10315 } else {
10307 __ IncrementCounter(&Counters::memcopy_noxmm, 1); 10316 __ IncrementCounter(COUNTERS->memcopy_noxmm(), 1);
10308 // SSE2 not supported. Unlikely to happen in practice. 10317 // SSE2 not supported. Unlikely to happen in practice.
10309 __ push(edi); 10318 __ push(edi);
10310 __ push(esi); 10319 __ push(esi);
10311 stack_offset += 2 * kPointerSize; 10320 stack_offset += 2 * kPointerSize;
10312 __ cld(); 10321 __ cld();
10313 Register dst = edi; 10322 Register dst = edi;
10314 Register src = esi; 10323 Register src = esi;
10315 Register count = ecx; 10324 Register count = ecx;
10316 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset)); 10325 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
10317 __ mov(src, Operand(esp, stack_offset + kSourceOffset)); 10326 __ mov(src, Operand(esp, stack_offset + kSourceOffset));
(...skipping 40 matching lines...)
10358 memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size); 10367 memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
10359 CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size); 10368 CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
10360 return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress()); 10369 return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
10361 } 10370 }
10362 10371
10363 #undef __ 10372 #undef __
10364 10373
10365 } } // namespace v8::internal 10374 } } // namespace v8::internal
10366 10375
10367 #endif // V8_TARGET_ARCH_IA32 10376 #endif // V8_TARGET_ARCH_IA32