Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(5)

Side by Side Diff: src/x64/codegen-x64.cc

Issue 6624085: [Isolates] Merge 7051:7083 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/x64/code-stubs-x64.cc ('k') | src/x64/deoptimizer-x64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 593 matching lines...) Expand 10 before | Expand all | Expand 10 after
604 LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF); 604 LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
605 } else { 605 } else {
606 // Anything else can be handled normally. 606 // Anything else can be handled normally.
607 Load(expr); 607 Load(expr);
608 } 608 }
609 } 609 }
610 610
611 611
612 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() { 612 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
613 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION; 613 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
614 ASSERT(scope()->arguments_shadow() != NULL); 614
615 // In strict mode there is no need for shadow arguments.
616 ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
615 // We don't want to do lazy arguments allocation for functions that 617 // We don't want to do lazy arguments allocation for functions that
 616 // have heap-allocated contexts, because it interferes with the 618 // have heap-allocated contexts, because it interferes with the
617 // uninitialized const tracking in the context objects. 619 // uninitialized const tracking in the context objects.
618 return (scope()->num_heap_slots() > 0) 620 return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
619 ? EAGER_ARGUMENTS_ALLOCATION 621 ? EAGER_ARGUMENTS_ALLOCATION
620 : LAZY_ARGUMENTS_ALLOCATION; 622 : LAZY_ARGUMENTS_ALLOCATION;
621 } 623 }
622 624
623 625
624 Result CodeGenerator::StoreArgumentsObject(bool initial) { 626 Result CodeGenerator::StoreArgumentsObject(bool initial) {
625 ArgumentsAllocationMode mode = ArgumentsMode(); 627 ArgumentsAllocationMode mode = ArgumentsMode();
626 ASSERT(mode != NO_ARGUMENTS_ALLOCATION); 628 ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
627 629
628 Comment cmnt(masm_, "[ store arguments object"); 630 Comment cmnt(masm_, "[ store arguments object");
629 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) { 631 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
630 // When using lazy arguments allocation, we store the arguments marker value 632 // When using lazy arguments allocation, we store the arguments marker value
631 // as a sentinel indicating that the arguments object hasn't been 633 // as a sentinel indicating that the arguments object hasn't been
632 // allocated yet. 634 // allocated yet.
633 frame_->Push(FACTORY->arguments_marker()); 635 frame_->Push(FACTORY->arguments_marker());
634 } else { 636 } else {
635 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); 637 ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
636 frame_->PushFunction(); 638 frame_->PushFunction();
637 frame_->PushReceiverSlotAddress(); 639 frame_->PushReceiverSlotAddress();
638 frame_->Push(Smi::FromInt(scope()->num_parameters())); 640 frame_->Push(Smi::FromInt(scope()->num_parameters()));
639 Result result = frame_->CallStub(&stub, 3); 641 Result result = frame_->CallStub(&stub, 3);
640 frame_->Push(&result); 642 frame_->Push(&result);
641 } 643 }
642 644
643 Variable* arguments = scope()->arguments(); 645 Variable* arguments = scope()->arguments();
644 Variable* shadow = scope()->arguments_shadow(); 646 Variable* shadow = scope()->arguments_shadow();
645 ASSERT(arguments != NULL && arguments->AsSlot() != NULL); 647 ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
646 ASSERT(shadow != NULL && shadow->AsSlot() != NULL); 648 ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
649 scope()->is_strict_mode());
650
647 JumpTarget done; 651 JumpTarget done;
648 bool skip_arguments = false; 652 bool skip_arguments = false;
649 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { 653 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
650 // We have to skip storing into the arguments slot if it has 654 // We have to skip storing into the arguments slot if it has
 651 // already been written to. This can happen if a function 655 // already been written to. This can happen if a function
652 // has a local variable named 'arguments'. 656 // has a local variable named 'arguments'.
653 LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF); 657 LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
654 Result probe = frame_->Pop(); 658 Result probe = frame_->Pop();
655 if (probe.is_constant()) { 659 if (probe.is_constant()) {
656 // We have to skip updating the arguments object if it has 660 // We have to skip updating the arguments object if it has
657 // been assigned a proper value. 661 // been assigned a proper value.
658 skip_arguments = !probe.handle()->IsArgumentsMarker(); 662 skip_arguments = !probe.handle()->IsArgumentsMarker();
659 } else { 663 } else {
660 __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex); 664 __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
661 probe.Unuse(); 665 probe.Unuse();
662 done.Branch(not_equal); 666 done.Branch(not_equal);
663 } 667 }
664 } 668 }
665 if (!skip_arguments) { 669 if (!skip_arguments) {
666 StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT); 670 StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
667 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); 671 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
668 } 672 }
669 StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT); 673 if (shadow != NULL) {
674 StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
675 }
670 return frame_->Pop(); 676 return frame_->Pop();
671 } 677 }
672 678
673 //------------------------------------------------------------------------------ 679 //------------------------------------------------------------------------------
674 // CodeGenerator implementation of variables, lookups, and stores. 680 // CodeGenerator implementation of variables, lookups, and stores.
675 681
676 Reference::Reference(CodeGenerator* cgen, 682 Reference::Reference(CodeGenerator* cgen,
677 Expression* expression, 683 Expression* expression,
678 bool persist_after_get) 684 bool persist_after_get)
679 : cgen_(cgen), 685 : cgen_(cgen),
(...skipping 7533 matching lines...) Expand 10 before | Expand all | Expand 10 after
8213 DeferredReferenceGetNamedValue* deferred = 8219 DeferredReferenceGetNamedValue* deferred =
8214 new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name); 8220 new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
8215 8221
8216 // Check that the receiver is a heap object. 8222 // Check that the receiver is a heap object.
8217 __ JumpIfSmi(receiver.reg(), deferred->entry_label()); 8223 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
8218 8224
8219 __ bind(deferred->patch_site()); 8225 __ bind(deferred->patch_site());
8220 // This is the map check instruction that will be patched (so we can't 8226 // This is the map check instruction that will be patched (so we can't
8221 // use the double underscore macro that may insert instructions). 8227 // use the double underscore macro that may insert instructions).
8222 // Initially use an invalid map to force a failure. 8228 // Initially use an invalid map to force a failure.
8223 masm()->Move(kScratchRegister, FACTORY->null_value()); 8229 masm()->movq(kScratchRegister, FACTORY->null_value(),
8230 RelocInfo::EMBEDDED_OBJECT);
8224 masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 8231 masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
8225 kScratchRegister); 8232 kScratchRegister);
8226 // This branch is always a forwards branch so it's always a fixed 8233 // This branch is always a forwards branch so it's always a fixed
8227 // size which allows the assert below to succeed and patching to work. 8234 // size which allows the assert below to succeed and patching to work.
8228 // Don't use deferred->Branch(...), since that might add coverage code. 8235 // Don't use deferred->Branch(...), since that might add coverage code.
8229 masm()->j(not_equal, deferred->entry_label()); 8236 masm()->j(not_equal, deferred->entry_label());
8230 8237
8231 // The delta from the patch label to the load offset must be 8238 // The delta from the patch label to the load offset must be
8232 // statically known. 8239 // statically known.
8233 ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) == 8240 ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
8289 Condition is_smi = masm()->CheckSmi(receiver.reg()); 8296 Condition is_smi = masm()->CheckSmi(receiver.reg());
8290 slow.Branch(is_smi, &value, &receiver); 8297 slow.Branch(is_smi, &value, &receiver);
8291 8298
8292 // This is the map check instruction that will be patched. 8299 // This is the map check instruction that will be patched.
8293 // Initially use an invalid map to force a failure. The exact 8300 // Initially use an invalid map to force a failure. The exact
8294 // instruction sequence is important because we use the 8301 // instruction sequence is important because we use the
8295 // kOffsetToStoreInstruction constant for patching. We avoid using 8302 // kOffsetToStoreInstruction constant for patching. We avoid using
8296 // the __ macro for the following two instructions because it 8303 // the __ macro for the following two instructions because it
8297 // might introduce extra instructions. 8304 // might introduce extra instructions.
8298 __ bind(&patch_site); 8305 __ bind(&patch_site);
8299 masm()->Move(kScratchRegister, FACTORY->null_value()); 8306 masm()->movq(kScratchRegister, FACTORY->null_value(),
8307 RelocInfo::EMBEDDED_OBJECT);
8300 masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 8308 masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
8301 kScratchRegister); 8309 kScratchRegister);
8302 // This branch is always a forwards branch so it's always a fixed size 8310 // This branch is always a forwards branch so it's always a fixed size
8303 // which allows the assert below to succeed and patching to work. 8311 // which allows the assert below to succeed and patching to work.
8304 slow.Branch(not_equal, &value, &receiver); 8312 slow.Branch(not_equal, &value, &receiver);
8305 8313
8306 // The delta from the patch label to the store offset must be 8314 // The delta from the patch label to the store offset must be
8307 // statically known. 8315 // statically known.
8308 ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) == 8316 ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
8309 StoreIC::kOffsetToStoreInstruction); 8317 StoreIC::kOffsetToStoreInstruction);
(...skipping 519 matching lines...) Expand 10 before | Expand all | Expand 10 after
8829 } 8837 }
8830 8838
8831 #endif 8839 #endif
8832 8840
8833 8841
8834 #undef __ 8842 #undef __
8835 8843
8836 } } // namespace v8::internal 8844 } } // namespace v8::internal
8837 8845
8838 #endif // V8_TARGET_ARCH_X64 8846 #endif // V8_TARGET_ARCH_X64
OLDNEW
« no previous file with comments | « src/x64/code-stubs-x64.cc ('k') | src/x64/deoptimizer-x64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698