Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_optimizer.cc

Issue 395943003: Support allocation sinking for compound objects. (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: improve tests Created 6 years, 5 months ago
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/flow_graph_optimizer.h" 5 #include "vm/flow_graph_optimizer.h"
6 6
7 #include "vm/bit_vector.h" 7 #include "vm/bit_vector.h"
8 #include "vm/cha.h" 8 #include "vm/cha.h"
9 #include "vm/cpu.h" 9 #include "vm/cpu.h"
10 #include "vm/dart_entry.h" 10 #include "vm/dart_entry.h"
(...skipping 5649 matching lines...)
5660 TryHoistCheckSmiThroughPhi( 5660 TryHoistCheckSmiThroughPhi(
5661 &it, header, pre_header, current->AsCheckSmi()); 5661 &it, header, pre_header, current->AsCheckSmi());
5662 } 5662 }
5663 } 5663 }
5664 } 5664 }
5665 } 5665 }
5666 } 5666 }
5667 } 5667 }
5668 5668
5669 5669
5670 // Alias represents a family of locations. It is used to capture aliasing
5671 // between stores and loads. Store can alias another load or store if and only
5672 // if they have the same alias.
5673 class Alias : public ValueObject {
5674 public:
5675 Alias(const Alias& other) : ValueObject(), alias_(other.alias_) { }
5676
5677 // All indexed load/stores alias each other.
5678 // TODO(vegorov): incorporate type of array into alias to disambiguate
5679 // different typed data and normal arrays.
5680 static Alias UnknownIndex(intptr_t id) {
5681 return Alias(kUnknownIndexAlias, id);
5682 }
5683
5684 static Alias ConstantIndex(intptr_t id) {
5685 ASSERT(id != 0);
5686 return Alias(kConstantIndex, id);
5687 }
5688
5689 // Field load/stores alias each other only when they access the same field.
5690 // AliasedSet assigns ids to a combination of instance and field during
5691 // the optimization phase.
5692 static Alias Field(intptr_t id) {
5693 ASSERT(id != 0);
5694 return Alias(kFieldAlias, id);
5695 }
5696
5697 // VMField load/stores alias each other when field offset matches.
5698 // TODO(vegorov) storing a context variable does not alias loading array
5699 // length.
5700 static Alias VMField(intptr_t id) {
5701 ASSERT(id != 0);
5702 return Alias(kVMFieldAlias, id);
5703 }
5704
5705 // Current context load/stores alias each other.
5706 static Alias CurrentContext() {
5707 return Alias(kCurrentContextAlias, 0);
5708 }
5709
5710 // Operation does not alias anything.
5711 static Alias None() {
5712 return Alias(kNoneAlias);
5713 }
5714
5715 bool IsNone() const {
5716 return alias_ == kNoneAlias;
5717 }
5718
5719 // Convert this alias to a positive array index.
5720 intptr_t ToIndex() const {
5721 ASSERT(!IsNone());
5722 return alias_;
5723 }
5724
5725 private:
5726 enum {
5727 // Number of bits required to encode Kind value.
5728 // The payload occupies the rest of the bits, but leaves the MSB (sign bit)
5729 // empty so that the resulting encoded value is always a positive integer.
5730 kBitsForKind = 3,
5731 kBitsForPayload = kWordSize * kBitsPerByte - kBitsForKind - 1,
5732 };
5733
5734 enum Kind {
5735 kNoneAlias = -1,
5736 kCurrentContextAlias = 0,
5737 kUnknownIndexAlias = 1,
5738 kFieldAlias = 2,
5739 kVMFieldAlias = 3,
5740 kConstantIndex = 4,
5741 kNumKinds = kConstantIndex + 1
5742 };
5743 COMPILE_ASSERT(kNumKinds < ((1 << kBitsForKind) - 1));
5744
5745 explicit Alias(intptr_t alias) : alias_(alias) { }
5746
5747 Alias(Kind kind, uword payload)
5748 : alias_(KindField::encode(kind) | PayloadField::encode(payload)) { }
5749
5750 uword payload() const {
5751 return PayloadField::decode(alias_);
5752 }
5753
5754 Kind kind() const {
5755 return IsNone() ? kNoneAlias : KindField::decode(alias_);
5756 }
5757
5758 typedef BitField<Kind, 0, kBitsForKind> KindField;
5759 typedef BitField<uword, kBitsForKind, kBitsForPayload> PayloadField;
5760
5761 const intptr_t alias_;
5762 };
5763
5764
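Note (not part of the patch): the Alias class deleted above packs a 3-bit kind tag and a payload into a single non-negative integer so the value can double as an array index (see ToIndex). A standalone sketch of that encoding, using plain shifts instead of the VM's BitField template:

#include <cassert>
#include <cstdint>
#include <iostream>

// Low 3 bits hold the kind, the payload sits above them, and the sign bit is
// left unused so the encoded alias is always a non-negative index.
constexpr int kBitsForKind = 3;
constexpr int kBitsForPayload = 8 * sizeof(intptr_t) - kBitsForKind - 1;

intptr_t Encode(intptr_t kind, intptr_t payload) {
  assert(kind < (1 << kBitsForKind));
  assert(payload < (static_cast<intptr_t>(1) << kBitsForPayload));
  return (payload << kBitsForKind) | kind;
}

intptr_t DecodeKind(intptr_t alias) { return alias & ((1 << kBitsForKind) - 1); }
intptr_t DecodePayload(intptr_t alias) { return alias >> kBitsForKind; }

int main() {
  const intptr_t alias = Encode(/*kFieldAlias=*/2, /*id=*/42);
  assert(alias >= 0);  // sign bit stays clear for in-range payloads
  std::cout << "kind=" << DecodeKind(alias)
            << " payload=" << DecodePayload(alias) << "\n";
  return 0;
}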
5765 // Place describes an abstract location (e.g. field) that IR can load 5670 // Place describes an abstract location (e.g. field) that IR can load
5766 // from or store to. 5671 // from or store to.
5672 //
5673 // Places are also used to describe wild-card locations also known as aliases,
5674 // which essentially represent sets of places that alias each other. Places A
5675 // and B are said to alias each other if store into A can affect load from B.
5676 //
5677 // We distinguish the following aliases:
5678 //
5679 // - for fields
5680 // - *.f, *.@offs - field inside some object;
5681 // - X.f, X.@offs - field inside an allocated object X;
5682 // - for indexed accesses
5683 // - *[*] - non-constant index inside some object;
5684 // - *[C] - constant index inside some object;
5685 // - X[*] - non-constant index inside an allocated object X;
5686 // - X[C] - constant index inside an allocated object X.
5687 //
5688 // Separating allocations from other objects improves precision of the
5689 // load forwarding pass because of the following two properties:
5690 //
5691 // - if X can be proven to have no aliases itself (i.e. there is no other SSA
5692 // variable that points to X) then no place inside X can be aliased with any
5693 // wildcard dependent place (*.f, *.@offs, *[*], *[C]);
5694 // - given allocations X and Y no place inside X can be aliased with any place
5695 // inside Y even if any of them or both escape.
5696 //
5697 // It is important to realize that a single place can belong to multiple aliases.
5698 // For example place X.f with aliased allocation X belongs both to X.f and *.f
5699 // aliases. Likewise X[C] with non-aliased allocation X belongs to X[C] and X[*]
5700 // aliases.
5701 //
5767 class Place : public ValueObject { 5702 class Place : public ValueObject {
5768 public: 5703 public:
5769 enum Kind { 5704 enum Kind {
5770 kNone, 5705 kNone,
5771 5706
5772 // Field location. For instance fields is represented as a pair of a Field 5707 // Field location. For instance fields is represented as a pair of a Field
5773 // object and an instance (SSA definition) that is being accessed. 5708 // object and an instance (SSA definition) that is being accessed.
5774 // For static fields instance is NULL. 5709 // For static fields instance is NULL.
5775 kField, 5710 kField,
5776 5711
5777 // VMField location. Represented as a pair of an instance (SSA definition) 5712 // VMField location. Represented as a pair of an instance (SSA definition)
5778 // being accessed and offset to the field. 5713 // being accessed and offset to the field.
5779 kVMField, 5714 kVMField,
5780 5715
5781 // Indexed location. 5716 // Indexed location with a non-constant index.
5782 kIndexed, 5717 kIndexed,
5783 5718
5719 // Indexed location with a constant index.
5720 kConstantIndexed,
5721
5784 // Current context. 5722 // Current context.
5785 kContext 5723 kContext
5786 }; 5724 };
5787 5725
5788 Place(const Place& other) 5726 Place(const Place& other)
5789 : ValueObject(), 5727 : ValueObject(),
5790 kind_(other.kind_), 5728 kind_(other.kind_),
5791 representation_(other.representation_), 5729 representation_(other.representation_),
5792 instance_(other.instance_), 5730 instance_(other.instance_),
5793 raw_selector_(other.raw_selector_), 5731 raw_selector_(other.raw_selector_),
(...skipping 51 matching lines...)
5845 case Instruction::kStoreStaticField: 5783 case Instruction::kStoreStaticField:
5846 kind_ = kField; 5784 kind_ = kField;
5847 representation_ = instr->AsStoreStaticField()-> 5785 representation_ = instr->AsStoreStaticField()->
5848 RequiredInputRepresentation(StoreStaticFieldInstr::kValuePos); 5786 RequiredInputRepresentation(StoreStaticFieldInstr::kValuePos);
5849 field_ = &instr->AsStoreStaticField()->field(); 5787 field_ = &instr->AsStoreStaticField()->field();
5850 *is_store = true; 5788 *is_store = true;
5851 break; 5789 break;
5852 5790
5853 case Instruction::kLoadIndexed: { 5791 case Instruction::kLoadIndexed: {
5854 LoadIndexedInstr* load_indexed = instr->AsLoadIndexed(); 5792 LoadIndexedInstr* load_indexed = instr->AsLoadIndexed();
5855 kind_ = kIndexed;
5856 representation_ = load_indexed->representation(); 5793 representation_ = load_indexed->representation();
5857 instance_ = load_indexed->array()->definition()->OriginalDefinition(); 5794 instance_ = load_indexed->array()->definition()->OriginalDefinition();
5858 index_ = load_indexed->index()->definition(); 5795 SetIndex(load_indexed->index()->definition());
5859 *is_load = true; 5796 *is_load = true;
5860 break; 5797 break;
5861 } 5798 }
5862 5799
5863 case Instruction::kStoreIndexed: { 5800 case Instruction::kStoreIndexed: {
5864 StoreIndexedInstr* store_indexed = instr->AsStoreIndexed(); 5801 StoreIndexedInstr* store_indexed = instr->AsStoreIndexed();
5865 kind_ = kIndexed;
5866 representation_ = store_indexed-> 5802 representation_ = store_indexed->
5867 RequiredInputRepresentation(StoreIndexedInstr::kValuePos); 5803 RequiredInputRepresentation(StoreIndexedInstr::kValuePos);
5868 instance_ = store_indexed->array()->definition()->OriginalDefinition(); 5804 instance_ = store_indexed->array()->definition()->OriginalDefinition();
5869 index_ = store_indexed->index()->definition(); 5805 SetIndex(store_indexed->index()->definition());
5870 *is_store = true; 5806 *is_store = true;
5871 break; 5807 break;
5872 } 5808 }
5873 5809
5874 case Instruction::kCurrentContext: 5810 case Instruction::kCurrentContext:
5875 kind_ = kContext; 5811 kind_ = kContext;
5876 ASSERT(instr->AsCurrentContext()->representation() == kTagged); 5812 ASSERT(instr->AsCurrentContext()->representation() == kTagged);
5877 representation_ = kTagged; 5813 representation_ = kTagged;
5878 *is_load = true; 5814 *is_load = true;
5879 break; 5815 break;
5880 5816
5881 case Instruction::kStoreContext: 5817 case Instruction::kStoreContext:
5882 kind_ = kContext; 5818 kind_ = kContext;
5883 ASSERT(instr->AsStoreContext()->RequiredInputRepresentation( 5819 ASSERT(instr->AsStoreContext()->RequiredInputRepresentation(
5884 StoreContextInstr::kValuePos) == kTagged); 5820 StoreContextInstr::kValuePos) == kTagged);
5885 representation_ = kTagged; 5821 representation_ = kTagged;
5886 *is_store = true; 5822 *is_store = true;
5887 break; 5823 break;
5888 5824
5889 default: 5825 default:
5890 break; 5826 break;
5891 } 5827 }
5892 } 5828 }
5893 5829
5830 // Create object representing *[*] alias.
5831 static Place* CreateAnyInstanceAnyIndexAlias(Isolate* isolate,
5832 intptr_t id) {
5833 return Wrap(isolate, Place(kIndexed, NULL, 0), id);
5834 }
5835
5836 // Return least generic alias for this place. Given that aliases are
5837 // essentially sets of places we define least generic alias as a smallest
5838 // alias that contains this place.
5839 //
5840 // We obtain such alias by a simple transformation:
5841 //
5842 // - for places that depend on an instance X.f, X.@offs, X[i], X[C]
5843 // we drop X if X is not an allocation because in this case X does not
5844 // possess an identity, obtaining aliases *.f, *.@offs, *[i] and *[C]
5845 // respectively;
5846 // - for non-constant indexed places X[i] we drop information about the
5847 // index obtaining alias X[*].
5848 //
5849 Place ToAlias() const {
5850 return Place(
5851 kind_,
5852 (DependsOnInstance() && IsAllocation(instance())) ? instance() : NULL,
5853 (kind() == kIndexed) ? 0 : raw_selector_);
5854 }
5855
5856 bool DependsOnInstance() const {
5857 switch (kind()) {
5858 case kField:
5859 case kVMField:
5860 case kIndexed:
5861 case kConstantIndexed:
5862 return true;
5863
5864 case kContext:
5865 case kNone:
5866 return false;
5867 }
5868
5869 UNREACHABLE();
5870 return false;
5871 }
5872
5873 // Given instance dependent alias X.f, X.@offs, X[C], X[*] return
5874 // wild-card dependent alias *.f, *.@offs, *[C] or *[*] respectively.
5875 Place CopyWithoutInstance() const {
5876 ASSERT(DependsOnInstance());
5877 return Place(kind_, NULL, raw_selector_);
5878 }
5879
5880 // Given alias X[C] or *[C] return X[*] and *[*] respectively.
5881 Place CopyWithoutIndex() const {
5882 ASSERT(kind_ == kConstantIndexed);
5883 return Place(kIndexed, instance_, 0);
5884 }
5885
5894 intptr_t id() const { return id_; } 5886 intptr_t id() const { return id_; }
5895 void set_id(intptr_t id) { id_ = id; }
5896 5887
5897 Kind kind() const { return kind_; } 5888 Kind kind() const { return kind_; }
5898 5889
5899 Representation representation() const { return representation_; } 5890 Representation representation() const { return representation_; }
5900 5891
5901 Definition* instance() const { 5892 Definition* instance() const {
5902 ASSERT((kind_ == kField) || (kind_ == kVMField) || (kind_ == kIndexed)); 5893 ASSERT(DependsOnInstance());
5903 return instance_; 5894 return instance_;
5904 } 5895 }
5905 5896
5906 void set_instance(Definition* def) { 5897 void set_instance(Definition* def) {
5907 ASSERT((kind_ == kField) || (kind_ == kVMField) || (kind_ == kIndexed)); 5898 ASSERT(DependsOnInstance());
5908 instance_ = def->OriginalDefinition(); 5899 instance_ = def->OriginalDefinition();
5909 } 5900 }
5910 5901
5911 const Field& field() const { 5902 const Field& field() const {
5912 ASSERT(kind_ == kField); 5903 ASSERT(kind_ == kField);
5913 return *field_; 5904 return *field_;
5914 } 5905 }
5915 5906
5916 intptr_t offset_in_bytes() const { 5907 intptr_t offset_in_bytes() const {
5917 ASSERT(kind_ == kVMField); 5908 ASSERT(kind_ == kVMField);
5918 return offset_in_bytes_; 5909 return offset_in_bytes_;
5919 } 5910 }
5920 5911
5921 Definition* index() const { 5912 Definition* index() const {
5922 ASSERT(kind_ == kIndexed); 5913 ASSERT(kind_ == kIndexed);
5923 return index_; 5914 return index_;
5924 } 5915 }
5925 5916
5917 intptr_t index_constant() const {
5918 ASSERT(kind_ == kConstantIndexed);
5919 return index_constant_;
5920 }
5921
5922 static const char* DefinitionName(Definition* def) {
5923 if (def == NULL) {
5924 return "*";
5925 } else {
5926 return Isolate::Current()->current_zone()->PrintToString(
5927 "v%" Pd, def->ssa_temp_index());
5928 }
5929 }
5930
5926 const char* ToCString() const { 5931 const char* ToCString() const {
5927 switch (kind_) { 5932 switch (kind_) {
5928 case kNone: 5933 case kNone:
5929 return "<none>"; 5934 return "<none>";
5930 5935
5931 case kField: { 5936 case kField: {
5932 const char* field_name = String::Handle(field().name()).ToCString(); 5937 const char* field_name = String::Handle(field().name()).ToCString();
5933 if (instance() == NULL) { 5938 if (field().is_static()) {
5934 return field_name; 5939 return Isolate::Current()->current_zone()->PrintToString(
5940 "<%s>", field_name);
5941 } else {
5942 return Isolate::Current()->current_zone()->PrintToString(
5943 "<%s.%s>", DefinitionName(instance()), field_name);
5935 } 5944 }
5936 return Isolate::Current()->current_zone()->PrintToString(
5937 "<v%" Pd ".%s>", instance()->ssa_temp_index(), field_name);
5938 } 5945 }
5939 5946
5940 case kVMField: { 5947 case kVMField:
5941 return Isolate::Current()->current_zone()->PrintToString( 5948 return Isolate::Current()->current_zone()->PrintToString(
5942 "<v%" Pd "@%" Pd ">", 5949 "<%s.@%" Pd ">",
5943 instance()->ssa_temp_index(), offset_in_bytes()); 5950 DefinitionName(instance()),
5944 } 5951 offset_in_bytes());
5945 5952
5946 case kIndexed: { 5953 case kIndexed:
5947 return Isolate::Current()->current_zone()->PrintToString( 5954 return Isolate::Current()->current_zone()->PrintToString(
5948 "<v%" Pd "[v%" Pd "]>", 5955 "<%s[%s]>",
5949 instance()->ssa_temp_index(), 5956 DefinitionName(instance()),
5950 index()->ssa_temp_index()); 5957 DefinitionName(index()));
5951 } 5958
5959 case kConstantIndexed:
5960 return Isolate::Current()->current_zone()->PrintToString(
5961 "<%s[%" Pd "]>",
5962 DefinitionName(instance()),
5963 index_constant());
5952 5964
5953 case kContext: 5965 case kContext:
5954 return "<context>"; 5966 return "<context>";
5955 } 5967 }
5956 UNREACHABLE(); 5968 UNREACHABLE();
5957 return "<?>"; 5969 return "<?>";
5958 } 5970 }
5959 5971
5960 bool IsFinalField() const { 5972 bool IsFinalField() const {
5961 return (kind() == kField) && field().is_final(); 5973 return (kind() == kField) && field().is_final();
5962 } 5974 }
5963 5975
5964 intptr_t Hashcode() const { 5976 intptr_t Hashcode() const {
5965 return (kind_ * 63 + reinterpret_cast<intptr_t>(instance_)) * 31 + 5977 return (kind_ * 63 + reinterpret_cast<intptr_t>(instance_)) * 31 +
5966 representation_ * 15 + FieldHashcode(); 5978 representation_ * 15 + FieldHashcode();
5967 } 5979 }
5968 5980
5969 bool Equals(Place* other) const { 5981 bool Equals(const Place* other) const {
5970 return (kind_ == other->kind_) && 5982 return (kind_ == other->kind_) &&
5971 (representation_ == other->representation_) && 5983 (representation_ == other->representation_) &&
5972 (instance_ == other->instance_) && 5984 (instance_ == other->instance_) &&
5973 SameField(other); 5985 SameField(other);
5974 } 5986 }
5975 5987
5976 // Create a zone allocated copy of this place. 5988 // Create a zone allocated copy of this place and assign given id to it.
5977 static Place* Wrap(Isolate* isolate, const Place& place); 5989 static Place* Wrap(Isolate* isolate, const Place& place, intptr_t id);
5990
5991 static bool IsAllocation(Definition* defn) {
5992 // TODO(vegorov): add CreateContext to this list.
5993 return (defn != NULL) &&
5994 (defn->IsAllocateObject() ||
5995 defn->IsCreateArray() ||
5996 (defn->IsStaticCall() &&
5997 defn->AsStaticCall()->IsRecognizedFactory()));
5998 }
5978 5999
5979 private: 6000 private:
5980 bool SameField(Place* other) const { 6001 Place(Kind kind, Definition* instance, intptr_t selector)
6002 : kind_(kind),
6003 representation_(kNoRepresentation),
6004 instance_(instance),
6005 raw_selector_(selector),
6006 id_(0) {
6007 }
6008
6009 bool SameField(const Place* other) const {
5981 return (kind_ == kField) ? (field().raw() == other->field().raw()) 6010 return (kind_ == kField) ? (field().raw() == other->field().raw())
5982 : (offset_in_bytes_ == other->offset_in_bytes_); 6011 : (offset_in_bytes_ == other->offset_in_bytes_);
5983 } 6012 }
5984 6013
5985 intptr_t FieldHashcode() const { 6014 intptr_t FieldHashcode() const {
5986 return (kind_ == kField) ? reinterpret_cast<intptr_t>(field().raw()) 6015 return (kind_ == kField) ? reinterpret_cast<intptr_t>(field().raw())
5987 : offset_in_bytes_; 6016 : offset_in_bytes_;
5988 } 6017 }
5989 6018
6019 void SetIndex(Definition* index) {
6020 ConstantInstr* index_constant = index->AsConstant();
6021 if ((index_constant != NULL) && index_constant->value().IsSmi()) {
6022 kind_ = kConstantIndexed;
6023 index_constant_ = Smi::Cast(index_constant->value()).Value();
6024 } else {
6025 kind_ = kIndexed;
6026 index_ = index;
6027 }
6028 }
6029
5990 Kind kind_; 6030 Kind kind_;
5991 Representation representation_; 6031 Representation representation_;
5992 Definition* instance_; 6032 Definition* instance_;
5993 union { 6033 union {
5994 intptr_t raw_selector_; 6034 intptr_t raw_selector_;
5995 const Field* field_; 6035 const Field* field_;
5996 intptr_t offset_in_bytes_; 6036 intptr_t offset_in_bytes_;
6037 intptr_t index_constant_;
5997 Definition* index_; 6038 Definition* index_;
5998 }; 6039 };
5999 6040
6000 intptr_t id_; 6041 intptr_t id_;
6001 }; 6042 };
6002 6043
6003 6044
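Note (not part of the patch; toy types instead of the VM's Definition and Place classes): the wild-card scheme in the comment above maps every place to its least generic alias by dropping the instance unless it is a known allocation and by dropping the index unless it is a constant. A minimal standalone C++ sketch of that mapping:

#include <cstdio>
#include <string>

// Toy place: a field access X.f or an indexed access X[i]/X[C].
struct ToyPlace {
  std::string instance;          // SSA name, e.g. "v2"
  bool instance_is_allocation;   // can the compiler identify the allocation?
  bool is_indexed;               // X[...] rather than X.f
  bool index_is_constant;
  int constant_index;            // only meaningful if index_is_constant
  std::string field;             // only meaningful if !is_indexed
};

// Mirrors the idea behind Place::ToAlias(): drop the instance unless it is a
// known allocation (X.f -> *.f), and drop a non-constant index (X[i] -> X[*]).
std::string ToAliasName(const ToyPlace& p) {
  std::string inst = p.instance_is_allocation ? p.instance : "*";
  if (!p.is_indexed) return inst + "." + p.field;
  if (p.index_is_constant) return inst + "[" + std::to_string(p.constant_index) + "]";
  return inst + "[*]";
}

int main() {
  ToyPlace param_field = {"v1", false, false, false, 0, "f"};
  ToyPlace alloc_index = {"v2", true, true, false, 0, ""};
  ToyPlace alloc_const = {"v2", true, true, true, 3, ""};
  std::printf("%s\n", ToAliasName(param_field).c_str());  // *.f
  std::printf("%s\n", ToAliasName(alloc_index).c_str());  // v2[*]
  std::printf("%s\n", ToAliasName(alloc_const).c_str());  // v2[3]
  return 0;
}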
6004 class ZonePlace : public ZoneAllocated { 6045 class ZonePlace : public ZoneAllocated {
6005 public: 6046 public:
6006 explicit ZonePlace(const Place& place) : place_(place) { } 6047 explicit ZonePlace(const Place& place) : place_(place) { }
6007 6048
6008 Place* place() { return &place_; } 6049 Place* place() { return &place_; }
6009 6050
6010 private: 6051 private:
6011 Place place_; 6052 Place place_;
6012 }; 6053 };
6013 6054
6014 6055
6015 Place* Place::Wrap(Isolate* isolate, const Place& place) { 6056 Place* Place::Wrap(Isolate* isolate, const Place& place, intptr_t id) {
6016 return (new(isolate) ZonePlace(place))->place(); 6057 Place* wrapped = (new(isolate) ZonePlace(place))->place();
6058 wrapped->id_ = id;
6059 return wrapped;
6017 } 6060 }
6018 6061
6019 6062
6020 // Correspondence between places connected through outgoing phi moves on the 6063 // Correspondence between places connected through outgoing phi moves on the
6021 // edge that targets join. 6064 // edge that targets join.
6022 class PhiPlaceMoves : public ZoneAllocated { 6065 class PhiPlaceMoves : public ZoneAllocated {
6023 public: 6066 public:
6024 // Record a move from the place with id |from| to the place with id |to| at 6067 // Record a move from the place with id |from| to the place with id |to| at
6025 // the given block. 6068 // the given block.
6026 void CreateOutgoingMove(Isolate* isolate, 6069 void CreateOutgoingMove(Isolate* isolate,
(...skipping 39 matching lines...)
6066 // carries a set of places that can be aliased by side-effects, essentially 6109 // carries a set of places that can be aliased by side-effects, essentially
6067 // those that are affected by calls. 6110 // those that are affected by calls.
6068 class AliasedSet : public ZoneAllocated { 6111 class AliasedSet : public ZoneAllocated {
6069 public: 6112 public:
6070 AliasedSet(Isolate* isolate, 6113 AliasedSet(Isolate* isolate,
6071 ZoneGrowableArray<Place*>* places, 6114 ZoneGrowableArray<Place*>* places,
6072 PhiPlaceMoves* phi_moves) 6115 PhiPlaceMoves* phi_moves)
6073 : isolate_(isolate), 6116 : isolate_(isolate),
6074 places_(*places), 6117 places_(*places),
6075 phi_moves_(phi_moves), 6118 phi_moves_(phi_moves),
6076 sets_(), 6119 aliases_(5),
6077 aliased_by_effects_(new(isolate) BitVector(places->length())), 6120 aliases_map_(),
6078 max_field_id_(0), 6121 representatives_(),
6079 field_ids_(), 6122 killed_(),
6080 max_index_id_(0), 6123 aliased_by_effects_(new(isolate) BitVector(places->length())) {
6081 index_ids_(), 6124 InsertAlias(Place::CreateAnyInstanceAnyIndexAlias(isolate_,
6082 max_unknown_index_id_(0), 6125 kAnyInstanceAnyIndexAlias));
6083 unknown_index_ids_(), 6126 for (intptr_t i = 0; i < places_.length(); i++) {
6084 max_vm_field_id_(0), 6127 AddRepresentative(places_[i]);
6085 vm_field_ids_() { }
6086
6087 Alias ComputeAlias(Place* place) {
6088 switch (place->kind()) {
6089 case Place::kIndexed:
6090 if (place->index()->IsConstant()) {
6091 const Object& index = place->index()->AsConstant()->value();
6092 if (index.IsSmi()) {
6093 return Alias::ConstantIndex(
6094 GetInstanceIndexId(place->instance(),
6095 Smi::Cast(index).Value()));
6096 }
6097 }
6098 return Alias::UnknownIndex(GetUnknownIndexId(place->instance()));
6099 case Place::kField:
6100 return Alias::Field(
6101 GetInstanceFieldId(place->instance(), place->field()));
6102 case Place::kVMField:
6103 return Alias::VMField(
6104 GetInstanceVMFieldId(place->instance(), place->offset_in_bytes()));
6105 case Place::kContext:
6106 return Alias::CurrentContext();
6107 case Place::kNone:
6108 UNREACHABLE();
6109 } 6128 }
6110 6129 ComputeKillSets();
6111 UNREACHABLE();
6112 return Alias::None();
6113 } 6130 }
6114 6131
6115 Alias ComputeAliasForStore(Instruction* instr) { 6132 intptr_t LookupAliasId(const Place& alias) {
6116 StoreIndexedInstr* store_indexed = instr->AsStoreIndexed(); 6133 const Place* result = aliases_map_.Lookup(&alias);
6117 if (store_indexed != NULL) { 6134 return (result != NULL) ? result->id() : kNoAlias;
6118 Definition* instance = store_indexed->array()->definition();
6119 if (store_indexed->index()->definition()->IsConstant()) {
6120 const Object& index =
6121 store_indexed->index()->definition()->AsConstant()->value();
6122 if (index.IsSmi()) {
6123 return Alias::ConstantIndex(
6124 GetInstanceIndexId(instance, Smi::Cast(index).Value()));
6125 }
6126 }
6127 return Alias::UnknownIndex(GetUnknownIndexId(instance));
6128 }
6129
6130 StoreInstanceFieldInstr* store_instance_field =
6131 instr->AsStoreInstanceField();
6132 if (store_instance_field != NULL) {
6133 Definition* instance = store_instance_field->instance()->definition();
6134 if (!store_instance_field->field().IsNull()) {
6135 return Alias::Field(
6136 GetInstanceFieldId(instance, store_instance_field->field()));
6137 }
6138 return Alias::VMField(
6139 GetInstanceVMFieldId(instance,
6140 store_instance_field->offset_in_bytes()));
6141 }
6142
6143 if (instr->IsStoreContext()) {
6144 return Alias::CurrentContext();
6145 }
6146
6147 StoreStaticFieldInstr* store_static_field = instr->AsStoreStaticField();
6148 if (store_static_field != NULL) {
6149 return Alias::Field(GetStaticFieldId(store_static_field->field()));
6150 }
6151
6152 return Alias::None();
6153 } 6135 }
6154 6136
6155 BitVector* Get(const Alias alias) { 6137 bool IsStore(Instruction* instr, BitVector** killed) {
6156 const intptr_t idx = alias.ToIndex(); 6138 bool is_load = false, is_store = false;
6157 BitVector* ret = (idx < sets_.length()) ? sets_[idx] : NULL; 6139 Place place(instr, &is_load, &is_store);
6158 return ret; 6140 if (is_store && (place.kind() != Place::kNone)) {
6141 const intptr_t alias_id = LookupAliasId(place.ToAlias());
6142 if (alias_id != kNoAlias) {
6143 *killed = GetKilledSet(alias_id);
6144 }
6145 }
6146 return is_store;
6159 } 6147 }
6160 6148
6161 void AddRepresentative(Place* place) { 6149 BitVector* GetKilledSet(intptr_t alias) {
6162 if (!place->IsFinalField()) { 6150 return (alias < killed_.length()) ? killed_[alias] : NULL;
6163 AddIdForAlias(ComputeAlias(place), place->id());
6164 if (!IsIndependentFromEffects(place)) {
6165 aliased_by_effects_->Add(place->id());
6166 }
6167 }
6168 }
6169
6170 void EnsureAliasingForUnknownIndices() {
6171 // Ids start at 1 because the hash-map uses 0 for element not found.
6172 for (intptr_t unknown_index_id = 1;
6173 unknown_index_id <= max_unknown_index_id_;
6174 unknown_index_id++) {
6175 BitVector* unknown_index = Get(Alias::UnknownIndex(unknown_index_id));
6176 if (unknown_index == NULL) {
6177 return;
6178 }
6179
6180 // Constant indexes alias all non-constant indexes.
6181 // Non-constant indexes alias all constant indexes.
6182 // First update alias set for const-indices, then
6183 // update set for all indices. Ids start at 1.
6184 for (intptr_t id = 1; id <= max_index_id_; id++) {
6185 BitVector* const_indexes = Get(Alias::ConstantIndex(id));
6186 if (const_indexes != NULL) {
6187 const_indexes->AddAll(unknown_index);
6188 }
6189 }
6190
6191 for (intptr_t id = 1; id <= max_index_id_; id++) {
6192 BitVector* const_indexes = Get(Alias::ConstantIndex(id));
6193 if (const_indexes != NULL) {
6194 unknown_index->AddAll(const_indexes);
6195 }
6196 }
6197 }
6198 }
6199
6200 void AddIdForAlias(const Alias alias, intptr_t place_id) {
6201 const intptr_t idx = alias.ToIndex();
6202 while (sets_.length() <= idx) {
6203 sets_.Add(NULL);
6204 }
6205
6206 if (sets_[idx] == NULL) {
6207 sets_[idx] = new(isolate_) BitVector(max_place_id());
6208 }
6209
6210 sets_[idx]->Add(place_id);
6211 } 6151 }
6212 6152
6213 intptr_t max_place_id() const { return places().length(); } 6153 intptr_t max_place_id() const { return places().length(); }
6214 bool IsEmpty() const { return max_place_id() == 0; } 6154 bool IsEmpty() const { return max_place_id() == 0; }
6215 6155
6216 BitVector* aliased_by_effects() const { return aliased_by_effects_; } 6156 BitVector* aliased_by_effects() const { return aliased_by_effects_; }
6217 6157
6218 const ZoneGrowableArray<Place*>& places() const { 6158 const ZoneGrowableArray<Place*>& places() const {
6219 return places_; 6159 return places_;
6220 } 6160 }
6221 6161
6222 void PrintSet(BitVector* set) { 6162 void PrintSet(BitVector* set) {
6223 bool comma = false; 6163 bool comma = false;
6224 for (BitVector::Iterator it(set); 6164 for (BitVector::Iterator it(set);
6225 !it.Done(); 6165 !it.Done();
6226 it.Advance()) { 6166 it.Advance()) {
6227 if (comma) { 6167 if (comma) {
6228 OS::Print(", "); 6168 OS::Print(", ");
6229 } 6169 }
6230 OS::Print("%s", places_[it.Current()]->ToCString()); 6170 OS::Print("%s", places_[it.Current()]->ToCString());
6231 comma = true; 6171 comma = true;
6232 } 6172 }
6233 } 6173 }
6234 6174
6235 const PhiPlaceMoves* phi_moves() const { return phi_moves_; } 6175 const PhiPlaceMoves* phi_moves() const { return phi_moves_; }
6236 6176
6237 // Returns true if the result of an allocation instruction can be aliased by 6177 void RollbackAliasedIdentites() {
6238 // some other SSA variable and false otherwise. Currently simply checks if 6178 for (intptr_t i = 0; i < identity_rollback_.length(); ++i) {
6239 // this value is stored in a field, escapes to another function or 6179 identity_rollback_[i]->SetIdentity(AliasIdentity::Unknown());
6240 // participates in a phi. 6180 }
6241 static bool CanBeAliased(Definition* alloc) { 6181 }
6242 ASSERT(alloc->IsAllocateObject() || 6182
6243 alloc->IsCreateArray() || 6183 // Returns false if the result of an allocation instruction can't be aliased
6244 (alloc->IsStaticCall() && 6184 // by another SSA variable and true otherwise.
6245 alloc->AsStaticCall()->IsRecognizedFactory())); 6185 bool CanBeAliased(Definition* alloc) {
6246 if (alloc->Identity() == kIdentityUnknown) { 6186 if (!Place::IsAllocation(alloc)) {
6247 bool escapes = false; 6187 return true;
6248 for (Value* use = alloc->input_use_list(); 6188 }
6249 use != NULL; 6189
6250 use = use->next_use()) { 6190 if (alloc->Identity().IsUnknown()) {
6251 Instruction* instr = use->instruction(); 6191 ComputeAliasing(alloc);
6252 if (instr->IsPushArgument() || 6192 }
6253 (instr->IsStoreInstanceField() 6193
6254 && (use->use_index() != StoreInstanceFieldInstr::kInstancePos)) || 6194 return !alloc->Identity().IsNotAliased();
6255 (instr->IsStoreIndexed() 6195 }
6256 && (use->use_index() == StoreIndexedInstr::kValuePos)) || 6196
6257 instr->IsStoreStaticField() || 6197 private:
6258 instr->IsPhi() || 6198 enum {
6259 instr->IsAssertAssignable() || 6199 kNoAlias = 0,
6260 instr->IsRedefinition()) { 6200
6261 escapes = true; 6201 // Artificial alias that is used to collect all representatives of the
6262 break; 6202 // *[C], X[C] aliases for arbitrary C.
6263 } 6203 kAnyConstantIndexedAlias = 1,
6204
6205 // Artificial alias that is used to collect all representatives of
6206 // *[C] alias for arbitrary C.
6207 kUnknownInstanceConstantIndexedAlias = 2,
6208
6209 // Artificial alias that is used to collect all representatives of
6210 // X[*] alias for all X.
6211 kAnyAllocationIndexedAlias = 3,
6212
6213 // *[*] alias.
6214 kAnyInstanceAnyIndexAlias = 4
6215 };
6216
6217 // Compute least generic alias for the place and assign alias id to it.
6218 void AddRepresentative(Place* place) {
6219 if (!place->IsFinalField()) {
6220 const Place* alias = CanonicalizeAlias(place->ToAlias());
6221 EnsureSet(&representatives_, alias->id())->Add(place->id());
6222
6223 // Update cumulative representative sets that are used during
6224 // killed sets computation.
6225 if (alias->kind() == Place::kConstantIndexed) {
6226 if (CanBeAliased(alias->instance())) {
6227 EnsureSet(&representatives_, kAnyConstantIndexedAlias)->
6228 Add(place->id());
6229 }
6230
6231 if (alias->instance() == NULL) {
6232 EnsureSet(&representatives_, kUnknownInstanceConstantIndexedAlias)->
6233 Add(place->id());
6234 }
6235 } else if ((alias->kind() == Place::kIndexed) &&
6236 CanBeAliased(place->instance())) {
6237 EnsureSet(&representatives_, kAnyAllocationIndexedAlias)->
6238 Add(place->id());
6264 } 6239 }
6265 6240
6266 alloc->SetIdentity(escapes ? kIdentityAliased : kIdentityNotAliased); 6241 if (!IsIndependentFromEffects(place)) {
6267 } 6242 aliased_by_effects_->Add(place->id());
6268
6269 return alloc->Identity() != kIdentityNotAliased;
6270 }
6271
6272 private:
6273 // Get id assigned to the given field. Assign a new id if the field is seen
6274 // for the first time.
6275 intptr_t GetFieldId(intptr_t instance_id, const Field& field) {
6276 intptr_t id = field_ids_.Lookup(FieldIdPair::Key(instance_id, &field));
6277 if (id == 0) {
6278 id = ++max_field_id_;
6279 field_ids_.Insert(FieldIdPair(FieldIdPair::Key(instance_id, &field), id));
6280 }
6281 return id;
6282 }
6283
6284 intptr_t GetIndexId(intptr_t instance_id, intptr_t index) {
6285 intptr_t id = index_ids_.Lookup(
6286 ConstantIndexIdPair::Key(instance_id, index));
6287 if (id == 0) {
6288 // Zero is used to indicate element not found. The first id is one.
6289 id = ++max_index_id_;
6290 index_ids_.Insert(ConstantIndexIdPair(
6291 ConstantIndexIdPair::Key(instance_id, index), id));
6292 }
6293 return id;
6294 }
6295
6296 enum {
6297 kAnyInstance = -1
6298 };
6299
6300 // Get or create an identifier for an instance field belonging to the
6301 // given instance.
6302 // The space of identifiers assigned to instance fields is split into
6303 // parts based on the instance that contains the field.
6304 // If compiler can prove that instance has a single SSA name in the compiled
6305 // function then we use that SSA name to distinguish fields of this object
6306 // from the same fields in other objects.
6307 // If multiple SSA names can point to the same object then we use
6308 // kAnyInstance instead of a concrete SSA name.
6309 intptr_t GetInstanceFieldId(Definition* defn, const Field& field) {
6310 ASSERT(field.is_static() == (defn == NULL));
6311
6312 intptr_t instance_id = kAnyInstance;
6313
6314 if (defn != NULL) {
6315 AllocateObjectInstr* alloc = defn->AsAllocateObject();
6316 if ((alloc != NULL) && !CanBeAliased(alloc)) {
6317 instance_id = alloc->ssa_temp_index();
6318 ASSERT(instance_id != kAnyInstance);
6319 } 6243 }
6320 } 6244 }
6321 6245 }
6322 return GetFieldId(instance_id, field); 6246
6323 } 6247 void ComputeKillSets() {
6324 6248 for (intptr_t i = 0; i < aliases_.length(); ++i) {
6325 intptr_t GetInstanceVMFieldId(Definition* defn, intptr_t offset) { 6249 const Place* alias = aliases_[i];
6326 intptr_t instance_id = kAnyInstance; 6250 // Add all representatives to the kill set.
6327 6251 AddAllRepresentatives(alias->id(), alias->id());
6328 ASSERT(defn != NULL); 6252 ComputeKillSet(alias);
6329 if ((defn->IsAllocateObject() || 6253 }
6330 defn->IsCreateArray() || 6254
6331 (defn->IsStaticCall() && 6255 if (FLAG_trace_load_optimization) {
6332 defn->AsStaticCall()->IsRecognizedFactory())) && 6256 OS::Print("Aliases KILL sets:\n");
6333 !CanBeAliased(defn)) { 6257 for (intptr_t i = 0; i < aliases_.length(); ++i) {
6334 instance_id = defn->ssa_temp_index(); 6258 const Place* alias = aliases_[i];
6335 ASSERT(instance_id != kAnyInstance); 6259 BitVector* kill = GetKilledSet(alias->id());
6336 } 6260
6337 6261 OS::Print("%s: ", alias->ToCString());
6338 intptr_t id = vm_field_ids_.Lookup(VMFieldIdPair::Key(instance_id, offset)); 6262 if (kill != NULL) {
6339 if (id == 0) { 6263 PrintSet(kill);
6340 id = ++max_vm_field_id_; 6264 }
6341 vm_field_ids_.Insert( 6265 OS::Print("\n");
6342 VMFieldIdPair(VMFieldIdPair::Key(instance_id, offset), id)); 6266 }
6343 } 6267 }
6344 return id; 6268 }
6345 } 6269
6346 6270 void InsertAlias(const Place* alias) {
6347 intptr_t GetInstanceIndexId(Definition* defn, intptr_t index) { 6271 aliases_map_.Insert(alias);
6348 intptr_t instance_id = kAnyInstance; 6272 aliases_.Add(alias);
6349 6273 }
6350 ASSERT(defn != NULL); 6274
6351 if ((defn->IsCreateArray() || 6275 const Place* CanonicalizeAlias(const Place& alias) {
6352 (defn->IsStaticCall() && 6276 const Place* canonical = aliases_map_.Lookup(&alias);
6353 defn->AsStaticCall()->IsRecognizedFactory())) && 6277 if (canonical == NULL) {
6354 !CanBeAliased(defn)) { 6278 canonical = Place::Wrap(isolate_,
6355 instance_id = defn->ssa_temp_index(); 6279 alias,
6356 ASSERT(instance_id != kAnyInstance); 6280 kAnyInstanceAnyIndexAlias + aliases_.length());
6357 } 6281 InsertAlias(canonical);
6358 6282 }
6359 return GetIndexId(instance_id, index); 6283 return canonical;
6360 } 6284 }
6361 6285
6362 intptr_t GetUnknownIndexId(Definition* defn) { 6286 BitVector* GetRepresentativesSet(intptr_t alias) {
6363 intptr_t instance_id = kAnyInstance; 6287 return (alias < representatives_.length()) ? representatives_[alias] : NULL;
6364 6288 }
6365 ASSERT(defn != NULL); 6289
6366 if ((defn->IsCreateArray() || 6290 BitVector* EnsureSet(GrowableArray<BitVector*>* sets,
6367 (defn->IsStaticCall() && 6291 intptr_t alias) {
6368 defn->AsStaticCall()->IsRecognizedFactory())) && 6292 while (sets->length() <= alias) {
6369 !CanBeAliased(defn)) { 6293 sets->Add(NULL);
6370 instance_id = defn->ssa_temp_index(); 6294 }
6371 ASSERT(instance_id != kAnyInstance); 6295
6372 } 6296 BitVector* set = (*sets)[alias];
6373 6297 if (set == NULL) {
6374 intptr_t id = unknown_index_ids_.Lookup( 6298 (*sets)[alias] = set = new(isolate_) BitVector(max_place_id());
6375 UnknownIndexIdPair::Key(instance_id)); 6299 }
6376 if (id == 0) { 6300 return set;
6377 // Zero is used to indicate element not found. The first id is one. 6301 }
6378 id = ++max_unknown_index_id_; 6302
6379 unknown_index_ids_.Insert( 6303 void AddAllRepresentatives(const Place* to, intptr_t from) {
6380 UnknownIndexIdPair(UnknownIndexIdPair::Key(instance_id), id)); 6304 AddAllRepresentatives(to->id(), from);
6381 } 6305 }
6382 return id; 6306
6383 } 6307 void AddAllRepresentatives(intptr_t to, intptr_t from) {
6384 6308 BitVector* from_set = GetRepresentativesSet(from);
6385 // Get or create an identifier for a static field. 6309 if (from_set != NULL) {
6386 intptr_t GetStaticFieldId(const Field& field) { 6310 EnsureSet(&killed_, to)->AddAll(from_set);
6387 ASSERT(field.is_static()); 6311 }
6388 return GetFieldId(kAnyInstance, field); 6312 }
6313
6314 void CrossAlias(const Place* to, const Place& from) {
6315 const intptr_t from_id = LookupAliasId(from);
6316 if (from_id == kNoAlias) {
6317 return;
6318 }
6319 CrossAlias(to, from_id);
6320 }
6321
6322 void CrossAlias(const Place* to, intptr_t from) {
6323 AddAllRepresentatives(to->id(), from);
6324 AddAllRepresentatives(from, to->id());
6325 }
6326
6327 // When computing kill sets we let a less generic alias insert its
6328 // representatives into a more generic alias's kill set. For example
6329 // when visiting alias X[*] instead of searching for all aliases X[C]
6330 // and inserting their representatives into kill set for X[*] we update
6331 // kill set for X[*] each time we visit new X[C] for some C.
6332 // There is an exception however: if both aliases are parametric like *[C]
6333 // and X[*] which cross alias when X is an aliased allocation then we use
6334 // artificial aliases that contain all possible representatives for the given
6335 // alias for any value of the parameter to compute resulting kill set.
6336 void ComputeKillSet(const Place* alias) {
6337 switch (alias->kind()) {
6338 case Place::kIndexed: // Either *[*] or X[*] alias.
6339 if (alias->instance() == NULL) {
6340 // *[*] aliases with X[*], X[C], *[C].
6341 AddAllRepresentatives(alias, kAnyConstantIndexedAlias);
6342 AddAllRepresentatives(alias, kAnyAllocationIndexedAlias);
6343 } else if (CanBeAliased(alias->instance())) {
6344 // X[*] aliases with X[C].
6345 // If X can be aliased then X[*] also aliases with *[C], *[*].
6346 CrossAlias(alias, kAnyInstanceAnyIndexAlias);
6347 AddAllRepresentatives(alias, kUnknownInstanceConstantIndexedAlias);
6348 }
6349 break;
6350
6351 case Place::kConstantIndexed: // Either X[C] or *[C] alias.
6352 if (alias->instance() == NULL) {
6353 // *[C] aliases with X[C], X[*], *[*].
6354 AddAllRepresentatives(alias, kAnyAllocationIndexedAlias);
6355 CrossAlias(alias, kAnyInstanceAnyIndexAlias);
6356 } else {
6357 // X[C] aliases with X[*].
6358 // If X can be aliased then X[C] also aliases with *[C], *[*].
6359 CrossAlias(alias, alias->CopyWithoutIndex());
6360 if (CanBeAliased(alias->instance())) {
6361 CrossAlias(alias, alias->CopyWithoutInstance());
6362 CrossAlias(alias, kAnyInstanceAnyIndexAlias);
6363 }
6364 }
6365 break;
6366
6367 case Place::kField:
6368 case Place::kVMField:
6369 if (CanBeAliased(alias->instance())) {
6370 // X.f or X.@offs alias with *.f and *.@offs respectively.
6371 CrossAlias(alias, alias->CopyWithoutInstance());
6372 }
6373
6374 case Place::kContext:
6375 return;
6376
6377 case Place::kNone:
6378 UNREACHABLE();
6379 }
6389 } 6380 }
6390 6381
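Note (not part of the patch; std::set and string labels stand in for BitVector and alias ids): the kill-set construction in ComputeKillSets/ComputeKillSet above seeds every alias with its own representatives and then, whenever two aliases cross-alias, merges their representative sets into each other's kill sets. A minimal standalone sketch:

#include <iostream>
#include <map>
#include <set>
#include <string>

using Set = std::set<int>;  // place ids; stands in for BitVector

std::map<std::string, Set> representatives;  // alias -> places it represents
std::map<std::string, Set> killed;           // alias -> places a store kills

void AddAll(Set* to, const Set& from) { to->insert(from.begin(), from.end()); }

// Mirrors CrossAlias(): each of the two aliases kills the other's
// representatives.
void CrossAlias(const std::string& a, const std::string& b) {
  AddAll(&killed[a], representatives[b]);
  AddAll(&killed[b], representatives[a]);
}

int main() {
  // Hypothetical place ids: 0 = v1[0], 1 = v1[i], 2 = *[0].
  representatives["X[C]"] = {0};
  representatives["X[*]"] = {1};
  representatives["*[C]"] = {2};
  // Every alias kills its own representatives.
  for (auto& kv : representatives) AddAll(&killed[kv.first], kv.second);
  // X[C] cross-aliases X[*]; if X escapes it also cross-aliases *[C].
  CrossAlias("X[C]", "X[*]");
  CrossAlias("X[C]", "*[C]");
  for (auto& kv : killed) {
    std::cout << kv.first << " kills " << kv.second.size() << " place(s)\n";
  }
  return 0;
}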
6391 // Returns true if the given load is unaffected by external side-effects. 6382 // Returns true if the given load is unaffected by external side-effects.
6392 // This essentially means that no stores to the same location can 6383 // This essentially means that no stores to the same location can
6393 // occur in other functions. 6384 // occur in other functions.
6394 bool IsIndependentFromEffects(Place* place) { 6385 bool IsIndependentFromEffects(Place* place) {
6395 if (place->IsFinalField()) { 6386 if (place->IsFinalField()) {
6396 // Note that we can't use LoadField's is_immutable attribute here because 6387 // Note that we can't use LoadField's is_immutable attribute here because
6397 // some VM-fields (those that have no corresponding Field object and 6388 // some VM-fields (those that have no corresponding Field object and
6398 // accessed through offset alone) can share offset but have different 6389 // accessed through offset alone) can share offset but have different
6399 // immutability properties. 6390 // immutability properties.
6400 // One example is the length property of growable and fixed size list. If 6391 // One example is the length property of growable and fixed size list. If
6401 // loads of these two properties occur in the same function for the same 6392 // loads of these two properties occur in the same function for the same
6402 // receiver then they will get the same expression number. However 6393 // receiver then they will get the same expression number. However
6403 // immutability of the length of fixed size list does not mean that 6394 // immutability of the length of fixed size list does not mean that
6404 // growable list also has immutable property. Thus we will make a 6395 // growable list also has immutable property. Thus we will make a
6405 // conservative assumption for the VM-properties. 6396 // conservative assumption for the VM-properties.
6406 // TODO(vegorov): disambiguate immutable and non-immutable VM-fields with 6397 // TODO(vegorov): disambiguate immutable and non-immutable VM-fields with
6407 // the same offset e.g. through recognized kind. 6398 // the same offset e.g. through recognized kind.
6408 return true; 6399 return true;
6409 } 6400 }
6410 6401
6411 if (((place->kind() == Place::kField) || 6402 return ((place->kind() == Place::kField) ||
6412 (place->kind() == Place::kVMField)) && 6403 (place->kind() == Place::kVMField)) &&
6413 (place->instance() != NULL)) { 6404 !CanBeAliased(place->instance());
6414 AllocateObjectInstr* alloc = place->instance()->AsAllocateObject(); 6405 }
6415 return (alloc != NULL) && !CanBeAliased(alloc); 6406
6407 // Returns true if there are direct loads from the given place.
6408 bool HasLoadsFromPlace(Definition* defn, const Place* place) {
6409 ASSERT((place->kind() == Place::kField) ||
6410 (place->kind() == Place::kVMField));
6411 ASSERT(place->instance() == defn);
6412
6413 for (Value* use = defn->input_use_list();
6414 use != NULL;
6415 use = use->next_use()) {
6416 bool is_load = false, is_store;
6417 Place load_place(use->instruction(), &is_load, &is_store);
6418
6419 if (is_load && load_place.Equals(place)) {
6420 return true;
6421 }
6416 } 6422 }
6417 6423
6418 return false; 6424 return false;
6419 } 6425 }
6420 6426
6421 class FieldIdPair { 6427 // Check if any use of the definition can create an alias.
6422 public: 6428 // Can add more objects into aliasing_worklist_.
6423 struct Key { 6429 bool AnyUseCreatesAlias(Definition* defn) {
6424 Key(intptr_t instance_id, const Field* field) 6430 for (Value* use = defn->input_use_list();
6425 : instance_id_(instance_id), field_(field) { } 6431 use != NULL;
6432 use = use->next_use()) {
6433 Instruction* instr = use->instruction();
6434 if (instr->IsPushArgument() ||
6435 (instr->IsStoreIndexed()
6436 && (use->use_index() == StoreIndexedInstr::kValuePos)) ||
6437 instr->IsStoreStaticField() ||
6438 instr->IsPhi() ||
6439 instr->IsAssertAssignable() ||
6440 instr->IsRedefinition()) {
6441 return true;
6442 } else if ((instr->IsStoreInstanceField()
6443 && (use->use_index() != StoreInstanceFieldInstr::kInstancePos))) {
6444 ASSERT(use->use_index() == StoreInstanceFieldInstr::kValuePos);
6445 // If we store this value into an object that is not aliased itself
6446 // and we never load again then the store does not create an alias.
6447 StoreInstanceFieldInstr* store = instr->AsStoreInstanceField();
6448 Definition* instance = store->instance()->definition();
6449 if (instance->IsAllocateObject() && !instance->Identity().IsAliased()) {
6450 bool is_load, is_store;
6451 Place store_place(instr, &is_load, &is_store);
6426 6452
6427 intptr_t instance_id_; 6453 if (!HasLoadsFromPlace(instance, &store_place)) {
6428 const Field* field_; 6454 // No loads found that match this store. If it is yet unknown if
6429 }; 6455 // the object is not aliased then optimistically assume this but
6456 // add it to the worklist to check its uses transitively.
6457 if (instance->Identity().IsUnknown()) {
6458 instance->SetIdentity(AliasIdentity::NotAliased());
6459 aliasing_worklist_.Add(instance);
6460 }
6461 continue;
6462 }
6463 }
6430 6464
6431 typedef intptr_t Value; 6465 return true;
6432 typedef FieldIdPair Pair; 6466 }
6467 }
6468 return false;
6469 }
6433 6470
6434 FieldIdPair(Key key, Value value) : key_(key), value_(value) { } 6471 // Mark any value stored into the given object as potentially aliased.
6435 6472 void MarkStoredValuesEscaping(Definition* defn) {
6436 static Key KeyOf(Pair kv) { 6473 if (!defn->IsAllocateObject()) {
6437 return kv.key_; 6474 return;
6438 } 6475 }
6439 6476
6440 static Value ValueOf(Pair kv) { 6477 // Find all stores into this object.
6441 return kv.value_; 6478 for (Value* use = defn->input_use_list();
6479 use != NULL;
6480 use = use->next_use()) {
6481 if ((use->use_index() == StoreInstanceFieldInstr::kInstancePos) &&
6482 use->instruction()->IsStoreInstanceField()) {
6483 StoreInstanceFieldInstr* store =
6484 use->instruction()->AsStoreInstanceField();
6485 Definition* value = store->value()->definition();
6486 if (value->Identity().IsNotAliased()) {
6487 value->SetIdentity(AliasIdentity::Aliased());
6488 identity_rollback_.Add(value);
6489
6490 // Add to worklist to propagate the mark transitively.
6491 aliasing_worklist_.Add(value);
6492 }
6493 }
6442 } 6494 }
6495 }
6443 6496
6444 static intptr_t Hashcode(Key key) { 6497 // Determine if the given definition can't be aliased.
6445 return String::Handle(key.field_->name()).Hash(); 6498 void ComputeAliasing(Definition* alloc) {
6499 ASSERT(alloc->Identity().IsUnknown());
6500 ASSERT(aliasing_worklist_.is_empty());
6501
6502 alloc->SetIdentity(AliasIdentity::NotAliased());
6503 aliasing_worklist_.Add(alloc);
6504
6505 while (!aliasing_worklist_.is_empty()) {
6506 Definition* defn = aliasing_worklist_.RemoveLast();
6507
6508 // If the definition in the worklist was optimistically marked as
6509 // not-aliased check that optimistic assumption still holds: check if
6510 // any of its uses can create an alias.
6511 if (!defn->Identity().IsAliased() && AnyUseCreatesAlias(defn)) {
6512 defn->SetIdentity(AliasIdentity::Aliased());
6513 identity_rollback_.Add(defn);
6514 }
6515
6516 // If the allocation site is marked as aliased conservatively mark
6517 // any values stored into the object aliased too.
6518 if (defn->Identity().IsAliased()) {
6519 MarkStoredValuesEscaping(defn);
6520 }
6446 } 6521 }
6447 6522 }
6448 static inline bool IsKeyEqual(Pair kv, Key key) {
6449 return (KeyOf(kv).field_->raw() == key.field_->raw()) &&
6450 (KeyOf(kv).instance_id_ == key.instance_id_);
6451 }
6452
6453 private:
6454 Key key_;
6455 Value value_;
6456 };
6457
6458 class ConstantIndexIdPair {
6459 public:
6460 struct Key {
6461 Key(intptr_t instance_id, intptr_t index)
6462 : instance_id_(instance_id), index_(index) { }
6463
6464 intptr_t instance_id_;
6465 intptr_t index_;
6466 };
6467 typedef intptr_t Value;
6468 typedef ConstantIndexIdPair Pair;
6469
6470 ConstantIndexIdPair(Key key, Value value) : key_(key), value_(value) { }
6471
6472 static Key KeyOf(ConstantIndexIdPair kv) {
6473 return kv.key_;
6474 }
6475
6476 static Value ValueOf(ConstantIndexIdPair kv) {
6477 return kv.value_;
6478 }
6479
6480 static intptr_t Hashcode(Key key) {
6481 return (key.instance_id_ + 1) * 1024 + key.index_;
6482 }
6483
6484 static inline bool IsKeyEqual(ConstantIndexIdPair kv, Key key) {
6485 return (KeyOf(kv).index_ == key.index_)
6486 && (KeyOf(kv).instance_id_ == key.instance_id_);
6487 }
6488
6489 private:
6490 Key key_;
6491 Value value_;
6492 };
6493
6494 class UnknownIndexIdPair {
6495 public:
6496 typedef intptr_t Key;
6497 typedef intptr_t Value;
6498 typedef UnknownIndexIdPair Pair;
6499
6500 UnknownIndexIdPair(Key key, Value value) : key_(key), value_(value) { }
6501
6502 static Key KeyOf(UnknownIndexIdPair kv) {
6503 return kv.key_;
6504 }
6505
6506 static Value ValueOf(UnknownIndexIdPair kv) {
6507 return kv.value_;
6508 }
6509
6510 static intptr_t Hashcode(Key key) {
6511 return key + 1;
6512 }
6513
6514 static inline bool IsKeyEqual(UnknownIndexIdPair kv, Key key) {
6515 return KeyOf(kv) == key;
6516 }
6517
6518 private:
6519 Key key_;
6520 Value value_;
6521 };
6522
6523 class VMFieldIdPair {
6524 public:
6525 struct Key {
6526 Key(intptr_t instance_id, intptr_t offset)
6527 : instance_id_(instance_id), offset_(offset) { }
6528
6529 intptr_t instance_id_;
6530 intptr_t offset_;
6531 };
6532
6533 typedef intptr_t Value;
6534 typedef VMFieldIdPair Pair;
6535
6536 VMFieldIdPair(Key key, Value value) : key_(key), value_(value) { }
6537
6538 static Key KeyOf(Pair kv) {
6539 return kv.key_;
6540 }
6541
6542 static Value ValueOf(Pair kv) {
6543 return kv.value_;
6544 }
6545
6546 static intptr_t Hashcode(Key key) {
6547 return (key.instance_id_ + 1) * 1024 + key.offset_;
6548 }
6549
6550 static inline bool IsKeyEqual(Pair kv, Key key) {
6551 return (KeyOf(kv).offset_ == key.offset_) &&
6552 (KeyOf(kv).instance_id_ == key.instance_id_);
6553 }
6554
6555 private:
6556 Key key_;
6557 Value value_;
6558 };
6559 6523
6560 Isolate* isolate_; 6524 Isolate* isolate_;
6561 6525
6562 const ZoneGrowableArray<Place*>& places_; 6526 const ZoneGrowableArray<Place*>& places_;
6563 6527
6564 const PhiPlaceMoves* phi_moves_; 6528 const PhiPlaceMoves* phi_moves_;
6565 6529
6566 // Maps alias index to a set of ssa indexes corresponding to loads with the 6530 // A list of all seen aliases and a map that allows looking up canonical
6567 // given alias. 6531 // alias object.
6568 GrowableArray<BitVector*> sets_; 6532 GrowableArray<const Place*> aliases_;
6533 DirectChainedHashMap<PointerKeyValueTrait<const Place> > aliases_map_;
6569 6534
6535 // Maps alias id to set of ids of places representing the alias.
6536 // Place represents an alias if this alias is least generic alias for
6537 // the place.
6538 // (see ToAlias for the definition of least generic alias).
6539 GrowableArray<BitVector*> representatives_;
6540
6541 // Maps alias id to set of ids of places aliased.
6542 GrowableArray<BitVector*> killed_;
6543
6544 // Set of ids of places that can be affected by side-effects other than
6545 // explicit stores (i.e. through calls).
6570 BitVector* aliased_by_effects_; 6546 BitVector* aliased_by_effects_;
6571 6547
6572 // Table mapping static field to their id used during optimization pass. 6548 // Worklist used during alias analysis.
6573 intptr_t max_field_id_; 6549 GrowableArray<Definition*> aliasing_worklist_;
6574 DirectChainedHashMap<FieldIdPair> field_ids_;
6575 6550
6576 intptr_t max_index_id_; 6551 // List of definitions that had their identity set to Aliased. At the end
6577 DirectChainedHashMap<ConstantIndexIdPair> index_ids_; 6552 // of load optimization their identity will be rolled back to Unknown to
6578 6553 // avoid treating them as Aliased at later stages without checking first
6579 intptr_t max_unknown_index_id_; 6554 // as optimizations can potentially eliminate instructions leading to
6580 DirectChainedHashMap<UnknownIndexIdPair> unknown_index_ids_; 6555 // aliasing.
6581 6556 GrowableArray<Definition*> identity_rollback_;
6582 intptr_t max_vm_field_id_;
6583 DirectChainedHashMap<VMFieldIdPair> vm_field_ids_;
6584 }; 6557 };
6585 6558
6586 6559
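Note (not part of the patch; a toy Node type instead of the VM's Definition): ComputeAliasing above is an optimistic worklist algorithm: an allocation is assumed not-aliased, the assumption is retracted when a use is found to create an alias, stored values are marked transitively, and every demotion is recorded so RollbackAliasedIdentites can undo it after load optimization. A compact sketch of that control flow:

#include <cstdio>
#include <vector>

enum class Identity { kUnknown, kNotAliased, kAliased };

struct Node {
  Identity identity = Identity::kUnknown;
  bool has_escaping_use = false;     // e.g. pushed as a call argument
  std::vector<Node*> stored_values;  // values stored into this object
};

// Optimistically mark `alloc` not-aliased, then retract the assumption for any
// node whose uses escape; values stored into an aliased object become aliased
// too. Every demotion is recorded in `rollback`.
void ComputeAliasing(Node* alloc, std::vector<Node*>* rollback) {
  alloc->identity = Identity::kNotAliased;
  std::vector<Node*> worklist = {alloc};
  while (!worklist.empty()) {
    Node* n = worklist.back();
    worklist.pop_back();
    if (n->identity != Identity::kAliased && n->has_escaping_use) {
      n->identity = Identity::kAliased;
      rollback->push_back(n);
    }
    if (n->identity == Identity::kAliased) {
      for (Node* v : n->stored_values) {
        if (v->identity != Identity::kAliased) {
          v->identity = Identity::kAliased;
          rollback->push_back(v);
          worklist.push_back(v);  // propagate the mark transitively
        }
      }
    }
  }
}

int main() {
  Node value, alloc;
  alloc.has_escaping_use = true;           // the allocation itself escapes...
  alloc.stored_values.push_back(&value);   // ...so the stored value does too
  std::vector<Node*> rollback;
  ComputeAliasing(&alloc, &rollback);
  // Mirrors RollbackAliasedIdentites(): reset everything that was demoted.
  for (Node* n : rollback) n->identity = Identity::kUnknown;
  std::printf("demoted and rolled back %zu node(s)\n", rollback.size());
  return 0;
}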
6587 static Definition* GetStoredValue(Instruction* instr) { 6560 static Definition* GetStoredValue(Instruction* instr) {
6588 if (instr->IsStoreIndexed()) { 6561 if (instr->IsStoreIndexed()) {
6589 return instr->AsStoreIndexed()->value()->definition(); 6562 return instr->AsStoreIndexed()->value()->definition();
6590 } 6563 }
6591 6564
6592 StoreInstanceFieldInstr* store_instance_field = instr->AsStoreInstanceField(); 6565 StoreInstanceFieldInstr* store_instance_field = instr->AsStoreInstanceField();
6593 if (store_instance_field != NULL) { 6566 if (store_instance_field != NULL) {
(...skipping 42 matching lines...)
6636 if (FLAG_trace_optimization) { 6609 if (FLAG_trace_optimization) {
6637 OS::Print("phi dependent place %s\n", place->ToCString()); 6610 OS::Print("phi dependent place %s\n", place->ToCString());
6638 } 6611 }
6639 6612
6640 Place input_place(*place); 6613 Place input_place(*place);
6641 for (intptr_t j = 0; j < phi->InputCount(); j++) { 6614 for (intptr_t j = 0; j < phi->InputCount(); j++) {
6642 input_place.set_instance(phi->InputAt(j)->definition()); 6615 input_place.set_instance(phi->InputAt(j)->definition());
6643 6616
6644 Place* result = map->Lookup(&input_place); 6617 Place* result = map->Lookup(&input_place);
6645 if (result == NULL) { 6618 if (result == NULL) {
6646 input_place.set_id(places->length()); 6619 result = Place::Wrap(isolate, input_place, places->length());
6647 result = Place::Wrap(isolate, input_place);
6648 map->Insert(result); 6620 map->Insert(result);
6649 places->Add(result); 6621 places->Add(result);
6650 if (FLAG_trace_optimization) { 6622 if (FLAG_trace_optimization) {
6651 OS::Print(" adding place %s as %" Pd "\n", 6623 OS::Print(" adding place %s as %" Pd "\n",
6652 result->ToCString(), 6624 result->ToCString(),
6653 result->id()); 6625 result->id());
6654 } 6626 }
6655 } 6627 }
6656 phi_moves->CreateOutgoingMove(isolate, 6628 phi_moves->CreateOutgoingMove(isolate,
6657 block->PredecessorAt(j), 6629 block->PredecessorAt(j),
(...skipping 33 matching lines...)
6691 !instr_it.Done(); 6663 !instr_it.Done();
6692 instr_it.Advance()) { 6664 instr_it.Advance()) {
6693 Instruction* instr = instr_it.Current(); 6665 Instruction* instr = instr_it.Current();
6694 Place place(instr, &has_loads, &has_stores); 6666 Place place(instr, &has_loads, &has_stores);
6695 if (place.kind() == Place::kNone) { 6667 if (place.kind() == Place::kNone) {
6696 continue; 6668 continue;
6697 } 6669 }
6698 6670
6699 Place* result = map->Lookup(&place); 6671 Place* result = map->Lookup(&place);
6700 if (result == NULL) { 6672 if (result == NULL) {
6701 place.set_id(places->length()); 6673 result = Place::Wrap(isolate, place, places->length());
6702 result = Place::Wrap(isolate, place);
6703 map->Insert(result); 6674 map->Insert(result);
6704 places->Add(result); 6675 places->Add(result);
6705 6676
6706 if (FLAG_trace_optimization) { 6677 if (FLAG_trace_optimization) {
6707 OS::Print("numbering %s as %" Pd "\n", 6678 OS::Print("numbering %s as %" Pd "\n",
6708 result->ToCString(), 6679 result->ToCString(),
6709 result->id()); 6680 result->id());
6710 } 6681 }
6711 } 6682 }
6712 6683
6713 instr->set_place_id(result->id()); 6684 instr->set_place_id(result->id());
6714 } 6685 }
6715 } 6686 }
6716 6687
6717 if ((mode == kOptimizeLoads) && !has_loads) { 6688 if ((mode == kOptimizeLoads) && !has_loads) {
6718 return NULL; 6689 return NULL;
6719 } 6690 }
6720 if ((mode == kOptimizeStores) && !has_stores) { 6691 if ((mode == kOptimizeStores) && !has_stores) {
6721 return NULL; 6692 return NULL;
6722 } 6693 }
6723 6694
6724 PhiPlaceMoves* phi_moves = ComputePhiMoves(map, places); 6695 PhiPlaceMoves* phi_moves = ComputePhiMoves(map, places);
6725 6696
6726 // Build aliasing sets mapping aliases to loads. 6697 // Build aliasing sets mapping aliases to loads.
6727 AliasedSet* aliased_set = new(isolate) AliasedSet(isolate, places, phi_moves); 6698 return new(isolate) AliasedSet(isolate, places, phi_moves);
6728 for (intptr_t i = 0; i < places->length(); i++) {
6729 Place* place = (*places)[i];
6730 aliased_set->AddRepresentative(place);
6731 }
6732
6733 aliased_set->EnsureAliasingForUnknownIndices();
6734
6735 return aliased_set;
6736 } 6699 }
6737 6700
6738 6701
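The numbering loop above interns every distinct Place in a hash map and assigns it a dense id, so the later dataflow sets can be plain bit vectors indexed by place id. A rough standalone sketch of that interning step, with std::map and strings standing in for the VM's Place and DirectChainedHashMap:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Simplified stand-in for a Place: here just "instance.field" as a string.
struct Place {
  std::string key;
  int id;
};

int main() {
  std::map<std::string, int> map;   // key -> id (plays the role of the hash map)
  std::vector<Place> places;        // id -> canonical Place

  const char* accesses[] = {"v1.x", "v2.y", "v1.x", "v2.y", "v3.length"};
  for (const char* k : accesses) {
    auto it = map.find(k);
    int id;
    if (it == map.end()) {
      id = static_cast<int>(places.size());  // fresh id == places.length()
      map[k] = id;
      places.push_back(Place{k, id});
      std::printf("numbering %s as %d\n", k, id);
    } else {
      id = it->second;                        // reuse the canonical id
    }
    // In the VM this corresponds to instr->set_place_id(id).
  }
  return 0;
}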
6739 class LoadOptimizer : public ValueObject { 6702 class LoadOptimizer : public ValueObject {
6740 public: 6703 public:
6741 LoadOptimizer(FlowGraph* graph, 6704 LoadOptimizer(FlowGraph* graph,
6742 AliasedSet* aliased_set, 6705 AliasedSet* aliased_set,
6743 DirectChainedHashMap<PointerKeyValueTrait<Place> >* map) 6706 DirectChainedHashMap<PointerKeyValueTrait<Place> >* map)
6744 : graph_(graph), 6707 : graph_(graph),
6745 map_(map), 6708 map_(map),
(...skipping 13 matching lines...)
6759 out_.Add(NULL); 6722 out_.Add(NULL);
6760 gen_.Add(new(I) BitVector(aliased_set_->max_place_id())); 6723 gen_.Add(new(I) BitVector(aliased_set_->max_place_id()));
6761 kill_.Add(new(I) BitVector(aliased_set_->max_place_id())); 6724 kill_.Add(new(I) BitVector(aliased_set_->max_place_id()));
6762 in_.Add(new(I) BitVector(aliased_set_->max_place_id())); 6725 in_.Add(new(I) BitVector(aliased_set_->max_place_id()));
6763 6726
6764 exposed_values_.Add(NULL); 6727 exposed_values_.Add(NULL);
6765 out_values_.Add(NULL); 6728 out_values_.Add(NULL);
6766 } 6729 }
6767 } 6730 }
6768 6731
6732 ~LoadOptimizer() {
6733 aliased_set_->RollbackAliasedIdentites();
6734 }
6735
6769 Isolate* isolate() const { return graph_->isolate(); } 6736 Isolate* isolate() const { return graph_->isolate(); }
6770 6737
6771 static bool OptimizeGraph(FlowGraph* graph) { 6738 static bool OptimizeGraph(FlowGraph* graph) {
6772 ASSERT(FLAG_load_cse); 6739 ASSERT(FLAG_load_cse);
6773 if (FLAG_trace_load_optimization) { 6740 if (FLAG_trace_load_optimization) {
6774 FlowGraphPrinter::PrintGraph("Before LoadOptimizer", graph); 6741 FlowGraphPrinter::PrintGraph("Before LoadOptimizer", graph);
6775 } 6742 }
6776 6743
6777 DirectChainedHashMap<PointerKeyValueTrait<Place> > map; 6744 DirectChainedHashMap<PointerKeyValueTrait<Place> > map;
6778 AliasedSet* aliased_set = NumberPlaces(graph, &map, kOptimizeLoads); 6745 AliasedSet* aliased_set = NumberPlaces(graph, &map, kOptimizeLoads);
(...skipping 45 matching lines...)
6824 BitVector* gen = gen_[preorder_number]; 6791 BitVector* gen = gen_[preorder_number];
6825 6792
6826 ZoneGrowableArray<Definition*>* exposed_values = NULL; 6793 ZoneGrowableArray<Definition*>* exposed_values = NULL;
6827 ZoneGrowableArray<Definition*>* out_values = NULL; 6794 ZoneGrowableArray<Definition*>* out_values = NULL;
6828 6795
6829 for (ForwardInstructionIterator instr_it(block); 6796 for (ForwardInstructionIterator instr_it(block);
6830 !instr_it.Done(); 6797 !instr_it.Done();
6831 instr_it.Advance()) { 6798 instr_it.Advance()) {
6832 Instruction* instr = instr_it.Current(); 6799 Instruction* instr = instr_it.Current();
6833 6800
6834 const Alias alias = aliased_set_->ComputeAliasForStore(instr); 6801 BitVector* killed = NULL;
6835 if (!alias.IsNone()) { 6802 if (aliased_set_->IsStore(instr, &killed)) {
6836 // Interfering stores kill only loads from the same offset.
6837 BitVector* killed = aliased_set_->Get(alias);
6838
6839 if (killed != NULL) { 6803 if (killed != NULL) {
6840 kill->AddAll(killed); 6804 kill->AddAll(killed);
6841 // There is no need to clear out_values when clearing GEN set 6805 // There is no need to clear out_values when clearing GEN set
6842 // because only those values that are in the GEN set 6806 // because only those values that are in the GEN set
6843 // will ever be used. 6807 // will ever be used.
6844 gen->RemoveAll(killed); 6808 gen->RemoveAll(killed);
6845 } 6809 }
6846 6810
6847 // Only forward stores to normal arrays, float64, and simd arrays 6811 // Only forward stores to normal arrays, float64, and simd arrays
6848 // to loads because other array stores (intXX/uintXX/float32) 6812 // to loads because other array stores (intXX/uintXX/float32)
(...skipping 43 matching lines...)
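In the block scan above, a store contributes the places it may alias to KILL and removes them from GEN, so stale loads are never forwarded past it. A minimal standalone sketch of that bookkeeping, with std::set standing in for the VM's BitVector (assumed simplification):

#include <cstdio>
#include <set>

using PlaceSet = std::set<int>;  // stand-in for a BitVector of place ids

// Apply one store: everything the store may alias is killed and is no longer
// available for forwarding in this block.
void ApplyStore(const PlaceSet& killed, PlaceSet* gen, PlaceSet* kill) {
  for (int id : killed) {
    kill->insert(id);
    gen->erase(id);  // no need to clear out_values; only GEN members are read
  }
}

int main() {
  PlaceSet gen = {0, 1, 2};        // loads available so far
  PlaceSet kill;
  PlaceSet killed_by_store = {1};  // places aliased by the store

  ApplyStore(killed_by_store, &gen, &kill);
  std::printf("gen size=%zu kill size=%zu\n", gen.size(), kill.size());
  return 0;
}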
6892 // The reason to ignore escaping objects is that final fields are 6856 // The reason to ignore escaping objects is that final fields are
6893 // initialized in a constructor that may not be inlined into 6857 // initialized in a constructor that may not be inlined into
6894 // the function that we are currently optimizing. However, at the same 6858 // the function that we are currently optimizing. However, at the same
6895 // time we assume that values of the final fields can be forwarded 6859 // time we assume that values of the final fields can be forwarded
6896 // across side-effects. If we add 'null' as known values for these 6860 // across side-effects. If we add 'null' as known values for these
6897 // fields here we will incorrectly propagate this null across 6861 // fields here we will incorrectly propagate this null across
6898 // constructor invocation. 6862 // constructor invocation.
6899 // TODO(vegorov): record null-values at least for not final fields of 6863 // TODO(vegorov): record null-values at least for not final fields of
6900 // escaping object. 6864 // escaping object.
6901 AllocateObjectInstr* alloc = instr->AsAllocateObject(); 6865 AllocateObjectInstr* alloc = instr->AsAllocateObject();
6902 if ((alloc != NULL) && !AliasedSet::CanBeAliased(alloc)) { 6866 if ((alloc != NULL) && !aliased_set_->CanBeAliased(alloc)) {
6903 for (Value* use = alloc->input_use_list(); 6867 for (Value* use = alloc->input_use_list();
6904 use != NULL; 6868 use != NULL;
6905 use = use->next_use()) { 6869 use = use->next_use()) {
6906 // Look for all immediate loads from this object. 6870 // Look for all immediate loads from this object.
6907 if (use->use_index() != 0) { 6871 if (use->use_index() != 0) {
6908 continue; 6872 continue;
6909 } 6873 }
6910 6874
6911 LoadFieldInstr* load = use->instruction()->AsLoadField(); 6875 LoadFieldInstr* load = use->instruction()->AsLoadField();
6912 if (load != NULL) { 6876 if (load != NULL) {
(...skipping 771 matching lines...)
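The chunk above decides when a load from a freshly allocated object may be pre-seeded with the constant null: only when the allocation cannot be aliased, and never for final fields, whose real value is assigned in a constructor that may not be inlined here. A hedged standalone sketch of that decision, using an invented FieldDesc type rather than the VM's Field objects:

#include <cstdio>
#include <map>
#include <string>

struct FieldDesc {
  std::string name;
  bool is_final;
};

// Returns true if a load of this field from a brand-new, non-aliased object
// may be pre-seeded with null.
bool CanSeedWithNull(const FieldDesc& field) {
  // Final fields are assigned in the constructor, which may not be inlined
  // here, so seeding them with null would forward a wrong value.
  return !field.is_final;
}

int main() {
  std::map<std::string, std::string> known;  // field name -> known value
  FieldDesc fields[] = {{"x", false}, {"y", true}};
  for (const FieldDesc& f : fields) {
    if (CanSeedWithNull(f)) {
      known[f.name] = "null";
    }
  }
  std::printf("seeded %zu of 2 fields with null\n", known.size());
  return 0;
}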
7684 // Initialize live-out for exit blocks since it won't be computed 7648 // Initialize live-out for exit blocks since it won't be computed
7685 // otherwise during the fixed point iteration. 7649 // otherwise during the fixed point iteration.
7686 live_out->CopyFrom(all_places); 7650 live_out->CopyFrom(all_places);
7687 } 7651 }
7688 continue; 7652 continue;
7689 } 7653 }
7690 7654
7691 // Handle loads. 7655 // Handle loads.
7692 Definition* defn = instr->AsDefinition(); 7656 Definition* defn = instr->AsDefinition();
7693 if ((defn != NULL) && IsLoadEliminationCandidate(defn)) { 7657 if ((defn != NULL) && IsLoadEliminationCandidate(defn)) {
7694 const Alias alias = aliased_set_->ComputeAlias(&place); 7658 const intptr_t alias = aliased_set_->LookupAliasId(place.ToAlias());
7695 live_in->AddAll(aliased_set_->Get(alias)); 7659 live_in->AddAll(aliased_set_->GetKilledSet(alias));
7696 continue; 7660 continue;
7697 } 7661 }
7698 } 7662 }
7699 exposed_stores_[postorder_number] = exposed_stores; 7663 exposed_stores_[postorder_number] = exposed_stores;
7700 } 7664 }
7701 if (FLAG_trace_load_optimization) { 7665 if (FLAG_trace_load_optimization) {
7702 Dump(); 7666 Dump();
7703 OS::Print("---\n"); 7667 OS::Print("---\n");
7704 } 7668 }
7705 } 7669 }
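The chunk above belongs to the store-elimination pass: it seeds live-out at exit blocks and, whenever it meets a load, marks every place the load may alias as live again, so only stores never observed afterwards become removal candidates. A standalone single-block sketch of that backward scan, with std::set in place of BitVector and a simplified instruction model (not the VM's IR):

#include <cstdio>
#include <set>
#include <vector>

struct Instr {
  bool is_store;
  int place_id;           // place written (for a store) or read (for a load)
  std::set<int> aliases;  // place ids a load may alias
};

int main() {
  // Block instructions in forward order: store p0, load p0, store p1.
  std::vector<Instr> block = {
      {true, 0, {}}, {false, 0, {0}}, {true, 1, {}}};

  std::set<int> live;            // live places, maintained while scanning backwards
  std::vector<int> dead_stores;  // stores never observed by a later load or exit

  for (auto it = block.rbegin(); it != block.rend(); ++it) {
    if (it->is_store) {
      if (live.count(it->place_id) == 0) {
        dead_stores.push_back(it->place_id);  // candidate for removal
      }
      live.erase(it->place_id);               // the store kills liveness of its place
    } else {
      live.insert(it->aliases.begin(), it->aliases.end());  // a load revives its aliases
    }
  }
  std::printf("dead stores found: %zu\n", dead_stores.size());  // the store to p1
  return 0;
}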
(...skipping 2170 matching lines...)
9876 // TODO(srdjan): --source-lines needs deopt environments to get at 9840 // TODO(srdjan): --source-lines needs deopt environments to get at
9877 // the code for this instruction, however, leaving the environment 9841 // the code for this instruction, however, leaving the environment
9878 // changes code. 9842 // changes code.
9879 current->RemoveEnvironment(); 9843 current->RemoveEnvironment();
9880 } 9844 }
9881 } 9845 }
9882 } 9846 }
9883 } 9847 }
9884 9848
9885 9849
9850 enum SafeUseCheck { kOptimisticCheck, kStrictCheck };
9851
9852 // Check if the use is safe for allocation sinking. Allocation sinking
9853 // candidates can only be used at store instructions:
9854 //
9855 // - any store into the allocation candidate itself is unconditionally safe
9856 // as it just changes the rematerialization state of this candidate;
9857 // - a store into another object is only safe if that other object is itself
9858 //   an allocation sinking candidate.
9859 //
9860 // We use a simple fix-point algorithm to discover the set of valid candidates
9861 // (see the CollectCandidates method), which is why IsSafeUse can operate in two
9862 // modes:
9863 //
9864 // - optimistic, when every allocation is assumed to be an allocation
9865 // sinking candidate;
9866 // - strict, when only marked allocations are assumed to be allocation
9867 // sinking candidates.
9868 //
9869 // The fix-point algorithm in CollectCandidates first collects a set of allocations
9870 // optimistically and then checks each collected candidate strictly and unmarks
9871 // invalid candidates transitively until only strictly valid ones remain.
9872 static bool IsSafeUse(Value* use, SafeUseCheck check_type) {
9873 if (use->instruction()->IsMaterializeObject()) {
9874 return true;
9875 }
9876
9877 StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField();
9878 if (store != NULL) {
9879 if (use == store->value()) {
9880 Definition* instance = store->instance()->definition();
9881 return instance->IsAllocateObject() &&
9882 ((check_type == kOptimisticCheck) ||
9883 instance->Identity().IsAllocationSinkingCandidate());
9884 }
9885 return true;
9886 }
9887
9888 return false;
9889 }
9890
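The comment block and IsSafeUse above drive candidate discovery: every allocation starts out optimistically marked, and candidates whose uses fail the strict check are unmarked until nothing changes. A standalone sketch of that two-phase fix-point over a toy use graph (invented structs; the real pass iterates AllocateObjectInstr use lists):

#include <cstdio>
#include <vector>

struct Alloc {
  bool candidate = true;            // optimistic initial marking
  std::vector<Alloc*> stored_into;  // other objects this allocation is stored into
};

// Strict check: storing the value into another object is only safe when that
// object is itself still a candidate (it will be dematerialized too).
bool IsStrictlySafe(const Alloc& a) {
  for (const Alloc* target : a.stored_into) {
    if (!target->candidate) return false;
  }
  return true;
}

int main() {
  Alloc escaping;                 // imagine this one has an unsafe use elsewhere
  escaping.candidate = false;
  Alloc inner;                    // stored only into 'escaping'
  inner.stored_into.push_back(&escaping);
  std::vector<Alloc*> candidates = {&inner};

  bool changed;
  do {                            // fix-point: unmarking one can invalidate others
    changed = false;
    for (Alloc* a : candidates) {
      if (a->candidate && !IsStrictlySafe(*a)) {
        a->candidate = false;
        changed = true;
      }
    }
  } while (changed);

  std::printf("inner survives: %s\n", inner.candidate ? "yes" : "no");  // "no"
  return 0;
}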
9891
9886 // Right now we are attempting to sink allocations only into 9892 // Right now we are attempting to sink allocations only into
9887 // deoptimization exits. So a candidate should only be used in StoreInstanceField 9893 // deoptimization exits. So a candidate should only be used in StoreInstanceField
9888 // instructions that write into fields of the allocated object. 9894 // instructions that write into fields of the allocated object.
9889 // We do not support materialization of the object that has type arguments. 9895 // We do not support materialization of the object that has type arguments.
9890 static bool IsAllocationSinkingCandidate(AllocateObjectInstr* alloc) { 9896 static bool IsAllocationSinkingCandidate(AllocateObjectInstr* alloc,
9897 SafeUseCheck check_type) {
9891 for (Value* use = alloc->input_use_list(); 9898 for (Value* use = alloc->input_use_list();
9892 use != NULL; 9899 use != NULL;
9893 use = use->next_use()) { 9900 use = use->next_use()) {
9894 if (!(use->instruction()->IsStoreInstanceField() && 9901 if (!IsSafeUse(use, check_type)) {
9895 (use->use_index() == 0))) { 9902 if (FLAG_trace_optimization) {
9903 OS::Print("use of %s at %s is unsafe for allocation sinking\n",
9904 alloc->ToCString(),
9905 use->instruction()->ToCString());
9906 }
9896 return false; 9907 return false;
9897 } 9908 }
9898 } 9909 }
9899 9910
9900 return true; 9911 return true;
9901 } 9912 }
9902 9913
9903 9914
9915 // If the given use is a store into an object then return the object we are
9916 // storing into.
9917 static Definition* StoreInto(Value* use) {
9918 StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField();
9919 if (store != NULL) {
9920 return store->instance()->definition();
9921 }
9922
9923 return NULL;
9924 }
9925
9926
9904 // Remove the given allocation from the graph. It is not observable. 9927 // Remove the given allocation from the graph. It is not observable.
9905 // If deoptimization occurs the object will be materialized. 9928 // If deoptimization occurs the object will be materialized.
9906 static void EliminateAllocation(AllocateObjectInstr* alloc) { 9929 void AllocationSinking::EliminateAllocation(AllocateObjectInstr* alloc) {
9907 ASSERT(IsAllocationSinkingCandidate(alloc)); 9930 ASSERT(IsAllocationSinkingCandidate(alloc, kStrictCheck));
9908 9931
9909 if (FLAG_trace_optimization) { 9932 if (FLAG_trace_optimization) {
9910 OS::Print("removing allocation from the graph: v%" Pd "\n", 9933 OS::Print("removing allocation from the graph: v%" Pd "\n",
9911 alloc->ssa_temp_index()); 9934 alloc->ssa_temp_index());
9912 } 9935 }
9913 9936
9914 // As an allocation sinking candidate it is only used in stores to its own 9937 // As an allocation sinking candidate it is only used in stores to its own
9915 // fields. Remove these stores. 9938 // fields. Remove these stores.
9916 for (Value* use = alloc->input_use_list(); 9939 for (Value* use = alloc->input_use_list();
9917 use != NULL; 9940 use != NULL;
9918 use = alloc->input_use_list()) { 9941 use = alloc->input_use_list()) {
9919 use->instruction()->RemoveFromGraph(); 9942 use->instruction()->RemoveFromGraph();
9920 } 9943 }
9921 9944
9922 // There should be no environment uses. The pass replaced them with 9945 // There should be no environment uses. The pass replaced them with
9923 // MaterializeObject instructions. 9946 // MaterializeObject instructions.
9924 ASSERT(alloc->env_use_list() == NULL); 9947 #ifdef DEBUG
9948 for (Value* use = alloc->env_use_list();
9949 use != NULL;
9950 use = use->next_use()) {
9951 ASSERT(use->instruction()->IsMaterializeObject());
9952 }
9953 #endif
9925 ASSERT(alloc->input_use_list() == NULL); 9954 ASSERT(alloc->input_use_list() == NULL);
9926 alloc->RemoveFromGraph(); 9955 alloc->RemoveFromGraph();
9927 if (alloc->ArgumentCount() > 0) { 9956 if (alloc->ArgumentCount() > 0) {
9928 ASSERT(alloc->ArgumentCount() == 1); 9957 ASSERT(alloc->ArgumentCount() == 1);
9929 for (intptr_t i = 0; i < alloc->ArgumentCount(); ++i) { 9958 for (intptr_t i = 0; i < alloc->ArgumentCount(); ++i) {
9930 alloc->PushArgumentAt(i)->RemoveFromGraph(); 9959 alloc->PushArgumentAt(i)->RemoveFromGraph();
9931 } 9960 }
9932 } 9961 }
9933 } 9962 }
9934 9963
9935 9964
9936 void AllocationSinking::Optimize() { 9965 // Find allocation instructions that can be potentially eliminated and
9937 GrowableArray<AllocateObjectInstr*> candidates(5); 9966 // rematerialized at deoptimization exits if needed. See IsSafeUse
9938 9967 // for the description of the algorithm used below.
9939 // Collect sinking candidates. 9968 void AllocationSinking::CollectCandidates() {
9940 const GrowableArray<BlockEntryInstr*>& postorder = flow_graph_->postorder(); 9969 // Optimistically collect all potential candidates.
9941 for (BlockIterator block_it(postorder); 9970 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
9942 !block_it.Done(); 9971 !block_it.Done();
9943 block_it.Advance()) { 9972 block_it.Advance()) {
9944 BlockEntryInstr* block = block_it.Current(); 9973 BlockEntryInstr* block = block_it.Current();
9945 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { 9974 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
9946 AllocateObjectInstr* alloc = it.Current()->AsAllocateObject(); 9975 AllocateObjectInstr* alloc = it.Current()->AsAllocateObject();
9947 if ((alloc != NULL) && IsAllocationSinkingCandidate(alloc)) { 9976 if ((alloc != NULL) &&
9948 if (FLAG_trace_optimization) { 9977 IsAllocationSinkingCandidate(alloc, kOptimisticCheck)) {
9949 OS::Print("discovered allocation sinking candidate: v%" Pd "\n", 9978 alloc->SetIdentity(AliasIdentity::AllocationSinkingCandidate());
9950 alloc->ssa_temp_index()); 9979 candidates_.Add(alloc);
9951 } 9980 }
9952 9981 }
9953 // All sinking candidate are known to be not aliased. 9982 }
9954 alloc->SetIdentity(kIdentityNotAliased); 9983
9955 9984 // Transitively unmark all candidates that are not strictly valid.
9956 candidates.Add(alloc); 9985 bool changed;
9957 } 9986 do {
9958 } 9987 changed = false;
9959 } 9988 for (intptr_t i = 0; i < candidates_.length(); i++) {
9989 AllocateObjectInstr* alloc = candidates_[i];
9990 if (alloc->Identity().IsAllocationSinkingCandidate()) {
9991 if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) {
9992 alloc->SetIdentity(AliasIdentity::Unknown());
9993 changed = true;
9994 }
9995 }
9996 }
9997 } while (changed);
9998
9999 // Shrink the list of candidates removing all unmarked ones.
10000 intptr_t j = 0;
10001 for (intptr_t i = 0; i < candidates_.length(); i++) {
10002 AllocateObjectInstr* alloc = candidates_[i];
10003 if (alloc->Identity().IsAllocationSinkingCandidate()) {
10004 if (FLAG_trace_optimization) {
10005 OS::Print("discovered allocation sinking candidate: v%" Pd "\n",
10006 alloc->ssa_temp_index());
10007 }
10008
10009 if (j != i) {
10010 candidates_[j] = alloc;
10011 }
10012 j++;
10013 }
10014 }
10015 candidates_.TruncateTo(j);
10016 }
10017
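CollectCandidates above, and RemoveUnusedMaterializations and DiscoverFailedCandidates below, all finish with the same in-place compaction: surviving entries are shifted down and the array is truncated. A tiny sketch of that pattern on a plain std::vector, as an assumed simplification of the GrowableArray/TruncateTo idiom:

#include <cstdio>
#include <vector>

// In-place compaction: keep marked elements, preserve order, then truncate.
int main() {
  std::vector<int> items = {1, -2, 3, -4, 5};  // negative == "unmarked"
  size_t j = 0;
  for (size_t i = 0; i < items.size(); i++) {
    if (items[i] > 0) {            // keep only marked entries
      if (j != i) items[j] = items[i];
      j++;
    }
  }
  items.resize(j);                 // plays the role of TruncateTo(j)
  std::printf("kept %zu items\n", items.size());  // 3
  return 0;
}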
10018
10019 // If materialization references an allocation sinking candidate then replace
10020 // this reference with a materialization which should have been computed for
10021 // this side-exit. CollectAllExits should have collected this exit.
10022 void AllocationSinking::NormalizeMaterializations() {
10023 for (intptr_t i = 0; i < candidates_.length(); i++) {
10024 Definition* alloc = candidates_[i];
10025
10026 Value* next_use;
10027 for (Value* use = alloc->input_use_list();
10028 use != NULL;
10029 use = next_use) {
10030 next_use = use->next_use();
10031 if (use->instruction()->IsMaterializeObject()) {
10032 use->BindTo(MaterializationFor(alloc, use->instruction()));
10033 }
10034 }
10035 }
10036 }
10037
10038
10039 // We transitively insert materializations at each deoptimization exit that
10040 // might see the given allocation (see ExitsCollector). Some of these
10041 // materializations are not actually used, and some fail to compute because
10042 // they are inserted in a block that is not dominated by the allocation.
10043 // Remove such unused materializations from the graph.
10044 void AllocationSinking::RemoveUnusedMaterializations() {
10045 intptr_t j = 0;
10046 for (intptr_t i = 0; i < materializations_.length(); i++) {
10047 MaterializeObjectInstr* mat = materializations_[i];
10048 if ((mat->input_use_list() == NULL) && (mat->env_use_list() == NULL)) {
10049 // Check if this materialization failed to compute and remove any
10050 // unforwarded loads. There were no loads from any allocation sinking
10051 // candidate in the beginning, so it is safe to assume that any encountered
10052 // load was inserted by CreateMaterializationAt.
10053 for (intptr_t i = 0; i < mat->InputCount(); i++) {
10054 LoadFieldInstr* load = mat->InputAt(i)->definition()->AsLoadField();
10055 if ((load != NULL) &&
10056 (load->instance()->definition() == mat->allocation())) {
10057 load->ReplaceUsesWith(flow_graph_->constant_null());
10058 load->RemoveFromGraph();
10059 }
10060 }
10061 mat->RemoveFromGraph();
10062 } else {
10063 if (j != i) {
10064 materializations_[j] = mat;
10065 }
10066 j++;
10067 }
10068 }
10069 materializations_.TruncateTo(j);
10070 }
10071
10072
10073 // Some candidates might stop being eligible for allocation sinking after
10074 // the load forwarding because they flow into phis that load forwarding
10075 // inserts. Discover such allocations and remove them from the list
10076 // of allocation sinking candidates, undoing all the changes that were made
10077 // in preparation for sinking these allocations.
10078 void AllocationSinking::DiscoverFailedCandidates() {
10079 // Transitively unmark all candidates that are not strictly valid.
10080 bool changed;
10081 do {
10082 changed = false;
10083 for (intptr_t i = 0; i < candidates_.length(); i++) {
10084 AllocateObjectInstr* alloc = candidates_[i];
10085 if (alloc->Identity().IsAllocationSinkingCandidate()) {
10086 if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) {
10087 alloc->SetIdentity(AliasIdentity::Unknown());
10088 changed = true;
10089 }
10090 }
10091 }
10092 } while (changed);
10093
10094 // Remove all failed candidates from the candidates list.
10095 intptr_t j = 0;
10096 for (intptr_t i = 0; i < candidates_.length(); i++) {
10097 AllocateObjectInstr* alloc = candidates_[i];
10098 if (!alloc->Identity().IsAllocationSinkingCandidate()) {
10099 if (FLAG_trace_optimization) {
10100 OS::Print("allocation v%" Pd " can't be eliminated\n",
10101 alloc->ssa_temp_index());
10102 }
10103
10104 #ifdef DEBUG
10105 for (Value* use = alloc->env_use_list();
10106 use != NULL;
10107 use = use->next_use()) {
10108 ASSERT(use->instruction()->IsMaterializeObject());
10109 }
10110 #endif
10111
10112 // All materializations will be removed from the graph. Remove inserted
10113 // loads first and detach materializations from allocation's environment
10114 // use list: we will reconstruct it when we start removing
10115 // materializations.
10116 alloc->set_env_use_list(NULL);
10117 for (Value* use = alloc->input_use_list();
10118 use != NULL;
10119 use = use->next_use()) {
10120 if (use->instruction()->IsLoadField()) {
10121 LoadFieldInstr* load = use->instruction()->AsLoadField();
10122 load->ReplaceUsesWith(flow_graph_->constant_null());
10123 load->RemoveFromGraph();
10124 } else {
10125 ASSERT(use->instruction()->IsMaterializeObject() ||
10126 use->instruction()->IsPhi() ||
10127 use->instruction()->IsStoreInstanceField());
10128 }
10129 }
10130 } else {
10131 if (j != i) {
10132 candidates_[j] = alloc;
10133 }
10134 j++;
10135 }
10136 }
10137
10138 if (j != candidates_.length()) { // Something was removed from candidates.
10139 intptr_t k = 0;
10140 for (intptr_t i = 0; i < materializations_.length(); i++) {
10141 MaterializeObjectInstr* mat = materializations_[i];
10142 if (!mat->allocation()->Identity().IsAllocationSinkingCandidate()) {
10143 // Restore environment uses of the allocation that were replaced
10144 // by this materialization and drop materialization.
10145 mat->ReplaceUsesWith(mat->allocation());
10146 mat->RemoveFromGraph();
10147 } else {
10148 if (k != i) {
10149 materializations_[k] = mat;
10150 }
10151 k++;
10152 }
10153 }
10154 materializations_.TruncateTo(k);
10155 }
10156
10157 candidates_.TruncateTo(j);
10158 }
10159
10160
10161 void AllocationSinking::Optimize() {
10162 CollectCandidates();
9960 10163
9961 // Insert MaterializeObject instructions that will describe the state of the 10164 // Insert MaterializeObject instructions that will describe the state of the
9962 // object at all deoptimization points. Each inserted materialization looks 10165 // object at all deoptimization points. Each inserted materialization looks
9963 // like this (where v_0 is allocation that we are going to eliminate): 10166 // like this (where v_0 is allocation that we are going to eliminate):
9964 // v_1 <- LoadField(v_0, field_1) 10167 // v_1 <- LoadField(v_0, field_1)
9965 // ... 10168 // ...
9966 // v_N <- LoadField(v_0, field_N) 10169 // v_N <- LoadField(v_0, field_N)
9967 // v_{N+1} <- MaterializeObject(field_1 = v_1, ..., field_N = v_{N}) 10170 // v_{N+1} <- MaterializeObject(field_1 = v_1, ..., field_N = v_{N})
9968 for (intptr_t i = 0; i < candidates.length(); i++) { 10171 for (intptr_t i = 0; i < candidates_.length(); i++) {
9969 InsertMaterializations(candidates[i]); 10172 InsertMaterializations(candidates_[i]);
9970 } 10173 }
9971 10174
9972 // Run load forwarding to eliminate LoadField instructions inserted above. 10175 // Run load forwarding to eliminate LoadField instructions inserted above.
9973 // All loads will be successfully eliminated because: 10176 // All loads will be successfully eliminated because:
9974 // a) they use fields (not offsets) and thus provide precise aliasing 10177 // a) they use fields (not offsets) and thus provide precise aliasing
9975 // information 10178 // information
9976 // b) candidate does not escape and thus its fields are not affected by 10179 // b) candidate does not escape and thus its fields are not affected by
9977 // external effects from calls. 10180 // external effects from calls.
9978 LoadOptimizer::OptimizeGraph(flow_graph_); 10181 LoadOptimizer::OptimizeGraph(flow_graph_);
9979 10182
9980 if (FLAG_trace_optimization) { 10183 NormalizeMaterializations();
9981 FlowGraphPrinter::PrintGraph("Sinking", flow_graph_); 10184
9982 } 10185 RemoveUnusedMaterializations();
10186
10187 // If any candidates are no longer eligible for allocation sinking abort
10188 // the optimization for them and undo any changes we did in preparation.
10189 DiscoverFailedCandidates();
9983 10190
9984 // At this point we have computed the state of the object at each deoptimization 10191 // At this point we have computed the state of the object at each deoptimization
9985 // point and we can eliminate it. Loads inserted above were forwarded so there 10192 // point and we can eliminate it. Loads inserted above were forwarded so there
9986 // are no uses of the allocation, just as in the beginning of the pass. 10193 // are no uses of the allocation, just as in the beginning of the pass.
9987 for (intptr_t i = 0; i < candidates.length(); i++) { 10194 for (intptr_t i = 0; i < candidates_.length(); i++) {
9988 EliminateAllocation(candidates[i]); 10195 EliminateAllocation(candidates_[i]);
9989 } 10196 }
9990 10197
9991 // Process materializations and unbox their arguments: materializations 10198 // Process materializations and unbox their arguments: materializations
9992 // are part of the environment and can materialize boxes for double/mint/simd 10199 // are part of the environment and can materialize boxes for double/mint/simd
9993 // values when needed. 10200 // values when needed.
9994 // TODO(vegorov): handle all box types here. 10201 // TODO(vegorov): handle all box types here.
9995 for (intptr_t i = 0; i < materializations_.length(); i++) { 10202 for (intptr_t i = 0; i < materializations_.length(); i++) {
9996 MaterializeObjectInstr* mat = materializations_[i]; 10203 MaterializeObjectInstr* mat = materializations_[i];
9997 for (intptr_t j = 0; j < mat->InputCount(); j++) { 10204 for (intptr_t j = 0; j < mat->InputCount(); j++) {
9998 Definition* defn = mat->InputAt(j)->definition(); 10205 Definition* defn = mat->InputAt(j)->definition();
9999 if (defn->IsBoxDouble() || 10206 if (defn->IsBoxDouble() ||
10000 defn->IsBoxFloat32x4() || 10207 defn->IsBoxFloat32x4() ||
10001 defn->IsBoxInt32x4()) { 10208 defn->IsBoxInt32x4()) {
10002 mat->InputAt(j)->BindTo(defn->InputAt(0)->definition()); 10209 mat->InputAt(j)->BindTo(defn->InputAt(0)->definition());
10003 } 10210 }
10004 } 10211 }
10005 } 10212 }
10006 } 10213 }
10007 10214
10008 10215
10009 // Remove materializations from the graph. Register allocator will treat them 10216 // Remove materializations from the graph. Register allocator will treat them
10010 // as part of the environment not as a real instruction. 10217 // as part of the environment not as a real instruction.
10011 void AllocationSinking::DetachMaterializations() { 10218 void AllocationSinking::DetachMaterializations() {
10012 for (intptr_t i = 0; i < materializations_.length(); i++) { 10219 for (intptr_t i = 0; i < materializations_.length(); i++) {
10013 ASSERT(materializations_[i]->input_use_list() == NULL);
10014 materializations_[i]->previous()->LinkTo(materializations_[i]->next()); 10220 materializations_[i]->previous()->LinkTo(materializations_[i]->next());
10015 } 10221 }
10016 } 10222 }
10017 10223
10018 10224
10019 // Add a field/offset to the list of fields if it is not yet present there. 10225 // Add a field/offset to the list of fields if it is not yet present there.
10020 static void AddSlot(ZoneGrowableArray<const Object*>* slots, 10226 static bool AddSlot(ZoneGrowableArray<const Object*>* slots,
10021 const Object& slot) { 10227 const Object& slot) {
10022 for (intptr_t i = 0; i < slots->length(); i++) { 10228 for (intptr_t i = 0; i < slots->length(); i++) {
10023 if ((*slots)[i]->raw() == slot.raw()) { 10229 if ((*slots)[i]->raw() == slot.raw()) {
10024 return; 10230 return false;
10025 } 10231 }
10026 } 10232 }
10027 slots->Add(&slot); 10233 slots->Add(&slot);
10234 return true;
10028 } 10235 }
10029 10236
10030 10237
10031 // Add given instruction to the list of the instructions if it is not yet 10238 // Find deoptimization exit for the given materialization assuming that all
10032 // present there. 10239 // materializations are emitted right before the instruction which is a
10033 static void AddInstruction(GrowableArray<Instruction*>* exits, 10240 // deoptimization exit.
10034 Instruction* exit) { 10241 static Instruction* ExitForMaterialization(MaterializeObjectInstr* mat) {
10035 ASSERT(!exit->IsGraphEntry()); 10242 while (mat->next()->IsMaterializeObject()) {
10036 for (intptr_t i = 0; i < exits->length(); i++) { 10243 mat = mat->next()->AsMaterializeObject();
10037 if ((*exits)[i] == exit) {
10038 return;
10039 }
10040 } 10244 }
10041 exits->Add(exit); 10245 return mat->next();
10042 } 10246 }
10043 10247
10044 10248
10249 // Given the deoptimization exit, find the first materialization that was inserted
10250 // before it.
10251 static Instruction* FirstMaterializationAt(Instruction* exit) {
10252 while (exit->previous()->IsMaterializeObject()) {
10253 exit = exit->previous();
10254 }
10255 return exit;
10256 }
10257
10258
10259 // Given the allocation and deoptimization exit try to find MaterializeObject
10260 // instruction corresponding to this allocation at this exit.
10261 MaterializeObjectInstr* AllocationSinking::MaterializationFor(
10262 Definition* alloc, Instruction* exit) {
10263 if (exit->IsMaterializeObject()) {
10264 exit = ExitForMaterialization(exit->AsMaterializeObject());
10265 }
10266
10267 for (MaterializeObjectInstr* mat = exit->previous()->AsMaterializeObject();
10268 mat != NULL;
10269 mat = mat->previous()->AsMaterializeObject()) {
10270 if (mat->allocation() == alloc) {
10271 return mat;
10272 }
10273 }
10274
10275 return NULL;
10276 }
10277
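MaterializationFor relies on the layout maintained by the pass: loads, then all MaterializeObject instructions, then the deoptimizing instruction, so the right materialization is found by walking backwards from the exit. A small linked-list sketch of that walk, with a simplified Node type rather than the VM's Instruction:

#include <cstdio>

struct Node {
  bool is_materialization;
  int alloc_id;   // which allocation this materialization describes (or -1)
  Node* prev;
};

// Walk the materializations emitted immediately before 'exit' and return the
// one describing 'alloc_id', or nullptr if none was inserted for it.
Node* FindMaterialization(Node* exit, int alloc_id) {
  for (Node* n = exit->prev; n != nullptr && n->is_materialization; n = n->prev) {
    if (n->alloc_id == alloc_id) return n;
  }
  return nullptr;
}

int main() {
  // Layout: mat(v0) -> mat(v1) -> deopt exit.
  Node mat0 = {true, 0, nullptr};
  Node mat1 = {true, 1, &mat0};
  Node exit = {false, -1, &mat1};

  std::printf("found v0: %s\n", FindMaterialization(&exit, 0) ? "yes" : "no");
  std::printf("found v2: %s\n", FindMaterialization(&exit, 2) ? "yes" : "no");
  return 0;
}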
10278
10045 // Insert MaterializeObject instruction for the given allocation before 10279 // Insert MaterializeObject instruction for the given allocation before
10046 // the given instruction that can deoptimize. 10280 // the given instruction that can deoptimize.
10047 void AllocationSinking::CreateMaterializationAt( 10281 void AllocationSinking::CreateMaterializationAt(
10048 Instruction* exit, 10282 Instruction* exit,
10049 AllocateObjectInstr* alloc, 10283 AllocateObjectInstr* alloc,
10050 const Class& cls, 10284 const Class& cls,
10051 const ZoneGrowableArray<const Object*>& slots) { 10285 const ZoneGrowableArray<const Object*>& slots) {
10052 ZoneGrowableArray<Value*>* values = 10286 ZoneGrowableArray<Value*>* values =
10053 new(I) ZoneGrowableArray<Value*>(slots.length()); 10287 new(I) ZoneGrowableArray<Value*>(slots.length());
10054 10288
10289 // All loads should be inserted before the first materialization so that
10290 // IR follows the following pattern: loads, materializations, deoptimizing
10291 // instruction.
10292 Instruction* load_point = FirstMaterializationAt(exit);
10293
10055 // Insert load instruction for every field. 10294 // Insert load instruction for every field.
10056 for (intptr_t i = 0; i < slots.length(); i++) { 10295 for (intptr_t i = 0; i < slots.length(); i++) {
10057 LoadFieldInstr* load = slots[i]->IsField() 10296 LoadFieldInstr* load = slots[i]->IsField()
10058 ? new(I) LoadFieldInstr( 10297 ? new(I) LoadFieldInstr(
10059 new(I) Value(alloc), 10298 new(I) Value(alloc),
10060 &Field::Cast(*slots[i]), 10299 &Field::Cast(*slots[i]),
10061 AbstractType::ZoneHandle(I), 10300 AbstractType::ZoneHandle(I),
10062 alloc->token_pos()) 10301 alloc->token_pos())
10063 : new(I) LoadFieldInstr( 10302 : new(I) LoadFieldInstr(
10064 new(I) Value(alloc), 10303 new(I) Value(alloc),
10065 Smi::Cast(*slots[i]).Value(), 10304 Smi::Cast(*slots[i]).Value(),
10066 AbstractType::ZoneHandle(I), 10305 AbstractType::ZoneHandle(I),
10067 alloc->token_pos()); 10306 alloc->token_pos());
10068 flow_graph_->InsertBefore( 10307 flow_graph_->InsertBefore(
10069 exit, load, NULL, FlowGraph::kValue); 10308 load_point, load, NULL, FlowGraph::kValue);
10070 values->Add(new(I) Value(load)); 10309 values->Add(new(I) Value(load));
10071 } 10310 }
10072 10311
10073 MaterializeObjectInstr* mat = 10312 MaterializeObjectInstr* mat =
10074 new(I) MaterializeObjectInstr(cls, slots, values); 10313 new(I) MaterializeObjectInstr(alloc, cls, slots, values);
10075 flow_graph_->InsertBefore(exit, mat, NULL, FlowGraph::kValue); 10314 flow_graph_->InsertBefore(exit, mat, NULL, FlowGraph::kValue);
10076 10315
10077 // Replace all mentions of this allocation with a newly inserted 10316 // Replace all mentions of this allocation with a newly inserted
10078 // MaterializeObject instruction. 10317 // MaterializeObject instruction.
10079 // We must preserve the identity: all mentions are replaced by the same 10318 // We must preserve the identity: all mentions are replaced by the same
10080 // materialization. 10319 // materialization.
10081 for (Environment::DeepIterator env_it(exit->env()); 10320 for (Environment::DeepIterator env_it(exit->env());
10082 !env_it.Done(); 10321 !env_it.Done();
10083 env_it.Advance()) { 10322 env_it.Advance()) {
10084 Value* use = env_it.CurrentValue(); 10323 Value* use = env_it.CurrentValue();
10085 if (use->definition() == alloc) { 10324 if (use->definition() == alloc) {
10086 use->RemoveFromUseList(); 10325 use->RemoveFromUseList();
10087 use->set_definition(mat); 10326 use->set_definition(mat);
10088 mat->AddEnvUse(use); 10327 mat->AddEnvUse(use);
10089 } 10328 }
10090 } 10329 }
10091 10330
10331 // Mark MaterializeObject as an environment use of this allocation.
10332 // This will allow us to discover it when we are looking for deoptimization
10333 // exits for another allocation that potentially flows into this one.
10334 Value* val = new(I) Value(alloc);
10335 val->set_instruction(mat);
10336 alloc->AddEnvUse(val);
10337
10092 // Record inserted materialization. 10338 // Record inserted materialization.
10093 materializations_.Add(mat); 10339 materializations_.Add(mat);
10094 } 10340 }
10095 10341
10096 10342
10343 // Add given instruction to the list of the instructions if it is not yet
10344 // present there.
10345 template<typename T>
10346 void AddInstruction(GrowableArray<T*>* list, T* value) {
10347 ASSERT(!value->IsGraphEntry());
10348 for (intptr_t i = 0; i < list->length(); i++) {
10349 if ((*list)[i] == value) {
10350 return;
10351 }
10352 }
10353 list->Add(value);
10354 }
10355
10356
10357 // Transitively collect all deoptimization exits that might need this allocation
10358 // rematerialized. It is not enough to collect only environment uses of this
10359 // allocation because it can flow into other objects that will be
10360 // dematerialized and that are referenced by deopt environments that
10361 // don't contain this allocation explicitly.
10362 void AllocationSinking::ExitsCollector::Collect(Definition* alloc) {
10363 for (Value* use = alloc->env_use_list();
10364 use != NULL;
10365 use = use->next_use()) {
10366 if (use->instruction()->IsMaterializeObject()) {
10367 AddInstruction(&exits_, ExitForMaterialization(
10368 use->instruction()->AsMaterializeObject()));
10369 } else {
10370 AddInstruction(&exits_, use->instruction());
10371 }
10372 }
10373
10374 // Check if this allocation is stored into any other allocation sinking
10375 // candidate and put it on worklist so that we conservatively collect all
10376 // exits for that candidate as well because they potentially might see
10377 // this object.
10378 for (Value* use = alloc->input_use_list();
10379 use != NULL;
10380 use = use->next_use()) {
10381 Definition* obj = StoreInto(use);
10382 if ((obj != NULL) && (obj != alloc)) {
10383 AddInstruction(&worklist_, obj);
10384 }
10385 }
10386 }
10387
10388
10389 void AllocationSinking::ExitsCollector::CollectTransitively(Definition* alloc) {
10390 exits_.TruncateTo(0);
10391 worklist_.TruncateTo(0);
10392
10393 worklist_.Add(alloc);
10394
10395 // Note: the worklist may grow while we are iterating over it.
10396 // We do not remove processed allocations from the worklist: maintaining a
10397 // BitVector of already processed allocations on the side would waste space,
10398 // and the worklist is expected to be very small, so a linear search in it
10399 // is just as efficient as a bit vector.
10400 for (intptr_t i = 0; i < worklist_.length(); i++) {
10401 Collect(worklist_[i]);
10402 }
10403 }
10404
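CollectTransitively above grows its worklist while iterating it: starting from one allocation, it pulls in every candidate that allocation is stored into, so every exit that could observe the object is eventually visited. A standalone sketch of that worklist pattern on a toy object graph (the real code walks env_use_list and input_use_list):

#include <cstdio>
#include <set>
#include <vector>

struct Obj {
  int id;
  std::set<int> exits;            // deopt exits that reference this object
  std::vector<Obj*> stored_into;  // sinking candidates this object flows into
};

int main() {
  Obj outer{1, {20}, {}};
  Obj inner{0, {10}, {&outer}};   // inner is stored into outer

  std::vector<Obj*> worklist = {&inner};
  std::set<int> all_exits;

  // The worklist may grow while we iterate; duplicates are filtered by a
  // linear scan since the list is expected to stay tiny.
  for (size_t i = 0; i < worklist.size(); i++) {
    Obj* obj = worklist[i];
    all_exits.insert(obj->exits.begin(), obj->exits.end());
    for (Obj* target : obj->stored_into) {
      bool seen = false;
      for (Obj* w : worklist) seen = seen || (w == target);
      if (!seen) worklist.push_back(target);
    }
  }
  std::printf("exits to materialize at: %zu\n", all_exits.size());  // 2
  return 0;
}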
10405
10097 void AllocationSinking::InsertMaterializations(AllocateObjectInstr* alloc) { 10406 void AllocationSinking::InsertMaterializations(AllocateObjectInstr* alloc) {
10098 // Collect all fields that are written for this instance. 10407 // Collect all fields that are written for this instance.
10099 ZoneGrowableArray<const Object*>* slots = 10408 ZoneGrowableArray<const Object*>* slots =
10100 new(I) ZoneGrowableArray<const Object*>(5); 10409 new(I) ZoneGrowableArray<const Object*>(5);
10101 10410
10102 for (Value* use = alloc->input_use_list(); 10411 for (Value* use = alloc->input_use_list();
10103 use != NULL; 10412 use != NULL;
10104 use = use->next_use()) { 10413 use = use->next_use()) {
10105 StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField(); 10414 StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField();
10106 if (!store->field().IsNull()) { 10415 if ((store != NULL) && (store->instance()->definition() == alloc)) {
10107 AddSlot(slots, store->field()); 10416 if (!store->field().IsNull()) {
10108 } else { 10417 AddSlot(slots, store->field());
10109 AddSlot(slots, Smi::ZoneHandle(I, Smi::New(store->offset_in_bytes()))); 10418 } else {
10419 AddSlot(slots, Smi::ZoneHandle(I, Smi::New(store->offset_in_bytes())));
10420 }
10110 } 10421 }
10111 } 10422 }
10112 10423
10113 if (alloc->ArgumentCount() > 0) { 10424 if (alloc->ArgumentCount() > 0) {
10114 ASSERT(alloc->ArgumentCount() == 1); 10425 ASSERT(alloc->ArgumentCount() == 1);
10115 intptr_t type_args_offset = alloc->cls().type_arguments_field_offset(); 10426 intptr_t type_args_offset = alloc->cls().type_arguments_field_offset();
10116 AddSlot(slots, Smi::ZoneHandle(I, Smi::New(type_args_offset))); 10427 AddSlot(slots, Smi::ZoneHandle(I, Smi::New(type_args_offset)));
10117 } 10428 }
10118 10429
10119 // Collect all instructions that mention this object in the environment. 10430 // Collect all instructions that mention this object in the environment.
10120 GrowableArray<Instruction*> exits(10); 10431 exits_collector_.CollectTransitively(alloc);
10121 for (Value* use = alloc->env_use_list();
10122 use != NULL;
10123 use = use->next_use()) {
10124 AddInstruction(&exits, use->instruction());
10125 }
10126 10432
10127 // Insert materializations at environment uses. 10433 // Insert materializations at environment uses.
10128 for (intptr_t i = 0; i < exits.length(); i++) { 10434 for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) {
10129 CreateMaterializationAt(exits[i], alloc, alloc->cls(), *slots); 10435 CreateMaterializationAt(
10436 exits_collector_.exits()[i], alloc, alloc->cls(), *slots);
10130 } 10437 }
10131 } 10438 }
10132 10439
10133 10440
10134 } // namespace dart 10441 } // namespace dart