OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. |
| 4 |
| 5 #include "vm/redundancy_elimination.h" |
| 6 |
| 7 #include "vm/bit_vector.h" |
| 8 #include "vm/flow_graph.h" |
| 9 #include "vm/hash_map.h" |
| 10 #include "vm/il_printer.h" |
| 11 #include "vm/intermediate_language.h" |
| 12 #include "vm/stack_frame.h" |
| 13 |
| 14 namespace dart { |
| 15 |
| 16 |
| 17 DEFINE_FLAG(bool, dead_store_elimination, true, "Eliminate dead stores"); |
| 18 DEFINE_FLAG(bool, load_cse, true, "Use redundant load elimination."); |
| 19 DEFINE_FLAG(bool, trace_load_optimization, false, |
| 20 "Print live sets for load optimization pass."); |
| 21 |
| 22 DECLARE_FLAG(bool, fields_may_be_reset); |
| 23 DECLARE_FLAG(bool, precompilation); |
| 24 DECLARE_FLAG(bool, trace_optimization); |
| 25 |
| 26 // Quick access to the current zone. |
| 27 #define Z (zone()) |
| 28 |
| 29 |
| 30 class CSEInstructionMap : public ValueObject { |
| 31 public: |
| 32 // Right now CSE and LICM track a single effect: possible externalization of |
| 33 // strings. |
| 34 // Other effects, such as modifications of fields, are tracked in a separate
| 35 // load forwarding pass via the Alias structure.
| 36 COMPILE_ASSERT(EffectSet::kLastEffect == 1); |
| 37 |
| 38 CSEInstructionMap() : independent_(), dependent_() { } |
| 39 explicit CSEInstructionMap(const CSEInstructionMap& other) |
| 40 : ValueObject(), |
| 41 independent_(other.independent_), |
| 42 dependent_(other.dependent_) { |
| 43 } |
| 44 |
| 45 void RemoveAffected(EffectSet effects) { |
| 46 if (!effects.IsNone()) { |
| 47 dependent_.Clear(); |
| 48 } |
| 49 } |
| 50 |
| 51 Instruction* Lookup(Instruction* other) const { |
| 52 return GetMapFor(other)->Lookup(other); |
| 53 } |
| 54 |
| 55 void Insert(Instruction* instr) { |
| 56 return GetMapFor(instr)->Insert(instr); |
| 57 } |
| 58 |
| 59 private: |
| 60 typedef DirectChainedHashMap<PointerKeyValueTrait<Instruction> > Map; |
| 61 |
| 62 Map* GetMapFor(Instruction* instr) { |
| 63 return instr->Dependencies().IsNone() ? &independent_ : &dependent_; |
| 64 } |
| 65 |
| 66 const Map* GetMapFor(Instruction* instr) const { |
| 67 return instr->Dependencies().IsNone() ? &independent_ : &dependent_; |
| 68 } |
| 69 |
| 70 // All computations that are not affected by any side-effect. |
| 71 // The majority of computations are not affected by anything and will be in
| 72 // this map. |
| 73 Map independent_; |
| 74 |
| 75 // All computations that are affected by side effects.
| 76 Map dependent_; |
| 77 }; |
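| 
| // Usage sketch (illustration only, assuming a dominator-walk CSE client of
| // this map; not part of the original file): while visiting instructions in
| // dominator-tree order such a client would do roughly
| //   map->RemoveAffected(instr->Effects());
| //   if (instr->AllowsCSE()) {
| //     Instruction* existing = map->Lookup(instr);
| //     if (existing != NULL) { /* reuse existing, remove instr */ }
| //     else map->Insert(instr);
| //   }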
| 78 |
| 79 |
| 80 // Place describes an abstract location (e.g. field) that IR can load |
| 81 // from or store to. |
| 82 // |
| 83 // Places are also used to describe wild-card locations, also known as aliases,
| 84 // which essentially represent sets of places that alias each other. Places A
| 85 // and B are said to alias each other if a store into A can affect a load from B.
| 86 // |
| 87 // We distinguish the following aliases: |
| 88 // |
| 89 // - for fields |
| 90 // - *.f, *.@offs - field inside some object; |
| 91 // - X.f, X.@offs - field inside an allocated object X; |
| 92 // - for indexed accesses |
| 93 // - *[*] - non-constant index inside some object; |
| 94 // - *[C] - constant index inside some object; |
| 95 // - X[*] - non-constant index inside an allocated object X; |
| 96 // - X[C] - constant index inside an allocated object X. |
| 97 // |
| 98 // Constant indexed places are divided into two subcategories: |
| 99 // |
| 100 // - Access to homogeneous array-like objects: Array, ImmutableArray, |
| 101 // OneByteString, TwoByteString. These objects can only be accessed |
| 102 // on an element-by-element basis with all elements having the same size.
| 103 // This means X[C] aliases X[K] if and only if C === K.
| 104 // - TypedData accesses. TypedData allows reading one of the primitive
| 105 // data types at the given byte offset. When TypedData is accessed through
| 106 // the index operator on a typed array or a typed array view, it is guaranteed
| 107 // that the byte offset is always aligned by the element size. We write
| 108 // these accesses as X[C|S], where C is the constant byte offset and S is the
| 109 // size of the data type. Obviously X[C|S] and X[K|U] alias if and only if
| 110 // either C = RoundDown(K, S) or K = RoundDown(C, U).
| 111 // Note that not all accesses to typed data are aligned: e.g. ByteData
| 112 // allows unaligned access through its get*/set* methods.
| 113 // The check in Place::SetIndex ensures that we never create a place X[C|S]
| 114 // such that C is not aligned by S. |
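| // A worked example (illustration only, not from the original comment):
| // X[8|4] denotes a 4-byte access covering bytes 8..11, so it aliases X[8|2]
| // and X[10|2] (RoundDown(10, 4) is 8) but not X[12|2], because
| // RoundDown(12, 4) is 12, not 8.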
| 115 // |
| 116 // Separating allocations from other objects improves precision of the |
| 117 // load forwarding pass because of the following two properties: |
| 118 // |
| 119 // - if X can be proven to have no aliases itself (i.e. there is no other SSA |
| 120 // variable that points to X) then no place inside X can be aliased with any |
| 121 // wildcard dependent place (*.f, *.@offs, *[*], *[C]); |
| 122 // - given allocations X and Y, no place inside X can be aliased with any place
| 123 // inside Y even if any of them or both escape. |
| 124 // |
| 125 // It is important to realize that a single place can belong to multiple aliases.
| 126 // For example, place X.f with an aliased allocation X belongs to both the X.f
| 127 // and *.f aliases. Likewise X[C] with a non-aliased allocation X belongs to the
| 128 // X[C] and X[*] aliases.
| 129 // |
| 130 class Place : public ValueObject { |
| 131 public: |
| 132 enum Kind { |
| 133 kNone, |
| 134 |
| 135 // Field location. For instance fields it is represented as a pair of a Field
| 136 // object and an instance (SSA definition) that is being accessed.
| 137 // For static fields the instance is NULL.
| 138 kField, |
| 139 |
| 140 // VMField location. Represented as a pair of an instance (SSA definition) |
| 141 // being accessed and an offset to the field.
| 142 kVMField, |
| 143 |
| 144 // Indexed location with a non-constant index. |
| 145 kIndexed, |
| 146 |
| 147 // Indexed location with a constant index. |
| 148 kConstantIndexed, |
| 149 }; |
| 150 |
| 151 // Size of the element accessed by constant index. Size is only important |
| 152 // for TypedData because those accesses can alias even when constant indexes |
| 153 // are not the same: X[0|4] aliases X[0|2] and X[2|2]. |
| 154 enum ElementSize { |
| 155 // If indexed access is not a TypedData access then element size is not |
| 156 // important because there is only a single possible access size depending |
| 157 // on the receiver - X[C] aliases X[K] if and only if C == K. |
| 158 // This is the size set for Array, ImmutableArray, OneByteString and |
| 159 // TwoByteString accesses. |
| 160 kNoSize, |
| 161 |
| 162 // 1 byte (Int8List, Uint8List, Uint8ClampedList). |
| 163 kInt8, |
| 164 |
| 165 // 2 bytes (Int16List, Uint16List). |
| 166 kInt16, |
| 167 |
| 168 // 4 bytes (Int32List, Uint32List, Float32List). |
| 169 kInt32, |
| 170 |
| 171 // 8 bytes (Int64List, Uint64List, Float64List). |
| 172 kInt64, |
| 173 |
| 174 // 16 bytes (Int32x4List, Float32x4List, Float64x2List). |
| 175 kInt128, |
| 176 |
| 177 kLargestElementSize = kInt128, |
| 178 }; |
| 179 |
| 180 Place(const Place& other) |
| 181 : ValueObject(), |
| 182 flags_(other.flags_), |
| 183 instance_(other.instance_), |
| 184 raw_selector_(other.raw_selector_), |
| 185 id_(other.id_) { |
| 186 } |
| 187 |
| 188 // Constructs a place from the instruction if the instruction accesses any place.
| 189 // Otherwise constructs a kNone place.
| 190 Place(Instruction* instr, bool* is_load, bool* is_store) |
| 191 : flags_(0), |
| 192 instance_(NULL), |
| 193 raw_selector_(0), |
| 194 id_(0) { |
| 195 switch (instr->tag()) { |
| 196 case Instruction::kLoadField: { |
| 197 LoadFieldInstr* load_field = instr->AsLoadField(); |
| 198 set_representation(load_field->representation()); |
| 199 instance_ = load_field->instance()->definition()->OriginalDefinition(); |
| 200 if (load_field->field() != NULL) { |
| 201 set_kind(kField); |
| 202 field_ = load_field->field(); |
| 203 } else { |
| 204 set_kind(kVMField); |
| 205 offset_in_bytes_ = load_field->offset_in_bytes(); |
| 206 } |
| 207 *is_load = true; |
| 208 break; |
| 209 } |
| 210 |
| 211 case Instruction::kStoreInstanceField: { |
| 212 StoreInstanceFieldInstr* store = |
| 213 instr->AsStoreInstanceField(); |
| 214 set_representation(store->RequiredInputRepresentation( |
| 215 StoreInstanceFieldInstr::kValuePos)); |
| 216 instance_ = store->instance()->definition()->OriginalDefinition(); |
| 217 if (!store->field().IsNull()) { |
| 218 set_kind(kField); |
| 219 field_ = &store->field(); |
| 220 } else { |
| 221 set_kind(kVMField); |
| 222 offset_in_bytes_ = store->offset_in_bytes(); |
| 223 } |
| 224 *is_store = true; |
| 225 break; |
| 226 } |
| 227 |
| 228 case Instruction::kLoadStaticField: |
| 229 set_kind(kField); |
| 230 set_representation(instr->AsLoadStaticField()->representation()); |
| 231 field_ = &instr->AsLoadStaticField()->StaticField(); |
| 232 *is_load = true; |
| 233 break; |
| 234 |
| 235 case Instruction::kStoreStaticField: |
| 236 set_kind(kField); |
| 237 set_representation(instr->AsStoreStaticField()-> |
| 238 RequiredInputRepresentation(StoreStaticFieldInstr::kValuePos)); |
| 239 field_ = &instr->AsStoreStaticField()->field(); |
| 240 *is_store = true; |
| 241 break; |
| 242 |
| 243 case Instruction::kLoadIndexed: { |
| 244 LoadIndexedInstr* load_indexed = instr->AsLoadIndexed(); |
| 245 set_representation(load_indexed->representation()); |
| 246 instance_ = load_indexed->array()->definition()->OriginalDefinition(); |
| 247 SetIndex(load_indexed->index()->definition(), |
| 248 load_indexed->index_scale(), |
| 249 load_indexed->class_id()); |
| 250 *is_load = true; |
| 251 break; |
| 252 } |
| 253 |
| 254 case Instruction::kStoreIndexed: { |
| 255 StoreIndexedInstr* store_indexed = instr->AsStoreIndexed(); |
| 256 set_representation(store_indexed-> |
| 257 RequiredInputRepresentation(StoreIndexedInstr::kValuePos)); |
| 258 instance_ = store_indexed->array()->definition()->OriginalDefinition(); |
| 259 SetIndex(store_indexed->index()->definition(), |
| 260 store_indexed->index_scale(), |
| 261 store_indexed->class_id()); |
| 262 *is_store = true; |
| 263 break; |
| 264 } |
| 265 |
| 266 default: |
| 267 break; |
| 268 } |
| 269 } |
| 270 |
| 271 // Create object representing *[*] alias. |
| 272 static Place* CreateAnyInstanceAnyIndexAlias(Zone* zone, |
| 273 intptr_t id) { |
| 274 return Wrap(zone, Place( |
| 275 EncodeFlags(kIndexed, kNoRepresentation, kNoSize), |
| 276 NULL, |
| 277 0), id); |
| 278 } |
| 279 |
| 280 // Return the least generic alias for this place. Given that aliases are
| 281 // essentially sets of places, we define the least generic alias as the
| 282 // smallest alias that contains this place.
| 283 //
| 284 // We obtain such an alias by a simple transformation:
| 285 //
| 286 // - for places that depend on an instance (X.f, X.@offs, X[i], X[C])
| 287 // we drop X if X is not an allocation, because in this case X does not
| 288 // possess an identity, obtaining the aliases *.f, *.@offs, *[i] and *[C]
| 289 // respectively;
| 290 // - for non-constant indexed places X[i] we drop information about the
| 291 // index, obtaining the alias X[*];
| 292 // - we drop information about representation, but keep the element size
| 293 // if any.
| 294 // |
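| // Illustration (not part of the original comment): for an allocation X the
| // place X[i] becomes the alias X[*]; if X is not an allocation, X.f becomes
| // *.f; a constant indexed place such as X[8|4] keeps its byte offset and
| // element size but drops its representation.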
| 295 Place ToAlias() const { |
| 296 return Place( |
| 297 RepresentationBits::update(kNoRepresentation, flags_), |
| 298 (DependsOnInstance() && IsAllocation(instance())) ? instance() : NULL, |
| 299 (kind() == kIndexed) ? 0 : raw_selector_); |
| 300 } |
| 301 |
| 302 bool DependsOnInstance() const { |
| 303 switch (kind()) { |
| 304 case kField: |
| 305 case kVMField: |
| 306 case kIndexed: |
| 307 case kConstantIndexed: |
| 308 return true; |
| 309 |
| 310 case kNone: |
| 311 return false; |
| 312 } |
| 313 |
| 314 UNREACHABLE(); |
| 315 return false; |
| 316 } |
| 317 |
| 318 // Given instance dependent alias X.f, X.@offs, X[C], X[*] return |
| 319 // wild-card dependent alias *.f, *.@offs, *[C] or *[*] respectively. |
| 320 Place CopyWithoutInstance() const { |
| 321 ASSERT(DependsOnInstance()); |
| 322 return Place(flags_, NULL, raw_selector_); |
| 323 } |
| 324 |
| 325 // Given alias X[C] or *[C] return X[*] and *[*] respectively. |
| 326 Place CopyWithoutIndex() const { |
| 327 ASSERT(kind() == kConstantIndexed); |
| 328 return Place(EncodeFlags(kIndexed, kNoRepresentation, kNoSize), |
| 329 instance_, |
| 330 0); |
| 331 } |
| 332 |
| 333 // Given alias X[ByteOffs|S] and a larger element size S', return |
| 334 // alias X[RoundDown(ByteOffs, S')|S'] - this is the byte offset of a larger |
| 335 // typed array element that contains this typed array element. |
| 336 // In other words this method computes the only possible place with the given |
| 337 // size that can alias this place (due to alignment restrictions). |
| 338 // For example for X[9|kInt8] and target size kInt32 we would return |
| 339 // X[8|kInt32]. |
| 340 Place ToLargerElement(ElementSize to) const { |
| 341 ASSERT(kind() == kConstantIndexed); |
| 342 ASSERT(element_size() != kNoSize); |
| 343 ASSERT(element_size() < to); |
| 344 return Place(ElementSizeBits::update(to, flags_), |
| 345 instance_, |
| 346 RoundByteOffset(to, index_constant_)); |
| 347 } |
| 348 |
| 349 |
| 350 intptr_t id() const { return id_; } |
| 351 |
| 352 Kind kind() const { return KindBits::decode(flags_); } |
| 353 |
| 354 Representation representation() const { |
| 355 return RepresentationBits::decode(flags_); |
| 356 } |
| 357 |
| 358 Definition* instance() const { |
| 359 ASSERT(DependsOnInstance()); |
| 360 return instance_; |
| 361 } |
| 362 |
| 363 void set_instance(Definition* def) { |
| 364 ASSERT(DependsOnInstance()); |
| 365 instance_ = def->OriginalDefinition(); |
| 366 } |
| 367 |
| 368 const Field& field() const { |
| 369 ASSERT(kind() == kField); |
| 370 return *field_; |
| 371 } |
| 372 |
| 373 intptr_t offset_in_bytes() const { |
| 374 ASSERT(kind() == kVMField); |
| 375 return offset_in_bytes_; |
| 376 } |
| 377 |
| 378 Definition* index() const { |
| 379 ASSERT(kind() == kIndexed); |
| 380 return index_; |
| 381 } |
| 382 |
| 383 ElementSize element_size() const { |
| 384 return ElementSizeBits::decode(flags_); |
| 385 } |
| 386 |
| 387 intptr_t index_constant() const { |
| 388 ASSERT(kind() == kConstantIndexed); |
| 389 return index_constant_; |
| 390 } |
| 391 |
| 392 static const char* DefinitionName(Definition* def) { |
| 393 if (def == NULL) { |
| 394 return "*"; |
| 395 } else { |
| 396 return Thread::Current()->zone()->PrintToString( |
| 397 "v%" Pd, def->ssa_temp_index()); |
| 398 } |
| 399 } |
| 400 |
| 401 const char* ToCString() const { |
| 402 switch (kind()) { |
| 403 case kNone: |
| 404 return "<none>"; |
| 405 |
| 406 case kField: { |
| 407 const char* field_name = String::Handle(field().name()).ToCString(); |
| 408 if (field().is_static()) { |
| 409 return Thread::Current()->zone()->PrintToString( |
| 410 "<%s>", field_name); |
| 411 } else { |
| 412 return Thread::Current()->zone()->PrintToString( |
| 413 "<%s.%s>", DefinitionName(instance()), field_name); |
| 414 } |
| 415 } |
| 416 |
| 417 case kVMField: |
| 418 return Thread::Current()->zone()->PrintToString( |
| 419 "<%s.@%" Pd ">", |
| 420 DefinitionName(instance()), |
| 421 offset_in_bytes()); |
| 422 |
| 423 case kIndexed: |
| 424 return Thread::Current()->zone()->PrintToString( |
| 425 "<%s[%s]>", |
| 426 DefinitionName(instance()), |
| 427 DefinitionName(index())); |
| 428 |
| 429 case kConstantIndexed: |
| 430 if (element_size() == kNoSize) { |
| 431 return Thread::Current()->zone()->PrintToString( |
| 432 "<%s[%" Pd "]>", |
| 433 DefinitionName(instance()), |
| 434 index_constant()); |
| 435 } else { |
| 436 return Thread::Current()->zone()->PrintToString( |
| 437 "<%s[%" Pd "|%" Pd "]>", |
| 438 DefinitionName(instance()), |
| 439 index_constant(), |
| 440 ElementSizeMultiplier(element_size())); |
| 441 } |
| 442 } |
| 443 UNREACHABLE(); |
| 444 return "<?>"; |
| 445 } |
| 446 |
| 447 // Fields that are considered immutable by load optimization. |
| 448 // Handle static finals as non-final with precompilation because |
| 449 // they may be reset to uninitialized after compilation. |
| 450 bool IsImmutableField() const { |
| 451 return (kind() == kField) |
| 452 && field().is_final() |
| 453 && (!field().is_static() || !FLAG_fields_may_be_reset); |
| 454 } |
| 455 |
| 456 intptr_t Hashcode() const { |
| 457 return (flags_ * 63 + reinterpret_cast<intptr_t>(instance_)) * 31 + |
| 458 FieldHashcode(); |
| 459 } |
| 460 |
| 461 bool Equals(const Place* other) const { |
| 462 return (flags_ == other->flags_) && |
| 463 (instance_ == other->instance_) && |
| 464 SameField(other); |
| 465 } |
| 466 |
| 467 // Create a zone allocated copy of this place and assign given id to it. |
| 468 static Place* Wrap(Zone* zone, const Place& place, intptr_t id); |
| 469 |
| 470 static bool IsAllocation(Definition* defn) { |
| 471 return (defn != NULL) && |
| 472 (defn->IsAllocateObject() || |
| 473 defn->IsCreateArray() || |
| 474 defn->IsAllocateUninitializedContext() || |
| 475 (defn->IsStaticCall() && |
| 476 defn->AsStaticCall()->IsRecognizedFactory())); |
| 477 } |
| 478 |
| 479 private: |
| 480 Place(uword flags, Definition* instance, intptr_t selector) |
| 481 : flags_(flags), |
| 482 instance_(instance), |
| 483 raw_selector_(selector), |
| 484 id_(0) { |
| 485 } |
| 486 |
| 487 bool SameField(const Place* other) const { |
| 488 return (kind() == kField) ? (field().raw() == other->field().raw()) |
| 489 : (offset_in_bytes_ == other->offset_in_bytes_); |
| 490 } |
| 491 |
| 492 intptr_t FieldHashcode() const { |
| 493 return (kind() == kField) ? reinterpret_cast<intptr_t>(field().raw()) |
| 494 : offset_in_bytes_; |
| 495 } |
| 496 |
| 497 void set_representation(Representation rep) { |
| 498 flags_ = RepresentationBits::update(rep, flags_); |
| 499 } |
| 500 |
| 501 void set_kind(Kind kind) { |
| 502 flags_ = KindBits::update(kind, flags_); |
| 503 } |
| 504 |
| 505 void set_element_size(ElementSize scale) { |
| 506 flags_ = ElementSizeBits::update(scale, flags_); |
| 507 } |
| 508 |
| 509 void SetIndex(Definition* index, intptr_t scale, intptr_t class_id) { |
| 510 ConstantInstr* index_constant = index->AsConstant(); |
| 511 if ((index_constant != NULL) && index_constant->value().IsSmi()) { |
| 512 const intptr_t index_value = Smi::Cast(index_constant->value()).Value(); |
| 513 const ElementSize size = ElementSizeFor(class_id); |
| 514 const bool is_typed_data = (size != kNoSize); |
| 515 |
| 516 // If we are accessing typed data, scale the index to get the
| 517 // byte offset. Otherwise ignore the scale.
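| // For illustration (hypothetical values): a Float64List access at index 3
| // with an index scale of 8 gives a scaled byte offset of 24, which is
| // aligned by the 8-byte element size, so the place becomes X[24|8].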
| 518 if (!is_typed_data) { |
| 519 scale = 1; |
| 520 } |
| 521 |
| 522 // Guard against potential multiplication overflow and negative indices. |
| 523 if ((0 <= index_value) && (index_value < (kMaxInt32 / scale))) { |
| 524 const intptr_t scaled_index = index_value * scale; |
| 525 |
| 526 // Guard against unaligned byte offsets. |
| 527 if (!is_typed_data || |
| 528 Utils::IsAligned(scaled_index, ElementSizeMultiplier(size))) { |
| 529 set_kind(kConstantIndexed); |
| 530 set_element_size(size); |
| 531 index_constant_ = scaled_index; |
| 532 return; |
| 533 } |
| 534 } |
| 535 |
| 536 // Fallthrough: create generic _[*] place. |
| 537 } |
| 538 |
| 539 set_kind(kIndexed); |
| 540 index_ = index; |
| 541 } |
| 542 |
| 543 static uword EncodeFlags(Kind kind, Representation rep, ElementSize scale) { |
| 544 ASSERT((kind == kConstantIndexed) || (scale == kNoSize)); |
| 545 return KindBits::encode(kind) | |
| 546 RepresentationBits::encode(rep) | |
| 547 ElementSizeBits::encode(scale); |
| 548 } |
| 549 |
| 550 static ElementSize ElementSizeFor(intptr_t class_id) { |
| 551 switch (class_id) { |
| 552 case kArrayCid: |
| 553 case kImmutableArrayCid: |
| 554 case kOneByteStringCid: |
| 555 case kTwoByteStringCid: |
| 556 // Object arrays and strings do not allow accessing them through |
| 557 // different types. No need to attach scale. |
| 558 return kNoSize; |
| 559 |
| 560 case kTypedDataInt8ArrayCid: |
| 561 case kTypedDataUint8ArrayCid: |
| 562 case kTypedDataUint8ClampedArrayCid: |
| 563 case kExternalTypedDataUint8ArrayCid: |
| 564 case kExternalTypedDataUint8ClampedArrayCid: |
| 565 return kInt8; |
| 566 |
| 567 case kTypedDataInt16ArrayCid: |
| 568 case kTypedDataUint16ArrayCid: |
| 569 return kInt16; |
| 570 |
| 571 case kTypedDataInt32ArrayCid: |
| 572 case kTypedDataUint32ArrayCid: |
| 573 case kTypedDataFloat32ArrayCid: |
| 574 return kInt32; |
| 575 |
| 576 case kTypedDataInt64ArrayCid: |
| 577 case kTypedDataUint64ArrayCid: |
| 578 case kTypedDataFloat64ArrayCid: |
| 579 return kInt64; |
| 580 |
| 581 case kTypedDataInt32x4ArrayCid: |
| 582 case kTypedDataFloat32x4ArrayCid: |
| 583 case kTypedDataFloat64x2ArrayCid: |
| 584 return kInt128; |
| 585 |
| 586 default: |
| 587 UNREACHABLE(); |
| 588 return kNoSize; |
| 589 } |
| 590 } |
| 591 |
| 592 static intptr_t ElementSizeMultiplier(ElementSize size) { |
| 593 return 1 << (static_cast<intptr_t>(size) - static_cast<intptr_t>(kInt8)); |
| 594 } |
| 595 |
| 596 static intptr_t RoundByteOffset(ElementSize size, intptr_t offset) { |
| 597 return offset & ~(ElementSizeMultiplier(size) - 1); |
| 598 } |
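| 
| // Worked example (illustration only): ElementSizeMultiplier(kInt32) == 4,
| // so RoundByteOffset(kInt32, 10) == (10 & ~3) == 8, i.e. an access at byte
| // offset 10 falls inside the 4-byte element that starts at offset 8.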
| 599 |
| 600 class KindBits : public BitField<uword, Kind, 0, 3> {}; |
| 601 class RepresentationBits : |
| 602 public BitField<uword, Representation, KindBits::kNextBit, 11> {}; |
| 603 class ElementSizeBits : |
| 604 public BitField<uword, ElementSize, RepresentationBits::kNextBit, 3> {}; |
| 605 |
| 606 uword flags_; |
| 607 Definition* instance_; |
| 608 union { |
| 609 intptr_t raw_selector_; |
| 610 const Field* field_; |
| 611 intptr_t offset_in_bytes_; |
| 612 intptr_t index_constant_; |
| 613 Definition* index_; |
| 614 }; |
| 615 |
| 616 intptr_t id_; |
| 617 }; |
| 618 |
| 619 |
| 620 class ZonePlace : public ZoneAllocated { |
| 621 public: |
| 622 explicit ZonePlace(const Place& place) : place_(place) { } |
| 623 |
| 624 Place* place() { return &place_; } |
| 625 |
| 626 private: |
| 627 Place place_; |
| 628 }; |
| 629 |
| 630 |
| 631 Place* Place::Wrap(Zone* zone, const Place& place, intptr_t id) { |
| 632 Place* wrapped = (new(zone) ZonePlace(place))->place(); |
| 633 wrapped->id_ = id; |
| 634 return wrapped; |
| 635 } |
| 636 |
| 637 |
| 638 // Correspondence between places connected through outgoing phi moves on the |
| 639 // edge that targets a join.
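| //
| // Example (illustration): given v3 = phi(v1, v2) at a join block, the place
| // v3.f at the join corresponds to v1.f along the edge from predecessor 0 and
| // to v2.f along the edge from predecessor 1; ComputePhiMoves below records
| // these per-edge moves so load forwarding can flow values through the phi.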
| 640 class PhiPlaceMoves : public ZoneAllocated { |
| 641 public: |
| 642 // Record a move from the place with id |from| to the place with id |to| at |
| 643 // the given block. |
| 644 void CreateOutgoingMove(Zone* zone, |
| 645 BlockEntryInstr* block, intptr_t from, intptr_t to) { |
| 646 const intptr_t block_num = block->preorder_number(); |
| 647 while (moves_.length() <= block_num) { |
| 648 moves_.Add(NULL); |
| 649 } |
| 650 |
| 651 if (moves_[block_num] == NULL) { |
| 652 moves_[block_num] = new(zone) ZoneGrowableArray<Move>(5); |
| 653 } |
| 654 |
| 655 moves_[block_num]->Add(Move(from, to)); |
| 656 } |
| 657 |
| 658 class Move { |
| 659 public: |
| 660 Move(intptr_t from, intptr_t to) : from_(from), to_(to) { } |
| 661 |
| 662 intptr_t from() const { return from_; } |
| 663 intptr_t to() const { return to_; } |
| 664 |
| 665 private: |
| 666 intptr_t from_; |
| 667 intptr_t to_; |
| 668 }; |
| 669 |
| 670 typedef const ZoneGrowableArray<Move>* MovesList; |
| 671 |
| 672 MovesList GetOutgoingMoves(BlockEntryInstr* block) const { |
| 673 const intptr_t block_num = block->preorder_number(); |
| 674 return (block_num < moves_.length()) ? |
| 675 moves_[block_num] : NULL; |
| 676 } |
| 677 |
| 678 private: |
| 679 GrowableArray<ZoneGrowableArray<Move>* > moves_; |
| 680 }; |
| 681 |
| 682 |
| 683 // A map from aliases to a set of places sharing the alias. Additionally |
| 684 // carries a set of places that can be aliased by side-effects, essentially |
| 685 // those that are affected by calls. |
| 686 class AliasedSet : public ZoneAllocated { |
| 687 public: |
| 688 AliasedSet(Zone* zone, |
| 689 DirectChainedHashMap<PointerKeyValueTrait<Place> >* places_map, |
| 690 ZoneGrowableArray<Place*>* places, |
| 691 PhiPlaceMoves* phi_moves) |
| 692 : zone_(zone), |
| 693 places_map_(places_map), |
| 694 places_(*places), |
| 695 phi_moves_(phi_moves), |
| 696 aliases_(5), |
| 697 aliases_map_(), |
| 698 typed_data_access_sizes_(), |
| 699 representatives_(), |
| 700 killed_(), |
| 701 aliased_by_effects_(new(zone) BitVector(zone, places->length())) { |
| 702 InsertAlias(Place::CreateAnyInstanceAnyIndexAlias(zone_, |
| 703 kAnyInstanceAnyIndexAlias)); |
| 704 for (intptr_t i = 0; i < places_.length(); i++) { |
| 705 AddRepresentative(places_[i]); |
| 706 } |
| 707 ComputeKillSets(); |
| 708 } |
| 709 |
| 710 intptr_t LookupAliasId(const Place& alias) { |
| 711 const Place* result = aliases_map_.Lookup(&alias); |
| 712 return (result != NULL) ? result->id() : static_cast<intptr_t>(kNoAlias); |
| 713 } |
| 714 |
| 715 BitVector* GetKilledSet(intptr_t alias) { |
| 716 return (alias < killed_.length()) ? killed_[alias] : NULL; |
| 717 } |
| 718 |
| 719 intptr_t max_place_id() const { return places().length(); } |
| 720 bool IsEmpty() const { return max_place_id() == 0; } |
| 721 |
| 722 BitVector* aliased_by_effects() const { return aliased_by_effects_; } |
| 723 |
| 724 const ZoneGrowableArray<Place*>& places() const { |
| 725 return places_; |
| 726 } |
| 727 |
| 728 Place* LookupCanonical(Place* place) const { |
| 729 return places_map_->Lookup(place); |
| 730 } |
| 731 |
| 732 void PrintSet(BitVector* set) { |
| 733 bool comma = false; |
| 734 for (BitVector::Iterator it(set); |
| 735 !it.Done(); |
| 736 it.Advance()) { |
| 737 if (comma) { |
| 738 THR_Print(", "); |
| 739 } |
| 740 THR_Print("%s", places_[it.Current()]->ToCString()); |
| 741 comma = true; |
| 742 } |
| 743 } |
| 744 |
| 745 const PhiPlaceMoves* phi_moves() const { return phi_moves_; } |
| 746 |
| 747 void RollbackAliasedIdentites() { |
| 748 for (intptr_t i = 0; i < identity_rollback_.length(); ++i) { |
| 749 identity_rollback_[i]->SetIdentity(AliasIdentity::Unknown()); |
| 750 } |
| 751 } |
| 752 |
| 753 // Returns false if the result of an allocation instruction can't be aliased |
| 754 // by another SSA variable and true otherwise. |
| 755 bool CanBeAliased(Definition* alloc) { |
| 756 if (!Place::IsAllocation(alloc)) { |
| 757 return true; |
| 758 } |
| 759 |
| 760 if (alloc->Identity().IsUnknown()) { |
| 761 ComputeAliasing(alloc); |
| 762 } |
| 763 |
| 764 return !alloc->Identity().IsNotAliased(); |
| 765 } |
| 766 |
| 767 enum { |
| 768 kNoAlias = 0 |
| 769 }; |
| 770 |
| 771 private: |
| 772 enum { |
| 773 // Artificial alias that is used to collect all representatives of the |
| 774 // *[C], X[C] aliases for arbitrary C. |
| 775 kAnyConstantIndexedAlias = 1, |
| 776 |
| 777 // Artificial alias that is used to collect all representatives of |
| 778 // *[C] alias for arbitrary C. |
| 779 kUnknownInstanceConstantIndexedAlias = 2, |
| 780 |
| 781 // Artificial alias that is used to collect all representatives of |
| 782 // X[*] alias for all X. |
| 783 kAnyAllocationIndexedAlias = 3, |
| 784 |
| 785 // *[*] alias. |
| 786 kAnyInstanceAnyIndexAlias = 4 |
| 787 }; |
| 788 |
| 789 // Compute the least generic alias for the place and assign an alias id to it.
| 790 void AddRepresentative(Place* place) { |
| 791 if (!place->IsImmutableField()) { |
| 792 const Place* alias = CanonicalizeAlias(place->ToAlias()); |
| 793 EnsureSet(&representatives_, alias->id())->Add(place->id()); |
| 794 |
| 795 // Update cumulative representative sets that are used during |
| 796 // killed sets computation. |
| 797 if (alias->kind() == Place::kConstantIndexed) { |
| 798 if (CanBeAliased(alias->instance())) { |
| 799 EnsureSet(&representatives_, kAnyConstantIndexedAlias)-> |
| 800 Add(place->id()); |
| 801 } |
| 802 |
| 803 if (alias->instance() == NULL) { |
| 804 EnsureSet(&representatives_, kUnknownInstanceConstantIndexedAlias)-> |
| 805 Add(place->id()); |
| 806 } |
| 807 |
| 808 // Collect all element sizes used to access TypedData arrays in |
| 809 // the function. This is used to skip sizes without representatives |
| 810 // when computing kill sets. |
| 811 if (alias->element_size() != Place::kNoSize) { |
| 812 typed_data_access_sizes_.Add(alias->element_size()); |
| 813 } |
| 814 } else if ((alias->kind() == Place::kIndexed) && |
| 815 CanBeAliased(place->instance())) { |
| 816 EnsureSet(&representatives_, kAnyAllocationIndexedAlias)-> |
| 817 Add(place->id()); |
| 818 } |
| 819 |
| 820 if (!IsIndependentFromEffects(place)) { |
| 821 aliased_by_effects_->Add(place->id()); |
| 822 } |
| 823 } |
| 824 } |
| 825 |
| 826 void ComputeKillSets() { |
| 827 for (intptr_t i = 0; i < aliases_.length(); ++i) { |
| 828 const Place* alias = aliases_[i]; |
| 829 // Add all representatives to the kill set. |
| 830 AddAllRepresentatives(alias->id(), alias->id()); |
| 831 ComputeKillSet(alias); |
| 832 } |
| 833 |
| 834 if (FLAG_trace_load_optimization) { |
| 835 THR_Print("Aliases KILL sets:\n"); |
| 836 for (intptr_t i = 0; i < aliases_.length(); ++i) { |
| 837 const Place* alias = aliases_[i]; |
| 838 BitVector* kill = GetKilledSet(alias->id()); |
| 839 |
| 840 THR_Print("%s: ", alias->ToCString()); |
| 841 if (kill != NULL) { |
| 842 PrintSet(kill); |
| 843 } |
| 844 THR_Print("\n"); |
| 845 } |
| 846 } |
| 847 } |
| 848 |
| 849 void InsertAlias(const Place* alias) { |
| 850 aliases_map_.Insert(alias); |
| 851 aliases_.Add(alias); |
| 852 } |
| 853 |
| 854 const Place* CanonicalizeAlias(const Place& alias) { |
| 855 const Place* canonical = aliases_map_.Lookup(&alias); |
| 856 if (canonical == NULL) { |
| 857 canonical = Place::Wrap(zone_, |
| 858 alias, |
| 859 kAnyInstanceAnyIndexAlias + aliases_.length()); |
| 860 InsertAlias(canonical); |
| 861 } |
| 862 ASSERT(aliases_map_.Lookup(&alias) == canonical); |
| 863 return canonical; |
| 864 } |
| 865 |
| 866 BitVector* GetRepresentativesSet(intptr_t alias) { |
| 867 return (alias < representatives_.length()) ? representatives_[alias] : NULL; |
| 868 } |
| 869 |
| 870 BitVector* EnsureSet(GrowableArray<BitVector*>* sets, |
| 871 intptr_t alias) { |
| 872 while (sets->length() <= alias) { |
| 873 sets->Add(NULL); |
| 874 } |
| 875 |
| 876 BitVector* set = (*sets)[alias]; |
| 877 if (set == NULL) { |
| 878 (*sets)[alias] = set = new(zone_) BitVector(zone_, max_place_id()); |
| 879 } |
| 880 return set; |
| 881 } |
| 882 |
| 883 void AddAllRepresentatives(const Place* to, intptr_t from) { |
| 884 AddAllRepresentatives(to->id(), from); |
| 885 } |
| 886 |
| 887 void AddAllRepresentatives(intptr_t to, intptr_t from) { |
| 888 BitVector* from_set = GetRepresentativesSet(from); |
| 889 if (from_set != NULL) { |
| 890 EnsureSet(&killed_, to)->AddAll(from_set); |
| 891 } |
| 892 } |
| 893 |
| 894 void CrossAlias(const Place* to, const Place& from) { |
| 895 const intptr_t from_id = LookupAliasId(from); |
| 896 if (from_id == kNoAlias) { |
| 897 return; |
| 898 } |
| 899 CrossAlias(to, from_id); |
| 900 } |
| 901 |
| 902 void CrossAlias(const Place* to, intptr_t from) { |
| 903 AddAllRepresentatives(to->id(), from); |
| 904 AddAllRepresentatives(from, to->id()); |
| 905 } |
| 906 |
| 907 // When computing kill sets we let the less generic alias insert its
| 908 // representatives into the more generic alias's kill set. For example,
| 909 // when visiting the alias X[*], instead of searching for all aliases X[C]
| 910 // and inserting their representatives into the kill set for X[*], we update
| 911 // the kill set for X[*] each time we visit a new X[C] for some C.
| 912 // There is an exception however: if both aliases are parametric, like *[C]
| 913 // and X[*], which cross-alias when X is an aliased allocation, then we use
| 914 // artificial aliases that contain all possible representatives for the given
| 915 // alias for any value of the parameter to compute the resulting kill set.
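| // Illustration: when X is an aliased allocation, the kill set for X[*] picks
| // up all *[C] representatives from kUnknownInstanceConstantIndexedAlias
| // rather than by enumerating every constant C used in the function.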
| 916 void ComputeKillSet(const Place* alias) { |
| 917 switch (alias->kind()) { |
| 918 case Place::kIndexed: // Either *[*] or X[*] alias. |
| 919 if (alias->instance() == NULL) { |
| 920 // *[*] aliases with X[*], X[C], *[C]. |
| 921 AddAllRepresentatives(alias, kAnyConstantIndexedAlias); |
| 922 AddAllRepresentatives(alias, kAnyAllocationIndexedAlias); |
| 923 } else if (CanBeAliased(alias->instance())) { |
| 924 // X[*] aliases with X[C]. |
| 925 // If X can be aliased then X[*] also aliases with *[C], *[*]. |
| 926 CrossAlias(alias, kAnyInstanceAnyIndexAlias); |
| 927 AddAllRepresentatives(alias, kUnknownInstanceConstantIndexedAlias); |
| 928 } |
| 929 break; |
| 930 |
| 931 case Place::kConstantIndexed: // Either X[C] or *[C] alias. |
| 932 if (alias->element_size() != Place::kNoSize) { |
| 933 const bool has_aliased_instance = |
| 934 (alias->instance() != NULL) && CanBeAliased(alias->instance()); |
| 935 |
| 936 // If this is a TypedData access then X[C|S] aliases larger elements |
| 937 // covering this one X[RoundDown(C, S')|S'] for all S' > S and |
| 938 // all smaller elements being covered by this one X[C'|S'] for |
| 939 // some S' < S and all C' such that C = RoundDown(C', S). |
| 940 // In the loop below it's enough to only propagate aliasing to |
| 941 // larger aliases because propagation is symmetric: smaller aliases |
| 942 // (if there are any) would update kill set for this alias when they |
| 943 // are visited. |
| 944 for (intptr_t i = static_cast<intptr_t>(alias->element_size()) + 1; |
| 945 i <= Place::kLargestElementSize; |
| 946 i++) { |
| 947 // Skip element sizes that are guaranteed to have no representatives.
| 948 if (!typed_data_access_sizes_.Contains(alias->element_size())) { |
| 949 continue; |
| 950 } |
| 951 |
| 952 // X[C|S] aliases with X[RoundDown(C, S')|S'] and likewise |
| 953 // *[C|S] aliases with *[RoundDown(C, S')|S']. |
| 954 const Place larger_alias = |
| 955 alias->ToLargerElement(static_cast<Place::ElementSize>(i)); |
| 956 CrossAlias(alias, larger_alias); |
| 957 if (has_aliased_instance) { |
| 958 // If X is an aliased instance then X[C|S] aliases |
| 959 // with *[RoundDown(C, S')|S']. |
| 960 CrossAlias(alias, larger_alias.CopyWithoutInstance()); |
| 961 } |
| 962 } |
| 963 } |
| 964 |
| 965 if (alias->instance() == NULL) { |
| 966 // *[C] aliases with X[C], X[*], *[*]. |
| 967 AddAllRepresentatives(alias, kAnyAllocationIndexedAlias); |
| 968 CrossAlias(alias, kAnyInstanceAnyIndexAlias); |
| 969 } else { |
| 970 // X[C] aliases with X[*]. |
| 971 // If X can be aliased then X[C] also aliases with *[C], *[*]. |
| 972 CrossAlias(alias, alias->CopyWithoutIndex()); |
| 973 if (CanBeAliased(alias->instance())) { |
| 974 CrossAlias(alias, alias->CopyWithoutInstance()); |
| 975 CrossAlias(alias, kAnyInstanceAnyIndexAlias); |
| 976 } |
| 977 } |
| 978 break; |
| 979 |
| 980 case Place::kField: |
| 981 case Place::kVMField: |
| 982 if (CanBeAliased(alias->instance())) { |
| 983 // X.f or X.@offs alias with *.f and *.@offs respectively. |
| 984 CrossAlias(alias, alias->CopyWithoutInstance()); |
| 985 } |
| 986 break; |
| 987 |
| 988 case Place::kNone: |
| 989 UNREACHABLE(); |
| 990 } |
| 991 } |
| 992 |
| 993 // Returns true if the given load is unaffected by external side-effects. |
| 994 // This essentially means that no stores to the same location can |
| 995 // occur in other functions. |
| 996 bool IsIndependentFromEffects(Place* place) { |
| 997 if (place->IsImmutableField()) { |
| 998 // Note that we can't use LoadField's is_immutable attribute here because |
| 999 // some VM-fields (those that have no corresponding Field object and |
| 1000 // are accessed through offset alone) can share an offset but have different
| 1001 // immutability properties.
| 1002 // One example is the length property of growable and fixed-size lists. If
| 1003 // loads of these two properties occur in the same function for the same
| 1004 // receiver then they will get the same expression number. However,
| 1005 // immutability of the length of a fixed-size list does not mean that
| 1006 // the length of a growable list is also immutable. Thus we make a
| 1007 // conservative assumption for the VM-properties.
| 1008 // TODO(vegorov): disambiguate immutable and non-immutable VM-fields with |
| 1009 // the same offset e.g. through recognized kind. |
| 1010 return true; |
| 1011 } |
| 1012 |
| 1013 return ((place->kind() == Place::kField) || |
| 1014 (place->kind() == Place::kVMField)) && |
| 1015 !CanBeAliased(place->instance()); |
| 1016 } |
| 1017 |
| 1018 // Returns true if there are direct loads from the given place. |
| 1019 bool HasLoadsFromPlace(Definition* defn, const Place* place) { |
| 1020 ASSERT((place->kind() == Place::kField) || |
| 1021 (place->kind() == Place::kVMField)); |
| 1022 |
| 1023 for (Value* use = defn->input_use_list(); |
| 1024 use != NULL; |
| 1025 use = use->next_use()) { |
| 1026 Instruction* instr = use->instruction(); |
| 1027 if ((instr->IsRedefinition() || |
| 1028 instr->IsAssertAssignable()) && |
| 1029 HasLoadsFromPlace(instr->AsDefinition(), place)) { |
| 1030 return true; |
| 1031 } |
| 1032 bool is_load = false, is_store = false;
| 1033 Place load_place(instr, &is_load, &is_store); |
| 1034 |
| 1035 if (is_load && load_place.Equals(place)) { |
| 1036 return true; |
| 1037 } |
| 1038 } |
| 1039 |
| 1040 return false; |
| 1041 } |
| 1042 |
| 1043 // Check if any use of the definition can create an alias. |
| 1044 // Can add more objects into aliasing_worklist_. |
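| // Illustration (hypothetical IR):
| //   v0 <- AllocateObject(...)
| //   v0.f <- v1          ;; v0 used as the instance: does not create an alias
| //   PushArgument(v0)    ;; v0 escapes into a call: creates an alias
| // Here the PushArgument use makes this function return true for v0.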
| 1045 bool AnyUseCreatesAlias(Definition* defn) { |
| 1046 for (Value* use = defn->input_use_list(); |
| 1047 use != NULL; |
| 1048 use = use->next_use()) { |
| 1049 Instruction* instr = use->instruction(); |
| 1050 if (instr->IsPushArgument() || |
| 1051 (instr->IsStoreIndexed() |
| 1052 && (use->use_index() == StoreIndexedInstr::kValuePos)) || |
| 1053 instr->IsStoreStaticField() || |
| 1054 instr->IsPhi()) { |
| 1055 return true; |
| 1056 } else if ((instr->IsAssertAssignable() || instr->IsRedefinition()) && |
| 1057 AnyUseCreatesAlias(instr->AsDefinition())) { |
| 1058 return true; |
| 1059 } else if ((instr->IsStoreInstanceField() |
| 1060 && (use->use_index() != StoreInstanceFieldInstr::kInstancePos))) { |
| 1061 ASSERT(use->use_index() == StoreInstanceFieldInstr::kValuePos); |
| 1062 // If we store this value into an object that is not aliased itself |
| 1063 // and we never load again then the store does not create an alias. |
| 1064 StoreInstanceFieldInstr* store = instr->AsStoreInstanceField(); |
| 1065 Definition* instance = |
| 1066 store->instance()->definition()->OriginalDefinition(); |
| 1067 if (Place::IsAllocation(instance) && |
| 1068 !instance->Identity().IsAliased()) { |
| 1069 bool is_load, is_store; |
| 1070 Place store_place(instr, &is_load, &is_store); |
| 1071 |
| 1072 if (!HasLoadsFromPlace(instance, &store_place)) { |
| 1073 // No loads found that match this store. If it is not yet known
| 1074 // whether the object is aliased, optimistically assume it is not, but
| 1075 // add it to the worklist to check its uses transitively.
| 1076 if (instance->Identity().IsUnknown()) { |
| 1077 instance->SetIdentity(AliasIdentity::NotAliased()); |
| 1078 aliasing_worklist_.Add(instance); |
| 1079 } |
| 1080 continue; |
| 1081 } |
| 1082 } |
| 1083 return true; |
| 1084 } |
| 1085 } |
| 1086 return false; |
| 1087 } |
| 1088 |
| 1089 // Mark any value stored into the given object as potentially aliased. |
| 1090 void MarkStoredValuesEscaping(Definition* defn) { |
| 1091 // Find all stores into this object. |
| 1092 for (Value* use = defn->input_use_list(); |
| 1093 use != NULL; |
| 1094 use = use->next_use()) { |
| 1095 if (use->instruction()->IsRedefinition() || |
| 1096 use->instruction()->IsAssertAssignable()) { |
| 1097 MarkStoredValuesEscaping(use->instruction()->AsDefinition()); |
| 1098 continue; |
| 1099 } |
| 1100 if ((use->use_index() == StoreInstanceFieldInstr::kInstancePos) && |
| 1101 use->instruction()->IsStoreInstanceField()) { |
| 1102 StoreInstanceFieldInstr* store = |
| 1103 use->instruction()->AsStoreInstanceField(); |
| 1104 Definition* value = store->value()->definition()->OriginalDefinition(); |
| 1105 if (value->Identity().IsNotAliased()) { |
| 1106 value->SetIdentity(AliasIdentity::Aliased()); |
| 1107 identity_rollback_.Add(value); |
| 1108 |
| 1109 // Add to worklist to propagate the mark transitively. |
| 1110 aliasing_worklist_.Add(value); |
| 1111 } |
| 1112 } |
| 1113 } |
| 1114 } |
| 1115 |
| 1116 // Determine if the given definition can't be aliased. |
| 1117 void ComputeAliasing(Definition* alloc) { |
| 1118 ASSERT(Place::IsAllocation(alloc)); |
| 1119 ASSERT(alloc->Identity().IsUnknown()); |
| 1120 ASSERT(aliasing_worklist_.is_empty()); |
| 1121 |
| 1122 alloc->SetIdentity(AliasIdentity::NotAliased()); |
| 1123 aliasing_worklist_.Add(alloc); |
| 1124 |
| 1125 while (!aliasing_worklist_.is_empty()) { |
| 1126 Definition* defn = aliasing_worklist_.RemoveLast(); |
| 1127 ASSERT(Place::IsAllocation(defn)); |
| 1128 // If the definition in the worklist was optimistically marked as |
| 1129 // not-aliased, check that the optimistic assumption still holds: check if
| 1130 // any of its uses can create an alias. |
| 1131 if (!defn->Identity().IsAliased() && AnyUseCreatesAlias(defn)) { |
| 1132 defn->SetIdentity(AliasIdentity::Aliased()); |
| 1133 identity_rollback_.Add(defn); |
| 1134 } |
| 1135 |
| 1136 // If the allocation site is marked as aliased, conservatively mark
| 1137 // any values stored into the object as aliased too.
| 1138 if (defn->Identity().IsAliased()) { |
| 1139 MarkStoredValuesEscaping(defn); |
| 1140 } |
| 1141 } |
| 1142 } |
| 1143 |
| 1144 Zone* zone_; |
| 1145 |
| 1146 DirectChainedHashMap<PointerKeyValueTrait<Place> >* places_map_; |
| 1147 |
| 1148 const ZoneGrowableArray<Place*>& places_; |
| 1149 |
| 1150 const PhiPlaceMoves* phi_moves_; |
| 1151 |
| 1152 // A list of all seen aliases and a map that allows looking up canonical |
| 1153 // alias object. |
| 1154 GrowableArray<const Place*> aliases_; |
| 1155 DirectChainedHashMap<PointerKeyValueTrait<const Place> > aliases_map_; |
| 1156 |
| 1157 SmallSet<Place::ElementSize> typed_data_access_sizes_; |
| 1158 |
| 1159 // Maps alias id to set of ids of places representing the alias. |
| 1160 // Place represents an alias if this alias is least generic alias for |
| 1161 // the place. |
| 1162 // (see ToAlias for the definition of least generic alias). |
| 1163 GrowableArray<BitVector*> representatives_; |
| 1164 |
| 1165 // Maps alias id to set of ids of places aliased. |
| 1166 GrowableArray<BitVector*> killed_; |
| 1167 |
| 1168 // Set of ids of places that can be affected by side-effects other than |
| 1169 // explicit stores (i.e. through calls). |
| 1170 BitVector* aliased_by_effects_; |
| 1171 |
| 1172 // Worklist used during alias analysis. |
| 1173 GrowableArray<Definition*> aliasing_worklist_; |
| 1174 |
| 1175 // List of definitions that had their identity set to Aliased. At the end |
| 1176 // of load optimization their identity will be rolled back to Unknown to |
| 1177 // avoid treating them as Aliased at later stages without checking first,
| 1178 // because optimizations can potentially eliminate the instructions that
| 1179 // caused the aliasing.
| 1180 GrowableArray<Definition*> identity_rollback_; |
| 1181 }; |
| 1182 |
| 1183 |
| 1184 static Definition* GetStoredValue(Instruction* instr) { |
| 1185 if (instr->IsStoreIndexed()) { |
| 1186 return instr->AsStoreIndexed()->value()->definition(); |
| 1187 } |
| 1188 |
| 1189 StoreInstanceFieldInstr* store_instance_field = instr->AsStoreInstanceField(); |
| 1190 if (store_instance_field != NULL) { |
| 1191 return store_instance_field->value()->definition(); |
| 1192 } |
| 1193 |
| 1194 StoreStaticFieldInstr* store_static_field = instr->AsStoreStaticField(); |
| 1195 if (store_static_field != NULL) { |
| 1196 return store_static_field->value()->definition(); |
| 1197 } |
| 1198 |
| 1199 UNREACHABLE(); // Should only be called for supported store instructions. |
| 1200 return NULL; |
| 1201 } |
| 1202 |
| 1203 |
| 1204 static bool IsPhiDependentPlace(Place* place) { |
| 1205 return ((place->kind() == Place::kField) || |
| 1206 (place->kind() == Place::kVMField)) && |
| 1207 (place->instance() != NULL) && |
| 1208 place->instance()->IsPhi(); |
| 1209 } |
| 1210 |
| 1211 |
| 1212 // For each place that depends on a phi, ensure that the equivalent places
| 1213 // corresponding to the phi inputs are numbered, and record outgoing phi moves
| 1214 // for each predecessor block that establish the correspondence between the
| 1215 // phi dependent place and the phi input's place that is flowing in.
| 1216 static PhiPlaceMoves* ComputePhiMoves( |
| 1217 DirectChainedHashMap<PointerKeyValueTrait<Place> >* map, |
| 1218 ZoneGrowableArray<Place*>* places) { |
| 1219 Thread* thread = Thread::Current(); |
| 1220 Zone* zone = thread->zone(); |
| 1221 PhiPlaceMoves* phi_moves = new(zone) PhiPlaceMoves(); |
| 1222 |
| 1223 for (intptr_t i = 0; i < places->length(); i++) { |
| 1224 Place* place = (*places)[i]; |
| 1225 |
| 1226 if (IsPhiDependentPlace(place)) { |
| 1227 PhiInstr* phi = place->instance()->AsPhi(); |
| 1228 BlockEntryInstr* block = phi->GetBlock(); |
| 1229 |
| 1230 if (FLAG_trace_optimization) { |
| 1231 THR_Print("phi dependent place %s\n", place->ToCString()); |
| 1232 } |
| 1233 |
| 1234 Place input_place(*place); |
| 1235 for (intptr_t j = 0; j < phi->InputCount(); j++) { |
| 1236 input_place.set_instance(phi->InputAt(j)->definition()); |
| 1237 |
| 1238 Place* result = map->Lookup(&input_place); |
| 1239 if (result == NULL) { |
| 1240 result = Place::Wrap(zone, input_place, places->length()); |
| 1241 map->Insert(result); |
| 1242 places->Add(result); |
| 1243 if (FLAG_trace_optimization) { |
| 1244 THR_Print(" adding place %s as %" Pd "\n", |
| 1245 result->ToCString(), |
| 1246 result->id()); |
| 1247 } |
| 1248 } |
| 1249 phi_moves->CreateOutgoingMove(zone, |
| 1250 block->PredecessorAt(j), |
| 1251 result->id(), |
| 1252 place->id()); |
| 1253 } |
| 1254 } |
| 1255 } |
| 1256 |
| 1257 return phi_moves; |
| 1258 } |
| 1259 |
| 1260 |
| 1261 enum CSEMode { |
| 1262 kOptimizeLoads, |
| 1263 kOptimizeStores |
| 1264 }; |
| 1265 |
| 1266 |
| 1267 static AliasedSet* NumberPlaces( |
| 1268 FlowGraph* graph, |
| 1269 DirectChainedHashMap<PointerKeyValueTrait<Place> >* map, |
| 1270 CSEMode mode) { |
| 1271 // Loads representing different expression ids will be collected and |
| 1272 // used to build per offset kill sets. |
| 1273 Zone* zone = graph->zone(); |
| 1274 ZoneGrowableArray<Place*>* places = |
| 1275 new(zone) ZoneGrowableArray<Place*>(10); |
| 1276 |
| 1277 bool has_loads = false; |
| 1278 bool has_stores = false; |
| 1279 for (BlockIterator it = graph->reverse_postorder_iterator(); |
| 1280 !it.Done(); |
| 1281 it.Advance()) { |
| 1282 BlockEntryInstr* block = it.Current(); |
| 1283 |
| 1284 for (ForwardInstructionIterator instr_it(block); |
| 1285 !instr_it.Done(); |
| 1286 instr_it.Advance()) { |
| 1287 Instruction* instr = instr_it.Current(); |
| 1288 Place place(instr, &has_loads, &has_stores); |
| 1289 if (place.kind() == Place::kNone) { |
| 1290 continue; |
| 1291 } |
| 1292 |
| 1293 Place* result = map->Lookup(&place); |
| 1294 if (result == NULL) { |
| 1295 result = Place::Wrap(zone, place, places->length()); |
| 1296 map->Insert(result); |
| 1297 places->Add(result); |
| 1298 |
| 1299 if (FLAG_trace_optimization) { |
| 1300 THR_Print("numbering %s as %" Pd "\n", |
| 1301 result->ToCString(), |
| 1302 result->id()); |
| 1303 } |
| 1304 } |
| 1305 |
| 1306 instr->set_place_id(result->id()); |
| 1307 } |
| 1308 } |
| 1309 |
| 1310 if ((mode == kOptimizeLoads) && !has_loads) { |
| 1311 return NULL; |
| 1312 } |
| 1313 if ((mode == kOptimizeStores) && !has_stores) { |
| 1314 return NULL; |
| 1315 } |
| 1316 |
| 1317 PhiPlaceMoves* phi_moves = ComputePhiMoves(map, places); |
| 1318 |
| 1319 // Build aliasing sets mapping aliases to loads. |
| 1320 return new(zone) AliasedSet(zone, map, places, phi_moves); |
| 1321 } |
| 1322 |
| 1323 |
| 1324 // Load instructions handled by load elimination. |
| 1325 static bool IsLoadEliminationCandidate(Instruction* instr) { |
| 1326 return instr->IsLoadField() |
| 1327 || instr->IsLoadIndexed() |
| 1328 || instr->IsLoadStaticField(); |
| 1329 } |
| 1330 |
| 1331 |
| 1332 static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets, |
| 1333 intptr_t loop_header_index, |
| 1334 Instruction* instr) { |
| 1335 return IsLoadEliminationCandidate(instr) && |
| 1336 (sets != NULL) && |
| 1337 instr->HasPlaceId() && |
| 1338 ((*sets)[loop_header_index] != NULL) && |
| 1339 (*sets)[loop_header_index]->Contains(instr->place_id()); |
| 1340 } |
| 1341 |
| 1342 |
| 1343 LICM::LICM(FlowGraph* flow_graph) : flow_graph_(flow_graph) { |
| 1344 ASSERT(flow_graph->is_licm_allowed()); |
| 1345 } |
| 1346 |
| 1347 |
| 1348 void LICM::Hoist(ForwardInstructionIterator* it, |
| 1349 BlockEntryInstr* pre_header, |
| 1350 Instruction* current) { |
| 1351 if (current->IsCheckClass()) { |
| 1352 current->AsCheckClass()->set_licm_hoisted(true); |
| 1353 } else if (current->IsCheckSmi()) { |
| 1354 current->AsCheckSmi()->set_licm_hoisted(true); |
| 1355 } else if (current->IsCheckEitherNonSmi()) { |
| 1356 current->AsCheckEitherNonSmi()->set_licm_hoisted(true); |
| 1357 } else if (current->IsCheckArrayBound()) { |
| 1358 current->AsCheckArrayBound()->set_licm_hoisted(true); |
| 1359 } |
| 1360 if (FLAG_trace_optimization) { |
| 1361 THR_Print("Hoisting instruction %s:%" Pd " from B%" Pd " to B%" Pd "\n", |
| 1362 current->DebugName(), |
| 1363 current->GetDeoptId(), |
| 1364 current->GetBlock()->block_id(), |
| 1365 pre_header->block_id()); |
| 1366 } |
| 1367 // Move the instruction out of the loop. |
| 1368 current->RemoveEnvironment(); |
| 1369 if (it != NULL) { |
| 1370 it->RemoveCurrentFromGraph(); |
| 1371 } else { |
| 1372 current->RemoveFromGraph(); |
| 1373 } |
| 1374 GotoInstr* last = pre_header->last_instruction()->AsGoto(); |
| 1375 // Using kind kEffect will not assign a fresh ssa temporary index. |
| 1376 flow_graph()->InsertBefore(last, current, last->env(), FlowGraph::kEffect); |
| 1377 current->CopyDeoptIdFrom(*last); |
| 1378 } |
| 1379 |
| 1380 |
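| // Illustration (hypothetical loop) of the transformation below: for an
| // induction variable
| //   v2 <- phi(v0 /* from the pre-header, type dynamic */, v3 /* v2 + 1, smi */)
| // a CheckSmi that uses v2 inside the loop is hoisted into the pre-header and
| // rebound to check v0 instead, after which the phi can be retyped as smi.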
| 1381 void LICM::TrySpecializeSmiPhi(PhiInstr* phi, |
| 1382 BlockEntryInstr* header, |
| 1383 BlockEntryInstr* pre_header) { |
| 1384 if (phi->Type()->ToCid() == kSmiCid) { |
| 1385 return; |
| 1386 } |
| 1387 |
| 1388 // Check if there is only a single kDynamicCid input to the phi that |
| 1389 // comes from the pre-header. |
| 1390 const intptr_t kNotFound = -1; |
| 1391 intptr_t non_smi_input = kNotFound; |
| 1392 for (intptr_t i = 0; i < phi->InputCount(); ++i) { |
| 1393 Value* input = phi->InputAt(i); |
| 1394 if (input->Type()->ToCid() != kSmiCid) { |
| 1395 if ((non_smi_input != kNotFound) || |
| 1396 (input->Type()->ToCid() != kDynamicCid)) { |
| 1397 // There are multiple kDynamicCid inputs or there is an input that is |
| 1398 // known to be non-smi. |
| 1399 return; |
| 1400 } else { |
| 1401 non_smi_input = i; |
| 1402 } |
| 1403 } |
| 1404 } |
| 1405 |
| 1406 if ((non_smi_input == kNotFound) || |
| 1407 (phi->block()->PredecessorAt(non_smi_input) != pre_header)) { |
| 1408 return; |
| 1409 } |
| 1410 |
| 1411 CheckSmiInstr* check = NULL; |
| 1412 for (Value* use = phi->input_use_list(); |
| 1413 (use != NULL) && (check == NULL); |
| 1414 use = use->next_use()) { |
| 1415 check = use->instruction()->AsCheckSmi(); |
| 1416 } |
| 1417 |
| 1418 if (check == NULL) { |
| 1419 return; |
| 1420 } |
| 1421 |
| 1422 // Hoist the CheckSmi instruction and make this phi a smi one.
| 1423 Hoist(NULL, pre_header, check); |
| 1424 |
| 1425 // Replace the value we are checking with the phi's input.
| 1426 check->value()->BindTo(phi->InputAt(non_smi_input)->definition()); |
| 1427 |
| 1428 phi->UpdateType(CompileType::FromCid(kSmiCid)); |
| 1429 } |
| 1430 |
| 1431 |
| 1432 void LICM::OptimisticallySpecializeSmiPhis() { |
| 1433 if (!flow_graph()->function().allows_hoisting_check_class() || |
| 1434 FLAG_precompilation) { |
| 1435 // Do not hoist any checks: we either deoptimized on a hoisted check
| 1436 // before, or we are compiling precompiled code where we can't do
| 1437 // optimistic hoisting of checks.
| 1438 return; |
| 1439 } |
| 1440 |
| 1441 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = |
| 1442 flow_graph()->LoopHeaders(); |
| 1443 |
| 1444 for (intptr_t i = 0; i < loop_headers.length(); ++i) { |
| 1445 JoinEntryInstr* header = loop_headers[i]->AsJoinEntry(); |
| 1446 // Skip loops that don't have a pre-header block.
| 1447 BlockEntryInstr* pre_header = header->ImmediateDominator(); |
| 1448 if (pre_header == NULL) continue; |
| 1449 |
| 1450 for (PhiIterator it(header); !it.Done(); it.Advance()) { |
| 1451 TrySpecializeSmiPhi(it.Current(), header, pre_header); |
| 1452 } |
| 1453 } |
| 1454 } |
| 1455 |
| 1456 |
| 1457 void LICM::Optimize() { |
| 1458 if (!flow_graph()->function().allows_hoisting_check_class()) { |
| 1459 // Do not hoist any. |
| 1460 return; |
| 1461 } |
| 1462 |
| 1463 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = |
| 1464 flow_graph()->LoopHeaders(); |
| 1465 |
| 1466 ZoneGrowableArray<BitVector*>* loop_invariant_loads = |
| 1467 flow_graph()->loop_invariant_loads(); |
| 1468 |
| 1469 BlockEffects* block_effects = flow_graph()->block_effects(); |
| 1470 |
| 1471 for (intptr_t i = 0; i < loop_headers.length(); ++i) { |
| 1472 BlockEntryInstr* header = loop_headers[i]; |
| 1473 // Skip loops that don't have a pre-header block.
| 1474 BlockEntryInstr* pre_header = header->ImmediateDominator(); |
| 1475 if (pre_header == NULL) continue; |
| 1476 |
| 1477 for (BitVector::Iterator loop_it(header->loop_info()); |
| 1478 !loop_it.Done(); |
| 1479 loop_it.Advance()) { |
| 1480 BlockEntryInstr* block = flow_graph()->preorder()[loop_it.Current()]; |
| 1481 for (ForwardInstructionIterator it(block); |
| 1482 !it.Done(); |
| 1483 it.Advance()) { |
| 1484 Instruction* current = it.Current(); |
| 1485 if ((current->AllowsCSE() && |
| 1486 block_effects->CanBeMovedTo(current, pre_header)) || |
| 1487 IsLoopInvariantLoad(loop_invariant_loads, i, current)) { |
| 1488 bool inputs_loop_invariant = true; |
| 1489 for (int i = 0; i < current->InputCount(); ++i) { |
| 1490 Definition* input_def = current->InputAt(i)->definition(); |
| 1491 if (!input_def->GetBlock()->Dominates(pre_header)) { |
| 1492 inputs_loop_invariant = false; |
| 1493 break; |
| 1494 } |
| 1495 } |
| 1496 if (inputs_loop_invariant && |
| 1497 !current->IsAssertAssignable() && |
| 1498 !current->IsAssertBoolean()) { |
| 1499 // TODO(fschneider): Enable hoisting of Assert-instructions |
| 1500 // if it is safe to do so.
| 1501 Hoist(&it, pre_header, current); |
| 1502 } |
| 1503 } |
| 1504 } |
| 1505 } |
| 1506 } |
| 1507 } |
| 1508 |
| 1509 |
| 1510 class LoadOptimizer : public ValueObject { |
| 1511 public: |
| 1512 LoadOptimizer(FlowGraph* graph, AliasedSet* aliased_set) |
| 1513 : graph_(graph), |
| 1514 aliased_set_(aliased_set), |
| 1515 in_(graph_->preorder().length()), |
| 1516 out_(graph_->preorder().length()), |
| 1517 gen_(graph_->preorder().length()), |
| 1518 kill_(graph_->preorder().length()), |
| 1519 exposed_values_(graph_->preorder().length()), |
| 1520 out_values_(graph_->preorder().length()), |
| 1521 phis_(5), |
| 1522 worklist_(5), |
| 1523 congruency_worklist_(6), |
| 1524 in_worklist_(NULL), |
| 1525 forwarded_(false) { |
| 1526 const intptr_t num_blocks = graph_->preorder().length(); |
| 1527 for (intptr_t i = 0; i < num_blocks; i++) { |
| 1528 out_.Add(NULL); |
| 1529 gen_.Add(new(Z) BitVector(Z, aliased_set_->max_place_id())); |
| 1530 kill_.Add(new(Z) BitVector(Z, aliased_set_->max_place_id())); |
| 1531 in_.Add(new(Z) BitVector(Z, aliased_set_->max_place_id())); |
| 1532 |
| 1533 exposed_values_.Add(NULL); |
| 1534 out_values_.Add(NULL); |
| 1535 } |
| 1536 } |
| 1537 |
| 1538 ~LoadOptimizer() { |
| 1539 aliased_set_->RollbackAliasedIdentites(); |
| 1540 } |
| 1541 |
| 1542 Isolate* isolate() const { return graph_->isolate(); } |
| 1543 Zone* zone() const { return graph_->zone(); } |
| 1544 |
| 1545 static bool OptimizeGraph(FlowGraph* graph) { |
| 1546 ASSERT(FLAG_load_cse); |
| 1547 if (FLAG_trace_load_optimization) { |
| 1548 FlowGraphPrinter::PrintGraph("Before LoadOptimizer", graph); |
| 1549 } |
| 1550 |
| 1551 DirectChainedHashMap<PointerKeyValueTrait<Place> > map; |
| 1552 AliasedSet* aliased_set = NumberPlaces(graph, &map, kOptimizeLoads); |
| 1553 if ((aliased_set != NULL) && !aliased_set->IsEmpty()) { |
| 1554       // If any loads were forwarded, return true from Optimize to run load
| 1555       // forwarding again. This allows forwarding chains of loads, which is
| 1556       // especially important for context variables as they are built as
| 1557       // loads from an already loaded context.
| 1558 // TODO(vegorov): renumber newly discovered congruences during the |
| 1559 // forwarding to forward chains without running whole pass twice. |
| 1560 LoadOptimizer load_optimizer(graph, aliased_set); |
| 1561 return load_optimizer.Optimize(); |
| 1562 } |
| 1563 return false; |
| 1564 } |
| 1565 |
| 1566 private: |
| 1567 bool Optimize() { |
| 1568 ComputeInitialSets(); |
| 1569 ComputeOutSets(); |
| 1570 ComputeOutValues(); |
| 1571 if (graph_->is_licm_allowed()) { |
| 1572 MarkLoopInvariantLoads(); |
| 1573 } |
| 1574 ForwardLoads(); |
| 1575 EmitPhis(); |
| 1576 |
| 1577 if (FLAG_trace_load_optimization) { |
| 1578 FlowGraphPrinter::PrintGraph("After LoadOptimizer", graph_); |
| 1579 } |
| 1580 |
| 1581 return forwarded_; |
| 1582 } |
| 1583 |
| 1584 // Compute sets of loads generated and killed by each block. |
| 1585 // Additionally compute upwards exposed and generated loads for each block. |
| 1586   // Exposed loads are those that can be replaced if a corresponding
| 1587   // reaching load is found.
| 1588 // Loads that are locally redundant will be replaced as we go through |
| 1589 // instructions. |
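|        //
|        // For example, given
|        //
|        //     v1 <- LoadField(v0, f)
|        //     v2 <- LoadField(v0, f)    ;; locally redundant: f is already in GEN
|        //
|        // uses of v2 are rewritten to v1 and the second load is removed right here.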
| 1590 void ComputeInitialSets() { |
| 1591 for (BlockIterator block_it = graph_->reverse_postorder_iterator(); |
| 1592 !block_it.Done(); |
| 1593 block_it.Advance()) { |
| 1594 BlockEntryInstr* block = block_it.Current(); |
| 1595 const intptr_t preorder_number = block->preorder_number(); |
| 1596 |
| 1597 BitVector* kill = kill_[preorder_number]; |
| 1598 BitVector* gen = gen_[preorder_number]; |
| 1599 |
| 1600 ZoneGrowableArray<Definition*>* exposed_values = NULL; |
| 1601 ZoneGrowableArray<Definition*>* out_values = NULL; |
| 1602 |
| 1603 for (ForwardInstructionIterator instr_it(block); |
| 1604 !instr_it.Done(); |
| 1605 instr_it.Advance()) { |
| 1606 Instruction* instr = instr_it.Current(); |
| 1607 |
| 1608 bool is_load = false, is_store = false; |
| 1609 Place place(instr, &is_load, &is_store); |
| 1610 |
| 1611 BitVector* killed = NULL; |
| 1612 if (is_store) { |
| 1613 const intptr_t alias_id = |
| 1614 aliased_set_->LookupAliasId(place.ToAlias()); |
| 1615 if (alias_id != AliasedSet::kNoAlias) { |
| 1616 killed = aliased_set_->GetKilledSet(alias_id); |
| 1617 } else if (!place.IsImmutableField()) { |
| 1618             // We encountered an unknown alias: this means intrablock load
| 1619             // forwarding refined a parameter of this store, for example
| 1620 // |
| 1621 // o <- alloc() |
| 1622 // a.f <- o |
| 1623 // u <- a.f |
| 1624 // u.x <- null ;; this store alias is *.x |
| 1625 // |
| 1626 // after intrablock load forwarding |
| 1627 // |
| 1628 // o <- alloc() |
| 1629 // a.f <- o |
| 1630 // o.x <- null ;; this store alias is o.x |
| 1631 // |
| 1632             // In this case we fall back to using the place id recorded in
| 1633             // the instruction, which still points to the old place with a
| 1634             // more generic alias.
| 1635 const intptr_t old_alias_id = aliased_set_->LookupAliasId( |
| 1636 aliased_set_->places()[instr->place_id()]->ToAlias()); |
| 1637 killed = aliased_set_->GetKilledSet(old_alias_id); |
| 1638 } |
| 1639 |
| 1640 if (killed != NULL) { |
| 1641 kill->AddAll(killed); |
| 1642 // There is no need to clear out_values when clearing GEN set |
| 1643 // because only those values that are in the GEN set |
| 1644 // will ever be used. |
| 1645 gen->RemoveAll(killed); |
| 1646 } |
| 1647 |
| 1648 // Only forward stores to normal arrays, float64, and simd arrays |
| 1649 // to loads because other array stores (intXX/uintXX/float32) |
| 1650 // may implicitly convert the value stored. |
| 1651 StoreIndexedInstr* array_store = instr->AsStoreIndexed(); |
| 1652 if ((array_store == NULL) || |
| 1653 (array_store->class_id() == kArrayCid) || |
| 1654 (array_store->class_id() == kTypedDataFloat64ArrayCid) || |
| 1655 (array_store->class_id() == kTypedDataFloat32ArrayCid) || |
| 1656 (array_store->class_id() == kTypedDataFloat32x4ArrayCid)) { |
| 1657 Place* canonical_place = aliased_set_->LookupCanonical(&place); |
| 1658 if (canonical_place != NULL) { |
| 1659 // Store has a corresponding numbered place that might have a |
| 1660 // load. Try forwarding stored value to it. |
| 1661 gen->Add(canonical_place->id()); |
| 1662 if (out_values == NULL) out_values = CreateBlockOutValues(); |
| 1663 (*out_values)[canonical_place->id()] = GetStoredValue(instr); |
| 1664 } |
| 1665 } |
| 1666 |
| 1667 ASSERT(!instr->IsDefinition() || |
| 1668 !IsLoadEliminationCandidate(instr->AsDefinition())); |
| 1669 continue; |
| 1670 } else if (is_load) { |
| 1671 // Check if this load needs renumbering because of the intrablock |
| 1672 // load forwarding. |
| 1673 const Place* canonical = aliased_set_->LookupCanonical(&place); |
| 1674 if ((canonical != NULL) && |
| 1675 (canonical->id() != instr->AsDefinition()->place_id())) { |
| 1676 instr->AsDefinition()->set_place_id(canonical->id()); |
| 1677 } |
| 1678 } |
| 1679 |
| 1680 // If instruction has effects then kill all loads affected. |
| 1681 if (!instr->Effects().IsNone()) { |
| 1682 kill->AddAll(aliased_set_->aliased_by_effects()); |
| 1683 // There is no need to clear out_values when removing values from GEN |
| 1684 // set because only those values that are in the GEN set |
| 1685 // will ever be used. |
| 1686 gen->RemoveAll(aliased_set_->aliased_by_effects()); |
| 1687 continue; |
| 1688 } |
| 1689 |
| 1690 Definition* defn = instr->AsDefinition(); |
| 1691 if (defn == NULL) { |
| 1692 continue; |
| 1693 } |
| 1694 |
| 1695         // For object allocations forward the initial values of the fields
| 1696         // to subsequent loads, but skip final fields. Final fields are
| 1697         // initialized in a constructor that might not be inlined into the
| 1698         // function that we are currently optimizing, yet at the same time
| 1699         // we assume that values of final fields can be forwarded across
| 1700         // side effects. If we added 'null' as the known value for these
| 1701         // fields here we would incorrectly propagate this null across the
| 1702         // constructor invocation.
| 1703 AllocateObjectInstr* alloc = instr->AsAllocateObject(); |
| 1704 if ((alloc != NULL)) { |
| 1705 for (Value* use = alloc->input_use_list(); |
| 1706 use != NULL; |
| 1707 use = use->next_use()) { |
| 1708 // Look for all immediate loads from this object. |
| 1709 if (use->use_index() != 0) { |
| 1710 continue; |
| 1711 } |
| 1712 |
| 1713 LoadFieldInstr* load = use->instruction()->AsLoadField(); |
| 1714 if (load != NULL) { |
| 1715             // Found a load. Initialize the current value of the field either
| 1716             // to null (for normal fields) or to the type arguments.
| 1717 
| 1718             // Forward all fields for non-escaping objects, and only
| 1719             // non-final fields and type arguments for escaping ones.
| 1720 if (aliased_set_->CanBeAliased(alloc) && |
| 1721 (load->field() != NULL) && |
| 1722 load->field()->is_final()) { |
| 1723 continue; |
| 1724 } |
| 1725 |
| 1726 Definition* forward_def = graph_->constant_null(); |
| 1727 if (alloc->ArgumentCount() > 0) { |
| 1728 ASSERT(alloc->ArgumentCount() == 1); |
| 1729 intptr_t type_args_offset = |
| 1730 alloc->cls().type_arguments_field_offset(); |
| 1731 if (load->offset_in_bytes() == type_args_offset) { |
| 1732 forward_def = alloc->PushArgumentAt(0)->value()->definition(); |
| 1733 } |
| 1734 } |
| 1735 gen->Add(load->place_id()); |
| 1736 if (out_values == NULL) out_values = CreateBlockOutValues(); |
| 1737 (*out_values)[load->place_id()] = forward_def; |
| 1738 } |
| 1739 } |
| 1740 continue; |
| 1741 } |
| 1742 |
| 1743 if (!IsLoadEliminationCandidate(defn)) { |
| 1744 continue; |
| 1745 } |
| 1746 |
| 1747 const intptr_t place_id = defn->place_id(); |
| 1748 if (gen->Contains(place_id)) { |
| 1749 // This is a locally redundant load. |
| 1750 ASSERT((out_values != NULL) && ((*out_values)[place_id] != NULL)); |
| 1751 |
| 1752 Definition* replacement = (*out_values)[place_id]; |
| 1753 graph_->EnsureSSATempIndex(defn, replacement); |
| 1754 if (FLAG_trace_optimization) { |
| 1755 THR_Print("Replacing load v%" Pd " with v%" Pd "\n", |
| 1756 defn->ssa_temp_index(), |
| 1757 replacement->ssa_temp_index()); |
| 1758 } |
| 1759 |
| 1760 defn->ReplaceUsesWith(replacement); |
| 1761 instr_it.RemoveCurrentFromGraph(); |
| 1762 forwarded_ = true; |
| 1763 continue; |
| 1764 } else if (!kill->Contains(place_id)) { |
| 1765 // This is an exposed load: it is the first representative of a |
| 1766 // given expression id and it is not killed on the path from |
| 1767 // the block entry. |
| 1768 if (exposed_values == NULL) { |
| 1769 static const intptr_t kMaxExposedValuesInitialSize = 5; |
| 1770 exposed_values = new(Z) ZoneGrowableArray<Definition*>( |
| 1771 Utils::Minimum(kMaxExposedValuesInitialSize, |
| 1772 aliased_set_->max_place_id())); |
| 1773 } |
| 1774 |
| 1775 exposed_values->Add(defn); |
| 1776 } |
| 1777 |
| 1778 gen->Add(place_id); |
| 1779 |
| 1780 if (out_values == NULL) out_values = CreateBlockOutValues(); |
| 1781 (*out_values)[place_id] = defn; |
| 1782 } |
| 1783 |
| 1784 exposed_values_[preorder_number] = exposed_values; |
| 1785 out_values_[preorder_number] = out_values; |
| 1786 } |
| 1787 } |
| 1788 |
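|        // Apply a block's outgoing phi moves to its OUT set: the 'to' place of
|        // each move becomes available exactly when its 'from' place was available
|        // and all other 'to' bits are cleared.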
| 1789 static void PerformPhiMoves(PhiPlaceMoves::MovesList phi_moves, |
| 1790 BitVector* out, |
| 1791 BitVector* forwarded_loads) { |
| 1792 forwarded_loads->Clear(); |
| 1793 |
| 1794 for (intptr_t i = 0; i < phi_moves->length(); i++) { |
| 1795 const intptr_t from = (*phi_moves)[i].from(); |
| 1796 const intptr_t to = (*phi_moves)[i].to(); |
| 1797 if (from == to) continue; |
| 1798 |
| 1799 if (out->Contains(from)) { |
| 1800 forwarded_loads->Add(to); |
| 1801 } |
| 1802 } |
| 1803 |
| 1804 for (intptr_t i = 0; i < phi_moves->length(); i++) { |
| 1805 const intptr_t from = (*phi_moves)[i].from(); |
| 1806 const intptr_t to = (*phi_moves)[i].to(); |
| 1807 if (from == to) continue; |
| 1808 |
| 1809 out->Remove(to); |
| 1810 } |
| 1811 |
| 1812 out->AddAll(forwarded_loads); |
| 1813 } |
| 1814 |
| 1815   // Compute OUT sets by propagating them iteratively until a fixed point
| 1816   // is reached.
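|        //
|        // The standard forward available-loads equations are used, with phi moves
|        // applied to each predecessor's OUT set before the intersection:
|        //
|        //   IN(B)  = intersection of OUT(P) over all predecessors P of B
|        //   OUT(B) = GEN(B) union (IN(B) - KILL(B))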
| 1817 void ComputeOutSets() { |
| 1818 BitVector* temp = new(Z) BitVector(Z, aliased_set_->max_place_id()); |
| 1819 BitVector* forwarded_loads = |
| 1820 new(Z) BitVector(Z, aliased_set_->max_place_id()); |
| 1821 BitVector* temp_out = new(Z) BitVector(Z, aliased_set_->max_place_id()); |
| 1822 |
| 1823 bool changed = true; |
| 1824 while (changed) { |
| 1825 changed = false; |
| 1826 |
| 1827 for (BlockIterator block_it = graph_->reverse_postorder_iterator(); |
| 1828 !block_it.Done(); |
| 1829 block_it.Advance()) { |
| 1830 BlockEntryInstr* block = block_it.Current(); |
| 1831 |
| 1832 const intptr_t preorder_number = block->preorder_number(); |
| 1833 |
| 1834 BitVector* block_in = in_[preorder_number]; |
| 1835 BitVector* block_out = out_[preorder_number]; |
| 1836 BitVector* block_kill = kill_[preorder_number]; |
| 1837 BitVector* block_gen = gen_[preorder_number]; |
| 1838 |
| 1839 // Compute block_in as the intersection of all out(p) where p |
| 1840 // is a predecessor of the current block. |
| 1841 if (block->IsGraphEntry()) { |
| 1842 temp->Clear(); |
| 1843 } else { |
| 1844 temp->SetAll(); |
| 1845 ASSERT(block->PredecessorCount() > 0); |
| 1846 for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |
| 1847 BlockEntryInstr* pred = block->PredecessorAt(i); |
| 1848 BitVector* pred_out = out_[pred->preorder_number()]; |
| 1849 if (pred_out == NULL) continue; |
| 1850 PhiPlaceMoves::MovesList phi_moves = |
| 1851 aliased_set_->phi_moves()->GetOutgoingMoves(pred); |
| 1852 if (phi_moves != NULL) { |
| 1853 // If there are phi moves, perform intersection with |
| 1854 // a copy of pred_out where the phi moves are applied. |
| 1855 temp_out->CopyFrom(pred_out); |
| 1856 PerformPhiMoves(phi_moves, temp_out, forwarded_loads); |
| 1857 pred_out = temp_out; |
| 1858 } |
| 1859 temp->Intersect(pred_out); |
| 1860 } |
| 1861 } |
| 1862 |
| 1863 if (!temp->Equals(*block_in) || (block_out == NULL)) { |
| 1864 // If IN set has changed propagate the change to OUT set. |
| 1865 block_in->CopyFrom(temp); |
| 1866 |
| 1867 temp->RemoveAll(block_kill); |
| 1868 temp->AddAll(block_gen); |
| 1869 |
| 1870 if ((block_out == NULL) || !block_out->Equals(*temp)) { |
| 1871 if (block_out == NULL) { |
| 1872 block_out = out_[preorder_number] = |
| 1873 new(Z) BitVector(Z, aliased_set_->max_place_id()); |
| 1874 } |
| 1875 block_out->CopyFrom(temp); |
| 1876 changed = true; |
| 1877 } |
| 1878 } |
| 1879 } |
| 1880 } |
| 1881 } |
| 1882 |
| 1883 // Compute out_values mappings by propagating them in reverse postorder once |
| 1884 // through the graph. Generate phis on back edges where eager merge is |
| 1885 // impossible. |
| 1886 // No replacement is done at this point and thus any out_value[place_id] is |
| 1887 // changed at most once: from NULL to an actual value. |
| 1888 // When merging incoming loads we might need to create a phi. |
| 1889   // These phis are not inserted into the graph immediately because some of them
| 1890 // might become redundant after load forwarding is done. |
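|        // For a loop header the value flowing in over the back edge is not known
|        // yet when the header is visited, so a pending phi is created instead and
|        // its inputs are filled in by FillPhiInputs once all blocks have their
|        // out_values computed.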
| 1891 void ComputeOutValues() { |
| 1892 GrowableArray<PhiInstr*> pending_phis(5); |
| 1893 ZoneGrowableArray<Definition*>* temp_forwarded_values = NULL; |
| 1894 |
| 1895 for (BlockIterator block_it = graph_->reverse_postorder_iterator(); |
| 1896 !block_it.Done(); |
| 1897 block_it.Advance()) { |
| 1898 BlockEntryInstr* block = block_it.Current(); |
| 1899 |
| 1900 const bool can_merge_eagerly = CanMergeEagerly(block); |
| 1901 |
| 1902 const intptr_t preorder_number = block->preorder_number(); |
| 1903 |
| 1904 ZoneGrowableArray<Definition*>* block_out_values = |
| 1905 out_values_[preorder_number]; |
| 1906 |
| 1907 |
| 1908 // If OUT set has changed then we have new values available out of |
| 1909 // the block. Compute these values creating phi where necessary. |
| 1910 for (BitVector::Iterator it(out_[preorder_number]); |
| 1911 !it.Done(); |
| 1912 it.Advance()) { |
| 1913 const intptr_t place_id = it.Current(); |
| 1914 |
| 1915 if (block_out_values == NULL) { |
| 1916 out_values_[preorder_number] = block_out_values = |
| 1917 CreateBlockOutValues(); |
| 1918 } |
| 1919 |
| 1920 if ((*block_out_values)[place_id] == NULL) { |
| 1921 ASSERT(block->PredecessorCount() > 0); |
| 1922 Definition* in_value = can_merge_eagerly ? |
| 1923 MergeIncomingValues(block, place_id) : NULL; |
| 1924 if ((in_value == NULL) && |
| 1925 (in_[preorder_number]->Contains(place_id))) { |
| 1926 PhiInstr* phi = new(Z) PhiInstr(block->AsJoinEntry(), |
| 1927 block->PredecessorCount()); |
| 1928 phi->set_place_id(place_id); |
| 1929 pending_phis.Add(phi); |
| 1930 in_value = phi; |
| 1931 } |
| 1932 (*block_out_values)[place_id] = in_value; |
| 1933 } |
| 1934 } |
| 1935 |
| 1936 // If the block has outgoing phi moves perform them. Use temporary list |
| 1937 // of values to ensure that cyclic moves are performed correctly. |
| 1938 PhiPlaceMoves::MovesList phi_moves = |
| 1939 aliased_set_->phi_moves()->GetOutgoingMoves(block); |
| 1940 if ((phi_moves != NULL) && (block_out_values != NULL)) { |
| 1941 if (temp_forwarded_values == NULL) { |
| 1942 temp_forwarded_values = CreateBlockOutValues(); |
| 1943 } |
| 1944 |
| 1945 for (intptr_t i = 0; i < phi_moves->length(); i++) { |
| 1946 const intptr_t from = (*phi_moves)[i].from(); |
| 1947 const intptr_t to = (*phi_moves)[i].to(); |
| 1948 if (from == to) continue; |
| 1949 |
| 1950 (*temp_forwarded_values)[to] = (*block_out_values)[from]; |
| 1951 } |
| 1952 |
| 1953 for (intptr_t i = 0; i < phi_moves->length(); i++) { |
| 1954 const intptr_t from = (*phi_moves)[i].from(); |
| 1955 const intptr_t to = (*phi_moves)[i].to(); |
| 1956 if (from == to) continue; |
| 1957 |
| 1958 (*block_out_values)[to] = (*temp_forwarded_values)[to]; |
| 1959 } |
| 1960 } |
| 1961 |
| 1962 if (FLAG_trace_load_optimization) { |
| 1963 THR_Print("B%" Pd "\n", block->block_id()); |
| 1964 THR_Print(" IN: "); |
| 1965 aliased_set_->PrintSet(in_[preorder_number]); |
| 1966 THR_Print("\n"); |
| 1967 |
| 1968 THR_Print(" KILL: "); |
| 1969 aliased_set_->PrintSet(kill_[preorder_number]); |
| 1970 THR_Print("\n"); |
| 1971 |
| 1972 THR_Print(" OUT: "); |
| 1973 aliased_set_->PrintSet(out_[preorder_number]); |
| 1974 THR_Print("\n"); |
| 1975 } |
| 1976 } |
| 1977 |
| 1978 // All blocks were visited. Fill pending phis with inputs |
| 1979 // that flow on back edges. |
| 1980 for (intptr_t i = 0; i < pending_phis.length(); i++) { |
| 1981 FillPhiInputs(pending_phis[i]); |
| 1982 } |
| 1983 } |
| 1984 |
| 1985 bool CanMergeEagerly(BlockEntryInstr* block) { |
| 1986 for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |
| 1987 BlockEntryInstr* pred = block->PredecessorAt(i); |
| 1988 if (pred->postorder_number() < block->postorder_number()) { |
| 1989 return false; |
| 1990 } |
| 1991 } |
| 1992 return true; |
| 1993 } |
| 1994 |
| 1995 void MarkLoopInvariantLoads() { |
| 1996 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers = |
| 1997 graph_->LoopHeaders(); |
| 1998 |
| 1999 ZoneGrowableArray<BitVector*>* invariant_loads = |
| 2000 new(Z) ZoneGrowableArray<BitVector*>(loop_headers.length()); |
| 2001 |
| 2002 for (intptr_t i = 0; i < loop_headers.length(); i++) { |
| 2003 BlockEntryInstr* header = loop_headers[i]; |
| 2004 BlockEntryInstr* pre_header = header->ImmediateDominator(); |
| 2005 if (pre_header == NULL) { |
| 2006 invariant_loads->Add(NULL); |
| 2007 continue; |
| 2008 } |
| 2009 |
| 2010 BitVector* loop_gen = new(Z) BitVector(Z, aliased_set_->max_place_id()); |
| 2011 for (BitVector::Iterator loop_it(header->loop_info()); |
| 2012 !loop_it.Done(); |
| 2013 loop_it.Advance()) { |
| 2014 const intptr_t preorder_number = loop_it.Current(); |
| 2015 loop_gen->AddAll(gen_[preorder_number]); |
| 2016 } |
| 2017 |
| 2018 for (BitVector::Iterator loop_it(header->loop_info()); |
| 2019 !loop_it.Done(); |
| 2020 loop_it.Advance()) { |
| 2021 const intptr_t preorder_number = loop_it.Current(); |
| 2022 loop_gen->RemoveAll(kill_[preorder_number]); |
| 2023 } |
| 2024 |
| 2025 if (FLAG_trace_optimization) { |
| 2026 for (BitVector::Iterator it(loop_gen); !it.Done(); it.Advance()) { |
| 2027 THR_Print("place %s is loop invariant for B%" Pd "\n", |
| 2028 aliased_set_->places()[it.Current()]->ToCString(), |
| 2029 header->block_id()); |
| 2030 } |
| 2031 } |
| 2032 |
| 2033 invariant_loads->Add(loop_gen); |
| 2034 } |
| 2035 |
| 2036 graph_->set_loop_invariant_loads(invariant_loads); |
| 2037 } |
| 2038 |
| 2039 // Compute incoming value for the given expression id. |
| 2040 // Will create a phi if different values are incoming from multiple |
| 2041 // predecessors. |
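|        // For example, if every predecessor forwards the same definition v5 for
|        // the place, v5 itself is returned; if one forwards v5 and another v7, a
|        // new phi(v5, v7) is created (v5 and v7 are purely illustrative names).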
| 2042 Definition* MergeIncomingValues(BlockEntryInstr* block, intptr_t place_id) { |
| 2043 // First check if the same value is coming in from all predecessors. |
| 2044 static Definition* const kDifferentValuesMarker = |
| 2045 reinterpret_cast<Definition*>(-1); |
| 2046 Definition* incoming = NULL; |
| 2047 for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |
| 2048 BlockEntryInstr* pred = block->PredecessorAt(i); |
| 2049 ZoneGrowableArray<Definition*>* pred_out_values = |
| 2050 out_values_[pred->preorder_number()]; |
| 2051 if ((pred_out_values == NULL) || ((*pred_out_values)[place_id] == NULL)) { |
| 2052 return NULL; |
| 2053 } else if (incoming == NULL) { |
| 2054 incoming = (*pred_out_values)[place_id]; |
| 2055 } else if (incoming != (*pred_out_values)[place_id]) { |
| 2056 incoming = kDifferentValuesMarker; |
| 2057 } |
| 2058 } |
| 2059 |
| 2060 if (incoming != kDifferentValuesMarker) { |
| 2061 ASSERT(incoming != NULL); |
| 2062 return incoming; |
| 2063 } |
| 2064 |
| 2065 // Incoming values are different. Phi is required to merge. |
| 2066 PhiInstr* phi = new(Z) PhiInstr( |
| 2067 block->AsJoinEntry(), block->PredecessorCount()); |
| 2068 phi->set_place_id(place_id); |
| 2069 FillPhiInputs(phi); |
| 2070 return phi; |
| 2071 } |
| 2072 |
| 2073 void FillPhiInputs(PhiInstr* phi) { |
| 2074 BlockEntryInstr* block = phi->GetBlock(); |
| 2075 const intptr_t place_id = phi->place_id(); |
| 2076 |
| 2077 for (intptr_t i = 0; i < block->PredecessorCount(); i++) { |
| 2078 BlockEntryInstr* pred = block->PredecessorAt(i); |
| 2079 ZoneGrowableArray<Definition*>* pred_out_values = |
| 2080 out_values_[pred->preorder_number()]; |
| 2081 ASSERT((*pred_out_values)[place_id] != NULL); |
| 2082 |
| 2083 // Sets of outgoing values are not linked into use lists so |
| 2084 // they might contain values that were replaced and removed |
| 2085 // from the graph by this iteration. |
| 2086 // To prevent using them we additionally mark definitions themselves |
| 2087 // as replaced and store a pointer to the replacement. |
| 2088 Definition* replacement = (*pred_out_values)[place_id]->Replacement(); |
| 2089 Value* input = new(Z) Value(replacement); |
| 2090 phi->SetInputAt(i, input); |
| 2091 replacement->AddInputUse(input); |
| 2092 } |
| 2093 |
| 2094 graph_->AllocateSSAIndexes(phi); |
| 2095 phis_.Add(phi); // Postpone phi insertion until after load forwarding. |
| 2096 |
| 2097 if (FLAG_trace_load_optimization) { |
| 2098 THR_Print("created pending phi %s for %s at B%" Pd "\n", |
| 2099 phi->ToCString(), |
| 2100 aliased_set_->places()[place_id]->ToCString(), |
| 2101 block->block_id()); |
| 2102 } |
| 2103 } |
| 2104 |
| 2105 // Iterate over basic blocks and replace exposed loads with incoming |
| 2106 // values. |
| 2107 void ForwardLoads() { |
| 2108 for (BlockIterator block_it = graph_->reverse_postorder_iterator(); |
| 2109 !block_it.Done(); |
| 2110 block_it.Advance()) { |
| 2111 BlockEntryInstr* block = block_it.Current(); |
| 2112 |
| 2113 ZoneGrowableArray<Definition*>* loads = |
| 2114 exposed_values_[block->preorder_number()]; |
| 2115 if (loads == NULL) continue; // No exposed loads. |
| 2116 |
| 2117 BitVector* in = in_[block->preorder_number()]; |
| 2118 |
| 2119 for (intptr_t i = 0; i < loads->length(); i++) { |
| 2120 Definition* load = (*loads)[i]; |
| 2121 if (!in->Contains(load->place_id())) continue; // No incoming value. |
| 2122 |
| 2123 Definition* replacement = MergeIncomingValues(block, load->place_id()); |
| 2124 ASSERT(replacement != NULL); |
| 2125 |
| 2126 // Sets of outgoing values are not linked into use lists so |
| 2127         // they might contain values that were replaced and removed
| 2128 // from the graph by this iteration. |
| 2129 // To prevent using them we additionally mark definitions themselves |
| 2130 // as replaced and store a pointer to the replacement. |
| 2131 replacement = replacement->Replacement(); |
| 2132 |
| 2133 if (load != replacement) { |
| 2134 graph_->EnsureSSATempIndex(load, replacement); |
| 2135 |
| 2136 if (FLAG_trace_optimization) { |
| 2137 THR_Print("Replacing load v%" Pd " with v%" Pd "\n", |
| 2138 load->ssa_temp_index(), |
| 2139 replacement->ssa_temp_index()); |
| 2140 } |
| 2141 |
| 2142 load->ReplaceUsesWith(replacement); |
| 2143 load->RemoveFromGraph(); |
| 2144 load->SetReplacement(replacement); |
| 2145 forwarded_ = true; |
| 2146 } |
| 2147 } |
| 2148 } |
| 2149 } |
| 2150 |
| 2151   // Check if the given phi takes the same value on all code paths.
| 2152   // Eliminate it as redundant if this is the case.
| 2153   // When analyzing phi operands we assume that only phis generated during
| 2154   // this load forwarding phase can be redundant. They can be distinguished
| 2155   // because they are not marked alive.
| 2156 // TODO(vegorov): move this into a separate phase over all phis. |
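|        //
|        // For example, pending phis that form a cycle yet only ever see a single
|        // value v,
|        //
|        //     p1 <- phi(v, p2)
|        //     p2 <- phi(p1, v)
|        //
|        // are all redundant: every use of p1 and p2 is replaced with v.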
| 2157 bool EliminateRedundantPhi(PhiInstr* phi) { |
| 2158 Definition* value = NULL; // Possible value of this phi. |
| 2159 |
| 2160 worklist_.Clear(); |
| 2161 if (in_worklist_ == NULL) { |
| 2162 in_worklist_ = new(Z) BitVector(Z, graph_->current_ssa_temp_index()); |
| 2163 } else { |
| 2164 in_worklist_->Clear(); |
| 2165 } |
| 2166 |
| 2167 worklist_.Add(phi); |
| 2168 in_worklist_->Add(phi->ssa_temp_index()); |
| 2169 |
| 2170 for (intptr_t i = 0; i < worklist_.length(); i++) { |
| 2171 PhiInstr* phi = worklist_[i]; |
| 2172 |
| 2173 for (intptr_t i = 0; i < phi->InputCount(); i++) { |
| 2174 Definition* input = phi->InputAt(i)->definition(); |
| 2175 if (input == phi) continue; |
| 2176 |
| 2177 PhiInstr* phi_input = input->AsPhi(); |
| 2178 if ((phi_input != NULL) && !phi_input->is_alive()) { |
| 2179 if (!in_worklist_->Contains(phi_input->ssa_temp_index())) { |
| 2180 worklist_.Add(phi_input); |
| 2181 in_worklist_->Add(phi_input->ssa_temp_index()); |
| 2182 } |
| 2183 continue; |
| 2184 } |
| 2185 |
| 2186 if (value == NULL) { |
| 2187 value = input; |
| 2188 } else if (value != input) { |
| 2189 return false; // This phi is not redundant. |
| 2190 } |
| 2191 } |
| 2192 } |
| 2193 |
| 2194 // All phis in the worklist are redundant and have the same computed |
| 2195 // value on all code paths. |
| 2196 ASSERT(value != NULL); |
| 2197 for (intptr_t i = 0; i < worklist_.length(); i++) { |
| 2198 worklist_[i]->ReplaceUsesWith(value); |
| 2199 } |
| 2200 |
| 2201 return true; |
| 2202 } |
| 2203 |
| 2204 // Returns true if definitions are congruent assuming their inputs |
| 2205 // are congruent. |
| 2206 bool CanBeCongruent(Definition* a, Definition* b) { |
| 2207 return (a->tag() == b->tag()) && |
| 2208 ((a->IsPhi() && (a->GetBlock() == b->GetBlock())) || |
| 2209 (a->AllowsCSE() && a->Dependencies().IsNone() && |
| 2210 a->AttributesEqual(b))); |
| 2211 } |
| 2212 |
| 2213   // Given two definitions check if they are congruent under the assumption
| 2214   // that their inputs will be proven congruent. If they are, add them to
| 2215   // the worklist to check their inputs' congruency.
| 2216   // Returns true if the pair was added to the worklist (or is already in
| 2217   // it) and false if a and b are not congruent.
| 2218 bool AddPairToCongruencyWorklist(Definition* a, Definition* b) { |
| 2219 if (!CanBeCongruent(a, b)) { |
| 2220 return false; |
| 2221 } |
| 2222 |
| 2223 // If a is already in the worklist check if it is being compared to b. |
| 2224 // Give up if it is not. |
| 2225 if (in_worklist_->Contains(a->ssa_temp_index())) { |
| 2226 for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) { |
| 2227 if (a == congruency_worklist_[i]) { |
| 2228 return (b == congruency_worklist_[i + 1]); |
| 2229 } |
| 2230 } |
| 2231 UNREACHABLE(); |
| 2232 } else if (in_worklist_->Contains(b->ssa_temp_index())) { |
| 2233 return AddPairToCongruencyWorklist(b, a); |
| 2234 } |
| 2235 |
| 2236 congruency_worklist_.Add(a); |
| 2237 congruency_worklist_.Add(b); |
| 2238 in_worklist_->Add(a->ssa_temp_index()); |
| 2239 return true; |
| 2240 } |
| 2241 |
| 2242 bool AreInputsCongruent(Definition* a, Definition* b) { |
| 2243 ASSERT(a->tag() == b->tag()); |
| 2244 ASSERT(a->InputCount() == b->InputCount()); |
| 2245 for (intptr_t j = 0; j < a->InputCount(); j++) { |
| 2246 Definition* inputA = a->InputAt(j)->definition(); |
| 2247 Definition* inputB = b->InputAt(j)->definition(); |
| 2248 |
| 2249 if (inputA != inputB) { |
| 2250 if (!AddPairToCongruencyWorklist(inputA, inputB)) { |
| 2251 return false; |
| 2252 } |
| 2253 } |
| 2254 } |
| 2255 return true; |
| 2256 } |
| 2257 |
| 2258 // Returns true if instruction dom dominates instruction other. |
| 2259 static bool Dominates(Instruction* dom, Instruction* other) { |
| 2260 BlockEntryInstr* dom_block = dom->GetBlock(); |
| 2261 BlockEntryInstr* other_block = other->GetBlock(); |
| 2262 |
| 2263 if (dom_block == other_block) { |
| 2264 for (Instruction* current = dom->next(); |
| 2265 current != NULL; |
| 2266 current = current->next()) { |
| 2267 if (current == other) { |
| 2268 return true; |
| 2269 } |
| 2270 } |
| 2271 return false; |
| 2272 } |
| 2273 |
| 2274 return dom_block->Dominates(other_block); |
| 2275 } |
| 2276 |
| 2277 // Replace the given phi with another if they are congruent. |
| 2278 // Returns true if succeeds. |
| 2279 bool ReplacePhiWith(PhiInstr* phi, PhiInstr* replacement) { |
| 2280 ASSERT(phi->InputCount() == replacement->InputCount()); |
| 2281 ASSERT(phi->block() == replacement->block()); |
| 2282 |
| 2283 congruency_worklist_.Clear(); |
| 2284 if (in_worklist_ == NULL) { |
| 2285 in_worklist_ = new(Z) BitVector(Z, graph_->current_ssa_temp_index()); |
| 2286 } else { |
| 2287 in_worklist_->Clear(); |
| 2288 } |
| 2289 |
| 2290     // During the comparison the worklist contains pairs of definitions to be
| 2291 // compared. |
| 2292 if (!AddPairToCongruencyWorklist(phi, replacement)) { |
| 2293 return false; |
| 2294 } |
| 2295 |
| 2296 // Process the worklist. It might grow during each comparison step. |
| 2297 for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) { |
| 2298 if (!AreInputsCongruent(congruency_worklist_[i], |
| 2299 congruency_worklist_[i + 1])) { |
| 2300 return false; |
| 2301 } |
| 2302 } |
| 2303 |
| 2304     // At this point the worklist contains pairs of congruent definitions.
| 2305     // Replace one member of each pair with the other, maintaining the proper
| 2306     // domination relation between definitions and uses.
| 2307 for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) { |
| 2308 Definition* a = congruency_worklist_[i]; |
| 2309 Definition* b = congruency_worklist_[i + 1]; |
| 2310 |
| 2311       // If these definitions are not phis then we need to pick the one
| 2312       // that dominates the other as the replacement: if a dominates b,
| 2313       // swap them. Note: both a and b are used as a phi input at the same
| 2314       // block B, which means a dominates B and b dominates B, so either
| 2315       // a dominates b or b dominates a.
| 2316 if (!a->IsPhi()) { |
| 2317 if (Dominates(a, b)) { |
| 2318 Definition* t = a; |
| 2319 a = b; |
| 2320 b = t; |
| 2321 } |
| 2322 ASSERT(Dominates(b, a)); |
| 2323 } |
| 2324 |
| 2325 if (FLAG_trace_load_optimization) { |
| 2326 THR_Print("Replacing %s with congruent %s\n", |
| 2327 a->ToCString(), |
| 2328 b->ToCString()); |
| 2329 } |
| 2330 |
| 2331 a->ReplaceUsesWith(b); |
| 2332 if (a->IsPhi()) { |
| 2333 // We might be replacing a phi introduced by the load forwarding |
| 2334 // that is not inserted in the graph yet. |
| 2335 ASSERT(b->IsPhi()); |
| 2336 PhiInstr* phi_a = a->AsPhi(); |
| 2337 if (phi_a->is_alive()) { |
| 2338 phi_a->mark_dead(); |
| 2339 phi_a->block()->RemovePhi(phi_a); |
| 2340 phi_a->UnuseAllInputs(); |
| 2341 } |
| 2342 } else { |
| 2343 a->RemoveFromGraph(); |
| 2344 } |
| 2345 } |
| 2346 |
| 2347 return true; |
| 2348 } |
| 2349 |
| 2350 // Insert the given phi into the graph. Attempt to find an equal one in the |
| 2351 // target block first. |
| 2352 // Returns true if the phi was inserted and false if it was replaced. |
| 2353 bool EmitPhi(PhiInstr* phi) { |
| 2354 for (PhiIterator it(phi->block()); !it.Done(); it.Advance()) { |
| 2355 if (ReplacePhiWith(phi, it.Current())) { |
| 2356 return false; |
| 2357 } |
| 2358 } |
| 2359 |
| 2360 phi->mark_alive(); |
| 2361 phi->block()->InsertPhi(phi); |
| 2362 return true; |
| 2363 } |
| 2364 |
| 2365 // Phis have not yet been inserted into the graph but they have uses of |
| 2366 // their inputs. Insert the non-redundant ones and clear the input uses |
| 2367 // of the redundant ones. |
| 2368 void EmitPhis() { |
| 2369 // First eliminate all redundant phis. |
| 2370 for (intptr_t i = 0; i < phis_.length(); i++) { |
| 2371 PhiInstr* phi = phis_[i]; |
| 2372 if (!phi->HasUses() || EliminateRedundantPhi(phi)) { |
| 2373 phi->UnuseAllInputs(); |
| 2374 phis_[i] = NULL; |
| 2375 } |
| 2376 } |
| 2377 |
| 2378 // Now emit phis or replace them with equal phis already present in the |
| 2379 // graph. |
| 2380 for (intptr_t i = 0; i < phis_.length(); i++) { |
| 2381 PhiInstr* phi = phis_[i]; |
| 2382 if ((phi != NULL) && (!phi->HasUses() || !EmitPhi(phi))) { |
| 2383 phi->UnuseAllInputs(); |
| 2384 } |
| 2385 } |
| 2386 } |
| 2387 |
| 2388 ZoneGrowableArray<Definition*>* CreateBlockOutValues() { |
| 2389 ZoneGrowableArray<Definition*>* out = |
| 2390 new(Z) ZoneGrowableArray<Definition*>(aliased_set_->max_place_id()); |
| 2391 for (intptr_t i = 0; i < aliased_set_->max_place_id(); i++) { |
| 2392 out->Add(NULL); |
| 2393 } |
| 2394 return out; |
| 2395 } |
| 2396 |
| 2397 FlowGraph* graph_; |
| 2398 DirectChainedHashMap<PointerKeyValueTrait<Place> >* map_; |
| 2399 |
| 2400 // Mapping between field offsets in words and expression ids of loads from |
| 2401 // that offset. |
| 2402 AliasedSet* aliased_set_; |
| 2403 |
| 2404 // Per block sets of expression ids for loads that are: incoming (available |
| 2405 // on the entry), outgoing (available on the exit), generated and killed. |
| 2406 GrowableArray<BitVector*> in_; |
| 2407 GrowableArray<BitVector*> out_; |
| 2408 GrowableArray<BitVector*> gen_; |
| 2409 GrowableArray<BitVector*> kill_; |
| 2410 |
| 2411 // Per block list of upwards exposed loads. |
| 2412 GrowableArray<ZoneGrowableArray<Definition*>*> exposed_values_; |
| 2413 |
| 2414 // Per block mappings between expression ids and outgoing definitions that |
| 2415 // represent those ids. |
| 2416 GrowableArray<ZoneGrowableArray<Definition*>*> out_values_; |
| 2417 |
| 2418 // List of phis generated during ComputeOutValues and ForwardLoads. |
| 2419 // Some of these phis might be redundant and thus a separate pass is |
| 2420 // needed to emit only non-redundant ones. |
| 2421 GrowableArray<PhiInstr*> phis_; |
| 2422 |
| 2423 // Auxiliary worklist used by redundant phi elimination. |
| 2424 GrowableArray<PhiInstr*> worklist_; |
| 2425 GrowableArray<Definition*> congruency_worklist_; |
| 2426 BitVector* in_worklist_; |
| 2427 |
| 2428 |
| 2429 // True if any load was eliminated. |
| 2430 bool forwarded_; |
| 2431 |
| 2432 DISALLOW_COPY_AND_ASSIGN(LoadOptimizer); |
| 2433 }; |
| 2434 |
| 2435 |
| 2436 bool DominatorBasedCSE::Optimize(FlowGraph* graph) { |
| 2437 bool changed = false; |
| 2438 if (FLAG_load_cse) { |
| 2439 changed = LoadOptimizer::OptimizeGraph(graph) || changed; |
| 2440 } |
| 2441 |
| 2442 CSEInstructionMap map; |
| 2443 changed = OptimizeRecursive(graph, graph->graph_entry(), &map) || changed; |
| 2444 |
| 2445 return changed; |
| 2446 } |
| 2447 |
| 2448 |
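|      // Walk the dominator tree keeping a map of available CSE candidates. An
|      // instruction that allows CSE is replaced with an equal dominating
|      // instruction when one is still available at this block; otherwise it is
|      // added to the map. Instructions with side effects flush the dependent part
|      // of the map.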
| 2449 bool DominatorBasedCSE::OptimizeRecursive( |
| 2450 FlowGraph* graph, |
| 2451 BlockEntryInstr* block, |
| 2452 CSEInstructionMap* map) { |
| 2453 bool changed = false; |
| 2454 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { |
| 2455 Instruction* current = it.Current(); |
| 2456 if (current->AllowsCSE()) { |
| 2457 Instruction* replacement = map->Lookup(current); |
| 2458 if ((replacement != NULL) && |
| 2459 graph->block_effects()->IsAvailableAt(replacement, block)) { |
| 2460 // Replace current with lookup result. |
| 2461 graph->ReplaceCurrentInstruction(&it, current, replacement); |
| 2462 changed = true; |
| 2463 continue; |
| 2464 } |
| 2465 |
| 2466       // For simplicity we assume that an instruction either does not depend
| 2467       // on anything or does not affect anything. If this is not the case
| 2468       // then we should first remove affected instructions from the map and
| 2469       // then add the instruction to the map so that it does not kill itself.
| 2470 ASSERT(current->Effects().IsNone() || current->Dependencies().IsNone()); |
| 2471 map->Insert(current); |
| 2472 } |
| 2473 |
| 2474 map->RemoveAffected(current->Effects()); |
| 2475 } |
| 2476 |
| 2477 // Process children in the dominator tree recursively. |
| 2478 intptr_t num_children = block->dominated_blocks().length(); |
| 2479 for (intptr_t i = 0; i < num_children; ++i) { |
| 2480 BlockEntryInstr* child = block->dominated_blocks()[i]; |
| 2481 if (i < num_children - 1) { |
| 2482 // Copy map. |
| 2483 CSEInstructionMap child_map(*map); |
| 2484 changed = OptimizeRecursive(graph, child, &child_map) || changed; |
| 2485 } else { |
| 2486 // Reuse map for the last child. |
| 2487 changed = OptimizeRecursive(graph, child, map) || changed; |
| 2488 } |
| 2489 } |
| 2490 return changed; |
| 2491 } |
| 2492 |
| 2493 |
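|      // StoreOptimizer performs dead store elimination using a backward liveness
|      // analysis over numbered places: a store is removed if the place it writes
|      // is overwritten before any potential load later in the same block, and a
|      // downward exposed store is removed if its place is not live out of its
|      // block.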
| 2494 class StoreOptimizer : public LivenessAnalysis { |
| 2495 public: |
| 2496 StoreOptimizer(FlowGraph* graph, |
| 2497 AliasedSet* aliased_set, |
| 2498 DirectChainedHashMap<PointerKeyValueTrait<Place> >* map) |
| 2499 : LivenessAnalysis(aliased_set->max_place_id(), graph->postorder()), |
| 2500 graph_(graph), |
| 2501 map_(map), |
| 2502 aliased_set_(aliased_set), |
| 2503 exposed_stores_(graph_->postorder().length()) { |
| 2504 const intptr_t num_blocks = graph_->postorder().length(); |
| 2505 for (intptr_t i = 0; i < num_blocks; i++) { |
| 2506 exposed_stores_.Add(NULL); |
| 2507 } |
| 2508 } |
| 2509 |
| 2510 static void OptimizeGraph(FlowGraph* graph) { |
| 2511 ASSERT(FLAG_load_cse); |
| 2512 if (FLAG_trace_load_optimization) { |
| 2513 FlowGraphPrinter::PrintGraph("Before StoreOptimizer", graph); |
| 2514 } |
| 2515 |
| 2516 DirectChainedHashMap<PointerKeyValueTrait<Place> > map; |
| 2517 AliasedSet* aliased_set = NumberPlaces(graph, &map, kOptimizeStores); |
| 2518 if ((aliased_set != NULL) && !aliased_set->IsEmpty()) { |
| 2519 StoreOptimizer store_optimizer(graph, aliased_set, &map); |
| 2520 store_optimizer.Optimize(); |
| 2521 } |
| 2522 } |
| 2523 |
| 2524 private: |
| 2525 void Optimize() { |
| 2526 Analyze(); |
| 2527 if (FLAG_trace_load_optimization) { |
| 2528 Dump(); |
| 2529 } |
| 2530 EliminateDeadStores(); |
| 2531 if (FLAG_trace_load_optimization) { |
| 2532 FlowGraphPrinter::PrintGraph("After StoreOptimizer", graph_); |
| 2533 } |
| 2534 } |
| 2535 |
| 2536 bool CanEliminateStore(Instruction* instr) { |
| 2537 switch (instr->tag()) { |
| 2538 case Instruction::kStoreInstanceField: { |
| 2539 StoreInstanceFieldInstr* store_instance = instr->AsStoreInstanceField(); |
| 2540 // Can't eliminate stores that initialize fields. |
| 2541 return !(store_instance->is_potential_unboxed_initialization() || |
| 2542 store_instance->is_object_reference_initialization()); |
| 2543 } |
| 2544 case Instruction::kStoreIndexed: |
| 2545 case Instruction::kStoreStaticField: |
| 2546 return true; |
| 2547 default: |
| 2548 UNREACHABLE(); |
| 2549 return false; |
| 2550 } |
| 2551 } |
| 2552 |
| 2553 virtual void ComputeInitialSets() { |
| 2554 Zone* zone = graph_->zone(); |
| 2555 BitVector* all_places = new(zone) BitVector(zone, |
| 2556 aliased_set_->max_place_id()); |
| 2557 all_places->SetAll(); |
| 2558 for (BlockIterator block_it = graph_->postorder_iterator(); |
| 2559 !block_it.Done(); |
| 2560 block_it.Advance()) { |
| 2561 BlockEntryInstr* block = block_it.Current(); |
| 2562 const intptr_t postorder_number = block->postorder_number(); |
| 2563 |
| 2564 BitVector* kill = kill_[postorder_number]; |
| 2565 BitVector* live_in = live_in_[postorder_number]; |
| 2566 BitVector* live_out = live_out_[postorder_number]; |
| 2567 |
| 2568 ZoneGrowableArray<Instruction*>* exposed_stores = NULL; |
| 2569 |
| 2570 // Iterate backwards starting at the last instruction. |
| 2571 for (BackwardInstructionIterator instr_it(block); |
| 2572 !instr_it.Done(); |
| 2573 instr_it.Advance()) { |
| 2574 Instruction* instr = instr_it.Current(); |
| 2575 |
| 2576 bool is_load = false; |
| 2577 bool is_store = false; |
| 2578 Place place(instr, &is_load, &is_store); |
| 2579 if (place.IsImmutableField()) { |
| 2580 // Loads/stores of final fields do not participate. |
| 2581 continue; |
| 2582 } |
| 2583 |
| 2584 // Handle stores. |
| 2585 if (is_store) { |
| 2586 if (kill->Contains(instr->place_id())) { |
| 2587 if (!live_in->Contains(instr->place_id()) && |
| 2588 CanEliminateStore(instr)) { |
| 2589 if (FLAG_trace_optimization) { |
| 2590 THR_Print( |
| 2591 "Removing dead store to place %" Pd " in block B%" Pd "\n", |
| 2592 instr->place_id(), block->block_id()); |
| 2593 } |
| 2594 instr_it.RemoveCurrentFromGraph(); |
| 2595 } |
| 2596 } else if (!live_in->Contains(instr->place_id())) { |
| 2597             // Mark this store as downward exposed: such stores are the only
| 2598             // candidates for global store elimination.
| 2599 if (exposed_stores == NULL) { |
| 2600 const intptr_t kMaxExposedStoresInitialSize = 5; |
| 2601 exposed_stores = new(zone) ZoneGrowableArray<Instruction*>( |
| 2602 Utils::Minimum(kMaxExposedStoresInitialSize, |
| 2603 aliased_set_->max_place_id())); |
| 2604 } |
| 2605 exposed_stores->Add(instr); |
| 2606 } |
| 2607 // Interfering stores kill only loads from the same place. |
| 2608 kill->Add(instr->place_id()); |
| 2609 live_in->Remove(instr->place_id()); |
| 2610 continue; |
| 2611 } |
| 2612 |
| 2613 // Handle side effects, deoptimization and function return. |
| 2614 if (!instr->Effects().IsNone() || |
| 2615 instr->CanDeoptimize() || |
| 2616 instr->IsThrow() || |
| 2617 instr->IsReThrow() || |
| 2618 instr->IsReturn()) { |
| 2619 // Instructions that return from the function, instructions with side |
| 2620 // effects and instructions that can deoptimize are considered as |
| 2621 // loads from all places. |
| 2622 live_in->CopyFrom(all_places); |
| 2623 if (instr->IsThrow() || instr->IsReThrow() || instr->IsReturn()) { |
| 2624 // Initialize live-out for exit blocks since it won't be computed |
| 2625 // otherwise during the fixed point iteration. |
| 2626 live_out->CopyFrom(all_places); |
| 2627 } |
| 2628 continue; |
| 2629 } |
| 2630 |
| 2631 // Handle loads. |
| 2632 Definition* defn = instr->AsDefinition(); |
| 2633 if ((defn != NULL) && IsLoadEliminationCandidate(defn)) { |
| 2634 const intptr_t alias = aliased_set_->LookupAliasId(place.ToAlias()); |
| 2635 live_in->AddAll(aliased_set_->GetKilledSet(alias)); |
| 2636 continue; |
| 2637 } |
| 2638 } |
| 2639 exposed_stores_[postorder_number] = exposed_stores; |
| 2640 } |
| 2641 if (FLAG_trace_load_optimization) { |
| 2642 Dump(); |
| 2643 THR_Print("---\n"); |
| 2644 } |
| 2645 } |
| 2646 |
| 2647 void EliminateDeadStores() { |
| 2648 // Iteration order does not matter here. |
| 2649 for (BlockIterator block_it = graph_->postorder_iterator(); |
| 2650 !block_it.Done(); |
| 2651 block_it.Advance()) { |
| 2652 BlockEntryInstr* block = block_it.Current(); |
| 2653 const intptr_t postorder_number = block->postorder_number(); |
| 2654 |
| 2655 BitVector* live_out = live_out_[postorder_number]; |
| 2656 |
| 2657 ZoneGrowableArray<Instruction*>* exposed_stores = |
| 2658 exposed_stores_[postorder_number]; |
| 2659 if (exposed_stores == NULL) continue; // No exposed stores. |
| 2660 |
| 2661 // Iterate over candidate stores. |
| 2662 for (intptr_t i = 0; i < exposed_stores->length(); ++i) { |
| 2663 Instruction* instr = (*exposed_stores)[i]; |
| 2664 bool is_load = false; |
| 2665 bool is_store = false; |
| 2666 Place place(instr, &is_load, &is_store); |
| 2667 ASSERT(!is_load && is_store); |
| 2668 if (place.IsImmutableField()) { |
| 2669             // Final fields do not participate in dead store elimination.
| 2670 continue; |
| 2671 } |
| 2672 // Eliminate a downward exposed store if the corresponding place is not |
| 2673 // in live-out. |
| 2674 if (!live_out->Contains(instr->place_id()) && |
| 2675 CanEliminateStore(instr)) { |
| 2676 if (FLAG_trace_optimization) { |
| 2677 THR_Print("Removing dead store to place %" Pd " block B%" Pd "\n", |
| 2678 instr->place_id(), block->block_id()); |
| 2679 } |
| 2680 instr->RemoveFromGraph(/* ignored */ false); |
| 2681 } |
| 2682 } |
| 2683 } |
| 2684 } |
| 2685 |
| 2686 FlowGraph* graph_; |
| 2687 DirectChainedHashMap<PointerKeyValueTrait<Place> >* map_; |
| 2688 |
| 2689 // Mapping between field offsets in words and expression ids of loads from |
| 2690 // that offset. |
| 2691 AliasedSet* aliased_set_; |
| 2692 |
| 2693 // Per block list of downward exposed stores. |
| 2694 GrowableArray<ZoneGrowableArray<Instruction*>*> exposed_stores_; |
| 2695 |
| 2696 DISALLOW_COPY_AND_ASSIGN(StoreOptimizer); |
| 2697 }; |
| 2698 |
| 2699 |
| 2700 void DeadStoreElimination::Optimize(FlowGraph* graph) { |
| 2701 if (FLAG_dead_store_elimination) { |
| 2702 StoreOptimizer::OptimizeGraph(graph); |
| 2703 } |
| 2704 } |
| 2705 |
| 2706 |
| 2707 enum SafeUseCheck { kOptimisticCheck, kStrictCheck }; |
| 2708 |
| 2709 // Check if the use is safe for allocation sinking. Allocation sinking |
| 2710 // candidates can only be used at store instructions: |
| 2711 // |
| 2712 // - any store into the allocation candidate itself is unconditionally safe |
| 2713 // as it just changes the rematerialization state of this candidate; |
| 2714 //   - a store into another object is only safe if that other object is itself
| 2715 //     an allocation sinking candidate.
| 2716 // |
| 2717 // We use a simple fix-point algorithm to discover the set of valid candidates |
| 2718 // (see the CollectCandidates method), which is why IsSafeUse can operate in two
| 2719 // modes: |
| 2720 // |
| 2721 // - optimistic, when every allocation is assumed to be an allocation |
| 2722 // sinking candidate; |
| 2723 // - strict, when only marked allocations are assumed to be allocation |
| 2724 // sinking candidates. |
| 2725 // |
| 2726 // The fix-point algorithm in CollectCandidates first collects a set of allocations
| 2727 // optimistically and then checks each collected candidate strictly and unmarks |
| 2728 // invalid candidates transitively until only strictly valid ones remain. |
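|      //
|      // For example, for a candidate v0 <- AllocateObject(...):
|      //
|      //     StoreInstanceField(v0.f <- v1)  ;; safe: a store into the candidate
|      //     StoreInstanceField(v2.f <- v0)  ;; safe only if v2 is itself a candidate
|      //
|      // Uses by MaterializeObject instructions inserted by this pass are also
|      // safe; any other use disqualifies v0.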
| 2729 static bool IsSafeUse(Value* use, SafeUseCheck check_type) { |
| 2730 if (use->instruction()->IsMaterializeObject()) { |
| 2731 return true; |
| 2732 } |
| 2733 |
| 2734 StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField(); |
| 2735 if (store != NULL) { |
| 2736 if (use == store->value()) { |
| 2737 Definition* instance = store->instance()->definition(); |
| 2738 return instance->IsAllocateObject() && |
| 2739 ((check_type == kOptimisticCheck) || |
| 2740 instance->Identity().IsAllocationSinkingCandidate()); |
| 2741 } |
| 2742 return true; |
| 2743 } |
| 2744 |
| 2745 return false; |
| 2746 } |
| 2747 |
| 2748 |
| 2749 // Right now we are attempting to sink allocations only into deoptimization
| 2750 // exits, so a candidate should only be used in StoreInstanceField
| 2751 // instructions that write into fields of the allocated object.
| 2752 // We do not support materialization of objects that have type arguments.
| 2753 static bool IsAllocationSinkingCandidate(Definition* alloc, |
| 2754 SafeUseCheck check_type) { |
| 2755 for (Value* use = alloc->input_use_list(); |
| 2756 use != NULL; |
| 2757 use = use->next_use()) { |
| 2758 if (!IsSafeUse(use, check_type)) { |
| 2759 if (FLAG_trace_optimization) { |
| 2760 THR_Print("use of %s at %s is unsafe for allocation sinking\n", |
| 2761 alloc->ToCString(), |
| 2762 use->instruction()->ToCString()); |
| 2763 } |
| 2764 return false; |
| 2765 } |
| 2766 } |
| 2767 |
| 2768 return true; |
| 2769 } |
| 2770 |
| 2771 |
| 2772 // If the given use is a store into an object then return the object we are
| 2773 // storing into. |
| 2774 static Definition* StoreInto(Value* use) { |
| 2775 StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField(); |
| 2776 if (store != NULL) { |
| 2777 return store->instance()->definition(); |
| 2778 } |
| 2779 |
| 2780 return NULL; |
| 2781 } |
| 2782 |
| 2783 |
| 2784 // Remove the given allocation from the graph. It is not observable. |
| 2785 // If deoptimization occurs the object will be materialized. |
| 2786 void AllocationSinking::EliminateAllocation(Definition* alloc) { |
| 2787 ASSERT(IsAllocationSinkingCandidate(alloc, kStrictCheck)); |
| 2788 |
| 2789 if (FLAG_trace_optimization) { |
| 2790 THR_Print("removing allocation from the graph: v%" Pd "\n", |
| 2791 alloc->ssa_temp_index()); |
| 2792 } |
| 2793 |
| 2794 // As an allocation sinking candidate it is only used in stores to its own |
| 2795 // fields. Remove these stores. |
| 2796 for (Value* use = alloc->input_use_list(); |
| 2797 use != NULL; |
| 2798 use = alloc->input_use_list()) { |
| 2799 use->instruction()->RemoveFromGraph(); |
| 2800 } |
| 2801 |
| 2802 // There should be no environment uses. The pass replaced them with |
| 2803 // MaterializeObject instructions. |
| 2804 #ifdef DEBUG |
| 2805 for (Value* use = alloc->env_use_list(); |
| 2806 use != NULL; |
| 2807 use = use->next_use()) { |
| 2808 ASSERT(use->instruction()->IsMaterializeObject()); |
| 2809 } |
| 2810 #endif |
| 2811 ASSERT(alloc->input_use_list() == NULL); |
| 2812 alloc->RemoveFromGraph(); |
| 2813 if (alloc->ArgumentCount() > 0) { |
| 2814 ASSERT(alloc->ArgumentCount() == 1); |
| 2815 for (intptr_t i = 0; i < alloc->ArgumentCount(); ++i) { |
| 2816 alloc->PushArgumentAt(i)->RemoveFromGraph(); |
| 2817 } |
| 2818 } |
| 2819 } |
| 2820 |
| 2821 |
| 2822 // Find allocation instructions that can be potentially eliminated and |
| 2823 // rematerialized at deoptimization exits if needed. See IsSafeUse |
| 2824 // for the description of the algorithm used below.
| 2825 void AllocationSinking::CollectCandidates() { |
| 2826 // Optimistically collect all potential candidates. |
| 2827 for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); |
| 2828 !block_it.Done(); |
| 2829 block_it.Advance()) { |
| 2830 BlockEntryInstr* block = block_it.Current(); |
| 2831 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) { |
| 2832 { AllocateObjectInstr* alloc = it.Current()->AsAllocateObject(); |
| 2833 if ((alloc != NULL) && |
| 2834 IsAllocationSinkingCandidate(alloc, kOptimisticCheck)) { |
| 2835 alloc->SetIdentity(AliasIdentity::AllocationSinkingCandidate()); |
| 2836 candidates_.Add(alloc); |
| 2837 } |
| 2838 } |
| 2839 { AllocateUninitializedContextInstr* alloc = |
| 2840 it.Current()->AsAllocateUninitializedContext(); |
| 2841 if ((alloc != NULL) && |
| 2842 IsAllocationSinkingCandidate(alloc, kOptimisticCheck)) { |
| 2843 alloc->SetIdentity(AliasIdentity::AllocationSinkingCandidate()); |
| 2844 candidates_.Add(alloc); |
| 2845 } |
| 2846 } |
| 2847 } |
| 2848 } |
| 2849 |
| 2850 // Transitively unmark all candidates that are not strictly valid. |
| 2851 bool changed; |
| 2852 do { |
| 2853 changed = false; |
| 2854 for (intptr_t i = 0; i < candidates_.length(); i++) { |
| 2855 Definition* alloc = candidates_[i]; |
| 2856 if (alloc->Identity().IsAllocationSinkingCandidate()) { |
| 2857 if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) { |
| 2858 alloc->SetIdentity(AliasIdentity::Unknown()); |
| 2859 changed = true; |
| 2860 } |
| 2861 } |
| 2862 } |
| 2863 } while (changed); |
| 2864 |
| 2865 // Shrink the list of candidates removing all unmarked ones. |
| 2866 intptr_t j = 0; |
| 2867 for (intptr_t i = 0; i < candidates_.length(); i++) { |
| 2868 Definition* alloc = candidates_[i]; |
| 2869 if (alloc->Identity().IsAllocationSinkingCandidate()) { |
| 2870 if (FLAG_trace_optimization) { |
| 2871 THR_Print("discovered allocation sinking candidate: v%" Pd "\n", |
| 2872 alloc->ssa_temp_index()); |
| 2873 } |
| 2874 |
| 2875 if (j != i) { |
| 2876 candidates_[j] = alloc; |
| 2877 } |
| 2878 j++; |
| 2879 } |
| 2880 } |
| 2881 candidates_.TruncateTo(j); |
| 2882 } |
| 2883 |
| 2884 |
| 2885 // If a materialization references an allocation sinking candidate, replace
| 2886 // this reference with the materialization that should have been computed
| 2887 // for this side exit. CollectAllExits should have collected this exit.
| 2888 void AllocationSinking::NormalizeMaterializations() { |
| 2889 for (intptr_t i = 0; i < candidates_.length(); i++) { |
| 2890 Definition* alloc = candidates_[i]; |
| 2891 |
| 2892 Value* next_use; |
| 2893 for (Value* use = alloc->input_use_list(); |
| 2894 use != NULL; |
| 2895 use = next_use) { |
| 2896 next_use = use->next_use(); |
| 2897 if (use->instruction()->IsMaterializeObject()) { |
| 2898 use->BindTo(MaterializationFor(alloc, use->instruction())); |
| 2899 } |
| 2900 } |
| 2901 } |
| 2902 } |
| 2903 |
| 2904 |
| 2905 // We transitively insert materializations at each deoptimization exit that |
| 2906 // might see the given allocation (see ExitsCollector). Some of these
| 2907 // materializations are not actually used and some fail to compute because
| 2908 // they are inserted in a block that is not dominated by the allocation.
| 2909 // Remove these unused materializations from the graph.
| 2910 void AllocationSinking::RemoveUnusedMaterializations() { |
| 2911 intptr_t j = 0; |
| 2912 for (intptr_t i = 0; i < materializations_.length(); i++) { |
| 2913 MaterializeObjectInstr* mat = materializations_[i]; |
| 2914 if ((mat->input_use_list() == NULL) && (mat->env_use_list() == NULL)) { |
| 2915 // Check if this materialization failed to compute and remove any |
| 2916 // unforwarded loads. There were no loads from any allocation sinking |
| 2917       // candidate in the beginning so it is safe to assume that any encountered
| 2918 // load was inserted by CreateMaterializationAt. |
| 2919 for (intptr_t i = 0; i < mat->InputCount(); i++) { |
| 2920 LoadFieldInstr* load = mat->InputAt(i)->definition()->AsLoadField(); |
| 2921 if ((load != NULL) && |
| 2922 (load->instance()->definition() == mat->allocation())) { |
| 2923 load->ReplaceUsesWith(flow_graph_->constant_null()); |
| 2924 load->RemoveFromGraph(); |
| 2925 } |
| 2926 } |
| 2927 mat->RemoveFromGraph(); |
| 2928 } else { |
| 2929 if (j != i) { |
| 2930 materializations_[j] = mat; |
| 2931 } |
| 2932 j++; |
| 2933 } |
| 2934 } |
| 2935 materializations_.TruncateTo(j); |
| 2936 } |
| 2937 |
| 2938 |
| 2939 // Some candidates might stop being eligible for allocation sinking after |
| 2940 // the load forwarding because they flow into phis that load forwarding |
| 2941 // inserts. Discover such allocations and remove them from the list |
| 2942 // of allocation sinking candidates, undoing all changes that we made
| 2943 // in preparation for sinking these allocations. |
| 2944 void AllocationSinking::DiscoverFailedCandidates() { |
| 2945 // Transitively unmark all candidates that are not strictly valid. |
| 2946 bool changed; |
| 2947 do { |
| 2948 changed = false; |
| 2949 for (intptr_t i = 0; i < candidates_.length(); i++) { |
| 2950 Definition* alloc = candidates_[i]; |
| 2951 if (alloc->Identity().IsAllocationSinkingCandidate()) { |
| 2952 if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) { |
| 2953 alloc->SetIdentity(AliasIdentity::Unknown()); |
| 2954 changed = true; |
| 2955 } |
| 2956 } |
| 2957 } |
| 2958 } while (changed); |
| 2959 |
| 2960 // Remove all failed candidates from the candidates list. |
| 2961 intptr_t j = 0; |
| 2962 for (intptr_t i = 0; i < candidates_.length(); i++) { |
| 2963 Definition* alloc = candidates_[i]; |
| 2964 if (!alloc->Identity().IsAllocationSinkingCandidate()) { |
| 2965 if (FLAG_trace_optimization) { |
| 2966 THR_Print("allocation v%" Pd " can't be eliminated\n", |
| 2967 alloc->ssa_temp_index()); |
| 2968 } |
| 2969 |
| 2970 #ifdef DEBUG |
| 2971 for (Value* use = alloc->env_use_list(); |
| 2972 use != NULL; |
| 2973 use = use->next_use()) { |
| 2974 ASSERT(use->instruction()->IsMaterializeObject()); |
| 2975 } |
| 2976 #endif |
| 2977 |
| 2978 // All materializations will be removed from the graph. Remove inserted |
| 2979 // loads first and detach materializations from allocation's environment |
| 2980       // loads first and detach materializations from the allocation's
| 2981       // environment use list: we will reconstruct it when we start removing
| 2982 alloc->set_env_use_list(NULL); |
| 2983 for (Value* use = alloc->input_use_list(); |
| 2984 use != NULL; |
| 2985 use = use->next_use()) { |
| 2986 if (use->instruction()->IsLoadField()) { |
| 2987 LoadFieldInstr* load = use->instruction()->AsLoadField(); |
| 2988 load->ReplaceUsesWith(flow_graph_->constant_null()); |
| 2989 load->RemoveFromGraph(); |
| 2990 } else { |
| 2991 ASSERT(use->instruction()->IsMaterializeObject() || |
| 2992 use->instruction()->IsPhi() || |
| 2993 use->instruction()->IsStoreInstanceField()); |
| 2994 } |
| 2995 } |
| 2996 } else { |
| 2997 if (j != i) { |
| 2998 candidates_[j] = alloc; |
| 2999 } |
| 3000 j++; |
| 3001 } |
| 3002 } |
| 3003 |
| 3004 if (j != candidates_.length()) { // Something was removed from candidates. |
| 3005 intptr_t k = 0; |
| 3006 for (intptr_t i = 0; i < materializations_.length(); i++) { |
| 3007 MaterializeObjectInstr* mat = materializations_[i]; |
| 3008 if (!mat->allocation()->Identity().IsAllocationSinkingCandidate()) { |
| 3009 // Restore environment uses of the allocation that were replaced |
| 3010 // by this materialization and drop materialization. |
| 3011 mat->ReplaceUsesWith(mat->allocation()); |
| 3012 mat->RemoveFromGraph(); |
| 3013 } else { |
| 3014 if (k != i) { |
| 3015 materializations_[k] = mat; |
| 3016 } |
| 3017 k++; |
| 3018 } |
| 3019 } |
| 3020 materializations_.TruncateTo(k); |
| 3021 } |
| 3022 |
| 3023 candidates_.TruncateTo(j); |
| 3024 } |
| 3025 |
| 3026 |
| 3027 void AllocationSinking::Optimize() { |
| 3028 CollectCandidates(); |
| 3029 |
| 3030 // Insert MaterializeObject instructions that will describe the state of the |
| 3031 // object at all deoptimization points. Each inserted materialization looks |
| 3032 // like this (where v_0 is allocation that we are going to eliminate): |
| 3033   // like this (where v_0 is the allocation that we are going to eliminate):
| 3034 // ... |
| 3035 // v_N <- LoadField(v_0, field_N) |
| 3036 // v_{N+1} <- MaterializeObject(field_1 = v_1, ..., field_N = v_{N}) |
| 3037 for (intptr_t i = 0; i < candidates_.length(); i++) { |
| 3038 InsertMaterializations(candidates_[i]); |
| 3039 } |
| 3040 |
| 3041 // Run load forwarding to eliminate LoadField instructions inserted above. |
| 3042 // All loads will be successfully eliminated because: |
| 3043 // a) they use fields (not offsets) and thus provide precise aliasing |
| 3044 // information |
| 3045   //     b) the candidate does not escape and thus its fields are not affected
| 3046   //        by external effects from calls.
| 3047 LoadOptimizer::OptimizeGraph(flow_graph_); |
| 3048 |
| 3049 NormalizeMaterializations(); |
| 3050 |
| 3051 RemoveUnusedMaterializations(); |
| 3052 |
| 3053   // If any candidates are no longer eligible for allocation sinking, abort
| 3054   // the optimization for them and undo any changes made in preparation.
| 3055 DiscoverFailedCandidates(); |
| 3056 |
| 3057   // At this point we have computed the state of the object at each
| 3058   // deoptimization point and can eliminate it. Loads inserted above were
| 3059   // forwarded, so the allocation has no uses, as at the start of the pass.
| 3060 for (intptr_t i = 0; i < candidates_.length(); i++) { |
| 3061 EliminateAllocation(candidates_[i]); |
| 3062 } |
| 3063 |
| 3064 // Process materializations and unbox their arguments: materializations |
| 3065 // are part of the environment and can materialize boxes for double/mint/simd |
| 3066 // values when needed. |
| 3067 // TODO(vegorov): handle all box types here. |
| 3068 for (intptr_t i = 0; i < materializations_.length(); i++) { |
| 3069 MaterializeObjectInstr* mat = materializations_[i]; |
| 3070 for (intptr_t j = 0; j < mat->InputCount(); j++) { |
| 3071 Definition* defn = mat->InputAt(j)->definition(); |
| 3072 if (defn->IsBox()) { |
| 3073 mat->InputAt(j)->BindTo(defn->InputAt(0)->definition()); |
| 3074 } |
| 3075 } |
| 3076 } |
| 3077 } |
| 3078 |
| 3079 |
| 3080 // Remove materializations from the graph. The register allocator will treat
| 3081 // them as part of the environment, not as real instructions.
| 3082 void AllocationSinking::DetachMaterializations() { |
| 3083 for (intptr_t i = 0; i < materializations_.length(); i++) { |
| 3084 materializations_[i]->previous()->LinkTo(materializations_[i]->next()); |
| 3085 } |
| 3086 } |
| 3087 |
| 3088 |
| 3089 // Add a field/offset slot to the list of slots if it is not already present.
| 3090 static bool AddSlot(ZoneGrowableArray<const Object*>* slots, |
| 3091 const Object& slot) { |
| 3092 for (intptr_t i = 0; i < slots->length(); i++) { |
| 3093 if ((*slots)[i]->raw() == slot.raw()) { |
| 3094 return false; |
| 3095 } |
| 3096 } |
| 3097 slots->Add(&slot); |
| 3098 return true; |
| 3099 } |
| 3100 |
| 3101 |
| 3102 // Find the deoptimization exit for the given materialization, assuming that
| 3103 // all materializations are emitted right before the instruction which is a
| 3104 // deoptimization exit. |
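|      //
|      // That is, the IL right before a deoptimizing instruction is assumed to look
|      // like (illustrative):
|      //   MaterializeObject_1, ..., MaterializeObject_K, deoptimizing instruction
|      // so we simply skip to the end of the materialization chain.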
| 3105 static Instruction* ExitForMaterialization(MaterializeObjectInstr* mat) { |
| 3106 while (mat->next()->IsMaterializeObject()) { |
| 3107 mat = mat->next()->AsMaterializeObject(); |
| 3108 } |
| 3109 return mat->next(); |
| 3110 } |
| 3111 |
| 3112 |
| 3113 // Given the deoptimization exit, find the first materialization that was
| 3114 // inserted before it.
| 3115 static Instruction* FirstMaterializationAt(Instruction* exit) { |
| 3116 while (exit->previous()->IsMaterializeObject()) { |
| 3117 exit = exit->previous(); |
| 3118 } |
| 3119 return exit; |
| 3120 } |
| 3121 |
| 3122 |
| 3123 // Given the allocation and the deoptimization exit, try to find the
| 3124 // MaterializeObject instruction corresponding to this allocation at this exit.
| 3125 MaterializeObjectInstr* AllocationSinking::MaterializationFor( |
| 3126 Definition* alloc, Instruction* exit) { |
| 3127 if (exit->IsMaterializeObject()) { |
| 3128 exit = ExitForMaterialization(exit->AsMaterializeObject()); |
| 3129 } |
| 3130 |
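|        // Walk backwards through the chain of materializations emitted right
|        // before the exit and return the one created for this allocation.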
| 3131 for (MaterializeObjectInstr* mat = exit->previous()->AsMaterializeObject(); |
| 3132 mat != NULL; |
| 3133 mat = mat->previous()->AsMaterializeObject()) { |
| 3134 if (mat->allocation() == alloc) { |
| 3135 return mat; |
| 3136 } |
| 3137 } |
| 3138 |
| 3139 return NULL; |
| 3140 } |
| 3141 |
| 3142 |
| 3143 // Insert MaterializeObject instruction for the given allocation before |
| 3144 // the given instruction that can deoptimize. |
| 3145 void AllocationSinking::CreateMaterializationAt( |
| 3146 Instruction* exit, |
| 3147 Definition* alloc, |
| 3148 const ZoneGrowableArray<const Object*>& slots) { |
| 3149 ZoneGrowableArray<Value*>* values = |
| 3150 new(Z) ZoneGrowableArray<Value*>(slots.length()); |
| 3151 |
| 3152 // All loads should be inserted before the first materialization so that |
| 3153   // the IR follows the pattern: loads, materializations, deoptimizing
| 3154 // instruction. |
| 3155 Instruction* load_point = FirstMaterializationAt(exit); |
| 3156 |
| 3157   // Insert a load instruction for every slot.
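|        // A slot is either a Field (a named field load) or a Smi holding a raw
|        // offset in bytes; build the matching LoadFieldInstr variant.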
| 3158 for (intptr_t i = 0; i < slots.length(); i++) { |
| 3159 LoadFieldInstr* load = slots[i]->IsField() |
| 3160 ? new(Z) LoadFieldInstr( |
| 3161 new(Z) Value(alloc), |
| 3162 &Field::Cast(*slots[i]), |
| 3163 AbstractType::ZoneHandle(Z), |
| 3164 alloc->token_pos()) |
| 3165 : new(Z) LoadFieldInstr( |
| 3166 new(Z) Value(alloc), |
| 3167 Smi::Cast(*slots[i]).Value(), |
| 3168 AbstractType::ZoneHandle(Z), |
| 3169 alloc->token_pos()); |
| 3170 flow_graph_->InsertBefore( |
| 3171 load_point, load, NULL, FlowGraph::kValue); |
| 3172 values->Add(new(Z) Value(load)); |
| 3173 } |
| 3174 |
| 3175 MaterializeObjectInstr* mat = NULL; |
| 3176 if (alloc->IsAllocateObject()) { |
| 3177 mat = new(Z) MaterializeObjectInstr( |
| 3178 alloc->AsAllocateObject(), slots, values); |
| 3179 } else { |
| 3180 ASSERT(alloc->IsAllocateUninitializedContext()); |
| 3181 mat = new(Z) MaterializeObjectInstr( |
| 3182 alloc->AsAllocateUninitializedContext(), slots, values); |
| 3183 } |
| 3184 |
| 3185 flow_graph_->InsertBefore(exit, mat, NULL, FlowGraph::kValue); |
| 3186 |
| 3187 // Replace all mentions of this allocation with a newly inserted |
| 3188 // MaterializeObject instruction. |
| 3189 // We must preserve the identity: all mentions are replaced by the same |
| 3190 // materialization. |
| 3191 for (Environment::DeepIterator env_it(exit->env()); |
| 3192 !env_it.Done(); |
| 3193 env_it.Advance()) { |
| 3194 Value* use = env_it.CurrentValue(); |
| 3195 if (use->definition() == alloc) { |
| 3196 use->RemoveFromUseList(); |
| 3197 use->set_definition(mat); |
| 3198 mat->AddEnvUse(use); |
| 3199 } |
| 3200 } |
| 3201 |
| 3202 // Mark MaterializeObject as an environment use of this allocation. |
| 3203 // This will allow us to discover it when we are looking for deoptimization |
| 3204 // exits for another allocation that potentially flows into this one. |
| 3205 Value* val = new(Z) Value(alloc); |
| 3206 val->set_instruction(mat); |
| 3207 alloc->AddEnvUse(val); |
| 3208 |
| 3209 // Record inserted materialization. |
| 3210 materializations_.Add(mat); |
| 3211 } |
| 3212 |
| 3213 |
| 3214 // Add the given instruction to the list of instructions if it is not
| 3215 // already present there.
| 3216 template<typename T> |
| 3217 void AddInstruction(GrowableArray<T*>* list, T* value) { |
| 3218 ASSERT(!value->IsGraphEntry()); |
| 3219 for (intptr_t i = 0; i < list->length(); i++) { |
| 3220 if ((*list)[i] == value) { |
| 3221 return; |
| 3222 } |
| 3223 } |
| 3224 list->Add(value); |
| 3225 } |
| 3226 |
| 3227 |
| 3228 // Transitively collect all deoptimization exits that might need this allocation |
| 3229 // rematerialized. It is not enough to collect only environment uses of this |
| 3230 // allocation because it can flow into other objects that will be |
| 3231 // dematerialized and that are referenced by deopt environments that |
| 3232 // don't contain this allocation explicitly. |
| 3233 void AllocationSinking::ExitsCollector::Collect(Definition* alloc) { |
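|        // Environment uses that go through a materialization stand for the
|        // deoptimization exit that materialization was created for; any other
|        // environment use is a deoptimization exit itself.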
| 3234 for (Value* use = alloc->env_use_list(); |
| 3235 use != NULL; |
| 3236 use = use->next_use()) { |
| 3237 if (use->instruction()->IsMaterializeObject()) { |
| 3238 AddInstruction(&exits_, ExitForMaterialization( |
| 3239 use->instruction()->AsMaterializeObject())); |
| 3240 } else { |
| 3241 AddInstruction(&exits_, use->instruction()); |
| 3242 } |
| 3243 } |
| 3244 |
| 3245   // Check if this allocation is stored into any other allocation sinking
| 3246   // candidate and put that candidate on the worklist, so that we
| 3247   // conservatively collect all exits for it as well, because they might
| 3248   // see this object.
| 3249 for (Value* use = alloc->input_use_list(); |
| 3250 use != NULL; |
| 3251 use = use->next_use()) { |
| 3252 Definition* obj = StoreInto(use); |
| 3253 if ((obj != NULL) && (obj != alloc)) { |
| 3254 AddInstruction(&worklist_, obj); |
| 3255 } |
| 3256 } |
| 3257 } |
| 3258 |
| 3259 |
| 3260 void AllocationSinking::ExitsCollector::CollectTransitively(Definition* alloc) { |
| 3261 exits_.TruncateTo(0); |
| 3262 worklist_.TruncateTo(0); |
| 3263 |
| 3264 worklist_.Add(alloc); |
| 3265 |
| 3266   // Note: the worklist may grow while we are iterating over it.
| 3267   // We do not remove allocations from the worklist, to avoid maintaining a
| 3268   // separate BitVector of already processed allocations: the worklist is
| 3269   // expected to be very small, so a linear search in it is just as efficient
| 3270   // as a bit vector.
| 3271 for (intptr_t i = 0; i < worklist_.length(); i++) { |
| 3272 Collect(worklist_[i]); |
| 3273 } |
| 3274 } |
| 3275 |
| 3276 |
| 3277 void AllocationSinking::InsertMaterializations(Definition* alloc) { |
| 3278 // Collect all fields that are written for this instance. |
| 3279 ZoneGrowableArray<const Object*>* slots = |
| 3280 new(Z) ZoneGrowableArray<const Object*>(5); |
| 3281 |
| 3282 for (Value* use = alloc->input_use_list(); |
| 3283 use != NULL; |
| 3284 use = use->next_use()) { |
| 3285 StoreInstanceFieldInstr* store = use->instruction()->AsStoreInstanceField(); |
| 3286 if ((store != NULL) && (store->instance()->definition() == alloc)) { |
| 3287 if (!store->field().IsNull()) { |
| 3288 AddSlot(slots, store->field()); |
| 3289 } else { |
| 3290 AddSlot(slots, Smi::ZoneHandle(Z, Smi::New(store->offset_in_bytes()))); |
| 3291 } |
| 3292 } |
| 3293 } |
| 3294 |
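|        // An AllocateObject that takes an argument also has a type arguments
|        // field; record its slot (as an offset) so it is materialized as well.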
| 3295 if (alloc->ArgumentCount() > 0) { |
| 3296 AllocateObjectInstr* alloc_object = alloc->AsAllocateObject(); |
| 3297 ASSERT(alloc_object->ArgumentCount() == 1); |
| 3298 intptr_t type_args_offset = |
| 3299 alloc_object->cls().type_arguments_field_offset(); |
| 3300 AddSlot(slots, Smi::ZoneHandle(Z, Smi::New(type_args_offset))); |
| 3301 } |
| 3302 |
| 3303 // Collect all instructions that mention this object in the environment. |
| 3304 exits_collector_.CollectTransitively(alloc); |
| 3305 |
| 3306 // Insert materializations at environment uses. |
| 3307 for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) { |
| 3308 CreateMaterializationAt( |
| 3309 exits_collector_.exits()[i], alloc, *slots); |
| 3310 } |
| 3311 } |
| 3312 |
| 3313 |
| 3314 void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) { |
| 3315   // For every catch-block: iterate over all potentially throwing instructions
| 3316   // inside the corresponding try-block and figure out for each environment
| 3317   // value if it is the same constant at all of them. If so, replace the
| 3318   // initial definition at the catch-entry with this constant.
| 3319 const GrowableArray<CatchBlockEntryInstr*>& catch_entries = |
| 3320 flow_graph->graph_entry()->catch_entries(); |
| 3321 intptr_t base = kFirstLocalSlotFromFp + flow_graph->num_non_copied_params(); |
| 3322 for (intptr_t catch_idx = 0; |
| 3323 catch_idx < catch_entries.length(); |
| 3324 ++catch_idx) { |
| 3325 CatchBlockEntryInstr* catch_entry = catch_entries[catch_idx]; |
| 3326 |
| 3327 // Initialize cdefs with the original initial definitions (ParameterInstr). |
| 3328 // The following representation is used: |
| 3329 // ParameterInstr => unknown |
| 3330 // ConstantInstr => known constant |
| 3331 // NULL => non-constant |
| 3332 GrowableArray<Definition*>* idefs = catch_entry->initial_definitions(); |
| 3333 GrowableArray<Definition*> cdefs(idefs->length()); |
| 3334 cdefs.AddArray(*idefs); |
| 3335 |
| 3336 // exception_var and stacktrace_var are never constant. |
| 3337 intptr_t ex_idx = base - catch_entry->exception_var().index(); |
| 3338 intptr_t st_idx = base - catch_entry->stacktrace_var().index(); |
| 3339 cdefs[ex_idx] = cdefs[st_idx] = NULL; |
| 3340 |
| 3341 for (BlockIterator block_it = flow_graph->reverse_postorder_iterator(); |
| 3342 !block_it.Done(); |
| 3343 block_it.Advance()) { |
| 3344 BlockEntryInstr* block = block_it.Current(); |
| 3345 if (block->try_index() == catch_entry->catch_try_index()) { |
| 3346 for (ForwardInstructionIterator instr_it(block); |
| 3347 !instr_it.Done(); |
| 3348 instr_it.Advance()) { |
| 3349 Instruction* current = instr_it.Current(); |
| 3350 if (current->MayThrow()) { |
| 3351 Environment* env = current->env()->Outermost(); |
| 3352 ASSERT(env != NULL); |
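|                // Merge this environment into cdefs: a slot stays a known
|                // constant only if every potentially throwing instruction in the
|                // try-block sees the same constant definition in that slot.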
| 3353 for (intptr_t env_idx = 0; env_idx < cdefs.length(); ++env_idx) { |
| 3354 if (cdefs[env_idx] != NULL && |
| 3355 env->ValueAt(env_idx)->BindsToConstant()) { |
| 3356 cdefs[env_idx] = env->ValueAt(env_idx)->definition(); |
| 3357 } |
| 3358 if (cdefs[env_idx] != env->ValueAt(env_idx)->definition()) { |
| 3359 cdefs[env_idx] = NULL; |
| 3360 } |
| 3361 } |
| 3362 } |
| 3363 } |
| 3364 } |
| 3365 } |
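|        // For every slot that was proven to hold the same constant at all throwing
|        // instructions, replace the catch-entry parameter with a fresh copy of
|        // that constant.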
| 3366 for (intptr_t j = 0; j < idefs->length(); ++j) { |
| 3367 if (cdefs[j] != NULL && cdefs[j]->IsConstant()) { |
| 3368 // TODO(fschneider): Use constants from the constant pool. |
| 3369 Definition* old = (*idefs)[j]; |
| 3370 ConstantInstr* orig = cdefs[j]->AsConstant(); |
| 3371 ConstantInstr* copy = |
| 3372 new(flow_graph->zone()) ConstantInstr(orig->value()); |
| 3373 copy->set_ssa_temp_index(flow_graph->alloc_ssa_temp_index()); |
| 3374 old->ReplaceUsesWith(copy); |
| 3375 (*idefs)[j] = copy; |
| 3376 } |
| 3377 } |
| 3378 } |
| 3379 } |
| 3380 |
| 3381 |
| 3382 // Returns true iff this definition is used in a non-phi instruction. |
| 3383 static bool HasRealUse(Definition* def) { |
| 3384 // Environment uses are real (non-phi) uses. |
| 3385 if (def->env_use_list() != NULL) return true; |
| 3386 |
| 3387 for (Value::Iterator it(def->input_use_list()); |
| 3388 !it.Done(); |
| 3389 it.Advance()) { |
| 3390 if (!it.Current()->instruction()->IsPhi()) return true; |
| 3391 } |
| 3392 return false; |
| 3393 } |
| 3394 |
| 3395 |
| 3396 void DeadCodeElimination::EliminateDeadPhis(FlowGraph* flow_graph) { |
| 3397 GrowableArray<PhiInstr*> live_phis; |
| 3398 for (BlockIterator b = flow_graph->postorder_iterator(); |
| 3399 !b.Done(); |
| 3400 b.Advance()) { |
| 3401 JoinEntryInstr* join = b.Current()->AsJoinEntry(); |
| 3402 if (join != NULL) { |
| 3403 for (PhiIterator it(join); !it.Done(); it.Advance()) { |
| 3404 PhiInstr* phi = it.Current(); |
| 3405         // Phis that have real (non-phi) uses and phis inside try blocks are
| 3406         // marked as live.
| 3407 if (HasRealUse(phi) || join->InsideTryBlock()) { |
| 3408 live_phis.Add(phi); |
| 3409 phi->mark_alive(); |
| 3410 } else { |
| 3411 phi->mark_dead(); |
| 3412 } |
| 3413 } |
| 3414 } |
| 3415 } |
| 3416 |
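|      // Propagate liveness backwards through phi inputs: any phi that feeds a
|      // live phi is marked live as well.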
| 3417 while (!live_phis.is_empty()) { |
| 3418 PhiInstr* phi = live_phis.RemoveLast(); |
| 3419 for (intptr_t i = 0; i < phi->InputCount(); i++) { |
| 3420 Value* val = phi->InputAt(i); |
| 3421 PhiInstr* used_phi = val->definition()->AsPhi(); |
| 3422 if ((used_phi != NULL) && !used_phi->is_alive()) { |
| 3423 used_phi->mark_alive(); |
| 3424 live_phis.Add(used_phi); |
| 3425 } |
| 3426 } |
| 3427 } |
| 3428 |
| 3429 for (BlockIterator it(flow_graph->postorder_iterator()); |
| 3430 !it.Done(); |
| 3431 it.Advance()) { |
| 3432 JoinEntryInstr* join = it.Current()->AsJoinEntry(); |
| 3433 if (join != NULL) { |
| 3434 if (join->phis_ == NULL) continue; |
| 3435 |
| 3436 // Eliminate dead phis and compact the phis_ array of the block. |
| 3437 intptr_t to_index = 0; |
| 3438 for (intptr_t i = 0; i < join->phis_->length(); ++i) { |
| 3439 PhiInstr* phi = (*join->phis_)[i]; |
| 3440 if (phi != NULL) { |
| 3441 if (!phi->is_alive()) { |
| 3442 phi->ReplaceUsesWith(flow_graph->constant_null()); |
| 3443 phi->UnuseAllInputs(); |
| 3444 (*join->phis_)[i] = NULL; |
| 3445 if (FLAG_trace_optimization) { |
| 3446 THR_Print("Removing dead phi v%" Pd "\n", phi->ssa_temp_index()); |
| 3447 } |
| 3448 } else if (phi->IsRedundant()) { |
| 3449 phi->ReplaceUsesWith(phi->InputAt(0)->definition()); |
| 3450 phi->UnuseAllInputs(); |
| 3451 (*join->phis_)[i] = NULL; |
| 3452 if (FLAG_trace_optimization) { |
| 3453 THR_Print("Removing redundant phi v%" Pd "\n", |
| 3454 phi->ssa_temp_index()); |
| 3455 } |
| 3456 } else { |
| 3457 (*join->phis_)[to_index++] = phi; |
| 3458 } |
| 3459 } |
| 3460 } |
| 3461 if (to_index == 0) { |
| 3462 join->phis_ = NULL; |
| 3463 } else { |
| 3464 join->phis_->TruncateTo(to_index); |
| 3465 } |
| 3466 } |
| 3467 } |
| 3468 } |
| 3469 |
| 3470 |
| 3471 } // namespace dart |