// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/mips64/lithium-codegen-mips64.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}


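// A gap resolver emits code for one LParallelMove: a set of moves that must
// behave as if performed simultaneously, so one move's source may be another
// move's destination. Moves are resolved depth-first, and a cycle (e.g. a
// register swap) is broken by spilling one value into a scratch register.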
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found when we reach this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}
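
// Example: for the parallel move {a0 -> a1, #1 -> a0}, the first loop above
// emits only a0 -> a1; the constant load #1 -> a0 is deferred to the second
// loop, which keeps a0 usable while the rest of the move graph is resolved.
// (Register names are illustrative.)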


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
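
// For example, a move whose source and destination are the same register, or
// whose destination is ignored and unallocated, is redundant and never enters
// the worklist.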


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // When doing a depth-first traversal of moves, we can only find a cycle
  // by encountering the starting move again. So by spilling the source of
  // the starting move, we break the cycle. All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination. All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require us to spill this value to the
  // stack, to free up the register.
  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack allocated local. Multiple moves can
  // be pending because this function is recursive.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    DCHECK(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}
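
// Example: the swap {a0 -> a1, a1 -> a0}. PerformMove(0) marks a0 -> a1 as
// pending and recurses into a1 -> a0, which is blocked by the pending root
// move; BreakCycle then saves a1 in kLithiumScratchReg and eliminates that
// move. The root move a0 -> a1 is emitted, and Resolve() calls RestoreValue()
// to write the saved value into a0. (Register names are illustrative.)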


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

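// In the emission code below, '__' abbreviates the macro assembler, per the
// usual V8 codegen convention.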
#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index_]. After performing all moves in the tree rooted
  // in that move, we save the value to that source.
  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
  DCHECK(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}
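
// Only one cycle can be in flight at a time: BreakCycle() asserts !in_cycle_,
// and Resolve() calls RestoreValue() before processing the next root move, so
// one general-purpose and one double scratch register suffice.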


void LGapResolver::RestoreValue() {
  DCHECK(in_cycle_);
  DCHECK(saved_destination_ != NULL);

  // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
  } else if (saved_destination_->IsStackSlot()) {
    __ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kLithiumScratchDouble);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ sdc1(kLithiumScratchDouble,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      DCHECK(destination->IsStackSlot());
      __ sd(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ld(cgen_->ToRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsInt16Encodable()) {
          // Load/store offsets must fit in a signed 16-bit immediate; for a
          // larger destination offset the macro assembler computes the
          // address in 'at', so 'at' is overwritten while saving the value
          // to the destination and we can't use it to carry the value. It
          // is OK if the read from the source destroys 'at', since that
          // clobber happens before the loaded value lands in 'at'.
          // This uses only a single reg of the double reg-pair.
          __ ldc1(kLithiumScratchDouble, source_operand);
          __ sdc1(kLithiumScratchDouble, destination_operand);
        } else {
          __ ld(at, source_operand);
          __ sd(at, destination_operand);
        }
      } else {
        __ ld(kLithiumScratchReg, source_operand);
        __ sd(kLithiumScratchReg, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmi(constant_source)) {
        __ li(dst, Operand(cgen_->ToSmi(constant_source)));
      } else if (cgen_->IsInteger32(constant_source)) {
        __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
      } else {
        __ li(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Move(result, v);
    } else {
      DCHECK(destination->IsStackSlot());
      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
      if (cgen_->IsSmi(constant_source)) {
        __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
      } else if (cgen_->IsInteger32(constant_source)) {
        __ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
      } else {
        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
      }
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ sdc1(source_register, destination_operand);
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kLithiumScratchDouble was used to break the cycle, but
        // kLithiumScratchReg is free, so copy the double as two 32-bit
        // words through the general-purpose scratch register.
        MemOperand source_high_operand =
            cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
        __ lw(kLithiumScratchReg, source_high_operand);
        __ sw(kLithiumScratchReg, destination_high_operand);
      } else {
        __ ldc1(kLithiumScratchDouble, source_operand);
        __ sdc1(kLithiumScratchDouble, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}
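
// EmitMove covers every (source, destination) pair the register allocator can
// produce; combinations not handled above (e.g. a double register moved to a
// plain stack slot) are caught by the DCHECKs or by UNREACHABLE().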


#undef __

}  // namespace internal
}  // namespace v8