Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(342)

Side by Side Diff: src/arm/lithium-gap-resolver-arm.cc

Issue 6311010: ARM: Port new version of ParallelMove's GapResolver to ARM. Add MemOperand s... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/lithium-gap-resolver-arm.h ('k') | tools/gyp/v8.gyp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Property Changes:
Added: svn:eol-style
+ LF
OLDNEW
(Empty)
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "arm/lithium-gap-resolver-arm.h"
29 #include "arm/lithium-codegen-arm.h"
30
31 namespace v8 {
32 namespace internal {
33
// Scratch registers used by the gap resolver to hold a spilled value while
// breaking a cycle of moves: register code 9 for core values and double
// register code 0 for double values.
// NOTE(review): this assumes the register allocator never hands these codes
// out for values live across a gap — confirm against the allocator's
// register lists.
static const Register kSavedValueRegister = { 9 };
static const DoubleRegister kSavedDoubleValueRegister = { 0 };
36
// Constructs a gap resolver bound to |owner|, the code generator whose
// macro assembler is used to emit the moves.  The move worklist is
// preallocated with room for 32 entries; root_index_, in_cycle_ and
// saved_destination_ track cycle-breaking state during Resolve().
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
      saved_destination_(NULL) { }
40
41
// Emits code performing all moves of |parallel_move| as if they happened
// simultaneously.  Non-constant moves are performed first via a depth-first
// traversal with cycle breaking (BreakCycle/RestoreValue); constant-source
// moves are deferred to a second pass because they never block other moves.
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  // Reset the worklist so the resolver can be reused for the next gap.
  moves_.Rewind(0);
}
71
72
73 void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
74 // Perform a linear sweep of the moves to add them to the initial list of
75 // moves to perform, ignoring any move that is redundant (the source is
76 // the same as the destination, the destination is ignored and
77 // unallocated, or the move was already eliminated).
78 const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
79 for (int i = 0; i < moves->length(); ++i) {
80 LMoveOperands move = moves->at(i);
81 if (!move.IsRedundant()) moves_.Add(move);
82 }
83 Verify();
84 }
85
86
// Performs the move at moves_[index], first recursively performing any
// unperformed moves blocked by it, and deletes it from the move graph.
void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // We can only find a cycle, when doing a depth-first traversal of moves,
  // by encountering the starting move again.  So by spilling the source of
  // the starting move, we break the cycle.  All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination.  All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require us to spill this spilled value to
  // the stack, to free up the register.
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack allocated local.  Multiple moves can
  // be pending because this function is recursive.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}
143
144
145 void LGapResolver::Verify() {
146 #ifdef ENABLE_SLOW_ASSERTS
147 // No operand should be the destination for more than one move.
148 for (int i = 0; i < moves_.length(); ++i) {
149 LOperand* destination = moves_[i].destination();
150 for (int j = i + 1; j < moves_.length(); ++j) {
151 SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
152 }
153 }
154 #endif
155 }
156
157 #define __ ACCESS_MASM(cgen_->masm())
158
// Breaks a cycle detected at moves_[index]: spills the value currently in
// this move's source (which equals the cycle root's source) into a scratch
// register — kSavedValueRegister for core values, kSavedDoubleValueRegister
// for doubles — and eliminates the move.  RestoreValue() later writes the
// spilled value to saved_destination_, completing the cycle.
void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index].  After performing all moves in the tree rooted
  // in that move, we save the value to that source.
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}
182
183
// Completes a cycle broken by BreakCycle(): writes the value spilled into
// kSavedValueRegister / kSavedDoubleValueRegister back to the operand
// recorded in saved_destination_, then clears the cycle state.
void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ vmov(cgen_->ToDoubleRegister(saved_destination_),
            kSavedDoubleValueRegister);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ vstr(kSavedDoubleValueRegister,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}
206
207
208 void LGapResolver::EmitMove(int index) {
209 LOperand* source = moves_[index].source();
210 LOperand* destination = moves_[index].destination();
211
212 // Dispatch on the source and destination operand kinds. Not all
213 // combinations are possible.
214
215 if (source->IsRegister()) {
216 Register source_register = cgen_->ToRegister(source);
217 if (destination->IsRegister()) {
218 __ mov(cgen_->ToRegister(destination), source_register);
219 } else {
220 ASSERT(destination->IsStackSlot());
221 __ str(source_register, cgen_->ToMemOperand(destination));
222 }
223
224 } else if (source->IsStackSlot()) {
225 MemOperand source_operand = cgen_->ToMemOperand(source);
226 if (destination->IsRegister()) {
227 __ ldr(cgen_->ToRegister(destination), source_operand);
228 } else {
229 ASSERT(destination->IsStackSlot());
230 MemOperand destination_operand = cgen_->ToMemOperand(destination);
231 if (in_cycle_) {
232 if (!destination_operand.OffsetIsUint12Encodable()) {
233 // ip is overwritten while saving the value to the destination.
234 // Therefore we can't use ip. It is OK if the read from the source
235 // destroys ip, since that happens before the value is read.
236 __ vldr(kSavedDoubleValueRegister.low(), source_operand);
237 __ vstr(kSavedDoubleValueRegister.low(), destination_operand);
238 } else {
239 __ ldr(ip, source_operand);
240 __ str(ip, destination_operand);
241 }
242 } else {
243 __ ldr(kSavedValueRegister, source_operand);
244 __ str(kSavedValueRegister, destination_operand);
245 }
246 }
247
248 } else if (source->IsConstantOperand()) {
249 Operand source_operand = cgen_->ToOperand(source);
250 if (destination->IsRegister()) {
251 __ mov(cgen_->ToRegister(destination), source_operand);
252 } else {
253 ASSERT(destination->IsStackSlot());
254 ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
255 MemOperand destination_operand = cgen_->ToMemOperand(destination);
256 __ mov(kSavedValueRegister, source_operand);
257 __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
258 }
259
260 } else if (source->IsDoubleRegister()) {
261 DoubleRegister source_register = cgen_->ToDoubleRegister(source);
262 if (destination->IsDoubleRegister()) {
263 __ vmov(cgen_->ToDoubleRegister(destination), source_register);
264 } else {
265 ASSERT(destination->IsDoubleStackSlot());
266 MemOperand destination_operand = cgen_->ToMemOperand(destination);
267 __ vstr(source_register, destination_operand);
268 }
269
270 } else if (source->IsDoubleStackSlot()) {
271 MemOperand source_operand = cgen_->ToMemOperand(source);
272 if (destination->IsDoubleRegister()) {
273 __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
274 } else {
275 ASSERT(destination->IsDoubleStackSlot());
276 MemOperand destination_operand = cgen_->ToMemOperand(destination);
277 if (in_cycle_) {
278 // kSavedDoubleValueRegister was used to break the cycle,
279 // but kSavedValueRegister is free.
280 MemOperand source_high_operand =
281 cgen_->ToHighMemOperand(source);
282 MemOperand destination_high_operand =
283 cgen_->ToHighMemOperand(destination);
284 __ ldr(kSavedValueRegister, source_operand);
285 __ str(kSavedValueRegister, destination_operand);
286 __ ldr(kSavedValueRegister, source_high_operand);
287 __ str(kSavedValueRegister, destination_high_operand);
288 } else {
289 __ vldr(kSavedDoubleValueRegister, source_operand);
290 __ vstr(kSavedDoubleValueRegister, destination_operand);
291 }
292 }
293 } else {
294 UNREACHABLE();
295 }
296
297 moves_[index].Eliminate();
298 }
299
300
301 #undef __
302
303 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm/lithium-gap-resolver-arm.h ('k') | tools/gyp/v8.gyp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698