Chromium Code Reviews

Unified diff: src/a64/lithium-gap-resolver-a64.cc

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS (created 6 years, 10 months ago)
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
 //       with the distribution.
 //     * Neither the name of Google Inc. nor the names of its
 //       contributors may be used to endorse or promote products derived
 //       from this software without specific prior written permission.
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 #include "v8.h"

-#include "arm/lithium-gap-resolver-arm.h"
-#include "arm/lithium-codegen-arm.h"
+#include "a64/lithium-gap-resolver-a64.h"
+#include "a64/lithium-codegen-a64.h"

 namespace v8 {
 namespace internal {
-static const Register kSavedValueRegister = { 9 };
+// We use the root register to spill a value while breaking a cycle in
+// parallel moves. We don't need access to roots while resolving the move
+// list, and using the root register has two advantages:
+//  - It is not in the Crankshaft allocatable registers list, so it can't
+//    interfere with any of the moves we are resolving.
+//  - We don't need to push it on the stack, as we can reload it with its
+//    value once we have resolved a cycle.
+#define kSavedValue root
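
To see the scheme in action: breaking the two-move cycle {x0 -> x1, x1 -> x0} produces, in macro-assembler terms, something like the following (an illustrative sketch derived from the code below, not lines from the patch; the register names are examples):

    __ Mov(root, x1);             // BreakCycle(): spill the source of the
                                  //   cycle-closing move into kSavedValue.
    __ Mov(x1, x0);               // EmitMove(): the starting move is unblocked.
    __ Mov(x0, root);             // RestoreValue(): complete the broken move.
    __ InitializeRootRegister();  // End of Resolve(): reload the root register.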

 LGapResolver::LGapResolver(LCodeGen* owner)
     : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
-      saved_destination_(NULL) { }
+      saved_destination_(NULL), need_to_restore_root_(false) { }


+#define __ ACCESS_MASM(cgen_->masm())
+
 void LGapResolver::Resolve(LParallelMove* parallel_move) {
   ASSERT(moves_.is_empty());
+
   // Build up a worklist of moves.
   BuildInitialMoveList(parallel_move);

   for (int i = 0; i < moves_.length(); ++i) {
     LMoveOperands move = moves_[i];
+
     // Skip constants to perform them last. They don't block other moves
     // and skipping such moves with register destinations keeps those
     // registers free for the whole algorithm.
     if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
-      root_index_ = i;  // Any cycle is found when by reaching this move again.
+      root_index_ = i;  // Any cycle is found when we reach this move again.
       PerformMove(i);
-      if (in_cycle_) {
-        RestoreValue();
-      }
+      if (in_cycle_) RestoreValue();
     }
   }

   // Perform the moves with constant sources.
   for (int i = 0; i < moves_.length(); ++i) {
-    if (!moves_[i].IsEliminated()) {
-      ASSERT(moves_[i].source()->IsConstantOperand());
+    LMoveOperands move = moves_[i];
+
+    if (!move.IsEliminated()) {
+      ASSERT(move.source()->IsConstantOperand());
       EmitMove(i);
     }
   }

+  if (need_to_restore_root_) {
+    ASSERT(kSavedValue.Is(root));
+    __ InitializeRootRegister();
+  }
+
   moves_.Rewind(0);
 }
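
As an illustrative walk-through (example operands, not from the patch), resolving the parallel move set {x0 -> x1, x1 -> x0, #42 -> x2} proceeds as follows:

  1. Pass one picks x0 -> x1 as the root move; the depth-first search reaches
     x1 -> x0, finds the root move pending, and breaks the cycle through
     kSavedValue (see the sketch above).
  2. The constant move #42 -> x2 is skipped in pass one and emitted in pass
     two, so x2 stays free while the cycle is being resolved.
  3. kSavedValue (the root register) was clobbered, so need_to_restore_root_
     is set and InitializeRootRegister() is emitted last.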


 void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
   // Perform a linear sweep of the moves to add them to the initial list of
   // moves to perform, ignoring any move that is redundant (the source is
   // the same as the destination, the destination is ignored and
   // unallocated, or the move was already eliminated).
   const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
   for (int i = 0; i < moves->length(); ++i) {
     LMoveOperands move = moves->at(i);
     if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
   }
   Verify();
 }


 void LGapResolver::PerformMove(int index) {
   // Each call to this function performs a move and deletes it from the move
   // graph. We first recursively perform any move blocking this one. We
   // mark a move as "pending" on entry to PerformMove in order to detect
   // cycles in the move graph.
+  LMoveOperands& current_move = moves_[index];

-  // We can only find a cycle, when doing a depth-first traversal of moves,
-  // be encountering the starting move again. So by spilling the source of
-  // the starting move, we break the cycle.  All moves are then unblocked,
-  // and the starting move is completed by writing the spilled value to
-  // its destination.  All other moves from the spilled source have been
-  // completed prior to breaking the cycle.
-  // An additional complication is that moves to MemOperands with large
-  // offsets (more than 1K or 4K) require us to spill this spilled value to
-  // the stack, to free up the register.
-  ASSERT(!moves_[index].IsPending());
-  ASSERT(!moves_[index].IsRedundant());
+  ASSERT(!current_move.IsPending());
+  ASSERT(!current_move.IsRedundant());

   // Clear this move's destination to indicate a pending move. The actual
   // destination is saved in a stack allocated local. Multiple moves can
   // be pending because this function is recursive.
-  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
-  LOperand* destination = moves_[index].destination();
-  moves_[index].set_destination(NULL);
+  ASSERT(current_move.source() != NULL);  // Otherwise it will look eliminated.
+  LOperand* destination = current_move.destination();
+  current_move.set_destination(NULL);

   // Perform a depth-first traversal of the move graph to resolve
   // dependencies. Any unperformed, unpending move with a source the same
   // as this one's destination blocks this one so recursively perform all
   // such moves.
   for (int i = 0; i < moves_.length(); ++i) {
     LMoveOperands other_move = moves_[i];
     if (other_move.Blocks(destination) && !other_move.IsPending()) {
       PerformMove(i);
       // If there is a blocking, pending move it must be moves_[root_index_]
       // and all other moves with the same source as moves_[root_index_] are
       // successfully executed (because they are cycle-free) by this loop.
     }
   }

   // We are about to resolve this move and don't need it marked as
   // pending, so restore its destination.
-  moves_[index].set_destination(destination);
+  current_move.set_destination(destination);

   // The move may be blocked on a pending move, which must be the starting move.
   // In this case, we have a cycle, and we save the source of this move to
   // a scratch register to break it.
   LMoveOperands other_move = moves_[root_index_];
   if (other_move.Blocks(destination)) {
     ASSERT(other_move.IsPending());
     BreakCycle(index);
     return;
   }

   // This move is no longer blocked.
   EmitMove(index);
 }
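
To make the recursion and the cycle breaking concrete, here is a small, self-contained C++ model of the same algorithm (an illustrative sketch, not V8 code: the Move and ToyGapResolver names are invented, registers are plain integers, and a single saved value plays the role of kSavedValue):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct Move {
      int src, dst;             // Register indices.
      bool pending = false;     // Set while the move is on the DFS stack.
      bool eliminated = false;  // Set once the move has been performed.
    };

    struct ToyGapResolver {
      std::vector<Move> moves;
      std::vector<int> regs;        // The simulated register file.
      int root_index = -1;
      bool in_cycle = false;
      int saved_value = 0;          // Stands in for kSavedValue (root).
      int saved_destination = -1;

      void Resolve() {
        for (size_t i = 0; i < moves.size(); ++i) {
          if (!moves[i].eliminated && moves[i].src != moves[i].dst) {
            root_index = static_cast<int>(i);
            PerformMove(i);
            if (in_cycle) RestoreValue();
          }
        }
      }

      void PerformMove(size_t index) {
        Move& current = moves[index];
        assert(!current.pending && !current.eliminated);
        current.pending = true;  // V8 marks this with a NULL destination.
        int destination = current.dst;

        // Recursively perform every move that blocks this one, i.e. every
        // unperformed, unpending move reading from our destination.
        for (size_t i = 0; i < moves.size(); ++i) {
          Move& other = moves[i];
          if (!other.eliminated && !other.pending && other.src == destination) {
            PerformMove(i);
          }
        }
        current.pending = false;

        // If the starting move still blocks us, we found a cycle: spill our
        // source so every move in the cycle becomes unblocked.
        Move& root = moves[root_index];
        if (&root != &current && !root.eliminated && root.src == destination) {
          assert(root.pending);
          BreakCycle(index);
          return;
        }
        EmitMove(index);
      }

      void BreakCycle(size_t index) {
        assert(!in_cycle);
        in_cycle = true;
        saved_value = regs[moves[index].src];  // Spill into "kSavedValue".
        saved_destination = moves[index].dst;
        moves[index].eliminated = true;        // Finished in RestoreValue().
      }

      void RestoreValue() {
        regs[saved_destination] = saved_value;  // Complete the broken move.
        in_cycle = false;
        saved_destination = -1;
      }

      void EmitMove(size_t index) {
        regs[moves[index].dst] = regs[moves[index].src];
        moves[index].eliminated = true;
      }
    };

    int main() {
      // A swap (a cycle of length two) plus an independent move.
      ToyGapResolver r;
      r.regs = {10, 20, 30, 0};
      r.moves = {{0, 1}, {1, 0}, {2, 3}};
      r.Resolve();
      std::printf("%d %d %d %d\n", r.regs[0], r.regs[1], r.regs[2], r.regs[3]);
      // Prints "20 10 30 30": registers 0 and 1 swapped, register 2 copied.
      return 0;
    }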


 void LGapResolver::Verify() {
 #ifdef ENABLE_SLOW_ASSERTS
   // No operand should be the destination for more than one move.
   for (int i = 0; i < moves_.length(); ++i) {
     LOperand* destination = moves_[i].destination();
     for (int j = i + 1; j < moves_.length(); ++j) {
       SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
     }
   }
 #endif
 }

-#define __ ACCESS_MASM(cgen_->masm())

 void LGapResolver::BreakCycle(int index) {
-  // We save in a register the value that should end up in the source of
-  // moves_[root_index].  After performing all moves in the tree rooted
-  // in that move, we save the value to that source.
   ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
   ASSERT(!in_cycle_);
+
+  // We use a register which is not allocatable by Crankshaft to break the
+  // cycle, to be sure it doesn't interfere with the moves we are resolving.
+  ASSERT(!kSavedValue.IsAllocatable());
+  need_to_restore_root_ = true;
+
+  // We save in a register the source of that move and we remember its
+  // destination. Then we mark this move as resolved, so the cycle is
+  // broken and we can perform the other moves.
   in_cycle_ = true;
   LOperand* source = moves_[index].source();
   saved_destination_ = moves_[index].destination();
+
   if (source->IsRegister()) {
-    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+    __ Mov(kSavedValue, cgen_->ToRegister(source));
   } else if (source->IsStackSlot()) {
-    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
+    __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
-    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
+    // TODO(all): We should use a double register to store the value, to avoid
+    // the penalty of the mov across register banks. We are going to reserve
+    // d31 to hold the 0.0 value. We could clobber this register while
+    // breaking the cycle and restore it afterwards, like we do with the root
+    // register. LGapResolver::RestoreValue() will need to be updated as well
+    // when we do that.
+    __ Fmov(kSavedValue, cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
-    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
+    __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
   }
-  // This move will be done by restoring the saved value to the destination.
+
+  // Mark this move as resolved.
+  // This move will actually be performed by moving the saved value to this
+  // move's destination in LGapResolver::RestoreValue().
   moves_[index].Eliminate();
 }


 void LGapResolver::RestoreValue() {
   ASSERT(in_cycle_);
   ASSERT(saved_destination_ != NULL);

-  // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
   if (saved_destination_->IsRegister()) {
-    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+    __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
   } else if (saved_destination_->IsStackSlot()) {
-    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+    __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
   } else if (saved_destination_->IsDoubleRegister()) {
-    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
+    __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedValue);
   } else if (saved_destination_->IsDoubleStackSlot()) {
-    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
+    __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
   }

   in_cycle_ = false;
   saved_destination_ = NULL;
 }


 void LGapResolver::EmitMove(int index) {
   LOperand* source = moves_[index].source();
   LOperand* destination = moves_[index].destination();

   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.

   if (source->IsRegister()) {
     Register source_register = cgen_->ToRegister(source);
     if (destination->IsRegister()) {
-      __ mov(cgen_->ToRegister(destination), source_register);
+      __ Mov(cgen_->ToRegister(destination), source_register);
     } else {
       ASSERT(destination->IsStackSlot());
-      __ str(source_register, cgen_->ToMemOperand(destination));
+      __ Str(source_register, cgen_->ToMemOperand(destination));
     }
+
   } else if (source->IsStackSlot()) {
     MemOperand source_operand = cgen_->ToMemOperand(source);
     if (destination->IsRegister()) {
-      __ ldr(cgen_->ToRegister(destination), source_operand);
+      __ Ldr(cgen_->ToRegister(destination), source_operand);
     } else {
       ASSERT(destination->IsStackSlot());
-      MemOperand destination_operand = cgen_->ToMemOperand(destination);
-      if (in_cycle_) {
-        if (!destination_operand.OffsetIsUint12Encodable()) {
-          // ip is overwritten while saving the value to the destination.
-          // Therefore we can't use ip. It is OK if the read from the source
-          // destroys ip, since that happens before the value is read.
-          __ vldr(kScratchDoubleReg.low(), source_operand);
-          __ vstr(kScratchDoubleReg.low(), destination_operand);
-        } else {
-          __ ldr(ip, source_operand);
-          __ str(ip, destination_operand);
-        }
-      } else {
-        __ ldr(kSavedValueRegister, source_operand);
-        __ str(kSavedValueRegister, destination_operand);
-      }
+      EmitStackSlotMove(index);
     }

   } else if (source->IsConstantOperand()) {
     LConstantOperand* constant_source = LConstantOperand::cast(source);
     if (destination->IsRegister()) {
       Register dst = cgen_->ToRegister(destination);
-      Representation r = cgen_->IsSmi(constant_source)
-          ? Representation::Smi() : Representation::Integer32();
-      if (cgen_->IsInteger32(constant_source)) {
-        __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
+      if (cgen_->IsSmi(constant_source)) {
+        __ Mov(dst, Operand(cgen_->ToSmi(constant_source)));
+      } else if (cgen_->IsInteger32Constant(constant_source)) {
+        __ Mov(dst, cgen_->ToInteger32(constant_source));
       } else {
-        __ Move(dst, cgen_->ToHandle(constant_source));
+        __ LoadObject(dst, cgen_->ToHandle(constant_source));
       }
     } else if (destination->IsDoubleRegister()) {
-      DwVfpRegister result = cgen_->ToDoubleRegister(destination);
-      double v = cgen_->ToDouble(constant_source);
-      __ Vmov(result, v, ip);
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      __ Fmov(result, cgen_->ToDouble(constant_source));
     } else {
       ASSERT(destination->IsStackSlot());
       ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
-      Representation r = cgen_->IsSmi(constant_source)
-          ? Representation::Smi() : Representation::Integer32();
-      if (cgen_->IsInteger32(constant_source)) {
-        __ mov(kSavedValueRegister,
-               Operand(cgen_->ToRepresentation(constant_source, r)));
+      need_to_restore_root_ = true;
+      if (cgen_->IsSmi(constant_source)) {
+        __ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source)));
+      } else if (cgen_->IsInteger32Constant(constant_source)) {
+        __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
       } else {
-        __ Move(kSavedValueRegister,
-                cgen_->ToHandle(constant_source));
+        __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
       }
-      __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
+      __ Str(kSavedValue, cgen_->ToMemOperand(destination));
     }

   } else if (source->IsDoubleRegister()) {
-    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
+    DoubleRegister src = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
-      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
+      __ Fmov(cgen_->ToDoubleRegister(destination), src);
     } else {
       ASSERT(destination->IsDoubleStackSlot());
-      __ vstr(source_register, cgen_->ToMemOperand(destination));
+      __ Str(src, cgen_->ToMemOperand(destination));
     }

   } else if (source->IsDoubleStackSlot()) {
-    MemOperand source_operand = cgen_->ToMemOperand(source);
+    MemOperand src = cgen_->ToMemOperand(source);
     if (destination->IsDoubleRegister()) {
-      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
+      __ Ldr(cgen_->ToDoubleRegister(destination), src);
     } else {
       ASSERT(destination->IsDoubleStackSlot());
-      MemOperand destination_operand = cgen_->ToMemOperand(destination);
-      if (in_cycle_) {
-        // kSavedDoubleValueRegister was used to break the cycle,
-        // but kSavedValueRegister is free.
-        MemOperand source_high_operand =
-            cgen_->ToHighMemOperand(source);
-        MemOperand destination_high_operand =
-            cgen_->ToHighMemOperand(destination);
-        __ ldr(kSavedValueRegister, source_operand);
-        __ str(kSavedValueRegister, destination_operand);
-        __ ldr(kSavedValueRegister, source_high_operand);
-        __ str(kSavedValueRegister, destination_high_operand);
-      } else {
-        __ vldr(kScratchDoubleReg, source_operand);
-        __ vstr(kScratchDoubleReg, destination_operand);
-      }
+      EmitStackSlotMove(index);
     }
+
   } else {
     UNREACHABLE();
   }

+  // The move has been emitted, we can eliminate it.
   moves_[index].Eliminate();
 }
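
As a usage note (an invented example, not lines from the patch): a constant-to-stack-slot move is the other place that clobbers kSavedValue. Storing the Smi 42 into a stack slot would emit roughly:

    __ Mov(root, Operand(Smi::FromInt(42)));  // kSavedValue aliases root.
    __ Str(root, cgen_->ToMemOperand(destination));
    // ...and, because need_to_restore_root_ was set:
    __ InitializeRootRegister();  // Emitted once, at the end of Resolve().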


-#undef __
+void LGapResolver::EmitStackSlotMove(int index) {
+  // We need a temp register to perform a stack slot to stack slot move, and
+  // the register must not be involved in breaking cycles.
+
+  // Use the Crankshaft double scratch register as the temporary.
+  DoubleRegister temp = crankshaft_fp_scratch;
+
+  LOperand* src = moves_[index].source();
+  LOperand* dst = moves_[index].destination();
+
+  ASSERT(src->IsStackSlot());
+  ASSERT(dst->IsStackSlot());
+  __ Ldr(temp, cgen_->ToMemOperand(src));
+  __ Str(temp, cgen_->ToMemOperand(dst));
+}
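
Design note on the temp choice: the temp must survive an in-progress cycle, where kSavedValue (the root register) may already be holding the spilled value, so the FP scratch register is used instead of a general-purpose one. For a stack-to-stack move the emitted pair is roughly the following (a hedged sketch; the offsets are invented for illustration):

    __ Ldr(crankshaft_fp_scratch, MemOperand(fp, src_offset));
    __ Str(crankshaft_fp_scratch, MemOperand(fp, dst_offset));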

 } }  // namespace v8::internal