OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" |
10 #include "vm/longjump.h" | 10 #include "vm/longjump.h" |
(...skipping 17 matching lines...) Expand all Loading... |
28 if (kind_ == Immediate) { | 28 if (kind_ == Immediate) { |
29 uint32_t offset = encoding_ & kOffset12Mask; | 29 uint32_t offset = encoding_ & kOffset12Mask; |
30 ASSERT(offset < 256); | 30 ASSERT(offset < 256); |
31 return (encoding_ & ~kOffset12Mask) | B22 | ((offset & 0xf0) << 4) | | 31 return (encoding_ & ~kOffset12Mask) | B22 | ((offset & 0xf0) << 4) | |
32 (offset & 0xf); | 32 (offset & 0xf); |
33 } | 33 } |
34 ASSERT(kind_ == IndexRegister); | 34 ASSERT(kind_ == IndexRegister); |
35 return encoding_; | 35 return encoding_; |
36 } | 36 } |
37 | 37 |
38 | |
// Alternate encoding of this address as used by the VFP-style load/store
// emitters: the byte offset must be a non-negative multiple of 4 no larger
// than 1020 and is stored, divided by 4, in the low 8 bits. Only plain
// positive/negative offset modes (no writeback) are representable.
uint32_t Address::vencoding() const {
  ASSERT(kind_ == Immediate);  // Register-offset addresses are not encodable.
  uint32_t offset = encoding_ & kOffset12Mask;
  ASSERT(offset < (1 << 10));  // In the range 0 to +1020.
  ASSERT(Utils::IsAligned(offset, 4));  // Multiple of 4.
  int mode = encoding_ & ((8 | 4 | 1) << 21);  // Extract addressing-mode bits.
  ASSERT((mode == Offset) || (mode == NegOffset));
  // Keep the base register field; store the scaled offset in the low byte.
  uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
  if (mode == Offset) {
    vencoding |= 1 << 23;  // Positive offset: the offset is added to the base.
  }
  return vencoding;
}
52 | 51 |
53 | |
54 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { | 52 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { |
55 ASSERT(Utils::IsAligned(data, 4)); | 53 ASSERT(Utils::IsAligned(data, 4)); |
56 ASSERT(Utils::IsAligned(length, 4)); | 54 ASSERT(Utils::IsAligned(length, 4)); |
57 const uword end = data + length; | 55 const uword end = data + length; |
58 while (data < end) { | 56 while (data < end) { |
59 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction; | 57 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction; |
60 data += 4; | 58 data += 4; |
61 } | 59 } |
62 } | 60 } |
63 | 61 |
64 | |
// Appends one 32-bit instruction word to the assembler buffer.
void Assembler::Emit(int32_t value) {
  // Scoped guard that grows the buffer first if it is full.
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}
69 | 66 |
70 | |
// Emits an ARM data-processing instruction (instruction types 0 and 1).
//   cond    condition field; must be a real condition.
//   type    operand form, as reported by Operand::type().
//   opcode  the data-processing opcode (AND, ADD, MOV, ...).
//   set_cc  1 to set the S bit (update condition flags), 0 otherwise.
//   rn      first-operand register field.
//   rd      destination register field.
//   o       pre-encoded second operand (immediate or shifted register).
void Assembler::EmitType01(Condition cond,
                           int type,
                           Opcode opcode,
                           int set_cc,
                           Register rn,
                           Register rd,
                           Operand o) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     type << kTypeShift |
                     static_cast<int32_t>(opcode) << kOpcodeShift |
                     set_cc << kSShift | static_cast<int32_t>(rn) << kRnShift |
                     static_cast<int32_t>(rd) << kRdShift | o.encoding();
  Emit(encoding);
}
87 | 83 |
88 | |
// Emits a type-5 instruction (branch / branch-with-link). The byte offset
// is folded into the instruction's branch-offset field by
// EncodeBranchOffset; link selects BL over B.
void Assembler::EmitType5(Condition cond, int32_t offset, bool link) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     5 << kTypeShift | (link ? 1 : 0) << kLinkShift;
  Emit(Assembler::EncodeBranchOffset(offset, encoding));
}
95 | 90 |
96 | |
// Emits a single word/byte data-transfer instruction.
//   load  true for a load (L bit), false for a store.
//   byte  true for the byte-sized variant (B bit), false for word.
// B25 is set for register-offset addresses, clear for immediate offsets.
void Assembler::EmitMemOp(Condition cond,
                          bool load,
                          bool byte,
                          Register rd,
                          Address ad) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  // Using rd as the (written-back) base register is architecturally
  // unpredictable.
  ASSERT(!ad.has_writeback() || (ad.rn() != rd));  // Unpredictable.

  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B26 |
                     (ad.kind() == Address::Immediate ? 0 : B25) |
                     (load ? L : 0) | (byte ? B : 0) |
                     (static_cast<int32_t>(rd) << kRdShift) | ad.encoding();
  Emit(encoding);
}
112 | 106 |
113 | |
// Emits an addressing-mode-3 data transfer (halfword / signed-byte /
// doubleword forms). mode carries the operation-specific bits (L, H, S,
// B4/B6/B7, ...) supplied by the caller; the address contributes its
// mode-3 encoding.
void Assembler::EmitMemOpAddressMode3(Condition cond,
                                      int32_t mode,
                                      Register rd,
                                      Address ad) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | mode |
                     (static_cast<int32_t>(rd) << kRdShift) | ad.encoding3();
  Emit(encoding);
}
124 | 117 |
125 | |
// Emits a block data-transfer instruction (LDM/STM family).
//   am    block addressing mode (increment/decrement, before/after, etc.).
//   load  true for LDM, false for STM.
//   base  base address register.
//   regs  bitmask of the registers to transfer (low 16 bits).
void Assembler::EmitMultiMemOp(Condition cond,
                               BlockAddressMode am,
                               bool load,
                               Register base,
                               RegList regs) {
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     am | (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) | regs;
  Emit(encoding);
}
138 | 130 |
139 | |
// Emits a MOV-with-shift: rd <- rm shifted (per `opcode`) by the immediate
// amount carried in o. o must be the immediate-shift operand form
// (o.type() == 1).
void Assembler::EmitShiftImmediate(Condition cond,
                                   Shift opcode,
                                   Register rd,
                                   Register rm,
                                   Operand o) {
  ASSERT(cond != kNoCondition);
  ASSERT(o.type() == 1);
  int32_t encoding =
      static_cast<int32_t>(cond) << kConditionShift |
      static_cast<int32_t>(MOV) << kOpcodeShift |
      static_cast<int32_t>(rd) << kRdShift | o.encoding() << kShiftImmShift |
      static_cast<int32_t>(opcode) << kShiftShift | static_cast<int32_t>(rm);
  Emit(encoding);
}
154 | 145 |
155 | |
// Emits a MOV-with-register-shift: rd <- rm shifted (per `opcode`) by the
// register amount carried in o. o must be the register operand form
// (o.type() == 0); B4 selects the register-shift encoding.
void Assembler::EmitShiftRegister(Condition cond,
                                  Shift opcode,
                                  Register rd,
                                  Register rm,
                                  Operand o) {
  ASSERT(cond != kNoCondition);
  ASSERT(o.type() == 0);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     o.encoding() << kShiftRegisterShift |
                     static_cast<int32_t>(opcode) << kShiftShift | B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
171 | 161 |
172 | |
// --- Data-processing instructions -----------------------------------------
// Thin wrappers that forward to EmitType01 with the matching opcode.
// The "s" variants pass set_cc = 1 so the instruction updates the condition
// flags. For compare/test ops (tst, teq, cmp, cmn) the rd field is unused
// and R0 is passed as a placeholder; likewise rn is unused (R0) for the
// single-operand mov/mvn forms.

void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), AND, 0, rn, rd, o);
}

void Assembler::eor(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), EOR, 0, rn, rd, o);
}

void Assembler::sub(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SUB, 0, rn, rd, o);
}

void Assembler::rsb(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSB, 0, rn, rd, o);
}

void Assembler::rsbs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSB, 1, rn, rd, o);
}

void Assembler::add(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADD, 0, rn, rd, o);
}

void Assembler::adds(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADD, 1, rn, rd, o);
}

void Assembler::subs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SUB, 1, rn, rd, o);
}

void Assembler::adc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADC, 0, rn, rd, o);
}

void Assembler::adcs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADC, 1, rn, rd, o);
}

void Assembler::sbc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SBC, 0, rn, rd, o);
}

void Assembler::sbcs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SBC, 1, rn, rd, o);
}

void Assembler::rsc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSC, 0, rn, rd, o);
}

// Compare/test instructions: flags only, so set_cc is always 1 and rd is a
// placeholder.
void Assembler::tst(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), TST, 1, rn, R0, o);
}

void Assembler::teq(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), TEQ, 1, rn, R0, o);
}

void Assembler::cmp(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), CMP, 1, rn, R0, o);
}

void Assembler::cmn(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), CMN, 1, rn, R0, o);
}

void Assembler::orr(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ORR, 0, rn, rd, o);
}

void Assembler::orrs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ORR, 1, rn, rd, o);
}

// Single-operand moves: rn is unused and passed as R0.
void Assembler::mov(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MOV, 0, R0, rd, o);
}

void Assembler::movs(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MOV, 1, R0, rd, o);
}

void Assembler::bic(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), BIC, 0, rn, rd, o);
}

void Assembler::bics(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), BIC, 1, rn, rd, o);
}

void Assembler::mvn(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MVN, 0, R0, rd, o);
}

void Assembler::mvns(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MVN, 1, R0, rd, o);
}
294 | 261 |
295 | |
// CLZ: count the leading zeros of rm into rd. Neither register may be PC
// (architecturally disallowed).
void Assembler::clz(Register rd, Register rm, Condition cond) {
  ASSERT(rd != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(rd != PC);
  ASSERT(rm != PC);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
                     B22 | B21 | (0xf << 16) |
                     (static_cast<int32_t>(rd) << kRdShift) | (0xf << 8) | B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
308 | 274 |
309 | |
// MOVW: move a 16-bit immediate into rd. The immediate is split into a
// 4-bit high field (bits 16-19) and a 12-bit low field of the encoding.
void Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 |
                     ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}
317 | 282 |
318 | |
// MOVT: like movw but with B22 set, selecting the variant that writes the
// top halfword of rd. Immediate split as in movw.
void Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift | B25 | B24 |
                     B22 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}
326 | 290 |
327 | |
// Shared encoder for the multiply family. opcode supplies the op-selecting
// bits (B20-B23 combinations chosen by the callers). Note the register
// parameters here name the *encoding fields* (rd, rn, rm, rs); callers
// shuffle their logical registers into these fields — see the comments at
// each call site.
void Assembler::EmitMulOp(Condition cond,
                          int32_t opcode,
                          Register rd,
                          Register rn,
                          Register rm,
                          Register rs) {
  ASSERT(rd != kNoRegister);
  ASSERT(rn != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(rs != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) |
                     (static_cast<int32_t>(rn) << kRnShift) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (static_cast<int32_t>(rs) << kRsShift) | B7 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
346 | 309 |
347 | |
// MUL: rd <- rn * rm (low 32 bits).
void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
  EmitMulOp(cond, 0, R0, rd, rn, rm);
}

// Like mul, but sets condition flags.
void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) {
  EmitMulOp(cond, B20, R0, rd, rn, rm);
}
358 | 319 |
359 | |
// MLA: multiply-accumulate.
void Assembler::mla(Register rd,
                    Register rn,
                    Register rm,
                    Register ra,
                    Condition cond) {
  // rd <- ra + rn * rm.
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B21, ra, rd, rn, rm);
}
369 | 329 |
370 | |
// MLS: multiply-subtract. Uses the single MLS instruction on ARMv7; on
// older targets it is synthesized with mul + sub, clobbering the scratch
// register IP.
void Assembler::mls(Register rd,
                    Register rn,
                    Register rm,
                    Register ra,
                    Condition cond) {
  // rd <- ra - rn * rm.
  if (TargetCPUFeatures::arm_version() == ARMv7) {
    // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
    EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
  } else {
    // Compute rn * rm into IP, then subtract from ra.
    mul(IP, rn, rm, cond);
    sub(rd, ra, Operand(IP), cond);
  }
}
385 | 344 |
386 | |
// SMULL: signed 64-bit multiply; rd_hi:rd_lo <- rn * rm.
void Assembler::smull(Register rd_lo,
                      Register rd_hi,
                      Register rn,
                      Register rm,
                      Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);
}

// UMULL: unsigned 64-bit multiply; rd_hi:rd_lo <- rn * rm.
void Assembler::umull(Register rd_lo,
                      Register rd_hi,
                      Register rn,
                      Register rm,
                      Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
}

// UMLAL: unsigned 64-bit multiply-accumulate; rd_hi:rd_lo += rn * rm.
void Assembler::umlal(Register rd_lo,
                      Register rd_hi,
                      Register rn,
                      Register rm,
                      Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm);
}
415 | 371 |
416 | |
// UMAAL: rd_hi:rd_lo <- rn * rm + rd_lo + rd_hi (unconditional).
// On ARMv5TE there is no UMAAL, so it is synthesized below; that path
// clobbers IP, hence no operand may be IP.
void Assembler::umaal(Register rd_lo,
                      Register rd_hi,
                      Register rn,
                      Register rm) {
  ASSERT(rd_lo != IP);
  ASSERT(rd_hi != IP);
  ASSERT(rn != IP);
  ASSERT(rm != IP);
  if (TargetCPUFeatures::arm_version() != ARMv5TE) {
    // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
    EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm);
  } else {
    // IP:rd_lo = rn * rm + rd_lo, then add the old rd_hi with carry
    // propagating into the high word.
    mov(IP, Operand(0));
    umlal(rd_lo, IP, rn, rm);
    adds(rd_lo, rd_lo, Operand(rd_hi));
    adc(rd_hi, IP, Operand(0));
  }
}
435 | 390 |
436 | |
// Shared encoder for sdiv/udiv; opcode selects between them (B21 for
// udiv). Only valid when the target supports hardware integer division.
void Assembler::EmitDivOp(Condition cond,
                          int32_t opcode,
                          Register rd,
                          Register rn,
                          Register rm) {
  ASSERT(TargetCPUFeatures::integer_division_supported());
  ASSERT(rd != kNoRegister);
  ASSERT(rn != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) |
                     (static_cast<int32_t>(rn) << kDivRnShift) |
                     (static_cast<int32_t>(rd) << kDivRdShift) | B26 | B25 |
                     B24 | B20 | B4 | (static_cast<int32_t>(rm) << kDivRmShift);
  Emit(encoding);
}
453 | 407 |
454 | |
// SDIV: signed integer divide, rd <- rn / rm.
void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
  EmitDivOp(cond, 0, rd, rn, rm);
}

// UDIV: unsigned integer divide, rd <- rn / rm.
void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
  EmitDivOp(cond, B21, rd, rn, rm);
}
463 | 415 |
464 | |
// --- Single-register loads and stores -------------------------------------
// Word/byte forms go through EmitMemOp (load/byte flags); halfword and
// signed forms go through EmitMemOpAddressMode3 with the appropriate
// L/H/signed bits.

// LDR: load a word into rd.
void Assembler::ldr(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, false, rd, ad);
}

// STR: store a word from rd.
void Assembler::str(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, false, rd, ad);
}

// LDRB: load an unsigned byte.
void Assembler::ldrb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, true, rd, ad);
}

// STRB: store a byte.
void Assembler::strb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, true, rd, ad);
}

// LDRH: load an unsigned halfword.
void Assembler::ldrh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
}

// STRH: store a halfword.
void Assembler::strh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
}

// LDRSB: load a sign-extended byte.
void Assembler::ldrsb(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
}

// LDRSH: load a sign-extended halfword.
void Assembler::ldrsh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
}
503 | 447 |
504 | |
// Loads the register pair rd, rd2 from [rn + offset]. rd must be even and
// rd2 the next register. On ARMv5TE (no doubleword form here) this falls
// back to two single-word loads.
void Assembler::ldrd(Register rd,
                     Register rd2,
                     Register rn,
                     int32_t offset,
                     Condition cond) {
  ASSERT((rd % 2) == 0);
  ASSERT(rd2 == rd + 1);
  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    ldr(rd, Address(rn, offset), cond);
    ldr(rd2, Address(rn, offset + kWordSize), cond);
  } else {
    EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset));
  }
}
519 | 462 |
520 | |
// Stores the register pair rd, rd2 to [rn + offset]. rd must be even and
// rd2 the next register. On ARMv5TE this falls back to two single-word
// stores.
void Assembler::strd(Register rd,
                     Register rd2,
                     Register rn,
                     int32_t offset,
                     Condition cond) {
  ASSERT((rd % 2) == 0);
  ASSERT(rd2 == rd + 1);
  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    str(rd, Address(rn, offset), cond);
    str(rd2, Address(rn, offset + kWordSize), cond);
  } else {
    EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset));
  }
}
535 | 477 |
536 | |
// LDM: load multiple registers (per the regs bitmask) from memory at base,
// using block addressing mode am. The register list must be non-empty.
void Assembler::ldm(BlockAddressMode am,
                    Register base,
                    RegList regs,
                    Condition cond) {
  ASSERT(regs != 0);
  EmitMultiMemOp(cond, am, true, base, regs);
}

// STM: store multiple registers; see ldm.
void Assembler::stm(BlockAddressMode am,
                    Register base,
                    RegList regs,
                    Condition cond) {
  ASSERT(regs != 0);
  EmitMultiMemOp(cond, am, false, base, regs);
}
553 | 493 |
554 | |
// Load-exclusive: rt <- [rn], marking the address for exclusive access
// (paired with strex). Disallowed on ARMv5TE, enforced below.
void Assembler::ldrex(Register rt, Register rn, Condition cond) {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  ASSERT(rn != kNoRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(cond != kNoCondition);
  // B24|B23|L plus the fixed low bits (B11..B8, B7, B4, B3..B0) form the
  // LDREX opcode; rn and rt occupy their dedicated fields.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
                     B23 | L | (static_cast<int32_t>(rn) << kLdExRnShift) |
                     (static_cast<int32_t>(rt) << kLdExRtShift) | B11 | B10 |
                     B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
  Emit(encoding);
}
566 | 505 |
567 | |
// Store-exclusive: attempts [rn] <- rt; rd receives the status result of
// the exclusive store (paired with ldrex). Disallowed on ARMv5TE.
void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  ASSERT(rn != kNoRegister);
  ASSERT(rd != kNoRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(cond != kNoCondition);
  // B24|B23 without L plus fixed bits B11..B7 and B4 form the STREX opcode;
  // rn, rd, and rt occupy their dedicated fields.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
                     B23 | (static_cast<int32_t>(rn) << kStrExRnShift) |
                     (static_cast<int32_t>(rd) << kStrExRdShift) | B11 | B10 |
                     B9 | B8 | B7 | B4 |
                     (static_cast<int32_t>(rt) << kStrExRtShift);
  Emit(encoding);
}
581 | 519 |
582 | |
// Clears any outstanding exclusive-access reservation (CLREX).
// Uses the special (unconditional) condition encoding. Not on ARMv5TE.
void Assembler::clrex() {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  // Fixed CLREX pattern: SBO fields 0xff<<12 and 0xf in the low nibble.
  int32_t encoding = (kSpecialCondition << kConditionShift) | B26 | B24 | B22 |
                     B21 | B20 | (0xff << 12) | B4 | 0xf;
  Emit(encoding);
}
589 | 526 |
590 | |
// Emits a one-instruction no-op, conditional on |cond|.
void Assembler::nop(Condition cond) {
  ASSERT(cond != kNoCondition);
  // Fixed no-op pattern in the conditional instruction space.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B25 |
                     B24 | B21 | (0xf << 12);
  Emit(encoding);
}
597 | 533 |
598 | |
// Moves core register rt into VFP single register sn (VMOV sn, rt).
// SP and PC are not valid sources for this transfer.
void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sn != kNoSRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(cond != kNoCondition);
  // The S register number is split: upper 4 bits go in the Vn field (B16),
  // the low bit in the N field (B7).
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B25 | ((static_cast<int32_t>(sn) >> 1) * B16) |
                     (static_cast<int32_t>(rt) * B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1) * B7) | B4;
  Emit(encoding);
}
612 | 547 |
613 | |
// Moves VFP single register sn into core register rt (VMOV rt, sn).
// Differs from vmovsr only by the direction bit B20.
void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sn != kNoSRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(cond != kNoCondition);
  // S register number split across the Vn field (B16) and N bit (B7).
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B25 | B20 | ((static_cast<int32_t>(sn) >> 1) * B16) |
                     (static_cast<int32_t>(rt) * B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1) * B7) | B4;
  Emit(encoding);
}
627 | 561 |
628 | |
// Moves the core register pair rt, rt2 into the consecutive VFP single
// registers sm, sm+1 (hence sm must not be S31).
void Assembler::vmovsrr(SRegister sm,
                        Register rt,
                        Register rt2,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sm != kNoSRegister);
  ASSERT(sm != S31);  // sm+1 must also exist.
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(cond != kNoCondition);
  // Two-register transfer form (B22); sm is split across the M bit (B5)
  // and the low Vm nibble.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
                     (static_cast<int32_t>(rt) * B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
650 | 583 |
651 | |
// Moves the consecutive VFP single registers sm, sm+1 into the core
// register pair rt, rt2. rt and rt2 must be distinct (B20 = load direction).
void Assembler::vmovrrs(Register rt,
                        Register rt2,
                        SRegister sm,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sm != kNoSRegister);
  ASSERT(sm != S31);  // sm+1 must also exist.
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(rt != rt2);  // Writing the same core register twice is invalid.
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) |
                     (static_cast<int32_t>(rt) * B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
674 | 606 |
675 | |
// Moves core register rt into word |i| (0 = low, 1 = high) of VFP double
// register dn (VMOV.32 dn[i], rt).
void Assembler::vmovdr(DRegister dn, int i, Register rt, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT((i == 0) || (i == 1));  // A D register has exactly two words.
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(dn != kNoDRegister);
  ASSERT(cond != kNoCondition);
  // Word index goes in bit 21; dn is split across bit 7 (top bit) and the
  // Vn field at B16 (low four bits).
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B25 | (i * B21) | (static_cast<int32_t>(rt) * B12) |
                     B11 | B9 | B8 | ((static_cast<int32_t>(dn) >> 4) * B7) |
                     ((static_cast<int32_t>(dn) & 0xf) * B16) | B4;
  Emit(encoding);
}
690 | 621 |
691 | |
// Moves the core register pair rt (low word), rt2 (high word) into VFP
// double register dm (VMOV dm, rt, rt2).
void Assembler::vmovdrr(DRegister dm,
                        Register rt,
                        Register rt2,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dm != kNoDRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(cond != kNoCondition);
  // dm is split across the M bit (B5) and the low Vm nibble.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
                     (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4) * B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
712 | 642 |
713 | |
// Moves VFP double register dm into the core register pair rt, rt2
// (VMOV rt, rt2, dm). rt and rt2 must be distinct (B20 = load direction).
void Assembler::vmovrrd(Register rt,
                        Register rt2,
                        DRegister dm,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dm != kNoDRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(rt != rt2);  // Writing the same core register twice is invalid.
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B22 | B20 | (static_cast<int32_t>(rt2) * B16) |
                     (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4) * B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
735 | 664 |
736 | |
// Loads a single-precision value from |ad| into sd (VLDR.32).
// NOTE(review): unlike vstrs below, there is no assert rejecting a PC base —
// presumably to allow PC-relative literal loads; verify.
void Assembler::vldrs(SRegister sd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(cond != kNoCondition);
  // sd splits into the D bit (B22) and the Vd field (B12); the address
  // contributes its own VFP encoding (8-bit word offset).
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B24 | B20 | ((static_cast<int32_t>(sd) & 1) * B22) |
                     ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 |
                     ad.vencoding();
  Emit(encoding);
}
747 | 675 |
748 | |
// Stores single-precision sd to |ad| (VSTR.32). A PC base is rejected.
void Assembler::vstrs(SRegister sd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
  ASSERT(sd != kNoSRegister);
  ASSERT(cond != kNoCondition);
  // Same shape as vldrs but without the load bit B20.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B24 | ((static_cast<int32_t>(sd) & 1) * B22) |
                     ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 |
                     ad.vencoding();
  Emit(encoding);
}
760 | 687 |
761 | |
// Loads a double-precision value from |ad| into dd (VLDR.64).
void Assembler::vldrd(DRegister dd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(cond != kNoCondition);
  // dd splits into the D bit (B22, top bit) and the Vd field (B12); B8
  // selects the double-precision form.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B24 | B20 | ((static_cast<int32_t>(dd) >> 4) * B22) |
                     ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |
                     ad.vencoding();
  Emit(encoding);
}
772 | 698 |
773 | |
// Stores double-precision dd to |ad| (VSTR.64). A PC base is rejected.
void Assembler::vstrd(DRegister dd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
  ASSERT(dd != kNoDRegister);
  ASSERT(cond != kNoCondition);
  // Same shape as vldrd but without the load bit B20.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B24 | ((static_cast<int32_t>(dd) >> 4) * B22) |
                     ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |
                     ad.vencoding();
  Emit(encoding);
}
(...skipping 12 matching lines...) Expand all Loading... |
796 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters); | 721 ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters); |
797 | 722 |
798 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | | 723 int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 | |
799 B26 | B11 | B9 | am | (load ? L : 0) | | 724 B26 | B11 | B9 | am | (load ? L : 0) | |
800 (static_cast<int32_t>(base) << kRnShift) | | 725 (static_cast<int32_t>(base) << kRnShift) | |
801 ((static_cast<int32_t>(start) & 0x1) ? D : 0) | | 726 ((static_cast<int32_t>(start) & 0x1) ? D : 0) | |
802 ((static_cast<int32_t>(start) >> 1) << 12) | count; | 727 ((static_cast<int32_t>(start) >> 1) << 12) | count; |
803 Emit(encoding); | 728 Emit(encoding); |
804 } | 729 } |
805 | 730 |
806 | |
// Emits a VFP load/store-multiple covering |count| consecutive D registers
// starting at |start|, with base register |base| and block mode |am|.
void Assembler::EmitMultiVDMemOp(Condition cond,
                                 BlockAddressMode am,
                                 bool load,
                                 Register base,
                                 DRegister start,
                                 int32_t count) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(start != kNoDRegister);
  ASSERT(static_cast<int32_t>(start) + count <= kNumberOfDRegisters);
  // On ARMv5TE the low immediate bit is set, selecting a variant encoding —
  // presumably the FLDMX/FSTMX form required there; TODO confirm.
  const int armv5te = TargetCPUFeatures::arm_version() == ARMv5TE ? 1 : 0;

  // |start| splits into the D bit and the Vd field; the immediate is the
  // register count in words (count << 1 for doubles).
  int32_t encoding =
      (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B11 | B9 |
      B8 | am | (load ? L : 0) | (static_cast<int32_t>(base) << kRnShift) |
      ((static_cast<int32_t>(start) & 0x10) ? D : 0) |
      ((static_cast<int32_t>(start) & 0xf) << 12) | (count << 1) | armv5te;
  Emit(encoding);
}
827 | 751 |
828 | |
829 void Assembler::vldms(BlockAddressMode am, | 752 void Assembler::vldms(BlockAddressMode am, |
830 Register base, | 753 Register base, |
831 SRegister first, | 754 SRegister first, |
832 SRegister last, | 755 SRegister last, |
833 Condition cond) { | 756 Condition cond) { |
834 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); | 757 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
835 ASSERT(last > first); | 758 ASSERT(last > first); |
836 EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1); | 759 EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1); |
837 } | 760 } |
838 | 761 |
839 | |
840 void Assembler::vstms(BlockAddressMode am, | 762 void Assembler::vstms(BlockAddressMode am, |
841 Register base, | 763 Register base, |
842 SRegister first, | 764 SRegister first, |
843 SRegister last, | 765 SRegister last, |
844 Condition cond) { | 766 Condition cond) { |
845 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); | 767 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
846 ASSERT(last > first); | 768 ASSERT(last > first); |
847 EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1); | 769 EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1); |
848 } | 770 } |
849 | 771 |
850 | |
851 void Assembler::vldmd(BlockAddressMode am, | 772 void Assembler::vldmd(BlockAddressMode am, |
852 Register base, | 773 Register base, |
853 DRegister first, | 774 DRegister first, |
854 intptr_t count, | 775 intptr_t count, |
855 Condition cond) { | 776 Condition cond) { |
856 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); | 777 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
857 ASSERT(count <= 16); | 778 ASSERT(count <= 16); |
858 ASSERT(first + count <= kNumberOfDRegisters); | 779 ASSERT(first + count <= kNumberOfDRegisters); |
859 EmitMultiVDMemOp(cond, am, true, base, first, count); | 780 EmitMultiVDMemOp(cond, am, true, base, first, count); |
860 } | 781 } |
861 | 782 |
862 | |
863 void Assembler::vstmd(BlockAddressMode am, | 783 void Assembler::vstmd(BlockAddressMode am, |
864 Register base, | 784 Register base, |
865 DRegister first, | 785 DRegister first, |
866 intptr_t count, | 786 intptr_t count, |
867 Condition cond) { | 787 Condition cond) { |
868 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); | 788 ASSERT((am == IA) || (am == IA_W) || (am == DB_W)); |
869 ASSERT(count <= 16); | 789 ASSERT(count <= 16); |
870 ASSERT(first + count <= kNumberOfDRegisters); | 790 ASSERT(first + count <= kNumberOfDRegisters); |
871 EmitMultiVDMemOp(cond, am, false, base, first, count); | 791 EmitMultiVDMemOp(cond, am, false, base, first, count); |
872 } | 792 } |
873 | 793 |
874 | |
// Emits a three-operand single-precision VFP instruction (sd, sn, sm) with
// the given |opcode| bits merged into the common VFP data-processing frame.
void Assembler::EmitVFPsss(Condition cond,
                           int32_t opcode,
                           SRegister sd,
                           SRegister sn,
                           SRegister sm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(sn != kNoSRegister);
  ASSERT(sm != kNoSRegister);
  ASSERT(cond != kNoCondition);
  // Each 5-bit S register number is split: the top four bits go in the
  // Vd/Vn/Vm field and the low bit in the D (B22), N (B7), or M (B5) bit.
  int32_t encoding =
      (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
      B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) |
      ((static_cast<int32_t>(sn) >> 1) * B16) |
      ((static_cast<int32_t>(sd) >> 1) * B12) |
      ((static_cast<int32_t>(sn) & 1) * B7) |
      ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
894 | 813 |
895 | |
// Emits a three-operand double-precision VFP instruction (dd, dn, dm) with
// the given |opcode| bits; B8 marks the double-precision form.
void Assembler::EmitVFPddd(Condition cond,
                           int32_t opcode,
                           DRegister dd,
                           DRegister dn,
                           DRegister dm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(dn != kNoDRegister);
  ASSERT(dm != kNoDRegister);
  ASSERT(cond != kNoCondition);
  // Each 5-bit D register number is split: the low four bits go in the
  // Vd/Vn/Vm field and the top bit in the D (B22), N (B7), or M (B5) bit.
  int32_t encoding =
      (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
      B9 | B8 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dn) & 0xf) * B16) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) |
      ((static_cast<int32_t>(dn) >> 4) * B7) |
      ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
915 | 833 |
916 | |
917 void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) { | 834 void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) { |
918 EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm); | 835 EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm); |
919 } | 836 } |
920 | 837 |
921 | |
922 void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) { | 838 void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) { |
923 EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm); | 839 EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm); |
924 } | 840 } |
925 | 841 |
926 | |
// Loads the float |s_imm| into sd via the VFP 8-bit modified-immediate
// form of VMOV, if representable. Returns false when the value does not
// fit the immediate encoding or the core is not ARMv7 (which is the only
// version this path supports).
bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
  if (TargetCPUFeatures::arm_version() != ARMv7) {
    return false;
  }
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  // Representable iff the low 19 mantissa bits are zero and bits 30..25
  // match the pattern 100000 or 011111.
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
    // Pack sign (bit 31), one exponent bit (bit 29), and six bits starting
    // at bit 19 into the 8-bit immediate.
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
                   ((imm32 >> 19) & ((1 << 6) - 1));
    // imm8 splits across the Vn field (high nibble) and low nibble.
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | (imm8 & 0xf), sd,
               S0, S0);
    return true;
  }
  return false;
}
943 | 858 |
944 | |
// Loads the double |d_imm| into dd via the VFP 8-bit modified-immediate
// form of VMOV, if representable. Returns false when the value does not
// fit the immediate encoding or the core is not ARMv7.
bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
  if (TargetCPUFeatures::arm_version() != ARMv7) {
    return false;
  }
  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  // Representable iff the low 48 mantissa bits are zero and bits 62..54
  // match the pattern 100000000 or 011111111.
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
    // Pack sign (bit 63), one exponent bit (bit 61), and six bits starting
    // at bit 48 into the 8-bit immediate.
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
                   ((imm64 >> 48) & ((1 << 6) - 1));
    // imm8 splits across the Vn field (high nibble) and low nibble; B8
    // selects the double-precision form.
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | B8 | (imm8 & 0xf),
               dd, D0, D0);
    return true;
  }
  return false;
}
961 | 875 |
962 | |
963 void Assembler::vadds(SRegister sd, | 876 void Assembler::vadds(SRegister sd, |
964 SRegister sn, | 877 SRegister sn, |
965 SRegister sm, | 878 SRegister sm, |
966 Condition cond) { | 879 Condition cond) { |
967 EmitVFPsss(cond, B21 | B20, sd, sn, sm); | 880 EmitVFPsss(cond, B21 | B20, sd, sn, sm); |
968 } | 881 } |
969 | 882 |
970 | |
971 void Assembler::vaddd(DRegister dd, | 883 void Assembler::vaddd(DRegister dd, |
972 DRegister dn, | 884 DRegister dn, |
973 DRegister dm, | 885 DRegister dm, |
974 Condition cond) { | 886 Condition cond) { |
975 EmitVFPddd(cond, B21 | B20, dd, dn, dm); | 887 EmitVFPddd(cond, B21 | B20, dd, dn, dm); |
976 } | 888 } |
977 | 889 |
978 | |
979 void Assembler::vsubs(SRegister sd, | 890 void Assembler::vsubs(SRegister sd, |
980 SRegister sn, | 891 SRegister sn, |
981 SRegister sm, | 892 SRegister sm, |
982 Condition cond) { | 893 Condition cond) { |
983 EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm); | 894 EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm); |
984 } | 895 } |
985 | 896 |
986 | |
987 void Assembler::vsubd(DRegister dd, | 897 void Assembler::vsubd(DRegister dd, |
988 DRegister dn, | 898 DRegister dn, |
989 DRegister dm, | 899 DRegister dm, |
990 Condition cond) { | 900 Condition cond) { |
991 EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm); | 901 EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm); |
992 } | 902 } |
993 | 903 |
994 | |
// VFP three-operand arithmetic. Each emitter forwards its opcode bits to
// EmitVFPsss (single precision) or EmitVFPddd (double precision), which add
// the common VFP data-processing encoding.

// Single-precision multiply: sd = sn * sm.
void Assembler::vmuls(SRegister sd,
                      SRegister sn,
                      SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21, sd, sn, sm);
}

// Double-precision multiply: dd = dn * dm.
void Assembler::vmuld(DRegister dd,
                      DRegister dn,
                      DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21, dd, dn, dm);
}

// Single-precision multiply-accumulate: sd = sd + sn * sm.
// Opcode bits are all zero; the base EmitVFPsss encoding is vmla.
void Assembler::vmlas(SRegister sd,
                      SRegister sn,
                      SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, 0, sd, sn, sm);
}

// Double-precision multiply-accumulate: dd = dd + dn * dm.
void Assembler::vmlad(DRegister dd,
                      DRegister dn,
                      DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, 0, dd, dn, dm);
}

// Single-precision multiply-subtract: sd = sd - sn * sm.
void Assembler::vmlss(SRegister sd,
                      SRegister sn,
                      SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B6, sd, sn, sm);
}

// Double-precision multiply-subtract: dd = dd - dn * dm.
void Assembler::vmlsd(DRegister dd,
                      DRegister dn,
                      DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B6, dd, dn, dm);
}

// Single-precision divide: sd = sn / sm.
void Assembler::vdivs(SRegister sd,
                      SRegister sn,
                      SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B23, sd, sn, sm);
}

// Double-precision divide: dd = dn / dm.
void Assembler::vdivd(DRegister dd,
                      DRegister dn,
                      DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B23, dd, dn, dm);
}
1057 | 959 |
1058 | |
// VFP two-operand (unary) instructions. These reuse the three-operand
// emitters with S0/D0 as a dummy "n" operand; the opcode bits select the
// unary form of the encoding.

// Single-precision absolute value: sd = |sm|.
void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
}

// Double-precision absolute value: dd = |dm|.
void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
}

// Single-precision negate: sd = -sm.
void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
}

// Double-precision negate: dd = -dm.
void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
}

// Single-precision square root: sd = sqrt(sm).
void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}

// Double-precision square root: dd = sqrt(dm).
void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}
1086 | 983 |
1087 | |
// Emits a VFP instruction with a single-precision destination (sd) and a
// double-precision source (dm), e.g. the double-to-single conversions.
// The S register number is split into its low bit (placed at B22, the "D"
// bit) and its upper bits (placed in the Vd field at B12); the D register
// number is split into its top bit (B5, the "M" bit) and low four bits.
void Assembler::EmitVFPsd(Condition cond,
                          int32_t opcode,
                          SRegister sd,
                          DRegister dm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(dm != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding =
      (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
      B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) |
      ((static_cast<int32_t>(sd) >> 1) * B12) |
      ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
1103 | 999 |
1104 | |
// Emits a VFP instruction with a double-precision destination (dd) and a
// single-precision source (sm) — the mirror image of EmitVFPsd.
// The D register number is split into top bit (B22) and low four bits (Vd
// field at B12); the S register number into its low bit (B5) and upper bits
// (the Vm field in the bottom four bits).
void Assembler::EmitVFPds(Condition cond,
                          int32_t opcode,
                          DRegister dd,
                          SRegister sm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(sm != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding =
      (static_cast<int32_t>(cond) << kConditionShift) | B27 | B26 | B25 | B11 |
      B9 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) |
      ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
1120 | 1015 |
1121 | |
// VFP conversions. Naming convention: vcvt<dst><src> where s = single float,
// d = double float, i = signed int, u = unsigned int (destination letter
// first). Register classes in the signatures reflect where each value lives;
// integer values are held in S registers.

// Double -> single: sd = (float)dm.
void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
}

// Single -> double: dd = (double)sm.
void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
}

// Single -> signed int: sd = (int32_t)sm.
void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
}

// Double -> signed int: sd = (int32_t)dm.
void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
}

// Signed int -> single: sd = (float)sm.
void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
}

// Signed int -> double: dd = (double)sm.
void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
}

// Single -> unsigned int: sd = (uint32_t)sm.
void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
}

// Double -> unsigned int: sd = (uint32_t)dm.
void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
}

// Unsigned int -> single: sd = (float)sm.
void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}

// Unsigned int -> double: dd = (double)sm.
void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}
1170 | 1055 |
1171 | |
// VFP compares. Results go to the FPSCR flags; use vmstat to copy them to
// the APSR before a conditional branch.

// Compare single-precision registers: sd vs sm.
void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}

// Compare double-precision registers: dd vs dm.
void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}

// Compare single-precision sd against zero (the B16 bit selects the
// compare-with-zero form; the S0 operands are dummies).
void Assembler::vcmpsz(SRegister sd, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}

// Compare double-precision dd against zero.
void Assembler::vcmpdz(DRegister dd, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}
1190 | 1071 |
1191 | |
// Moves the FPSCR into core register rd (vmrs). With rd == APSR (r15 in the
// encoding) the FPSCR condition flags are transferred to the APSR instead.
void Assembler::vmrs(Register rd, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B27 |
                     B26 | B25 | B23 | B22 | B21 | B20 | B16 |
                     (static_cast<int32_t>(rd) * B12) | B11 | B9 | B4;
  Emit(encoding);
}

// Copies the FPSCR condition flags to the APSR (vmrs APSR_nzcv, FPSCR),
// making the result of a preceding vcmp* visible to conditional execution.
void Assembler::vmstat(Condition cond) {
  vmrs(APSR, cond);
}
1205 | 1084 |
1206 | |
// Maps an operand size to log2 of its element size in bytes, i.e. the value
// placed in the two-bit NEON "size" field (see the (sz & 0x3) * B20 uses in
// EmitSIMDqqq/EmitSIMDddd). kSWord/kDWord map to 0 — NOTE(review): float
// element types appear to encode their size via explicit opcode bits at the
// call sites; confirm against the callers.
static inline int ShiftOfOperandSize(OperandSize size) {
  switch (size) {
    case kByte:
    case kUnsignedByte:
      return 0;
    case kHalfword:
    case kUnsignedHalfword:
      return 1;
    case kWord:
    case kUnsignedWord:
      return 2;
    case kWordPair:
      return 3;
    case kSWord:
    case kDWord:
      return 0;
    default:
      UNREACHABLE();
      break;
  }

  // Not reached; the trailing return satisfies compilers that require one.
  UNREACHABLE();
  return -1;
}
1231 | 1109 |
1232 | |
// Emits a NEON (Advanced SIMD) instruction on three Q registers.
// Each Q register is encoded as its first D-register half (qd * 2); the
// doubled number is then split into a low 4-bit register field and a high
// bit (the D/N/M bits at B22/B7/B5). |size| supplies the two-bit element
// size field at B20 via ShiftOfOperandSize.
void Assembler::EmitSIMDqqq(int32_t opcode,
                            OperandSize size,
                            QRegister qd,
                            QRegister qn,
                            QRegister qm) {
  ASSERT(TargetCPUFeatures::neon_supported());
  int sz = ShiftOfOperandSize(size);
  int32_t encoding =
      (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 | B6 |
      opcode | ((sz & 0x3) * B20) |
      ((static_cast<int32_t>(qd * 2) >> 4) * B22) |
      ((static_cast<int32_t>(qn * 2) & 0xf) * B16) |
      ((static_cast<int32_t>(qd * 2) & 0xf) * B12) |
      ((static_cast<int32_t>(qn * 2) >> 4) * B7) |
      ((static_cast<int32_t>(qm * 2) >> 4) * B5) |
      (static_cast<int32_t>(qm * 2) & 0xf);
  Emit(encoding);
}
1251 | 1128 |
1252 | |
// Emits a NEON (Advanced SIMD) instruction on three D registers.
// Same field layout as EmitSIMDqqq but without the Q-to-D register doubling:
// each D register number is split into a low 4-bit field and a high bit
// (B22/B7/B5 for dd/dn/dm respectively). B6, which marks Q-register
// operation in EmitSIMDqqq, is deliberately absent here.
void Assembler::EmitSIMDddd(int32_t opcode,
                            OperandSize size,
                            DRegister dd,
                            DRegister dn,
                            DRegister dm) {
  ASSERT(TargetCPUFeatures::neon_supported());
  int sz = ShiftOfOperandSize(size);
  int32_t encoding =
      (static_cast<int32_t>(kSpecialCondition) << kConditionShift) | B25 |
      opcode | ((sz & 0x3) * B20) | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dn) & 0xf) * B16) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) |
      ((static_cast<int32_t>(dn) >> 4) * B7) |
      ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
1269 | 1145 |
1270 | |
// Register move: qd = qm, emitted as a bitwise OR of qm with itself
// (same opcode bits as vorrq, with both sources set to qm).
void Assembler::vmovq(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm);
}

// Integer add, element size given by sz: qd = qn + qm.
void Assembler::vaddqi(OperandSize sz,
                       QRegister qd,
                       QRegister qn,
                       QRegister qm) {
  EmitSIMDqqq(B11, sz, qd, qn, qm);
}

// Single-precision float add: qd = qn + qm.
void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm);
}

// Integer subtract, element size given by sz: qd = qn - qm.
void Assembler::vsubqi(OperandSize sz,
                       QRegister qd,
                       QRegister qn,
                       QRegister qm) {
  EmitSIMDqqq(B24 | B11, sz, qd, qn, qm);
}

// Single-precision float subtract: qd = qn - qm.
void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm);
}

// Integer multiply, element size given by sz: qd = qn * qm.
void Assembler::vmulqi(OperandSize sz,
                       QRegister qd,
                       QRegister qn,
                       QRegister qm) {
  EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm);
}

// Single-precision float multiply: qd = qn * qm.
void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm);
}

// Signed shift-left by per-element amounts in qn: qd = qm << qn.
// Note the qm/qn parameter order is swapped relative to the emitted fields.
void Assembler::vshlqi(OperandSize sz,
                       QRegister qd,
                       QRegister qm,
                       QRegister qn) {
  EmitSIMDqqq(B25 | B10, sz, qd, qn, qm);
}

// Unsigned shift-left by per-element amounts in qn: qd = qm << qn.
void Assembler::vshlqu(OperandSize sz,
                       QRegister qd,
                       QRegister qm,
                       QRegister qn) {
  EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm);
}
1329 | 1196 |
1330 | |
// Bitwise NEON operations. These are size-agnostic, so kByte is passed as a
// conventional element size.

// Bitwise exclusive or: qd = qn ^ qm.
void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm);
}

// Bitwise or: qd = qn | qm.
void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qn, qm);
}

// Bitwise or-not: qd = qn | ~qm.
void Assembler::vornq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B20 | B8 | B4, kByte, qd, qn, qm);
}

// Bitwise and: qd = qn & qm.
void Assembler::vandq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B8 | B4, kByte, qd, qn, qm);
}

// Bitwise not: qd = ~qm (unary; Q0 is a dummy operand).
void Assembler::vmvnq(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B25 | B24 | B23 | B10 | B8 | B7, kWordPair, qd, Q0, qm);
}
1354 | 1216 |
1355 | |
// Single-precision float lane-wise minimum: qd = min(qn, qm).
void Assembler::vminqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
}

// Single-precision float lane-wise maximum: qd = max(qn, qm).
void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
}

// Single-precision float absolute value: qd = |qm| (unary, Q0 dummy).
void Assembler::vabsqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord, qd, Q0,
              qm);
}

// Single-precision float negate: qd = -qm.
void Assembler::vnegqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord,
              qd, Q0, qm);
}

// Reciprocal estimate: qd ~= 1 / qm (refine with vrecpsqs iterations).
void Assembler::vrecpeqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord, qd,
              Q0, qm);
}

// Reciprocal Newton-Raphson step used to refine vrecpeqs estimates.
void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
}

// Reciprocal square-root estimate: qd ~= 1 / sqrt(qm).
void Assembler::vrsqrteqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7, kSWord,
              qd, Q0, qm);
}

// Reciprocal square-root Newton-Raphson step for refining vrsqrteqs.
void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
}
1398 | 1252 |
1399 | |
1400 void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) { | 1253 void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) { |
1401 ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair)); | 1254 ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair)); |
1402 int code = 0; | 1255 int code = 0; |
1403 | 1256 |
1404 switch (sz) { | 1257 switch (sz) { |
1405 case kByte: | 1258 case kByte: |
1406 case kUnsignedByte: { | 1259 case kUnsignedByte: { |
1407 ASSERT((idx >= 0) && (idx < 8)); | 1260 ASSERT((idx >= 0) && (idx < 8)); |
1408 code = 1 | (idx << 1); | 1261 code = 1 | (idx << 1); |
1409 break; | 1262 break; |
(...skipping 11 matching lines...) Expand all Loading... |
1421 break; | 1274 break; |
1422 } | 1275 } |
1423 default: { break; } | 1276 default: { break; } |
1424 } | 1277 } |
1425 | 1278 |
1426 EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair, | 1279 EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair, |
1427 static_cast<DRegister>(qd * 2), | 1280 static_cast<DRegister>(qd * 2), |
1428 static_cast<DRegister>(code & 0xf), dm); | 1281 static_cast<DRegister>(code & 0xf), dm); |
1429 } | 1282 } |
1430 | 1283 |
1431 | |
// Table lookup: indexes into a table of |len| consecutive D registers
// starting at dn, using the byte indices in dm; result in dd. The encoding
// stores len - 1 in the two bits at B8.
void Assembler::vtbl(DRegister dd, DRegister dn, int len, DRegister dm) {
  ASSERT((len >= 1) && (len <= 4));
  EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm);
}

// Interleaves 32-bit lanes of qd and qm in place (vzip.32; Q0 is a dummy).
void Assembler::vzipqw(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm);
}
1441 | 1292 |
1442 | |
// NEON lane-wise compares. Each lane of qd is set to all-ones where the
// comparison holds and all-zeros where it does not.

// Integer equality, element size sz: qd = (qn == qm).
void Assembler::vceqqi(OperandSize sz,
                       QRegister qd,
                       QRegister qn,
                       QRegister qm) {
  EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm);
}

// Single-precision float equality: qd = (qn == qm).
void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm);
}

// Signed integer greater-or-equal: qd = (qn >= qm).
void Assembler::vcgeqi(OperandSize sz,
                       QRegister qd,
                       QRegister qn,
                       QRegister qm) {
  EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm);
}

// Unsigned integer greater-or-equal: qd = (qn >= qm).
void Assembler::vcugeqi(OperandSize sz,
                        QRegister qd,
                        QRegister qn,
                        QRegister qm) {
  EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm);
}

// Single-precision float greater-or-equal: qd = (qn >= qm).
void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm);
}

// Signed integer greater-than: qd = (qn > qm).
void Assembler::vcgtqi(OperandSize sz,
                       QRegister qd,
                       QRegister qn,
                       QRegister qm) {
  EmitSIMDqqq(B9 | B8, sz, qd, qn, qm);
}

// Unsigned integer greater-than: qd = (qn > qm).
void Assembler::vcugtqi(OperandSize sz,
                        QRegister qd,
                        QRegister qn,
                        QRegister qm) {
  EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm);
}

// Single-precision float greater-than: qd = (qn > qm).
void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm);
}
1496 | 1339 |
1497 | |
// Emits a breakpoint instruction carrying the 16-bit immediate imm16
// (encoding supplied by BkptEncoding).
void Assembler::bkpt(uint16_t imm16) {
  Emit(BkptEncoding(imm16));
}

// Conditional branch to label (no link).
void Assembler::b(Label* label, Condition cond) {
  EmitBranch(cond, label, false);
}

// Conditional branch-and-link (call) to label.
void Assembler::bl(Label* label, Condition cond) {
  EmitBranch(cond, label, true);
}
1511 | 1351 |
1512 | |
// Branch-and-exchange to the address in rm (bx rm).
void Assembler::bx(Register rm, Condition cond) {
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
                     B21 | (0xfff << 8) | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}

// Branch-with-link-and-exchange to the address in rm (blx rm).
// Identical encoding to bx except for the additional B5 bit.
void Assembler::blx(Register rm, Condition cond) {
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | B24 |
                     B21 | (0xfff << 8) | B5 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
1531 | 1369 |
1532 | |
// Emits a recognizable marker at an exception-handler entry: a tst
// instruction followed by a branch to |label| that is jumped over and
// therefore never executed. NOTE(review): the skipped branch appears to
// exist only so its target can be decoded from the instruction stream;
// confirm against the consumers of this pattern.
void Assembler::MarkExceptionHandler(Label* label) {
  EmitType01(AL, 1, TST, 1, PC, R0, Operand(0));
  Label l;
  b(&l);
  EmitBranch(AL, label, false);
  Bind(&l);
}
1540 | 1377 |
1541 | |
1542 void Assembler::Drop(intptr_t stack_elements) { | 1378 void Assembler::Drop(intptr_t stack_elements) { |
1543 ASSERT(stack_elements >= 0); | 1379 ASSERT(stack_elements >= 0); |
1544 if (stack_elements > 0) { | 1380 if (stack_elements > 0) { |
1545 AddImmediate(SP, stack_elements * kWordSize); | 1381 AddImmediate(SP, stack_elements * kWordSize); |
1546 } | 1382 } |
1547 } | 1383 } |
1548 | 1384 |
1549 | |
// Returns the object-pool entry for |imm|, delegating to the pool wrapper
// (which deduplicates entries).
intptr_t Assembler::FindImmediate(int32_t imm) {
  return object_pool_wrapper_.FindImmediate(imm);
}
1553 | 1388 |
1554 | |
1555 // Uses a code sequence that can easily be decoded. | 1389 // Uses a code sequence that can easily be decoded. |
1556 void Assembler::LoadWordFromPoolOffset(Register rd, | 1390 void Assembler::LoadWordFromPoolOffset(Register rd, |
1557 int32_t offset, | 1391 int32_t offset, |
1558 Register pp, | 1392 Register pp, |
1559 Condition cond) { | 1393 Condition cond) { |
1560 ASSERT((pp != PP) || constant_pool_allowed()); | 1394 ASSERT((pp != PP) || constant_pool_allowed()); |
1561 ASSERT(rd != pp); | 1395 ASSERT(rd != pp); |
1562 int32_t offset_mask = 0; | 1396 int32_t offset_mask = 0; |
1563 if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) { | 1397 if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) { |
1564 ldr(rd, Address(pp, offset), cond); | 1398 ldr(rd, Address(pp, offset), cond); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1598 ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset())); | 1432 ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset())); |
1599 cmp(R0, Operand(IP)); | 1433 cmp(R0, Operand(IP)); |
1600 b(&instructions_ok, EQ); | 1434 b(&instructions_ok, EQ); |
1601 bkpt(1); | 1435 bkpt(1); |
1602 Bind(&instructions_ok); | 1436 Bind(&instructions_ok); |
1603 Pop(IP); | 1437 Pop(IP); |
1604 Pop(R0); | 1438 Pop(R0); |
1605 #endif | 1439 #endif |
1606 } | 1440 } |
1607 | 1441 |
1608 | |
// Reloads CODE_REG from the PC-marker slot of the current frame, then
// verifies it (CheckCodePointer is a no-op outside debug builds).
void Assembler::RestoreCodePointer() {
  ldr(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize));
  CheckCodePointer();
}
1613 | 1446 |
1614 | |
// Loads the object pool of the current code object (via CODE_REG) into
// |reg|. Constant-pool loads are only enabled when the pool lands in the
// canonical PP register.
void Assembler::LoadPoolPointer(Register reg) {
  // Load new pool pointer.
  CheckCodePointer();
  ldr(reg, FieldAddress(CODE_REG, Code::object_pool_offset()));
  set_constant_pool_allowed(reg == PP);
}
1621 | 1453 |
1622 | |
// Loads the current isolate pointer into |rd| from the thread register.
void Assembler::LoadIsolate(Register rd) {
  ldr(rd, Address(THR, Thread::isolate_offset()));
}
1626 | 1457 |
1627 | |
1628 bool Assembler::CanLoadFromObjectPool(const Object& object) const { | 1458 bool Assembler::CanLoadFromObjectPool(const Object& object) const { |
1629 ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); | 1459 ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); |
1630 ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); | 1460 ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); |
1631 ASSERT(!Thread::CanLoadFromThread(object)); | 1461 ASSERT(!Thread::CanLoadFromThread(object)); |
1632 if (!constant_pool_allowed()) { | 1462 if (!constant_pool_allowed()) { |
1633 return false; | 1463 return false; |
1634 } | 1464 } |
1635 | 1465 |
1636 ASSERT(object.IsNotTemporaryScopedHandle()); | 1466 ASSERT(object.IsNotTemporaryScopedHandle()); |
1637 ASSERT(object.IsOld()); | 1467 ASSERT(object.IsOld()); |
1638 return true; | 1468 return true; |
1639 } | 1469 } |
1640 | 1470 |
1641 | |
1642 void Assembler::LoadObjectHelper(Register rd, | 1471 void Assembler::LoadObjectHelper(Register rd, |
1643 const Object& object, | 1472 const Object& object, |
1644 Condition cond, | 1473 Condition cond, |
1645 bool is_unique, | 1474 bool is_unique, |
1646 Register pp) { | 1475 Register pp) { |
1647 ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); | 1476 ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); |
1648 ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); | 1477 ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); |
1649 if (Thread::CanLoadFromThread(object)) { | 1478 if (Thread::CanLoadFromThread(object)) { |
1650 // Load common VM constants from the thread. This works also in places where | 1479 // Load common VM constants from the thread. This works also in places where |
1651 // no constant pool is set up (e.g. intrinsic code). | 1480 // no constant pool is set up (e.g. intrinsic code). |
1652 ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond); | 1481 ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond); |
1653 } else if (object.IsSmi()) { | 1482 } else if (object.IsSmi()) { |
1654 // Relocation doesn't apply to Smis. | 1483 // Relocation doesn't apply to Smis. |
1655 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond); | 1484 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond); |
1656 } else if (CanLoadFromObjectPool(object)) { | 1485 } else if (CanLoadFromObjectPool(object)) { |
1657 // Make sure that class CallPattern is able to decode this load from the | 1486 // Make sure that class CallPattern is able to decode this load from the |
1658 // object pool. | 1487 // object pool. |
1659 const int32_t offset = ObjectPool::element_offset( | 1488 const int32_t offset = ObjectPool::element_offset( |
1660 is_unique ? object_pool_wrapper_.AddObject(object) | 1489 is_unique ? object_pool_wrapper_.AddObject(object) |
1661 : object_pool_wrapper_.FindObject(object)); | 1490 : object_pool_wrapper_.FindObject(object)); |
1662 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond); | 1491 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond); |
1663 } else { | 1492 } else { |
1664 UNREACHABLE(); | 1493 UNREACHABLE(); |
1665 } | 1494 } |
1666 } | 1495 } |
1667 | 1496 |
1668 | |
// Loads |object| into |rd|, sharing an existing object-pool entry if one
// exists (see LoadObjectHelper).
void Assembler::LoadObject(Register rd, const Object& object, Condition cond) {
  LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP);
}
1672 | 1500 |
1673 | |
// Like LoadObject, but always allocates a fresh object-pool entry so the
// load site can later be patched independently of other uses of |object|.
void Assembler::LoadUniqueObject(Register rd,
                                 const Object& object,
                                 Condition cond) {
  LoadObjectHelper(rd, object, cond, /* is_unique = */ true, PP);
}
1679 | 1506 |
1680 | |
// Loads |function| through the callee's pool register |new_pp| (used before
// the caller's PP is usable). The kHeapObjectTag subtraction compensates for
// the tag bit on the pool pointer.
void Assembler::LoadFunctionFromCalleePool(Register dst,
                                           const Function& function,
                                           Register new_pp) {
  const int32_t offset =
      ObjectPool::element_offset(object_pool_wrapper_.FindObject(function));
  LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp, AL);
}
1688 | 1514 |
1689 | |
// Loads the native entry point described by |label| from the object pool.
// |patchable| controls whether the pool slot may be rewritten later.
void Assembler::LoadNativeEntry(Register rd,
                                const ExternalLabel* label,
                                Patchability patchable,
                                Condition cond) {
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindNativeEntry(label, patchable));
  LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
}
1698 | 1523 |
1699 | |
// Pushes |object| on the stack, materializing it via the scratch register IP.
void Assembler::PushObject(const Object& object) {
  ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
  ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
  LoadObject(IP, object);
  Push(IP);
}
1706 | 1530 |
1707 | |
1708 void Assembler::CompareObject(Register rn, const Object& object) { | 1531 void Assembler::CompareObject(Register rn, const Object& object) { |
1709 ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); | 1532 ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal()); |
1710 ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); | 1533 ASSERT(!object.IsField() || Field::Cast(object).IsOriginal()); |
1711 ASSERT(rn != IP); | 1534 ASSERT(rn != IP); |
1712 if (object.IsSmi()) { | 1535 if (object.IsSmi()) { |
1713 CompareImmediate(rn, reinterpret_cast<int32_t>(object.raw())); | 1536 CompareImmediate(rn, reinterpret_cast<int32_t>(object.raw())); |
1714 } else { | 1537 } else { |
1715 LoadObject(IP, object); | 1538 LoadObject(IP, object); |
1716 cmp(rn, Operand(IP)); | 1539 cmp(rn, Operand(IP)); |
1717 } | 1540 } |
1718 } | 1541 } |
1719 | 1542 |
1720 | |
// Preserves object and value registers.
// Branches to |no_update| when storing |value| into |object| cannot require
// a store-buffer update. |value| must be known not to be a Smi. Clobbers IP.
void Assembler::StoreIntoObjectFilterNoSmi(Register object,
                                           Register value,
                                           Label* no_update) {
  COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
                 (kOldObjectAlignmentOffset == 0));

  // Write-barrier triggers if the value is in the new space (has bit set) and
  // the object is in the old space (has bit cleared).
  // To check that, we compute value & ~object and skip the write barrier
  // if the bit is not set. We can't destroy the object.
  bic(IP, value, Operand(object));
  tst(IP, Operand(kNewObjectAlignmentOffset));
  b(no_update, EQ);
}
1736 | 1558 |
1737 | |
// Preserves object and value registers.
// Like StoreIntoObjectFilterNoSmi, but additionally treats a Smi |value| as
// not requiring a barrier. Clobbers IP.
void Assembler::StoreIntoObjectFilter(Register object,
                                      Register value,
                                      Label* no_update) {
  // For the value we are only interested in the new/old bit and the tag bit.
  // And the new bit with the tag bit. The resulting bit will be 0 for a Smi.
  and_(IP, value, Operand(value, LSL, kObjectAlignmentLog2 - 1));
  // And the result with the negated space bit of the object.
  bic(IP, IP, Operand(object));
  tst(IP, Operand(kNewObjectAlignmentOffset));
  b(no_update, EQ);
}
1750 | 1571 |
1751 | |
1752 Register UseRegister(Register reg, RegList* used) { | 1572 Register UseRegister(Register reg, RegList* used) { |
1753 ASSERT(reg != THR); | 1573 ASSERT(reg != THR); |
1754 ASSERT(reg != SP); | 1574 ASSERT(reg != SP); |
1755 ASSERT(reg != FP); | 1575 ASSERT(reg != FP); |
1756 ASSERT(reg != PC); | 1576 ASSERT(reg != PC); |
1757 ASSERT((*used & (1 << reg)) == 0); | 1577 ASSERT((*used & (1 << reg)) == 0); |
1758 *used |= (1 << reg); | 1578 *used |= (1 << reg); |
1759 return reg; | 1579 return reg; |
1760 } | 1580 } |
1761 | 1581 |
1762 | |
1763 Register AllocateRegister(RegList* used) { | 1582 Register AllocateRegister(RegList* used) { |
1764 const RegList free = ~*used; | 1583 const RegList free = ~*used; |
1765 return (free == 0) | 1584 return (free == 0) |
1766 ? kNoRegister | 1585 ? kNoRegister |
1767 : UseRegister( | 1586 : UseRegister( |
1768 static_cast<Register>(Utils::CountTrailingZeros(free)), | 1587 static_cast<Register>(Utils::CountTrailingZeros(free)), |
1769 used); | 1588 used); |
1770 } | 1589 } |
1771 | 1590 |
1772 | |
// Stores |value| into |dest| (a slot within |object|) and emits the
// generational write barrier: when a new-space value is stored into an
// old-space object, the update_store_buffer stub is called with the object
// in R0. Live caller-saved state (R0, CODE_REG, LR) is preserved around the
// call. Clobbers IP via the filter helpers.
void Assembler::StoreIntoObject(Register object,
                                const Address& dest,
                                Register value,
                                bool can_value_be_smi) {
  ASSERT(object != value);
  str(value, dest);
  Label done;
  // Skip the runtime call when no store-buffer update can be needed.
  if (can_value_be_smi) {
    StoreIntoObjectFilter(object, value, &done);
  } else {
    StoreIntoObjectFilterNoSmi(object, value, &done);
  }
  // A store buffer update is required.
  RegList regs = (1 << CODE_REG) | (1 << LR);
  if (value != R0) {
    regs |= (1 << R0);  // Preserve R0.
  }
  PushList(regs);
  if (object != R0) {
    mov(R0, Operand(object));
  }
  // Call the store-buffer update stub through its thread-local entry point.
  ldr(LR, Address(THR, Thread::update_store_buffer_entry_point_offset()));
  ldr(CODE_REG, Address(THR, Thread::update_store_buffer_code_offset()));
  blx(LR);
  PopList(regs);
  Bind(&done);
}
1800 | 1618 |
1801 | |
// StoreIntoObject for a field at |offset| (tagged) from |object|. Falls back
// to computing the address in IP when the displacement does not fit in a
// store instruction.
void Assembler::StoreIntoObjectOffset(Register object,
                                      int32_t offset,
                                      Register value,
                                      bool can_value_be_smi) {
  int32_t ignored = 0;
  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
    StoreIntoObject(object, FieldAddress(object, offset), value,
                    can_value_be_smi);
  } else {
    AddImmediate(IP, object, offset - kHeapObjectTag);
    StoreIntoObject(object, Address(IP), value, can_value_be_smi);
  }
}
1815 | 1632 |
1816 | |
// Stores |value| into |dest| without a write barrier. The caller guarantees
// no barrier is needed; debug builds emit a check that traps (Stop) if the
// filter decides an update would in fact be required.
void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         Register value) {
  str(value, dest);
#if defined(DEBUG)
  Label done;
  StoreIntoObjectFilter(object, value, &done);
  Stop("Store buffer update is required");
  Bind(&done);
#endif  // defined(DEBUG)
  // No store buffer update.
}
1829 | 1645 |
1830 | |
// Stores constant |value| into |dest| without a write barrier. Only values
// that can never be in new space (Smis, VM-heap or old-space objects) are
// legal, so no barrier can ever be needed. Clobbers IP.
void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         const Object& value) {
  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
  ASSERT(value.IsSmi() || value.InVMHeap() ||
         (value.IsOld() && value.IsNotTemporaryScopedHandle()));
  // No store buffer update.
  LoadObject(IP, value);
  str(IP, dest);
}
1842 | 1657 |
1843 | |
// StoreIntoObjectNoBarrier for a field at |offset| (tagged) from |object|.
// For out-of-range displacements the address is computed in a spilled
// scratch register (R8/R9) rather than IP, which the NoBarrier store needs.
void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
                                               int32_t offset,
                                               Register value) {
  int32_t ignored = 0;
  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
  } else {
    Register base = object == R9 ? R8 : R9;
    Push(base);
    AddImmediate(base, object, offset - kHeapObjectTag);
    StoreIntoObjectNoBarrier(object, Address(base), value);
    Pop(base);
  }
}
1858 | 1672 |
1859 | |
// Constant-value variant of StoreIntoObjectNoBarrierOffset; same addressing
// strategy, delegating to the Object-typed NoBarrier store.
void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
                                               int32_t offset,
                                               const Object& value) {
  ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
  ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
  int32_t ignored = 0;
  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
  } else {
    Register base = object == R9 ? R8 : R9;
    Push(base);
    AddImmediate(base, object, offset - kHeapObjectTag);
    StoreIntoObjectNoBarrier(object, Address(base), value);
    Pop(base);
  }
}
1876 | 1689 |
1877 | |
// Fills the word range [begin, end) with value_even/value_odd using paired
// strd stores, handling an odd trailing word with a single conditional str.
// strd requires an even/odd register pair, hence the first ASSERT. |begin|
// is destroyed; condition codes are unsigned (LS/CC/HI) on begin vs end.
void Assembler::InitializeFieldsNoBarrier(Register object,
                                          Register begin,
                                          Register end,
                                          Register value_even,
                                          Register value_odd) {
  ASSERT(value_odd == value_even + 1);
  Label init_loop;
  Bind(&init_loop);
  // Advance past the pair about to be stored, then compare against end.
  AddImmediate(begin, 2 * kWordSize);
  cmp(begin, Operand(end));
  // begin <= end: both words of the pair are inside the range.
  strd(value_even, value_odd, begin, -2 * kWordSize, LS);
  // begin < end: more words remain.
  b(&init_loop, CC);
  // begin > end: exactly one trailing word remained; store it singly.
  str(value_even, Address(begin, -2 * kWordSize), HI);
#if defined(DEBUG)
  Label done;
  // NOTE(review): each filter branches to |done| as soon as its value needs
  // no barrier, so this only traps when both values would require one.
  StoreIntoObjectFilter(object, value_even, &done);
  StoreIntoObjectFilter(object, value_odd, &done);
  Stop("Store buffer update is required");
  Bind(&done);
#endif  // defined(DEBUG)
  // No store buffer update.
}
1900 | 1712 |
1901 | |
1902 void Assembler::InitializeFieldsNoBarrierUnrolled(Register object, | 1713 void Assembler::InitializeFieldsNoBarrierUnrolled(Register object, |
1903 Register base, | 1714 Register base, |
1904 intptr_t begin_offset, | 1715 intptr_t begin_offset, |
1905 intptr_t end_offset, | 1716 intptr_t end_offset, |
1906 Register value_even, | 1717 Register value_even, |
1907 Register value_odd) { | 1718 Register value_odd) { |
1908 ASSERT(value_odd == value_even + 1); | 1719 ASSERT(value_odd == value_even + 1); |
1909 intptr_t current_offset = begin_offset; | 1720 intptr_t current_offset = begin_offset; |
1910 while (current_offset + kWordSize < end_offset) { | 1721 while (current_offset + kWordSize < end_offset) { |
1911 strd(value_even, value_odd, base, current_offset); | 1722 strd(value_even, value_odd, base, current_offset); |
1912 current_offset += 2 * kWordSize; | 1723 current_offset += 2 * kWordSize; |
1913 } | 1724 } |
1914 while (current_offset < end_offset) { | 1725 while (current_offset < end_offset) { |
1915 str(value_even, Address(base, current_offset)); | 1726 str(value_even, Address(base, current_offset)); |
1916 current_offset += kWordSize; | 1727 current_offset += kWordSize; |
1917 } | 1728 } |
1918 #if defined(DEBUG) | 1729 #if defined(DEBUG) |
1919 Label done; | 1730 Label done; |
1920 StoreIntoObjectFilter(object, value_even, &done); | 1731 StoreIntoObjectFilter(object, value_even, &done); |
1921 StoreIntoObjectFilter(object, value_odd, &done); | 1732 StoreIntoObjectFilter(object, value_odd, &done); |
1922 Stop("Store buffer update is required"); | 1733 Stop("Store buffer update is required"); |
1923 Bind(&done); | 1734 Bind(&done); |
1924 #endif // defined(DEBUG) | 1735 #endif // defined(DEBUG) |
1925 // No store buffer update. | 1736 // No store buffer update. |
1926 } | 1737 } |
1927 | 1738 |
1928 | |
// Stores |value| into |dest|; the value must be a Smi, so no write barrier
// is needed. Debug builds verify the tag bit is clear before the store.
void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
#if defined(DEBUG)
  Label done;
  tst(value, Operand(kHeapObjectTag));
  b(&done, EQ);
  Stop("New value must be Smi.");
  Bind(&done);
#endif  // defined(DEBUG)
  str(value, dest);
}
1939 | 1749 |
1940 | |
// Loads the class id of heap object |object| into |result| with a single
// halfword load; relies on the class id occupying a 16-bit, byte-aligned
// field of the tags word (asserted below).
void Assembler::LoadClassId(Register result, Register object, Condition cond) {
  ASSERT(RawObject::kClassIdTagPos == 16);
  ASSERT(RawObject::kClassIdTagSize == 16);
  const intptr_t class_id_offset =
      Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
  ldrh(result, FieldAddress(object, class_id_offset), cond);
}
1948 | 1757 |
1949 | |
// Loads the Class for |class_id| into |result| by indexing the isolate's
// class table (table base, then a 4-byte-scaled index).
void Assembler::LoadClassById(Register result, Register class_id) {
  ASSERT(result != class_id);
  LoadIsolate(result);
  const intptr_t offset =
      Isolate::class_table_offset() + ClassTable::table_offset();
  LoadFromOffset(kWord, result, result, offset);
  ldr(result, Address(result, class_id, LSL, 2));
}
1958 | 1766 |
1959 | |
// Loads the Class of heap object |object| into |result|; |scratch| receives
// the intermediate class id and must differ from |result|.
void Assembler::LoadClass(Register result, Register object, Register scratch) {
  ASSERT(scratch != result);
  LoadClassId(scratch, object);
  LoadClassById(result, scratch);
}
1965 | 1772 |
1966 | |
// Sets condition flags by comparing |object|'s class id against |class_id|.
// |scratch| is clobbered with the loaded class id.
void Assembler::CompareClassId(Register object,
                               intptr_t class_id,
                               Register scratch) {
  LoadClassId(scratch, object);
  CompareImmediate(scratch, class_id);
}
1973 | 1779 |
1974 | |
// Loads |object|'s class id into |result|, handling Smis: the tag test
// selects between a real class-id load (NE: heap object) and the kSmiCid
// constant (EQ: Smi).
void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
  tst(object, Operand(kSmiTagMask));
  LoadClassId(result, object, NE);
  LoadImmediate(result, kSmiCid, EQ);
}
1980 | 1785 |
1981 | |
// Like LoadClassIdMayBeSmi, but leaves the class id Smi-tagged in |result|.
void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
  LoadClassIdMayBeSmi(result, object);
  SmiTag(result);
}
1986 | 1790 |
1987 | |
// Returns true if the word-aligned byte displacement |offset| fits in the
// signed immediate field of a branch (field width = number of bits set in
// kBranchOffsetMask).
static bool CanEncodeBranchOffset(int32_t offset) {
  ASSERT(Utils::IsAligned(offset, 4));
  return Utils::IsInt(Utils::CountOneBits(kBranchOffsetMask), offset);
}
1992 | 1795 |
1993 | |
// Patches the offset field of branch instruction |inst| to reach |offset|
// bytes away, returning the updated instruction. If the displacement does
// not fit, bails out through the thread's long jump (the compiler then
// retries with far branches enabled).
int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) {
  // The offset is off by 8 due to the way the ARM CPUs read PC.
  offset -= Instr::kPCReadOffset;

  if (!CanEncodeBranchOffset(offset)) {
    ASSERT(!use_far_branches());
    Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
  }

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;  // Branch offsets are encoded in words.
  offset &= kBranchOffsetMask;
  return (inst & ~kBranchOffsetMask) | offset;
}
2008 | 1810 |
2009 | |
// Inverse of EncodeBranchOffset: recovers the byte displacement encoded in
// branch instruction |inst|. The <<8 then arithmetic >>6 sign-extends the
// offset field while scaling it from words back to bytes.
int Assembler::DecodeBranchOffset(int32_t inst) {
  // Sign-extend, left-shift by 2, then add 8.
  return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset);
}
2014 | 1815 |
2015 | |
// Reconstructs the 32-bit value materialized by a movw/movt pair. Each
// instruction encodes a 16-bit half as imm4 (bits 19:16) and imm12
// (bits 11:0); movt supplies the upper half, movw the lower.
static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) {
  const uint32_t high16 = (((movt >> 16) & 0xf) << 12) | (movt & 0xfff);
  const uint32_t low16 = (((movw >> 16) & 0xf) << 12) | (movw & 0xfff);
  return static_cast<int32_t>((high16 << 16) | low16);
}
2024 | 1824 |
2025 | |
// Reconstructs the 32-bit value materialized by a mov plus three orr
// instructions, each contributing one byte; |mov| supplies the most
// significant byte and |or3| the least.
static int32_t DecodeARMv6LoadImmediate(int32_t mov,
                                        int32_t or1,
                                        int32_t or2,
                                        int32_t or3) {
  uint32_t value = static_cast<uint32_t>(mov) & 0xff;
  value = (value << 8) | (static_cast<uint32_t>(or1) & 0xff);
  value = (value << 8) | (static_cast<uint32_t>(or2) & 0xff);
  value = (value << 8) | (static_cast<uint32_t>(or3) & 0xff);
  return static_cast<int32_t>(value);
}
2037 | 1836 |
2038 | |
2039 class PatchFarBranch : public AssemblerFixup { | 1837 class PatchFarBranch : public AssemblerFixup { |
2040 public: | 1838 public: |
2041 PatchFarBranch() {} | 1839 PatchFarBranch() {} |
2042 | 1840 |
2043 void Process(const MemoryRegion& region, intptr_t position) { | 1841 void Process(const MemoryRegion& region, intptr_t position) { |
2044 const ARMVersion version = TargetCPUFeatures::arm_version(); | 1842 const ARMVersion version = TargetCPUFeatures::arm_version(); |
2045 if ((version == ARMv5TE) || (version == ARMv6)) { | 1843 if ((version == ARMv5TE) || (version == ARMv6)) { |
2046 ProcessARMv6(region, position); | 1844 ProcessARMv6(region, position); |
2047 } else { | 1845 } else { |
2048 ASSERT(version == ARMv7); | 1846 ASSERT(version == ARMv7); |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2080 return; | 1878 return; |
2081 } | 1879 } |
2082 | 1880 |
2083 // If the offset loading instructions aren't there, we must have replaced | 1881 // If the offset loading instructions aren't there, we must have replaced |
2084 // the far branch with a near one, and so these instructions | 1882 // the far branch with a near one, and so these instructions |
2085 // should be NOPs. | 1883 // should be NOPs. |
2086 ASSERT((or1 == Instr::kNopInstruction) && (or2 == Instr::kNopInstruction) && | 1884 ASSERT((or1 == Instr::kNopInstruction) && (or2 == Instr::kNopInstruction) && |
2087 (or3 == Instr::kNopInstruction) && (bx == Instr::kNopInstruction)); | 1885 (or3 == Instr::kNopInstruction) && (bx == Instr::kNopInstruction)); |
2088 } | 1886 } |
2089 | 1887 |
2090 | |
2091 void ProcessARMv7(const MemoryRegion& region, intptr_t position) { | 1888 void ProcessARMv7(const MemoryRegion& region, intptr_t position) { |
2092 const int32_t movw = region.Load<int32_t>(position); | 1889 const int32_t movw = region.Load<int32_t>(position); |
2093 const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize); | 1890 const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize); |
2094 const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize); | 1891 const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize); |
2095 | 1892 |
2096 if (((movt & 0xfff0f000) == 0xe340c000) && // movt IP, high | 1893 if (((movt & 0xfff0f000) == 0xe340c000) && // movt IP, high |
2097 ((movw & 0xfff0f000) == 0xe300c000)) { // movw IP, low | 1894 ((movw & 0xfff0f000) == 0xe300c000)) { // movw IP, low |
2098 const int32_t offset = DecodeARMv7LoadImmediate(movt, movw); | 1895 const int32_t offset = DecodeARMv7LoadImmediate(movt, movw); |
2099 const int32_t dest = region.start() + offset; | 1896 const int32_t dest = region.start() + offset; |
2100 const uint16_t dest_high = Utils::High16Bits(dest); | 1897 const uint16_t dest_high = Utils::High16Bits(dest); |
(...skipping 10 matching lines...) Expand all Loading... |
2111 | 1908 |
2112 // If the offset loading instructions aren't there, we must have replaced | 1909 // If the offset loading instructions aren't there, we must have replaced |
2113 // the far branch with a near one, and so these instructions | 1910 // the far branch with a near one, and so these instructions |
2114 // should be NOPs. | 1911 // should be NOPs. |
2115 ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction)); | 1912 ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction)); |
2116 } | 1913 } |
2117 | 1914 |
2118 virtual bool IsPointerOffset() const { return false; } | 1915 virtual bool IsPointerOffset() const { return false; } |
2119 }; | 1916 }; |
2120 | 1917 |
2121 | |
// Emits an unlimited-range branch: materializes |offset| (a buffer position)
// into IP and jumps through it, linking (blx) when |link| is set. The
// registered PatchFarBranch fixup rewrites the loaded value once final
// addresses are known.
void Assembler::EmitFarBranch(Condition cond, int32_t offset, bool link) {
  buffer_.EmitFixup(new PatchFarBranch());
  LoadPatchableImmediate(IP, offset);
  if (link) {
    blx(IP, cond);
  } else {
    bx(IP, cond);
  }
}
2131 | 1927 |
2132 | |
// Emits a branch to |label| under |cond|; |link| selects a linking branch.
// - Bound label: emit a PC-relative branch to the known position, or a far
//   branch when far branches are enabled and the offset cannot be encoded.
// - Unbound label: emit a branch that carries the previous head of the
//   label's link chain, then prepend this site to the chain; the chain is
//   resolved later in Bind().
void Assembler::EmitBranch(Condition cond, Label* label, bool link) {
  if (label->IsBound()) {
    const int32_t dest = label->Position() - buffer_.Size();
    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
      // Relative offset does not fit in the branch encoding: go through IP.
      EmitFarBranch(cond, label->Position(), link);
    } else {
      EmitType5(cond, dest, link);
    }
  } else {
    const intptr_t position = buffer_.Size();
    if (use_far_branches()) {
      // Far branches carry the link-chain value in the patchable immediate
      // load instead of the branch's offset field.
      const int32_t dest = label->position_;
      EmitFarBranch(cond, dest, link);
    } else {
      // Use the offset field of the branch instruction for linking the sites.
      EmitType5(cond, label->position_, link);
    }
    label->LinkTo(position);
  }
}
2153 | 1948 |
2154 | |
2155 void Assembler::BindARMv6(Label* label) { | 1949 void Assembler::BindARMv6(Label* label) { |
2156 ASSERT(!label->IsBound()); | 1950 ASSERT(!label->IsBound()); |
2157 intptr_t bound_pc = buffer_.Size(); | 1951 intptr_t bound_pc = buffer_.Size(); |
2158 while (label->IsLinked()) { | 1952 while (label->IsLinked()) { |
2159 const int32_t position = label->Position(); | 1953 const int32_t position = label->Position(); |
2160 int32_t dest = bound_pc - position; | 1954 int32_t dest = bound_pc - position; |
2161 if (use_far_branches() && !CanEncodeBranchOffset(dest)) { | 1955 if (use_far_branches() && !CanEncodeBranchOffset(dest)) { |
2162 // Far branches are enabled and we can't encode the branch offset. | 1956 // Far branches are enabled and we can't encode the branch offset. |
2163 | 1957 |
2164 // Grab instructions that load the offset. | 1958 // Grab instructions that load the offset. |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2223 } else { | 2017 } else { |
2224 int32_t next = buffer_.Load<int32_t>(position); | 2018 int32_t next = buffer_.Load<int32_t>(position); |
2225 int32_t encoded = Assembler::EncodeBranchOffset(dest, next); | 2019 int32_t encoded = Assembler::EncodeBranchOffset(dest, next); |
2226 buffer_.Store<int32_t>(position, encoded); | 2020 buffer_.Store<int32_t>(position, encoded); |
2227 label->position_ = Assembler::DecodeBranchOffset(next); | 2021 label->position_ = Assembler::DecodeBranchOffset(next); |
2228 } | 2022 } |
2229 } | 2023 } |
2230 label->BindTo(bound_pc); | 2024 label->BindTo(bound_pc); |
2231 } | 2025 } |
2232 | 2026 |
2233 | |
2234 void Assembler::BindARMv7(Label* label) { | 2027 void Assembler::BindARMv7(Label* label) { |
2235 ASSERT(!label->IsBound()); | 2028 ASSERT(!label->IsBound()); |
2236 intptr_t bound_pc = buffer_.Size(); | 2029 intptr_t bound_pc = buffer_.Size(); |
2237 while (label->IsLinked()) { | 2030 while (label->IsLinked()) { |
2238 const int32_t position = label->Position(); | 2031 const int32_t position = label->Position(); |
2239 int32_t dest = bound_pc - position; | 2032 int32_t dest = bound_pc - position; |
2240 if (use_far_branches() && !CanEncodeBranchOffset(dest)) { | 2033 if (use_far_branches() && !CanEncodeBranchOffset(dest)) { |
2241 // Far branches are enabled and we can't encode the branch offset. | 2034 // Far branches are enabled and we can't encode the branch offset. |
2242 | 2035 |
2243 // Grab instructions that load the offset. | 2036 // Grab instructions that load the offset. |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2290 } else { | 2083 } else { |
2291 int32_t next = buffer_.Load<int32_t>(position); | 2084 int32_t next = buffer_.Load<int32_t>(position); |
2292 int32_t encoded = Assembler::EncodeBranchOffset(dest, next); | 2085 int32_t encoded = Assembler::EncodeBranchOffset(dest, next); |
2293 buffer_.Store<int32_t>(position, encoded); | 2086 buffer_.Store<int32_t>(position, encoded); |
2294 label->position_ = Assembler::DecodeBranchOffset(next); | 2087 label->position_ = Assembler::DecodeBranchOffset(next); |
2295 } | 2088 } |
2296 } | 2089 } |
2297 label->BindTo(bound_pc); | 2090 label->BindTo(bound_pc); |
2298 } | 2091 } |
2299 | 2092 |
2300 | |
2301 void Assembler::Bind(Label* label) { | 2093 void Assembler::Bind(Label* label) { |
2302 const ARMVersion version = TargetCPUFeatures::arm_version(); | 2094 const ARMVersion version = TargetCPUFeatures::arm_version(); |
2303 if ((version == ARMv5TE) || (version == ARMv6)) { | 2095 if ((version == ARMv5TE) || (version == ARMv6)) { |
2304 BindARMv6(label); | 2096 BindARMv6(label); |
2305 } else { | 2097 } else { |
2306 ASSERT(version == ARMv7); | 2098 ASSERT(version == ARMv7); |
2307 BindARMv7(label); | 2099 BindARMv7(label); |
2308 } | 2100 } |
2309 } | 2101 } |
2310 | 2102 |
2311 | |
2312 OperandSize Address::OperandSizeFor(intptr_t cid) { | 2103 OperandSize Address::OperandSizeFor(intptr_t cid) { |
2313 switch (cid) { | 2104 switch (cid) { |
2314 case kArrayCid: | 2105 case kArrayCid: |
2315 case kImmutableArrayCid: | 2106 case kImmutableArrayCid: |
2316 return kWord; | 2107 return kWord; |
2317 case kOneByteStringCid: | 2108 case kOneByteStringCid: |
2318 case kExternalOneByteStringCid: | 2109 case kExternalOneByteStringCid: |
2319 return kByte; | 2110 return kByte; |
2320 case kTwoByteStringCid: | 2111 case kTwoByteStringCid: |
2321 case kExternalTwoByteStringCid: | 2112 case kExternalTwoByteStringCid: |
(...skipping 27 matching lines...) Expand all Loading... |
2349 return kRegList; | 2140 return kRegList; |
2350 case kTypedDataInt8ArrayViewCid: | 2141 case kTypedDataInt8ArrayViewCid: |
2351 UNREACHABLE(); | 2142 UNREACHABLE(); |
2352 return kByte; | 2143 return kByte; |
2353 default: | 2144 default: |
2354 UNREACHABLE(); | 2145 UNREACHABLE(); |
2355 return kByte; | 2146 return kByte; |
2356 } | 2147 } |
2357 } | 2148 } |
2358 | 2149 |
2359 | |
2360 bool Address::CanHoldLoadOffset(OperandSize size, | 2150 bool Address::CanHoldLoadOffset(OperandSize size, |
2361 int32_t offset, | 2151 int32_t offset, |
2362 int32_t* offset_mask) { | 2152 int32_t* offset_mask) { |
2363 switch (size) { | 2153 switch (size) { |
2364 case kByte: | 2154 case kByte: |
2365 case kHalfword: | 2155 case kHalfword: |
2366 case kUnsignedHalfword: | 2156 case kUnsignedHalfword: |
2367 case kWordPair: { | 2157 case kWordPair: { |
2368 *offset_mask = 0xff; | 2158 *offset_mask = 0xff; |
2369 return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3. | 2159 return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3. |
(...skipping 14 matching lines...) Expand all Loading... |
2384 *offset_mask = 0x0; | 2174 *offset_mask = 0x0; |
2385 return offset == 0; | 2175 return offset == 0; |
2386 } | 2176 } |
2387 default: { | 2177 default: { |
2388 UNREACHABLE(); | 2178 UNREACHABLE(); |
2389 return false; | 2179 return false; |
2390 } | 2180 } |
2391 } | 2181 } |
2392 } | 2182 } |
2393 | 2183 |
2394 | |
2395 bool Address::CanHoldStoreOffset(OperandSize size, | 2184 bool Address::CanHoldStoreOffset(OperandSize size, |
2396 int32_t offset, | 2185 int32_t offset, |
2397 int32_t* offset_mask) { | 2186 int32_t* offset_mask) { |
2398 switch (size) { | 2187 switch (size) { |
2399 case kHalfword: | 2188 case kHalfword: |
2400 case kUnsignedHalfword: | 2189 case kUnsignedHalfword: |
2401 case kWordPair: { | 2190 case kWordPair: { |
2402 *offset_mask = 0xff; | 2191 *offset_mask = 0xff; |
2403 return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3. | 2192 return Utils::IsAbsoluteUint(8, offset); // Addressing mode 3. |
2404 } | 2193 } |
(...skipping 14 matching lines...) Expand all Loading... |
2419 *offset_mask = 0x0; | 2208 *offset_mask = 0x0; |
2420 return offset == 0; | 2209 return offset == 0; |
2421 } | 2210 } |
2422 default: { | 2211 default: { |
2423 UNREACHABLE(); | 2212 UNREACHABLE(); |
2424 return false; | 2213 return false; |
2425 } | 2214 } |
2426 } | 2215 } |
2427 } | 2216 } |
2428 | 2217 |
2429 | |
2430 bool Address::CanHoldImmediateOffset(bool is_load, | 2218 bool Address::CanHoldImmediateOffset(bool is_load, |
2431 intptr_t cid, | 2219 intptr_t cid, |
2432 int64_t offset) { | 2220 int64_t offset) { |
2433 int32_t offset_mask = 0; | 2221 int32_t offset_mask = 0; |
2434 if (is_load) { | 2222 if (is_load) { |
2435 return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask); | 2223 return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask); |
2436 } else { | 2224 } else { |
2437 return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask); | 2225 return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask); |
2438 } | 2226 } |
2439 } | 2227 } |
2440 | 2228 |
2441 | |
2442 void Assembler::Push(Register rd, Condition cond) { | 2229 void Assembler::Push(Register rd, Condition cond) { |
2443 str(rd, Address(SP, -kWordSize, Address::PreIndex), cond); | 2230 str(rd, Address(SP, -kWordSize, Address::PreIndex), cond); |
2444 } | 2231 } |
2445 | 2232 |
2446 | |
2447 void Assembler::Pop(Register rd, Condition cond) { | 2233 void Assembler::Pop(Register rd, Condition cond) { |
2448 ldr(rd, Address(SP, kWordSize, Address::PostIndex), cond); | 2234 ldr(rd, Address(SP, kWordSize, Address::PostIndex), cond); |
2449 } | 2235 } |
2450 | 2236 |
2451 | |
// Pushes every register in |regs| with a single stmdb sp!, {...}.
void Assembler::PushList(RegList regs, Condition cond) {
  stm(DB_W, SP, regs, cond);
}
2455 | 2240 |
2456 | |
// Pops into every register in |regs| with a single ldmia sp!, {...}.
void Assembler::PopList(RegList regs, Condition cond) {
  ldm(IA_W, SP, regs, cond);
}
2460 | 2244 |
2461 | |
2462 void Assembler::MoveRegister(Register rd, Register rm, Condition cond) { | 2245 void Assembler::MoveRegister(Register rd, Register rm, Condition cond) { |
2463 if (rd != rm) { | 2246 if (rd != rm) { |
2464 mov(rd, Operand(rm), cond); | 2247 mov(rd, Operand(rm), cond); |
2465 } | 2248 } |
2466 } | 2249 } |
2467 | 2250 |
2468 | |
2469 void Assembler::Lsl(Register rd, | 2251 void Assembler::Lsl(Register rd, |
2470 Register rm, | 2252 Register rm, |
2471 const Operand& shift_imm, | 2253 const Operand& shift_imm, |
2472 Condition cond) { | 2254 Condition cond) { |
2473 ASSERT(shift_imm.type() == 1); | 2255 ASSERT(shift_imm.type() == 1); |
2474 ASSERT(shift_imm.encoding() != 0); // Do not use Lsl if no shift is wanted. | 2256 ASSERT(shift_imm.encoding() != 0); // Do not use Lsl if no shift is wanted. |
2475 mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond); | 2257 mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond); |
2476 } | 2258 } |
2477 | 2259 |
2478 | |
// Logical shift left by a register amount: rd <- rm << rs.
void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) {
  mov(rd, Operand(rm, LSL, rs), cond);
}
2482 | 2263 |
2483 | |
2484 void Assembler::Lsr(Register rd, | 2264 void Assembler::Lsr(Register rd, |
2485 Register rm, | 2265 Register rm, |
2486 const Operand& shift_imm, | 2266 const Operand& shift_imm, |
2487 Condition cond) { | 2267 Condition cond) { |
2488 ASSERT(shift_imm.type() == 1); | 2268 ASSERT(shift_imm.type() == 1); |
2489 uint32_t shift = shift_imm.encoding(); | 2269 uint32_t shift = shift_imm.encoding(); |
2490 ASSERT(shift != 0); // Do not use Lsr if no shift is wanted. | 2270 ASSERT(shift != 0); // Do not use Lsr if no shift is wanted. |
2491 if (shift == 32) { | 2271 if (shift == 32) { |
2492 shift = 0; // Comply to UAL syntax. | 2272 shift = 0; // Comply to UAL syntax. |
2493 } | 2273 } |
2494 mov(rd, Operand(rm, LSR, shift), cond); | 2274 mov(rd, Operand(rm, LSR, shift), cond); |
2495 } | 2275 } |
2496 | 2276 |
2497 | |
// Logical shift right by a register amount: rd <- rm >> rs (unsigned).
void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) {
  mov(rd, Operand(rm, LSR, rs), cond);
}
2501 | 2280 |
2502 | |
2503 void Assembler::Asr(Register rd, | 2281 void Assembler::Asr(Register rd, |
2504 Register rm, | 2282 Register rm, |
2505 const Operand& shift_imm, | 2283 const Operand& shift_imm, |
2506 Condition cond) { | 2284 Condition cond) { |
2507 ASSERT(shift_imm.type() == 1); | 2285 ASSERT(shift_imm.type() == 1); |
2508 uint32_t shift = shift_imm.encoding(); | 2286 uint32_t shift = shift_imm.encoding(); |
2509 ASSERT(shift != 0); // Do not use Asr if no shift is wanted. | 2287 ASSERT(shift != 0); // Do not use Asr if no shift is wanted. |
2510 if (shift == 32) { | 2288 if (shift == 32) { |
2511 shift = 0; // Comply to UAL syntax. | 2289 shift = 0; // Comply to UAL syntax. |
2512 } | 2290 } |
2513 mov(rd, Operand(rm, ASR, shift), cond); | 2291 mov(rd, Operand(rm, ASR, shift), cond); |
2514 } | 2292 } |
2515 | 2293 |
2516 | |
2517 void Assembler::Asrs(Register rd, | 2294 void Assembler::Asrs(Register rd, |
2518 Register rm, | 2295 Register rm, |
2519 const Operand& shift_imm, | 2296 const Operand& shift_imm, |
2520 Condition cond) { | 2297 Condition cond) { |
2521 ASSERT(shift_imm.type() == 1); | 2298 ASSERT(shift_imm.type() == 1); |
2522 uint32_t shift = shift_imm.encoding(); | 2299 uint32_t shift = shift_imm.encoding(); |
2523 ASSERT(shift != 0); // Do not use Asr if no shift is wanted. | 2300 ASSERT(shift != 0); // Do not use Asr if no shift is wanted. |
2524 if (shift == 32) { | 2301 if (shift == 32) { |
2525 shift = 0; // Comply to UAL syntax. | 2302 shift = 0; // Comply to UAL syntax. |
2526 } | 2303 } |
2527 movs(rd, Operand(rm, ASR, shift), cond); | 2304 movs(rd, Operand(rm, ASR, shift), cond); |
2528 } | 2305 } |
2529 | 2306 |
2530 | |
// Arithmetic shift right by a register amount: rd <- rm >> rs (signed).
void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) {
  mov(rd, Operand(rm, ASR, rs), cond);
}
2534 | 2310 |
2535 | |
2536 void Assembler::Ror(Register rd, | 2311 void Assembler::Ror(Register rd, |
2537 Register rm, | 2312 Register rm, |
2538 const Operand& shift_imm, | 2313 const Operand& shift_imm, |
2539 Condition cond) { | 2314 Condition cond) { |
2540 ASSERT(shift_imm.type() == 1); | 2315 ASSERT(shift_imm.type() == 1); |
2541 ASSERT(shift_imm.encoding() != 0); // Use Rrx instruction. | 2316 ASSERT(shift_imm.encoding() != 0); // Use Rrx instruction. |
2542 mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond); | 2317 mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond); |
2543 } | 2318 } |
2544 | 2319 |
2545 | |
// Rotate right by a register amount: rd <- rm ROR rs.
void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) {
  mov(rd, Operand(rm, ROR, rs), cond);
}
2549 | 2323 |
2550 | |
// Rotate right with extend (rotate through carry by one bit).
// RRX is encoded as ROR with a zero rotation amount.
void Assembler::Rrx(Register rd, Register rm, Condition cond) {
  mov(rd, Operand(rm, ROR, 0), cond);
}
2554 | 2327 |
2555 | |
// Replicates the sign bit of |rm| across |rd| (result is 0 or 0xffffffff)
// via an arithmetic shift right by 31.
void Assembler::SignFill(Register rd, Register rm, Condition cond) {
  Asr(rd, rm, Operand(31), cond);
}
2559 | 2331 |
2560 | |
// Approximates qd <- 1.0 / qm (SIMD single-precision) using the NEON
// reciprocal estimate refined by two Newton-Raphson steps.
// Clobbers QTMP, so neither operand may be QTMP.
void Assembler::Vreciprocalqs(QRegister qd, QRegister qm) {
  ASSERT(qm != QTMP);
  ASSERT(qd != QTMP);

  // Reciprocal estimate.
  vrecpeqs(qd, qm);
  // 2 Newton-Raphson steps.
  vrecpsqs(QTMP, qm, qd);
  vmulqs(qd, qd, QTMP);
  vrecpsqs(QTMP, qm, qd);
  vmulqs(qd, qd, QTMP);
}
2573 | 2344 |
2574 | |
// Approximates qd <- 1.0 / sqrt(qm) (SIMD single-precision) using the NEON
// reciprocal square root estimate refined by two Newton-Raphson steps.
// Clobbers QTMP, so neither operand may be QTMP.
void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) {
  ASSERT(qm != QTMP);
  ASSERT(qd != QTMP);

  // Reciprocal square root estimate.
  vrsqrteqs(qd, qm);
  // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2.
  // First step.
  vmulqs(QTMP, qd, qd);  // QTMP <- xn^2
  vrsqrtsqs(QTMP, qm, QTMP);  // QTMP <- (3 - Q1*QTMP) / 2.
  vmulqs(qd, qd, QTMP);  // xn+1 <- xn * QTMP
  // Second step.
  vmulqs(QTMP, qd, qd);
  vrsqrtsqs(QTMP, qm, QTMP);
  vmulqs(qd, qd, QTMP);
}
2591 | 2361 |
2592 | |
// Computes qd <- sqrt(qm) as 1 / (1/sqrt(qm)).
// When |temp| is provided, |qm| is first copied into it so the caller's
// input register is preserved; with |temp| == kNoQRegister, |qm| itself is
// clobbered by the intermediate vmovq below. Also clobbers QTMP.
void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) {
  ASSERT(temp != QTMP);
  ASSERT(qm != QTMP);
  ASSERT(qd != QTMP);

  if (temp != kNoQRegister) {
    // Work on a copy so the caller's qm is left intact.
    vmovq(temp, qm);
    qm = temp;
  }

  VreciprocalSqrtqs(qd, qm);
  vmovq(qm, qd);
  Vreciprocalqs(qd, qm);
}
2607 | 2376 |
2608 | |
// Computes qd <- qn / qm elementwise, implemented as qn * (1/qm) using
// Vreciprocalqs (which clobbers QTMP; no operand may be QTMP).
void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
  ASSERT(qd != QTMP);
  ASSERT(qn != QTMP);
  ASSERT(qm != QTMP);

  Vreciprocalqs(qd, qm);
  vmulqs(qd, qn, qd);
}
2617 | 2385 |
2618 | |
// Emits a tail-jump to |stub_entry|'s code: loads the target Code object
// from the object pool (addressed through |pp|) into CODE_REG, loads its
// entry point, and branches through IP without linking.
// |patchable| controls how the pool entry is registered.
void Assembler::Branch(const StubEntry& stub_entry,
                       Patchability patchable,
                       Register pp,
                       Condition cond) {
  const Code& target_code = Code::ZoneHandle(stub_entry.code());
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target_code, patchable));
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond);
  ldr(IP, FieldAddress(CODE_REG, Code::entry_point_offset()), cond);
  bx(IP, cond);
}
2630 | 2397 |
2631 | |
// Emits a call to |target|: loads the callee's Code object from the object
// pool into CODE_REG, loads its entry point into LR, and calls via blx.
void Assembler::BranchLink(const Code& target, Patchability patchable) {
  // Make sure that class CallPattern is able to patch the label referred
  // to by this code sequence.
  // For added code robustness, use 'blx lr' in a patchable sequence and
  // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target, patchable));
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
  ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset()));
  blx(LR);  // Use blx instruction so that the return branch prediction works.
}
2643 | 2409 |
2644 | |
2645 void Assembler::BranchLink(const StubEntry& stub_entry, | 2410 void Assembler::BranchLink(const StubEntry& stub_entry, |
2646 Patchability patchable) { | 2411 Patchability patchable) { |
2647 const Code& code = Code::ZoneHandle(stub_entry.code()); | 2412 const Code& code = Code::ZoneHandle(stub_entry.code()); |
2648 BranchLink(code, patchable); | 2413 BranchLink(code, patchable); |
2649 } | 2414 } |
2650 | 2415 |
2651 | |
// Emits a call to |target| whose pool entry is registered as patchable.
void Assembler::BranchLinkPatchable(const Code& target) {
  BranchLink(target, kPatchable);
}
2655 | 2419 |
2656 | |
// Calls the runtime via the entry point and stub cached on the current
// Thread (THR); no object pool access is needed. CODE_REG is set to the
// runtime-call stub before the call.
void Assembler::BranchLinkToRuntime() {
  ldr(IP, Address(THR, Thread::call_to_runtime_entry_point_offset()));
  ldr(CODE_REG, Address(THR, Thread::call_to_runtime_stub_offset()));
  blx(IP);
}
2662 | 2425 |
2663 | |
// Like BranchLink, but the object pool lookup is given |equivalence|.
// NOTE(review): |equivalence| is forwarded to FindObject — presumably it
// lets pool entries be shared across targets considered equivalent for
// patching; confirm in ObjectPoolWrapper.
void Assembler::BranchLinkWithEquivalence(const StubEntry& stub_entry,
                                          const Object& equivalence) {
  const Code& target = Code::ZoneHandle(stub_entry.code());
  // Make sure that class CallPattern is able to patch the label referred
  // to by this code sequence.
  // For added code robustness, use 'blx lr' in a patchable sequence and
  // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target, equivalence));
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
  ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset()));
  blx(LR);  // Use blx instruction so that the return branch prediction works.
}
2677 | 2439 |
2678 | |
// Calls a fixed external address. The address is loaded as a plain
// immediate (not through the object pool) because it is never patched.
void Assembler::BranchLink(const ExternalLabel* label) {
  LoadImmediate(LR, label->address());  // Target address is never patched.
  blx(LR);  // Use blx instruction so that the return branch prediction works.
}
2683 | 2444 |
2684 | |
2685 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { | 2445 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { |
2686 BranchLinkPatchable(Code::ZoneHandle(stub_entry.code())); | 2446 BranchLinkPatchable(Code::ZoneHandle(stub_entry.code())); |
2687 } | 2447 } |
2688 | 2448 |
2689 | |
// Loads a code address from [base + offset] and calls it through IP.
void Assembler::BranchLinkOffset(Register base, int32_t offset) {
  ASSERT(base != PC);
  ASSERT(base != IP);  // IP is clobbered as the call-target scratch.
  LoadFromOffset(kWord, IP, base, offset);
  blx(IP);  // Use blx instruction so that the return branch prediction works.
}
2696 | 2455 |
2697 | |
// Loads |value| into |rd| using a fixed-length sequence that code patching
// recognizes. Do not shorten or reorder: pre-ARMv7 emits mov + three orrs
// (one byte of the value per instruction); ARMv7 emits movw/movt.
void Assembler::LoadPatchableImmediate(Register rd,
                                       int32_t value,
                                       Condition cond) {
  const ARMVersion version = TargetCPUFeatures::arm_version();
  if ((version == ARMv5TE) || (version == ARMv6)) {
    // This sequence is patched in a few places, and should remain fixed.
    const uint32_t byte0 = (value & 0x000000ff);
    const uint32_t byte1 = (value & 0x0000ff00) >> 8;
    const uint32_t byte2 = (value & 0x00ff0000) >> 16;
    const uint32_t byte3 = (value & 0xff000000) >> 24;
    // Rotated 8-bit immediates place each byte in its final position.
    mov(rd, Operand(4, byte3), cond);
    orr(rd, rd, Operand(8, byte2), cond);
    orr(rd, rd, Operand(12, byte1), cond);
    orr(rd, rd, Operand(byte0), cond);
  } else {
    ASSERT(version == ARMv7);
    // movw sets the low half (clearing the top); movt sets the high half.
    const uint16_t value_low = Utils::Low16Bits(value);
    const uint16_t value_high = Utils::High16Bits(value);
    movw(rd, value_low, cond);
    movt(rd, value_high, cond);
  }
}
2720 | 2478 |
2721 | |
// Loads |value| into |rd| with a sequence whose value can be recovered by
// inspecting the emitted code: pre-ARMv7 goes through the object pool (or
// the fixed patchable sequence when the pool is unavailable); ARMv7 uses
// movw, plus movt only when the high half is nonzero.
void Assembler::LoadDecodableImmediate(Register rd,
                                       int32_t value,
                                       Condition cond) {
  const ARMVersion version = TargetCPUFeatures::arm_version();
  if ((version == ARMv5TE) || (version == ARMv6)) {
    if (constant_pool_allowed()) {
      const int32_t offset = Array::element_offset(FindImmediate(value));
      LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
    } else {
      LoadPatchableImmediate(rd, value, cond);
    }
  } else {
    ASSERT(version == ARMv7);
    // movw clears the upper 16 bits, so movt can be skipped for small values.
    movw(rd, Utils::Low16Bits(value), cond);
    const uint16_t value_high = Utils::High16Bits(value);
    if (value_high != 0) {
      movt(rd, value_high, cond);
    }
  }
}
2742 | 2499 |
2743 | |
2744 void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) { | 2500 void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) { |
2745 Operand o; | 2501 Operand o; |
2746 if (Operand::CanHold(value, &o)) { | 2502 if (Operand::CanHold(value, &o)) { |
2747 mov(rd, o, cond); | 2503 mov(rd, o, cond); |
2748 } else if (Operand::CanHold(~value, &o)) { | 2504 } else if (Operand::CanHold(~value, &o)) { |
2749 mvn(rd, o, cond); | 2505 mvn(rd, o, cond); |
2750 } else { | 2506 } else { |
2751 LoadDecodableImmediate(rd, value, cond); | 2507 LoadDecodableImmediate(rd, value, cond); |
2752 } | 2508 } |
2753 } | 2509 } |
2754 | 2510 |
2755 | |
2756 void Assembler::LoadSImmediate(SRegister sd, float value, Condition cond) { | 2511 void Assembler::LoadSImmediate(SRegister sd, float value, Condition cond) { |
2757 if (!vmovs(sd, value, cond)) { | 2512 if (!vmovs(sd, value, cond)) { |
2758 const DRegister dd = static_cast<DRegister>(sd >> 1); | 2513 const DRegister dd = static_cast<DRegister>(sd >> 1); |
2759 const int index = sd & 1; | 2514 const int index = sd & 1; |
2760 LoadImmediate(IP, bit_cast<int32_t, float>(value), cond); | 2515 LoadImmediate(IP, bit_cast<int32_t, float>(value), cond); |
2761 vmovdr(dd, index, IP, cond); | 2516 vmovdr(dd, index, IP, cond); |
2762 } | 2517 } |
2763 } | 2518 } |
2764 | 2519 |
2765 | |
2766 void Assembler::LoadDImmediate(DRegister dd, | 2520 void Assembler::LoadDImmediate(DRegister dd, |
2767 double value, | 2521 double value, |
2768 Register scratch, | 2522 Register scratch, |
2769 Condition cond) { | 2523 Condition cond) { |
2770 ASSERT(scratch != PC); | 2524 ASSERT(scratch != PC); |
2771 ASSERT(scratch != IP); | 2525 ASSERT(scratch != IP); |
2772 if (!vmovd(dd, value, cond)) { | 2526 if (!vmovd(dd, value, cond)) { |
2773 // A scratch register and IP are needed to load an arbitrary double. | 2527 // A scratch register and IP are needed to load an arbitrary double. |
2774 ASSERT(scratch != kNoRegister); | 2528 ASSERT(scratch != kNoRegister); |
2775 int64_t imm64 = bit_cast<int64_t, double>(value); | 2529 int64_t imm64 = bit_cast<int64_t, double>(value); |
2776 LoadImmediate(IP, Utils::Low32Bits(imm64), cond); | 2530 LoadImmediate(IP, Utils::Low32Bits(imm64), cond); |
2777 LoadImmediate(scratch, Utils::High32Bits(imm64), cond); | 2531 LoadImmediate(scratch, Utils::High32Bits(imm64), cond); |
2778 vmovdrr(dd, IP, scratch, cond); | 2532 vmovdrr(dd, IP, scratch, cond); |
2779 } | 2533 } |
2780 } | 2534 } |
2781 | 2535 |
2782 | |
2783 void Assembler::LoadFromOffset(OperandSize size, | 2536 void Assembler::LoadFromOffset(OperandSize size, |
2784 Register reg, | 2537 Register reg, |
2785 Register base, | 2538 Register base, |
2786 int32_t offset, | 2539 int32_t offset, |
2787 Condition cond) { | 2540 Condition cond) { |
2788 ASSERT(size != kWordPair); | 2541 ASSERT(size != kWordPair); |
2789 int32_t offset_mask = 0; | 2542 int32_t offset_mask = 0; |
2790 if (!Address::CanHoldLoadOffset(size, offset, &offset_mask)) { | 2543 if (!Address::CanHoldLoadOffset(size, offset, &offset_mask)) { |
2791 ASSERT(base != IP); | 2544 ASSERT(base != IP); |
2792 AddImmediate(IP, base, offset & ~offset_mask, cond); | 2545 AddImmediate(IP, base, offset & ~offset_mask, cond); |
(...skipping 14 matching lines...) Expand all Loading... |
2807 ldrh(reg, Address(base, offset), cond); | 2560 ldrh(reg, Address(base, offset), cond); |
2808 break; | 2561 break; |
2809 case kWord: | 2562 case kWord: |
2810 ldr(reg, Address(base, offset), cond); | 2563 ldr(reg, Address(base, offset), cond); |
2811 break; | 2564 break; |
2812 default: | 2565 default: |
2813 UNREACHABLE(); | 2566 UNREACHABLE(); |
2814 } | 2567 } |
2815 } | 2568 } |
2816 | 2569 |
2817 | |
// Stores |reg| to [base + offset] with the given operand |size|.
// If the offset does not fit the addressing mode's immediate field, the
// out-of-range part is folded into IP (base + high bits) and the store
// uses the remaining low bits as its immediate offset.
void Assembler::StoreToOffset(OperandSize size,
                              Register reg,
                              Register base,
                              int32_t offset,
                              Condition cond) {
  ASSERT(size != kWordPair);
  int32_t offset_mask = 0;
  if (!Address::CanHoldStoreOffset(size, offset, &offset_mask)) {
    // IP is clobbered as the rebased address, so neither input may be IP.
    ASSERT(reg != IP);
    ASSERT(base != IP);
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;  // Keep only the encodable low bits.
  }
  switch (size) {
    case kByte:
      strb(reg, Address(base, offset), cond);
      break;
    case kHalfword:
      strh(reg, Address(base, offset), cond);
      break;
    case kWord:
      str(reg, Address(base, offset), cond);
      break;
    default:
      UNREACHABLE();
  }
}
2846 | 2598 |
2847 | |
// Loads a single-precision value from [base + offset] into |reg|.
// Rebases through IP when the offset exceeds the vldrs immediate range.
void Assembler::LoadSFromOffset(SRegister reg,
                                Register base,
                                int32_t offset,
                                Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldLoadOffset(kSWord, offset, &offset_mask)) {
    ASSERT(base != IP);  // IP is clobbered as the rebased address.
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  vldrs(reg, Address(base, offset), cond);
}
2861 | 2612 |
2862 | |
// Stores the single-precision register |reg| to [base + offset].
// Rebases through IP when the offset exceeds the vstrs immediate range.
void Assembler::StoreSToOffset(SRegister reg,
                               Register base,
                               int32_t offset,
                               Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldStoreOffset(kSWord, offset, &offset_mask)) {
    ASSERT(base != IP);  // IP is clobbered as the rebased address.
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  vstrs(reg, Address(base, offset), cond);
}
2876 | 2626 |
2877 | |
// Loads a double-precision value from [base + offset] into |reg|.
// Rebases through IP when the offset exceeds the vldrd immediate range.
void Assembler::LoadDFromOffset(DRegister reg,
                                Register base,
                                int32_t offset,
                                Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldLoadOffset(kDWord, offset, &offset_mask)) {
    ASSERT(base != IP);  // IP is clobbered as the rebased address.
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  vldrd(reg, Address(base, offset), cond);
}
2891 | 2640 |
2892 | |
// Stores the double-precision register |reg| to [base + offset].
// Rebases through IP when the offset exceeds the vstrd immediate range.
void Assembler::StoreDToOffset(DRegister reg,
                               Register base,
                               int32_t offset,
                               Condition cond) {
  int32_t offset_mask = 0;
  if (!Address::CanHoldStoreOffset(kDWord, offset, &offset_mask)) {
    ASSERT(base != IP);  // IP is clobbered as the rebased address.
    AddImmediate(IP, base, offset & ~offset_mask, cond);
    base = IP;
    offset = offset & offset_mask;
  }
  vstrd(reg, Address(base, offset), cond);
}
2906 | 2654 |
2907 | |
// Loads |count| consecutive D registers starting at |first| from
// [base + offset] using a single vldmd. The effective address is always
// formed in IP since vldm has no immediate-offset addressing mode.
void Assembler::LoadMultipleDFromOffset(DRegister first,
                                        intptr_t count,
                                        Register base,
                                        int32_t offset) {
  ASSERT(base != IP);  // IP holds the computed address.
  AddImmediate(IP, base, offset);
  vldmd(IA, IP, first, count);
}
2916 | 2663 |
2917 | |
// Stores |count| consecutive D registers starting at |first| to
// [base + offset] using a single vstmd. The effective address is always
// formed in IP since vstm has no immediate-offset addressing mode.
void Assembler::StoreMultipleDToOffset(DRegister first,
                                       intptr_t count,
                                       Register base,
                                       int32_t offset) {
  ASSERT(base != IP);  // IP holds the computed address.
  AddImmediate(IP, base, offset);
  vstmd(IA, IP, first, count);
}
2926 | 2672 |
2927 | |
// Copies the unboxed double payload of the Double object in |src| to the
// Double object in |dst| (both are tagged heap pointers). Uses one FPU
// load/store pair when VFP is available; otherwise copies the two 32-bit
// words through the integer temporaries |tmp1|/|tmp2|.
void Assembler::CopyDoubleField(Register dst,
                                Register src,
                                Register tmp1,
                                Register tmp2,
                                DRegister dtmp) {
  if (TargetCPUFeatures::vfp_supported()) {
    LoadDFromOffset(dtmp, src, Double::value_offset() - kHeapObjectTag);
    StoreDToOffset(dtmp, dst, Double::value_offset() - kHeapObjectTag);
  } else {
    // Soft-float path: move the 64-bit payload as two word-sized copies.
    LoadFromOffset(kWord, tmp1, src, Double::value_offset() - kHeapObjectTag);
    LoadFromOffset(kWord, tmp2, src,
                   Double::value_offset() + kWordSize - kHeapObjectTag);
    StoreToOffset(kWord, tmp1, dst, Double::value_offset() - kHeapObjectTag);
    StoreToOffset(kWord, tmp2, dst,
                  Double::value_offset() + kWordSize - kHeapObjectTag);
  }
}
2945 | 2690 |
2946 | |
2947 void Assembler::CopyFloat32x4Field(Register dst, | 2691 void Assembler::CopyFloat32x4Field(Register dst, |
2948 Register src, | 2692 Register src, |
2949 Register tmp1, | 2693 Register tmp1, |
2950 Register tmp2, | 2694 Register tmp2, |
2951 DRegister dtmp) { | 2695 DRegister dtmp) { |
2952 if (TargetCPUFeatures::neon_supported()) { | 2696 if (TargetCPUFeatures::neon_supported()) { |
2953 LoadMultipleDFromOffset(dtmp, 2, src, | 2697 LoadMultipleDFromOffset(dtmp, 2, src, |
2954 Float32x4::value_offset() - kHeapObjectTag); | 2698 Float32x4::value_offset() - kHeapObjectTag); |
2955 StoreMultipleDToOffset(dtmp, 2, dst, | 2699 StoreMultipleDToOffset(dtmp, 2, dst, |
2956 Float32x4::value_offset() - kHeapObjectTag); | 2700 Float32x4::value_offset() - kHeapObjectTag); |
(...skipping 15 matching lines...) Expand all Loading... |
2972 LoadFromOffset( | 2716 LoadFromOffset( |
2973 kWord, tmp2, src, | 2717 kWord, tmp2, src, |
2974 (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag); | 2718 (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag); |
2975 StoreToOffset(kWord, tmp1, dst, | 2719 StoreToOffset(kWord, tmp1, dst, |
2976 (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag); | 2720 (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag); |
2977 StoreToOffset(kWord, tmp2, dst, | 2721 StoreToOffset(kWord, tmp2, dst, |
2978 (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag); | 2722 (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag); |
2979 } | 2723 } |
2980 } | 2724 } |
2981 | 2725 |
2982 | |
2983 void Assembler::CopyFloat64x2Field(Register dst, | 2726 void Assembler::CopyFloat64x2Field(Register dst, |
2984 Register src, | 2727 Register src, |
2985 Register tmp1, | 2728 Register tmp1, |
2986 Register tmp2, | 2729 Register tmp2, |
2987 DRegister dtmp) { | 2730 DRegister dtmp) { |
2988 if (TargetCPUFeatures::neon_supported()) { | 2731 if (TargetCPUFeatures::neon_supported()) { |
2989 LoadMultipleDFromOffset(dtmp, 2, src, | 2732 LoadMultipleDFromOffset(dtmp, 2, src, |
2990 Float64x2::value_offset() - kHeapObjectTag); | 2733 Float64x2::value_offset() - kHeapObjectTag); |
2991 StoreMultipleDToOffset(dtmp, 2, dst, | 2734 StoreMultipleDToOffset(dtmp, 2, dst, |
2992 Float64x2::value_offset() - kHeapObjectTag); | 2735 Float64x2::value_offset() - kHeapObjectTag); |
(...skipping 15 matching lines...) Expand all Loading... |
3008 LoadFromOffset( | 2751 LoadFromOffset( |
3009 kWord, tmp2, src, | 2752 kWord, tmp2, src, |
3010 (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag); | 2753 (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag); |
3011 StoreToOffset(kWord, tmp1, dst, | 2754 StoreToOffset(kWord, tmp1, dst, |
3012 (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag); | 2755 (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag); |
3013 StoreToOffset(kWord, tmp2, dst, | 2756 StoreToOffset(kWord, tmp2, dst, |
3014 (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag); | 2757 (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag); |
3015 } | 2758 } |
3016 } | 2759 } |
3017 | 2760 |
3018 | |
3019 void Assembler::AddImmediate(Register rd, | 2761 void Assembler::AddImmediate(Register rd, |
3020 Register rn, | 2762 Register rn, |
3021 int32_t value, | 2763 int32_t value, |
3022 Condition cond) { | 2764 Condition cond) { |
3023 if (value == 0) { | 2765 if (value == 0) { |
3024 if (rd != rn) { | 2766 if (rd != rn) { |
3025 mov(rd, Operand(rn), cond); | 2767 mov(rd, Operand(rn), cond); |
3026 } | 2768 } |
3027 return; | 2769 return; |
3028 } | 2770 } |
(...skipping 16 matching lines...) Expand all Loading... |
3045 } else if (value > 0) { | 2787 } else if (value > 0) { |
3046 LoadDecodableImmediate(IP, value, cond); | 2788 LoadDecodableImmediate(IP, value, cond); |
3047 add(rd, rn, Operand(IP), cond); | 2789 add(rd, rn, Operand(IP), cond); |
3048 } else { | 2790 } else { |
3049 LoadDecodableImmediate(IP, -value, cond); | 2791 LoadDecodableImmediate(IP, -value, cond); |
3050 sub(rd, rn, Operand(IP), cond); | 2792 sub(rd, rn, Operand(IP), cond); |
3051 } | 2793 } |
3052 } | 2794 } |
3053 } | 2795 } |
3054 | 2796 |
3055 | |
// rd := rn + value, setting the condition flags (adds/subs).
// Tries progressively more expensive encodings: direct operand, negated
// operand (emitting subs instead), bitwise-complemented operand loaded via
// mvn into IP, and finally a full immediate load into IP. The adds/subs
// choice is preserved so overflow/carry flags reflect the intended add.
void Assembler::AddImmediateSetFlags(Register rd,
                                     Register rn,
                                     int32_t value,
                                     Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    // Handles value == kMinInt32.
    adds(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
    subs(rd, rn, o, cond);
  } else {
    // IP is clobbered below, so the source must not live there.
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      mvn(IP, o, cond);
      adds(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
      mvn(IP, o, cond);
      subs(rd, rn, Operand(IP), cond);
    } else {
      // Fallback: materialize the full immediate in IP.
      LoadDecodableImmediate(IP, value, cond);
      adds(rd, rn, Operand(IP), cond);
    }
  }
}
3082 | 2823 |
3083 | |
// rd := rn - value, setting the condition flags (subs/adds).
// Mirror image of AddImmediateSetFlags: direct operand, negated operand
// (emitting adds), mvn-complemented operand via IP, then a full immediate
// load into IP. Keeps subs/adds pairing so flags reflect the subtraction.
void Assembler::SubImmediateSetFlags(Register rd,
                                     Register rn,
                                     int32_t value,
                                     Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    // Handles value == kMinInt32.
    subs(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
    adds(rd, rn, o, cond);
  } else {
    // IP is clobbered below, so the source must not live there.
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      mvn(IP, o, cond);
      subs(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
      mvn(IP, o, cond);
      adds(rd, rn, Operand(IP), cond);
    } else {
      // Fallback: materialize the full immediate in IP.
      LoadDecodableImmediate(IP, value, cond);
      subs(rd, rn, Operand(IP), cond);
    }
  }
}
3110 | 2850 |
3111 | |
3112 void Assembler::AndImmediate(Register rd, | 2851 void Assembler::AndImmediate(Register rd, |
3113 Register rs, | 2852 Register rs, |
3114 int32_t imm, | 2853 int32_t imm, |
3115 Condition cond) { | 2854 Condition cond) { |
3116 Operand o; | 2855 Operand o; |
3117 if (Operand::CanHold(imm, &o)) { | 2856 if (Operand::CanHold(imm, &o)) { |
3118 and_(rd, rs, Operand(o), cond); | 2857 and_(rd, rs, Operand(o), cond); |
3119 } else { | 2858 } else { |
3120 LoadImmediate(TMP, imm, cond); | 2859 LoadImmediate(TMP, imm, cond); |
3121 and_(rd, rs, Operand(TMP), cond); | 2860 and_(rd, rs, Operand(TMP), cond); |
3122 } | 2861 } |
3123 } | 2862 } |
3124 | 2863 |
3125 | |
3126 void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) { | 2864 void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) { |
3127 Operand o; | 2865 Operand o; |
3128 if (Operand::CanHold(value, &o)) { | 2866 if (Operand::CanHold(value, &o)) { |
3129 cmp(rn, o, cond); | 2867 cmp(rn, o, cond); |
3130 } else { | 2868 } else { |
3131 ASSERT(rn != IP); | 2869 ASSERT(rn != IP); |
3132 LoadImmediate(IP, value, cond); | 2870 LoadImmediate(IP, value, cond); |
3133 cmp(rn, Operand(IP), cond); | 2871 cmp(rn, Operand(IP), cond); |
3134 } | 2872 } |
3135 } | 2873 } |
3136 | 2874 |
3137 | |
3138 void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) { | 2875 void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) { |
3139 Operand o; | 2876 Operand o; |
3140 if (Operand::CanHold(imm, &o)) { | 2877 if (Operand::CanHold(imm, &o)) { |
3141 tst(rn, o, cond); | 2878 tst(rn, o, cond); |
3142 } else { | 2879 } else { |
3143 LoadImmediate(IP, imm); | 2880 LoadImmediate(IP, imm); |
3144 tst(rn, Operand(IP), cond); | 2881 tst(rn, Operand(IP), cond); |
3145 } | 2882 } |
3146 } | 2883 } |
3147 | 2884 |
(...skipping 12 matching lines...) Expand all Loading... |
3160 vmovsr(stmpl, left); | 2897 vmovsr(stmpl, left); |
3161 vcvtdi(tmpl, stmpl); // left is in tmpl. | 2898 vcvtdi(tmpl, stmpl); // left is in tmpl. |
3162 vmovsr(stmpr, right); | 2899 vmovsr(stmpr, right); |
3163 vcvtdi(tmpr, stmpr); // right is in tmpr. | 2900 vcvtdi(tmpr, stmpr); // right is in tmpr. |
3164 vdivd(tmpr, tmpl, tmpr); | 2901 vdivd(tmpr, tmpl, tmpr); |
3165 vcvtid(stmpr, tmpr); | 2902 vcvtid(stmpr, tmpr); |
3166 vmovrs(result, stmpr); | 2903 vmovrs(result, stmpr); |
3167 } | 2904 } |
3168 } | 2905 } |
3169 | 2906 |
3170 | |
3171 static int NumRegsBelowFP(RegList regs) { | 2907 static int NumRegsBelowFP(RegList regs) { |
3172 int count = 0; | 2908 int count = 0; |
3173 for (int i = 0; i < FP; i++) { | 2909 for (int i = 0; i < FP; i++) { |
3174 if ((regs & (1 << i)) != 0) { | 2910 if ((regs & (1 << i)) != 0) { |
3175 count++; | 2911 count++; |
3176 } | 2912 } |
3177 } | 2913 } |
3178 return count; | 2914 return count; |
3179 } | 2915 } |
3180 | 2916 |
3181 | |
// Pushes the register set |regs|, establishes FP (if FP is in the set),
// and reserves |frame_size| bytes of stack. Records the prologue offset
// on the first frame entered so the profiler can find the prologue.
void Assembler::EnterFrame(RegList regs, intptr_t frame_size) {
  if (prologue_offset_ == -1) {
    prologue_offset_ = CodeSize();
  }
  PushList(regs);
  if ((regs & (1 << FP)) != 0) {
    // Set FP to the saved previous FP.
    add(FP, SP, Operand(4 * NumRegsBelowFP(regs)));
  }
  if (frame_size != 0) {
    AddImmediate(SP, -frame_size);
  }
}
3195 | 2930 |
3196 | |
// Tears down a frame created by EnterFrame with the same |regs| set.
// If FP was saved, SP is recomputed from FP first, which also discards
// any locals/spills allocated below the saved registers.
void Assembler::LeaveFrame(RegList regs) {
  ASSERT((regs & (1 << PC)) == 0);  // Must not pop PC.
  if ((regs & (1 << FP)) != 0) {
    // Use FP to set SP.
    sub(SP, FP, Operand(4 * NumRegsBelowFP(regs)));
  }
  PopList(regs);
}
3205 | 2939 |
3206 | |
// Returns to the caller by branching to the link register.
void Assembler::Ret() {
  bx(LR);
}
3210 | 2943 |
3211 | |
// Reserve space for arguments and align frame before entering
// the C++ world. The bic rounds SP down to the platform's activation
// frame alignment (a power of two), as required by the AAPCS at calls.
void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  AddImmediate(SP, -frame_space);
  if (OS::ActivationFrameAlignment() > 1) {
    bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
  }
}
3220 | 2952 |
3221 | |
// Sets up a frame for calling into the C++ runtime: saves the Dart
// volatile CPU registers plus PP/FP, saves the volatile FPU registers
// (when VFP is available), reloads the pool pointer, and reserves an
// aligned argument area of |frame_space| bytes.
void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
  Comment("EnterCallRuntimeFrame");
  // Preserve volatile CPU registers and PP.
  EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP), 0);
  COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);

  // Preserve all volatile FPU registers.
  if (TargetCPUFeatures::vfp_supported()) {
    DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
    DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
    if ((lastv - firstv + 1) >= 16) {
      // vstmd can transfer at most 16 registers per instruction, so the
      // range is split into an upper part followed by the first 16.
      DRegister mid = static_cast<DRegister>(firstv + 16);
      vstmd(DB_W, SP, mid, lastv - mid + 1);
      vstmd(DB_W, SP, firstv, 16);
    } else {
      vstmd(DB_W, SP, firstv, lastv - firstv + 1);
    }
  }

  LoadPoolPointer();

  ReserveAlignedFrameSpace(frame_space);
}
3245 | 2976 |
3246 | |
3247 void Assembler::LeaveCallRuntimeFrame() { | 2977 void Assembler::LeaveCallRuntimeFrame() { |
3248 // SP might have been modified to reserve space for arguments | 2978 // SP might have been modified to reserve space for arguments |
3249 // and ensure proper alignment of the stack frame. | 2979 // and ensure proper alignment of the stack frame. |
3250 // We need to restore it before restoring registers. | 2980 // We need to restore it before restoring registers. |
3251 const intptr_t kPushedFpuRegisterSize = | 2981 const intptr_t kPushedFpuRegisterSize = |
3252 TargetCPUFeatures::vfp_supported() | 2982 TargetCPUFeatures::vfp_supported() |
3253 ? kDartVolatileFpuRegCount * kFpuRegisterSize | 2983 ? kDartVolatileFpuRegCount * kFpuRegisterSize |
3254 : 0; | 2984 : 0; |
3255 | 2985 |
3256 COMPILE_ASSERT(PP < FP); | 2986 COMPILE_ASSERT(PP < FP); |
(...skipping 14 matching lines...) Expand all Loading... |
3271 vldmd(IA_W, SP, mid, lastv - mid + 1); | 3001 vldmd(IA_W, SP, mid, lastv - mid + 1); |
3272 } else { | 3002 } else { |
3273 vldmd(IA_W, SP, firstv, lastv - firstv + 1); | 3003 vldmd(IA_W, SP, firstv, lastv - firstv + 1); |
3274 } | 3004 } |
3275 } | 3005 } |
3276 | 3006 |
3277 // Restore volatile CPU registers. | 3007 // Restore volatile CPU registers. |
3278 LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP)); | 3008 LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP)); |
3279 } | 3009 } |
3280 | 3010 |
3281 | |
// Calls the given runtime entry with |argument_count| arguments.
// Dispatch is delegated to the entry itself, which knows its calling
// convention (leaf vs. non-leaf).
void Assembler::CallRuntime(const RuntimeEntry& entry,
                            intptr_t argument_count) {
  entry.Call(this, argument_count);
}
3286 | 3015 |
3287 | |
// Standard Dart function prologue: saves PP, CODE_REG, FP and LR, loads
// the pool pointer for this function, and reserves |frame_size| bytes for
// locals. Requires the constant pool to be (re)established here, hence
// the !constant_pool_allowed() precondition.
void Assembler::EnterDartFrame(intptr_t frame_size) {
  ASSERT(!constant_pool_allowed());

  // Registers are pushed in descending order: R5 | R6 | R7/R11 | R14.
  COMPILE_ASSERT(PP < CODE_REG);
  COMPILE_ASSERT(CODE_REG < FP);
  COMPILE_ASSERT(FP < LR);
  EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0);

  // Setup pool pointer for this dart function.
  LoadPoolPointer();

  // Reserve space for locals.
  AddImmediate(SP, -frame_size);
}
3303 | 3031 |
3304 | |
// On entry to a function compiled for OSR, the caller's frame pointer, the
// stack locals, and any copied parameters are already in place. The frame
// pointer is already set up. The PC marker is not correct for the
// optimized function and there may be extra space for spill slots to
// allocate. We must also set up the pool pointer for the function.
void Assembler::EnterOsrFrame(intptr_t extra_size) {
  ASSERT(!constant_pool_allowed());
  Comment("EnterOsrFrame");
  // Re-derive CODE_REG and PP for the optimized code instead of pushing a
  // new frame; the unoptimized frame is reused in place.
  RestoreCodePointer();
  LoadPoolPointer();

  // Reserve the extra spill-slot space needed by the optimized code.
  AddImmediate(SP, -extra_size);
}
3318 | 3045 |
3319 | |
// Standard Dart function epilogue. Optionally restores the caller's pool
// pointer from its frame slot (after which this function's constant pool
// is no longer valid), then pops FP and LR.
void Assembler::LeaveDartFrame(RestorePP restore_pp) {
  if (restore_pp == kRestoreCallerPP) {
    ldr(PP, Address(FP, kSavedCallerPpSlotFromFp * kWordSize));
    set_constant_pool_allowed(false);
  }

  // This will implicitly drop saved PP, PC marker due to restoring SP from FP
  // first.
  LeaveFrame((1 << FP) | (1 << LR));
}
3330 | 3056 |
3331 | |
// A stub frame is a Dart frame with no local slots.
void Assembler::EnterStubFrame() {
  EnterDartFrame(0);
}
3335 | 3060 |
3336 | |
// Tears down a frame created by EnterStubFrame.
void Assembler::LeaveStubFrame() {
  LeaveDartFrame();
}
3340 | 3064 |
3341 | |
// R0 receiver, R9 guarded cid as Smi
// Emits the monomorphic checked entry: compares the receiver's class id
// against the guarded cid and falls through to the unchecked entry on a
// match, otherwise branches to the miss stub. The CodeSize() asserts pin
// the two entry points to their fixed Instructions offsets, so far
// branches are disabled for the duration (they would change code size).
void Assembler::MonomorphicCheckedEntry() {
  ASSERT(has_single_entry_point_);
  has_single_entry_point_ = false;
#if defined(TESTING) || defined(DEBUG)
  bool saved_use_far_branches = use_far_branches();
  set_use_far_branches(false);
#endif

  // Miss path: tail-call the thread's monomorphic miss entry.
  Label miss;
  Bind(&miss);
  ldr(IP, Address(THR, Thread::monomorphic_miss_entry_offset()));
  bx(IP);

  Comment("MonomorphicCheckedEntry");
  ASSERT(CodeSize() == Instructions::kCheckedEntryOffset);
  LoadClassIdMayBeSmi(R4, R0);
  SmiUntag(R9);
  cmp(R4, Operand(R9));
  b(&miss, NE);

  // Fall through to unchecked entry.
  ASSERT(CodeSize() == Instructions::kUncheckedEntryOffset);

#if defined(TESTING) || defined(DEBUG)
  set_use_far_branches(saved_use_far_branches);
#endif
}
3370 | 3093 |
3371 | |
3372 #ifndef PRODUCT | 3094 #ifndef PRODUCT |
// Branches to |trace| if allocation tracing is enabled for class |cid|.
// Clobbers |temp_reg| with the class's heap-stats state word.
void Assembler::MaybeTraceAllocation(intptr_t cid,
                                     Register temp_reg,
                                     Label* trace) {
  LoadAllocationStatsAddress(temp_reg, cid);
  const uword state_offset = ClassHeapStats::state_offset();
  ldr(temp_reg, Address(temp_reg, state_offset));
  tst(temp_reg, Operand(ClassHeapStats::TraceAllocationMask()));
  b(trace, NE);  // Trace bit set: take the slow (traced) allocation path.
}
3382 | 3104 |
3383 | |
// Loads into |dest| the address of the ClassHeapStats record for class
// |cid|, found via the isolate's class table.
void Assembler::LoadAllocationStatsAddress(Register dest, intptr_t cid) {
  ASSERT(dest != kNoRegister);
  ASSERT(dest != TMP);
  ASSERT(cid > 0);
  const intptr_t class_offset = ClassTable::ClassOffsetFor(cid);
  LoadIsolate(dest);
  // dest := isolate->class_table() table pointer for this cid's chunk.
  intptr_t table_offset =
      Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
  ldr(dest, Address(dest, table_offset));
  AddImmediate(dest, class_offset);
}
3395 | 3116 |
3396 | |
3397 void Assembler::IncrementAllocationStats(Register stats_addr_reg, | 3117 void Assembler::IncrementAllocationStats(Register stats_addr_reg, |
3398 intptr_t cid, | 3118 intptr_t cid, |
3399 Heap::Space space) { | 3119 Heap::Space space) { |
3400 ASSERT(stats_addr_reg != kNoRegister); | 3120 ASSERT(stats_addr_reg != kNoRegister); |
3401 ASSERT(stats_addr_reg != TMP); | 3121 ASSERT(stats_addr_reg != TMP); |
3402 ASSERT(cid > 0); | 3122 ASSERT(cid > 0); |
3403 const uword count_field_offset = | 3123 const uword count_field_offset = |
3404 (space == Heap::kNew) | 3124 (space == Heap::kNew) |
3405 ? ClassHeapStats::allocated_since_gc_new_space_offset() | 3125 ? ClassHeapStats::allocated_since_gc_new_space_offset() |
3406 : ClassHeapStats::allocated_since_gc_old_space_offset(); | 3126 : ClassHeapStats::allocated_since_gc_old_space_offset(); |
3407 const Address& count_address = Address(stats_addr_reg, count_field_offset); | 3127 const Address& count_address = Address(stats_addr_reg, count_field_offset); |
3408 ldr(TMP, count_address); | 3128 ldr(TMP, count_address); |
3409 AddImmediate(TMP, 1); | 3129 AddImmediate(TMP, 1); |
3410 str(TMP, count_address); | 3130 str(TMP, count_address); |
3411 } | 3131 } |
3412 | 3132 |
3413 | |
3414 void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg, | 3133 void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg, |
3415 Register size_reg, | 3134 Register size_reg, |
3416 Heap::Space space) { | 3135 Heap::Space space) { |
3417 ASSERT(stats_addr_reg != kNoRegister); | 3136 ASSERT(stats_addr_reg != kNoRegister); |
3418 ASSERT(stats_addr_reg != TMP); | 3137 ASSERT(stats_addr_reg != TMP); |
3419 const uword count_field_offset = | 3138 const uword count_field_offset = |
3420 (space == Heap::kNew) | 3139 (space == Heap::kNew) |
3421 ? ClassHeapStats::allocated_since_gc_new_space_offset() | 3140 ? ClassHeapStats::allocated_since_gc_new_space_offset() |
3422 : ClassHeapStats::allocated_since_gc_old_space_offset(); | 3141 : ClassHeapStats::allocated_since_gc_old_space_offset(); |
3423 const uword size_field_offset = | 3142 const uword size_field_offset = |
3424 (space == Heap::kNew) | 3143 (space == Heap::kNew) |
3425 ? ClassHeapStats::allocated_size_since_gc_new_space_offset() | 3144 ? ClassHeapStats::allocated_size_since_gc_new_space_offset() |
3426 : ClassHeapStats::allocated_size_since_gc_old_space_offset(); | 3145 : ClassHeapStats::allocated_size_since_gc_old_space_offset(); |
3427 const Address& count_address = Address(stats_addr_reg, count_field_offset); | 3146 const Address& count_address = Address(stats_addr_reg, count_field_offset); |
3428 const Address& size_address = Address(stats_addr_reg, size_field_offset); | 3147 const Address& size_address = Address(stats_addr_reg, size_field_offset); |
3429 ldr(TMP, count_address); | 3148 ldr(TMP, count_address); |
3430 AddImmediate(TMP, 1); | 3149 AddImmediate(TMP, 1); |
3431 str(TMP, count_address); | 3150 str(TMP, count_address); |
3432 ldr(TMP, size_address); | 3151 ldr(TMP, size_address); |
3433 add(TMP, TMP, Operand(size_reg)); | 3152 add(TMP, TMP, Operand(size_reg)); |
3434 str(TMP, size_address); | 3153 str(TMP, size_address); |
3435 } | 3154 } |
3436 #endif // !PRODUCT | 3155 #endif // !PRODUCT |
3437 | 3156 |
3438 | |
3439 void Assembler::TryAllocate(const Class& cls, | 3157 void Assembler::TryAllocate(const Class& cls, |
3440 Label* failure, | 3158 Label* failure, |
3441 Register instance_reg, | 3159 Register instance_reg, |
3442 Register temp_reg) { | 3160 Register temp_reg) { |
3443 ASSERT(failure != NULL); | 3161 ASSERT(failure != NULL); |
3444 if (FLAG_inline_alloc) { | 3162 if (FLAG_inline_alloc) { |
3445 ASSERT(instance_reg != temp_reg); | 3163 ASSERT(instance_reg != temp_reg); |
3446 ASSERT(temp_reg != IP); | 3164 ASSERT(temp_reg != IP); |
3447 const intptr_t instance_size = cls.instance_size(); | 3165 const intptr_t instance_size = cls.instance_size(); |
3448 ASSERT(instance_size != 0); | 3166 ASSERT(instance_size != 0); |
(...skipping 28 matching lines...) Expand all Loading... |
3477 tags = RawObject::ClassIdTag::update(cls.id(), tags); | 3195 tags = RawObject::ClassIdTag::update(cls.id(), tags); |
3478 LoadImmediate(IP, tags); | 3196 LoadImmediate(IP, tags); |
3479 str(IP, FieldAddress(instance_reg, Object::tags_offset())); | 3197 str(IP, FieldAddress(instance_reg, Object::tags_offset())); |
3480 | 3198 |
3481 NOT_IN_PRODUCT(IncrementAllocationStats(temp_reg, cls.id(), space)); | 3199 NOT_IN_PRODUCT(IncrementAllocationStats(temp_reg, cls.id(), space)); |
3482 } else { | 3200 } else { |
3483 b(failure); | 3201 b(failure); |
3484 } | 3202 } |
3485 } | 3203 } |
3486 | 3204 |
3487 | |
3488 void Assembler::TryAllocateArray(intptr_t cid, | 3205 void Assembler::TryAllocateArray(intptr_t cid, |
3489 intptr_t instance_size, | 3206 intptr_t instance_size, |
3490 Label* failure, | 3207 Label* failure, |
3491 Register instance, | 3208 Register instance, |
3492 Register end_address, | 3209 Register end_address, |
3493 Register temp1, | 3210 Register temp1, |
3494 Register temp2) { | 3211 Register temp2) { |
3495 if (FLAG_inline_alloc) { | 3212 if (FLAG_inline_alloc) { |
3496 // If this allocation is traced, program will jump to failure path | 3213 // If this allocation is traced, program will jump to failure path |
3497 // (i.e. the allocation stub) which will allocate the object and trace the | 3214 // (i.e. the allocation stub) which will allocate the object and trace the |
(...skipping 28 matching lines...) Expand all Loading... |
3526 LoadImmediate(temp1, tags); | 3243 LoadImmediate(temp1, tags); |
3527 str(temp1, FieldAddress(instance, Array::tags_offset())); // Store tags. | 3244 str(temp1, FieldAddress(instance, Array::tags_offset())); // Store tags. |
3528 | 3245 |
3529 LoadImmediate(temp1, instance_size); | 3246 LoadImmediate(temp1, instance_size); |
3530 NOT_IN_PRODUCT(IncrementAllocationStatsWithSize(temp2, temp1, space)); | 3247 NOT_IN_PRODUCT(IncrementAllocationStatsWithSize(temp2, temp1, space)); |
3531 } else { | 3248 } else { |
3532 b(failure); | 3249 b(failure); |
3533 } | 3250 } |
3534 } | 3251 } |
3535 | 3252 |
3536 | |
// Emits a breakpoint that carries |message|, so the simulator or a
// debugger can report why execution stopped and resume past it.
void Assembler::Stop(const char* message) {
  if (FLAG_print_stop_message) {
    PushList((1 << R0) | (1 << IP) | (1 << LR));  // Preserve R0, IP, LR.
    LoadImmediate(R0, reinterpret_cast<int32_t>(message));
    // PrintStopMessage() preserves all registers.
    BranchLink(&StubCode::PrintStopMessage_entry()->label());
    PopList((1 << R0) | (1 << IP) | (1 << LR));  // Restore R0, IP, LR.
  }
  // Emit the message address before the svc instruction, so that we can
  // 'unstop' and continue execution in the simulator or jump to the next
  // instruction in gdb.
  Label stop;
  b(&stop);  // Branch around the embedded message word.
  Emit(reinterpret_cast<int32_t>(message));
  Bind(&stop);
  bkpt(Instr::kStopMessageCode);
}
3554 | 3270 |
3555 | |
3556 Address Assembler::ElementAddressForIntIndex(bool is_load, | 3271 Address Assembler::ElementAddressForIntIndex(bool is_load, |
3557 bool is_external, | 3272 bool is_external, |
3558 intptr_t cid, | 3273 intptr_t cid, |
3559 intptr_t index_scale, | 3274 intptr_t index_scale, |
3560 Register array, | 3275 Register array, |
3561 intptr_t index, | 3276 intptr_t index, |
3562 Register temp) { | 3277 Register temp) { |
3563 const int64_t offset_base = | 3278 const int64_t offset_base = |
3564 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | 3279 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
3565 const int64_t offset = | 3280 const int64_t offset = |
3566 offset_base + static_cast<int64_t>(index) * index_scale; | 3281 offset_base + static_cast<int64_t>(index) * index_scale; |
3567 ASSERT(Utils::IsInt(32, offset)); | 3282 ASSERT(Utils::IsInt(32, offset)); |
3568 | 3283 |
3569 if (Address::CanHoldImmediateOffset(is_load, cid, offset)) { | 3284 if (Address::CanHoldImmediateOffset(is_load, cid, offset)) { |
3570 return Address(array, static_cast<int32_t>(offset)); | 3285 return Address(array, static_cast<int32_t>(offset)); |
3571 } else { | 3286 } else { |
3572 ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base)); | 3287 ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base)); |
3573 AddImmediate(temp, array, static_cast<int32_t>(offset_base)); | 3288 AddImmediate(temp, array, static_cast<int32_t>(offset_base)); |
3574 return Address(temp, static_cast<int32_t>(offset - offset_base)); | 3289 return Address(temp, static_cast<int32_t>(offset - offset_base)); |
3575 } | 3290 } |
3576 } | 3291 } |
3577 | 3292 |
3578 | |
3579 void Assembler::LoadElementAddressForIntIndex(Register address, | 3293 void Assembler::LoadElementAddressForIntIndex(Register address, |
3580 bool is_load, | 3294 bool is_load, |
3581 bool is_external, | 3295 bool is_external, |
3582 intptr_t cid, | 3296 intptr_t cid, |
3583 intptr_t index_scale, | 3297 intptr_t index_scale, |
3584 Register array, | 3298 Register array, |
3585 intptr_t index) { | 3299 intptr_t index) { |
3586 const int64_t offset_base = | 3300 const int64_t offset_base = |
3587 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | 3301 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
3588 const int64_t offset = | 3302 const int64_t offset = |
3589 offset_base + static_cast<int64_t>(index) * index_scale; | 3303 offset_base + static_cast<int64_t>(index) * index_scale; |
3590 ASSERT(Utils::IsInt(32, offset)); | 3304 ASSERT(Utils::IsInt(32, offset)); |
3591 AddImmediate(address, array, offset); | 3305 AddImmediate(address, array, offset); |
3592 } | 3306 } |
3593 | 3307 |
3594 | |
3595 Address Assembler::ElementAddressForRegIndex(bool is_load, | 3308 Address Assembler::ElementAddressForRegIndex(bool is_load, |
3596 bool is_external, | 3309 bool is_external, |
3597 intptr_t cid, | 3310 intptr_t cid, |
3598 intptr_t index_scale, | 3311 intptr_t index_scale, |
3599 Register array, | 3312 Register array, |
3600 Register index) { | 3313 Register index) { |
3601 // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays. | 3314 // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays. |
3602 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift; | 3315 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift; |
3603 int32_t offset = | 3316 int32_t offset = |
3604 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); | 3317 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); |
(...skipping 19 matching lines...) Expand all Loading... |
3624 } | 3337 } |
3625 int32_t offset_mask = 0; | 3338 int32_t offset_mask = 0; |
3626 if ((is_load && !Address::CanHoldLoadOffset(size, offset, &offset_mask)) || | 3339 if ((is_load && !Address::CanHoldLoadOffset(size, offset, &offset_mask)) || |
3627 (!is_load && !Address::CanHoldStoreOffset(size, offset, &offset_mask))) { | 3340 (!is_load && !Address::CanHoldStoreOffset(size, offset, &offset_mask))) { |
3628 AddImmediate(base, offset & ~offset_mask); | 3341 AddImmediate(base, offset & ~offset_mask); |
3629 offset = offset & offset_mask; | 3342 offset = offset & offset_mask; |
3630 } | 3343 } |
3631 return Address(base, offset); | 3344 return Address(base, offset); |
3632 } | 3345 } |
3633 | 3346 |
3634 | |
3635 void Assembler::LoadElementAddressForRegIndex(Register address, | 3347 void Assembler::LoadElementAddressForRegIndex(Register address, |
3636 bool is_load, | 3348 bool is_load, |
3637 bool is_external, | 3349 bool is_external, |
3638 intptr_t cid, | 3350 intptr_t cid, |
3639 intptr_t index_scale, | 3351 intptr_t index_scale, |
3640 Register array, | 3352 Register array, |
3641 Register index) { | 3353 Register index) { |
3642 // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays. | 3354 // Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays. |
3643 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift; | 3355 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift; |
3644 int32_t offset = | 3356 int32_t offset = |
3645 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); | 3357 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); |
3646 if (shift < 0) { | 3358 if (shift < 0) { |
3647 ASSERT(shift == -1); | 3359 ASSERT(shift == -1); |
3648 add(address, array, Operand(index, ASR, 1)); | 3360 add(address, array, Operand(index, ASR, 1)); |
3649 } else { | 3361 } else { |
3650 add(address, array, Operand(index, LSL, shift)); | 3362 add(address, array, Operand(index, LSL, shift)); |
3651 } | 3363 } |
3652 if (offset != 0) { | 3364 if (offset != 0) { |
3653 AddImmediate(address, offset); | 3365 AddImmediate(address, offset); |
3654 } | 3366 } |
3655 } | 3367 } |
3656 | 3368 |
3657 | |
3658 void Assembler::LoadHalfWordUnaligned(Register dst, | 3369 void Assembler::LoadHalfWordUnaligned(Register dst, |
3659 Register addr, | 3370 Register addr, |
3660 Register tmp) { | 3371 Register tmp) { |
3661 ASSERT(dst != addr); | 3372 ASSERT(dst != addr); |
3662 ldrb(dst, Address(addr, 0)); | 3373 ldrb(dst, Address(addr, 0)); |
3663 ldrsb(tmp, Address(addr, 1)); | 3374 ldrsb(tmp, Address(addr, 1)); |
3664 orr(dst, dst, Operand(tmp, LSL, 8)); | 3375 orr(dst, dst, Operand(tmp, LSL, 8)); |
3665 } | 3376 } |
3666 | 3377 |
3667 | |
3668 void Assembler::LoadHalfWordUnsignedUnaligned(Register dst, | 3378 void Assembler::LoadHalfWordUnsignedUnaligned(Register dst, |
3669 Register addr, | 3379 Register addr, |
3670 Register tmp) { | 3380 Register tmp) { |
3671 ASSERT(dst != addr); | 3381 ASSERT(dst != addr); |
3672 ldrb(dst, Address(addr, 0)); | 3382 ldrb(dst, Address(addr, 0)); |
3673 ldrb(tmp, Address(addr, 1)); | 3383 ldrb(tmp, Address(addr, 1)); |
3674 orr(dst, dst, Operand(tmp, LSL, 8)); | 3384 orr(dst, dst, Operand(tmp, LSL, 8)); |
3675 } | 3385 } |
3676 | 3386 |
3677 | |
3678 void Assembler::StoreHalfWordUnaligned(Register src, | 3387 void Assembler::StoreHalfWordUnaligned(Register src, |
3679 Register addr, | 3388 Register addr, |
3680 Register tmp) { | 3389 Register tmp) { |
3681 strb(src, Address(addr, 0)); | 3390 strb(src, Address(addr, 0)); |
3682 Lsr(tmp, src, Operand(8)); | 3391 Lsr(tmp, src, Operand(8)); |
3683 strb(tmp, Address(addr, 1)); | 3392 strb(tmp, Address(addr, 1)); |
3684 } | 3393 } |
3685 | 3394 |
3686 | |
3687 void Assembler::LoadWordUnaligned(Register dst, Register addr, Register tmp) { | 3395 void Assembler::LoadWordUnaligned(Register dst, Register addr, Register tmp) { |
3688 ASSERT(dst != addr); | 3396 ASSERT(dst != addr); |
3689 ldrb(dst, Address(addr, 0)); | 3397 ldrb(dst, Address(addr, 0)); |
3690 ldrb(tmp, Address(addr, 1)); | 3398 ldrb(tmp, Address(addr, 1)); |
3691 orr(dst, dst, Operand(tmp, LSL, 8)); | 3399 orr(dst, dst, Operand(tmp, LSL, 8)); |
3692 ldrb(tmp, Address(addr, 2)); | 3400 ldrb(tmp, Address(addr, 2)); |
3693 orr(dst, dst, Operand(tmp, LSL, 16)); | 3401 orr(dst, dst, Operand(tmp, LSL, 16)); |
3694 ldrb(tmp, Address(addr, 3)); | 3402 ldrb(tmp, Address(addr, 3)); |
3695 orr(dst, dst, Operand(tmp, LSL, 24)); | 3403 orr(dst, dst, Operand(tmp, LSL, 24)); |
3696 } | 3404 } |
3697 | 3405 |
3698 | |
3699 void Assembler::StoreWordUnaligned(Register src, Register addr, Register tmp) { | 3406 void Assembler::StoreWordUnaligned(Register src, Register addr, Register tmp) { |
3700 strb(src, Address(addr, 0)); | 3407 strb(src, Address(addr, 0)); |
3701 Lsr(tmp, src, Operand(8)); | 3408 Lsr(tmp, src, Operand(8)); |
3702 strb(tmp, Address(addr, 1)); | 3409 strb(tmp, Address(addr, 1)); |
3703 Lsr(tmp, src, Operand(16)); | 3410 Lsr(tmp, src, Operand(16)); |
3704 strb(tmp, Address(addr, 2)); | 3411 strb(tmp, Address(addr, 2)); |
3705 Lsr(tmp, src, Operand(24)); | 3412 Lsr(tmp, src, Operand(24)); |
3706 strb(tmp, Address(addr, 3)); | 3413 strb(tmp, Address(addr, 3)); |
3707 } | 3414 } |
3708 | 3415 |
3709 | |
// Printable names for the ARM core registers, indexed by Register value.
// Registers 9 and 10 are printed with their VM-assigned roles ("ctx",
// "pp"), and r12 uses its conventional alias "ip".
static const char* cpu_reg_names[kNumberOfCpuRegisters] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "ctx", "pp", "fp", "ip", "sp", "lr", "pc",
};
3714 | 3420 |
3715 | |
// Returns the printable name for |reg| (see cpu_reg_names above).
const char* Assembler::RegisterName(Register reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters));
  return cpu_reg_names[reg];
}
3720 | 3425 |
3721 | |
// Printable names for the FPU Q registers, indexed by FpuRegister value.
// q8-q15 are only present when the target provides 32 D registers
// (VFPv3_D32 builds).
static const char* fpu_reg_names[kNumberOfFpuRegisters] = {
    "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
#if defined(VFPv3_D32)
    "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
#endif
};
3728 | 3432 |
3729 | |
// Returns the printable name for |reg| (see fpu_reg_names above).
const char* Assembler::FpuRegisterName(FpuRegister reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
  return fpu_reg_names[reg];
}
3734 | 3437 |
3735 } // namespace dart | 3438 } // namespace dart |
3736 | 3439 |
3737 #endif // defined TARGET_ARCH_ARM | 3440 #endif // defined TARGET_ARCH_ARM |
OLD | NEW |