| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_ARM |
| 8 | 8 |
| 9 #include "src/assembler-inl.h" | 9 #include "src/assembler-inl.h" |
| 10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
| (...skipping 1184 matching lines...) |
| 1195 } | 1195 } |
| 1196 | 1196 |
| 1197 void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, | 1197 void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, |
| 1198 SwVfpRegister src_lane, Register scratch, | 1198 SwVfpRegister src_lane, Register scratch, |
| 1199 int lane) { | 1199 int lane) { |
| 1200 Move(dst, src); | 1200 Move(dst, src); |
| 1201 int s_code = dst.code() * 4 + lane; | 1201 int s_code = dst.code() * 4 + lane; |
| 1202 VmovExtended(s_code, src_lane.code(), scratch); | 1202 VmovExtended(s_code, src_lane.code(), scratch); |
| 1203 } | 1203 } |
| 1204 | 1204 |
| 1205 void MacroAssembler::Swizzle(QwNeonRegister dst, QwNeonRegister src, | |
| 1206 Register scratch, NeonSize size, uint32_t lanes) { | |
| 1207 // TODO(bbudge) Handle Int16x8, Int8x16 vectors. | |
| 1208 DCHECK_EQ(Neon32, size); | |
| 1209 DCHECK_IMPLIES(size == Neon32, lanes < 0xFFFFu); | |
| 1210 if (size == Neon32) { | |
| 1211 switch (lanes) { | |
| 1212 // TODO(bbudge) Handle more special cases. | |
| 1213 case 0x3210: // Identity. | |
| 1214 Move(dst, src); | |
| 1215 return; | |
| 1216 case 0x1032: // Swap top and bottom. | |
| 1217 vext(dst, src, src, 8); | |
| 1218 return; | |
| 1219 case 0x2103: // Rotation. | |
| 1220 vext(dst, src, src, 12); | |
| 1221 return; | |
| 1222 case 0x0321: // Rotation. | |
| 1223 vext(dst, src, src, 4); | |
| 1224 return; | |
| 1225 case 0x0000: // Equivalent to vdup. | |
| 1226 case 0x1111: | |
| 1227 case 0x2222: | |
| 1228 case 0x3333: { | |
| 1229 int lane_code = src.code() * 4 + (lanes & 0xF); | |
| 1230 if (lane_code >= SwVfpRegister::kMaxNumRegisters) { | |
| 1231 // TODO(bbudge) use vdup (vdup.32 dst, D<src>[lane]) once implemented. | |
| 1232 int temp_code = kScratchDoubleReg.code() * 2; | |
| 1233 VmovExtended(temp_code, lane_code, scratch); | |
| 1234 lane_code = temp_code; | |
| 1235 } | |
| 1236 vdup(dst, SwVfpRegister::from_code(lane_code)); | |
| 1237 return; | |
| 1238 } | |
| 1239 case 0x2301: // Swap lanes 0, 1 and lanes 2, 3. | |
| 1240 vrev64(Neon32, dst, src); | |
| 1241 return; | |
| 1242 default: // Handle all other cases with vmovs. | |
| 1243 int src_code = src.code() * 4; | |
| 1244 int dst_code = dst.code() * 4; | |
| 1245 bool in_place = src.is(dst); | |
| 1246 if (in_place) { | |
| 1247 vmov(kScratchQuadReg, src); | |
| 1248 src_code = kScratchQuadReg.code() * 4; | |
| 1249 } | |
| 1250 for (int i = 0; i < 4; i++) { | |
| 1251 int lane = (lanes >> (i * 4) & 0xF); | |
| 1252 VmovExtended(dst_code + i, src_code + lane, scratch); | |
| 1253 } | |
| 1254 if (in_place) { | |
| 1255 // Restore zero reg. | |
| 1256 veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero); | |
| 1257 } | |
| 1258 return; | |
| 1259 } | |
| 1260 } | |
| 1261 } | |
| 1262 | |
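Note for readers skimming the deleted hunk above: each nibble of `lanes` names the source lane that feeds one destination lane, with nibble i (bits 4*i..4*i+3) selecting the source for dst lane i. That is why 0x3210 is the identity, 0x1032 swaps the two halves, and 0x0000..0x3333 degenerate to vdup. A minimal scalar sketch of that encoding follows; the name `swizzle32` is illustrative only, not a V8 function.

    // Standalone model of the Neon32 lane encoding from the deleted Swizzle:
    // nibble i of `lanes` selects the source lane copied into dst lane i.
    // swizzle32 is a hypothetical name for illustration, not V8 API.
    #include <cstdint>
    #include <cstdio>

    void swizzle32(uint32_t dst[4], const uint32_t src[4], uint32_t lanes) {
      uint32_t tmp[4];  // copy first so in-place (dst == src) calls work,
      for (int i = 0; i < 4; i++) tmp[i] = src[i];  // like the kScratchQuadReg spill
      for (int i = 0; i < 4; i++) {
        int lane = (lanes >> (i * 4)) & 0xF;  // same extraction as the vmov loop
        dst[i] = tmp[lane];
      }
    }

    int main() {
      uint32_t v[4] = {10, 11, 12, 13};
      uint32_t out[4];
      swizzle32(out, v, 0x1032);  // swap halves, the vext(dst, src, src, 8) case
      std::printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);  // 12 13 10 11
      return 0;
    }

The temporary copy mirrors the in-place handling in the deleted default case, which spilled `src` to kScratchQuadReg before issuing the per-lane vmovs.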
| 1263 void MacroAssembler::LslPair(Register dst_low, Register dst_high, | 1205 void MacroAssembler::LslPair(Register dst_low, Register dst_high, |
| 1264 Register src_low, Register src_high, | 1206 Register src_low, Register src_high, |
| 1265 Register scratch, Register shift) { | 1207 Register scratch, Register shift) { |
| 1266 DCHECK(!AreAliased(dst_high, src_low)); | 1208 DCHECK(!AreAliased(dst_high, src_low)); |
| 1267 DCHECK(!AreAliased(dst_high, shift)); | 1209 DCHECK(!AreAliased(dst_high, shift)); |
| 1268 | 1210 |
| 1269 Label less_than_32; | 1211 Label less_than_32; |
| 1270 Label done; | 1212 Label done; |
| 1271 rsb(scratch, shift, Operand(32), SetCC); | 1213 rsb(scratch, shift, Operand(32), SetCC); |
| 1272 b(gt, &less_than_32); | 1214 b(gt, &less_than_32); |
| (...skipping 2534 matching lines...) |
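On the LslPair fragment just before the elided lines: `rsb(scratch, shift, Operand(32), SetCC)` computes 32 - shift, so the `gt` branch separates shift < 32 from shift >= 32, the usual case split for shifting a 64-bit value held in a (low, high) register pair. A scalar sketch of that split, assuming the elided body follows the standard pattern (`lsl_pair` here is a hypothetical helper, not V8 API):

    // Scalar model of a 64-bit logical shift left over a (low, high) pair of
    // 32-bit words, matching the case split set up by rsb/b(gt, ...) above.
    // lsl_pair is a hypothetical helper for illustration, not part of V8.
    #include <cstdint>

    void lsl_pair(uint32_t* dst_low, uint32_t* dst_high,
                  uint32_t src_low, uint32_t src_high, uint32_t shift) {
      if (shift == 0) {  // guard: x >> 32 below would be undefined behavior
        *dst_high = src_high;
        *dst_low = src_low;
      } else if (shift < 32) {  // "less_than_32": bits cross between the words
        *dst_high = (src_high << shift) | (src_low >> (32 - shift));
        *dst_low = src_low << shift;
      } else {  // shift >= 32: high word comes entirely from the low word
        *dst_high = src_low << (shift - 32);
        *dst_low = 0;
      }
    }

In the C sketch the sources are passed by value, so aliasing is harmless; the real code writes dst_high before it is done reading src_low, which is what the `DCHECK(!AreAliased(dst_high, src_low))` guards against.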
| 3807 } | 3749 } |
| 3808 } | 3750 } |
| 3809 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); | 3751 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); |
| 3810 add(result, result, Operand(dividend, LSR, 31)); | 3752 add(result, result, Operand(dividend, LSR, 31)); |
| 3811 } | 3753 } |
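The tail visible at new lines 3749-3752 is the back end of signed division by a constant via a magic multiplier: take the high word of dividend * magic (the elided part), arithmetic-shift right by `mag.shift`, then add the dividend's sign bit (the `LSR, 31` operand) so the quotient truncates toward zero. A scalar sketch with the published constants for divisor 7 (Hacker's Delight, Table 10-1); the constants and the `div7` name are assumptions for illustration, not taken from this CL:

    // Scalar model of signed division by a constant via a "magic" multiplier,
    // the technique the ASR mag.shift / LSR #31 lines above complete.
    // div7 and its constants are an assumed example, not V8 code.
    #include <cstdint>
    #include <cstdio>

    int32_t div7(int32_t n) {
      const int32_t kMagic = static_cast<int32_t>(0x92492493);  // M for d = 7
      const int kShift = 2;                                     // s for d = 7
      // High 32 bits of the 64-bit signed product (smull/smmul equivalent).
      int32_t q = static_cast<int32_t>(
          (static_cast<int64_t>(kMagic) * n) >> 32);
      q += n;       // M is negative for d = 7, so add the dividend back
      q >>= kShift; // the ASR mag.shift step (arithmetic shift, as on ARM)
      q += static_cast<uint32_t>(n) >> 31;  // LSR #31 fixup: round toward 0
      return q;
    }

    int main() {
      std::printf("%d %d %d\n", div7(21), div7(-13), div7(6));  // 3 -1 0
      return 0;
    }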
| 3812 | 3754 |
| 3813 } // namespace internal | 3755 } // namespace internal |
| 3814 } // namespace v8 | 3756 } // namespace v8 |
| 3815 | 3757 |
| 3816 #endif // V8_TARGET_ARCH_ARM | 3758 #endif // V8_TARGET_ARCH_ARM |