OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_ARM |
8 | 8 |
9 #include "src/assembler-inl.h" | 9 #include "src/assembler-inl.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 1153 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1164 } | 1164 } |
1165 | 1165 |
1166 void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, | 1166 void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, |
1167 SwVfpRegister src_lane, Register scratch, | 1167 SwVfpRegister src_lane, Register scratch, |
1168 int lane) { | 1168 int lane) { |
1169 Move(dst, src); | 1169 Move(dst, src); |
1170 int s_code = dst.code() * 4 + lane; | 1170 int s_code = dst.code() * 4 + lane; |
1171 VmovExtended(s_code, src_lane.code(), scratch); | 1171 VmovExtended(s_code, src_lane.code(), scratch); |
1172 } | 1172 } |
1173 | 1173 |
// Permutes the four 32-bit lanes of |src| into |dst|: nibble i of |lanes|
// selects the source lane that is written to destination lane i (see the
// general-case loop below). Common permutations are emitted as a single
// NEON instruction (vext / vdup / vrev64); all others fall back to
// per-lane S-register moves via VmovExtended, which may use |scratch|.
void MacroAssembler::Swizzle(QwNeonRegister dst, QwNeonRegister src,
                             Register scratch, NeonSize size, uint32_t lanes) {
  // TODO(bbudge) Handle Int16x8, Int8x16 vectors.
  DCHECK_EQ(Neon32, size);
  // Four lanes => at most four nibbles of |lanes| are meaningful.
  DCHECK_IMPLIES(size == Neon32, lanes < 0xFFFFu);
  if (size == Neon32) {
    switch (lanes) {
      // TODO(bbudge) Handle more special cases.
      case 0x3210:  // Identity.
        Move(dst, src);
        return;
      case 0x1032:  // Swap top and bottom halves (byte offset 8 = 2 lanes).
        vext(dst, src, src, 8);
        return;
      case 0x2103:  // Rotation by one lane (byte offset 12 = 3 lanes).
        vext(dst, src, src, 12);
        return;
      case 0x0321:  // Rotation by three lanes (byte offset 4 = 1 lane).
        vext(dst, src, src, 4);
        return;
      case 0x0000:  // Equivalent to vdup: broadcast one source lane to all.
      case 0x1111:
      case 0x2222:
      case 0x3333: {
        // S-register index of the lane to broadcast.
        int lane_code = src.code() * 4 + (lanes & 0xF);
        if (lane_code >= SwVfpRegister::kMaxNumRegisters) {
          // The lane lives in the upper register file, which vdup-from-S
          // cannot address; stage it in the scratch double register first.
          // TODO(bbudge) use vdup (vdup.32 dst, D<src>[lane]) once implemented.
          int temp_code = kScratchDoubleReg.code() * 2;
          VmovExtended(temp_code, lane_code, scratch);
          lane_code = temp_code;
        }
        vdup(dst, SwVfpRegister::from_code(lane_code));
        return;
      }
      case 0x2301:  // Swap lanes 0, 1 and lanes 2, 3.
        vrev64(Neon32, dst, src);
        return;
      default:  // Handle all other cases with vmovs.
        int src_code = src.code() * 4;
        int dst_code = dst.code() * 4;
        bool in_place = src.is(dst);
        if (in_place) {
          // dst aliases src, so per-lane copies would clobber lanes that are
          // still needed; work from a snapshot in the scratch quad register.
          vmov(kScratchQuadReg, src);
          src_code = kScratchQuadReg.code() * 4;
        }
        for (int i = 0; i < 4; i++) {
          // Nibble i of |lanes| is the source lane for destination lane i.
          int lane = (lanes >> (i * 4) & 0xF);
          VmovExtended(dst_code + i, src_code + lane, scratch);
        }
        if (in_place) {
          // Restore zero reg. NOTE(review): presumably kScratchQuadReg
          // overlaps kDoubleRegZero, since it must be re-zeroed here —
          // confirm against the register definitions.
          veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
        }
        return;
    }
  }
}
1231 | |
1232 void MacroAssembler::LslPair(Register dst_low, Register dst_high, | 1174 void MacroAssembler::LslPair(Register dst_low, Register dst_high, |
1233 Register src_low, Register src_high, | 1175 Register src_low, Register src_high, |
1234 Register scratch, Register shift) { | 1176 Register scratch, Register shift) { |
1235 DCHECK(!AreAliased(dst_high, src_low)); | 1177 DCHECK(!AreAliased(dst_high, src_low)); |
1236 DCHECK(!AreAliased(dst_high, shift)); | 1178 DCHECK(!AreAliased(dst_high, shift)); |
1237 | 1179 |
1238 Label less_than_32; | 1180 Label less_than_32; |
1239 Label done; | 1181 Label done; |
1240 rsb(scratch, shift, Operand(32), SetCC); | 1182 rsb(scratch, shift, Operand(32), SetCC); |
1241 b(gt, &less_than_32); | 1183 b(gt, &less_than_32); |
(...skipping 2492 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3734 } | 3676 } |
3735 } | 3677 } |
3736 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); | 3678 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); |
3737 add(result, result, Operand(dividend, LSR, 31)); | 3679 add(result, result, Operand(dividend, LSR, 31)); |
3738 } | 3680 } |
3739 | 3681 |
3740 } // namespace internal | 3682 } // namespace internal |
3741 } // namespace v8 | 3683 } // namespace v8 |
3742 | 3684 |
3743 #endif // V8_TARGET_ARCH_ARM | 3685 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |