Chromium Code Reviews

Side by Side Diff: src/compiler/arm/code-generator-arm.cc

Issue 2176173003: [Turbofan] Revert FP register aliasing support on Arm. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase. Created 4 years, 4 months ago
OLD | NEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/code-generator.h" 5 #include "src/compiler/code-generator.h"
6 6
7 #include "src/arm/macro-assembler-arm.h" 7 #include "src/arm/macro-assembler-arm.h"
8 #include "src/ast/scopes.h" 8 #include "src/ast/scopes.h"
9 #include "src/compiler/code-generator-impl.h" 9 #include "src/compiler/code-generator-impl.h"
10 #include "src/compiler/gap-resolver.h" 10 #include "src/compiler/gap-resolver.h"
(...skipping 118 matching lines...)
129 MemOperand ToMemOperand(InstructionOperand* op) const { 129 MemOperand ToMemOperand(InstructionOperand* op) const {
130 DCHECK_NOT_NULL(op); 130 DCHECK_NOT_NULL(op);
131 DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); 131 DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
132 return SlotToMemOperand(AllocatedOperand::cast(op)->index()); 132 return SlotToMemOperand(AllocatedOperand::cast(op)->index());
133 } 133 }
134 134
135 MemOperand SlotToMemOperand(int slot) const { 135 MemOperand SlotToMemOperand(int slot) const {
136 FrameOffset offset = frame_access_state()->GetFrameOffset(slot); 136 FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
137 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); 137 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
138 } 138 }
139
140 FloatRegister InputFloat32Register(size_t index) {
141 return ToFloat32Register(instr_->InputAt(index));
142 }
143
144 FloatRegister OutputFloat32Register() {
145 return ToFloat32Register(instr_->Output());
146 }
147
148 FloatRegister ToFloat32Register(InstructionOperand* op) {
149 return LowDwVfpRegister::from_code(ToDoubleRegister(op).code()).low();
150 }
139 }; 151 };
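The ToFloat32Register helper added on the new side above derives a single-precision register from the code of the allocated double register. A minimal standalone sketch of the VFP aliasing it relies on (each of d0-d15 overlaps a pair of S registers, with the low half of dN being s(2N)); the function name below is invented for illustration and this is not V8 code:

// Sketch of the dN -> s(2N) mapping that
// LowDwVfpRegister::from_code(code).low() performs in the helper above.
#include <cassert>
#include <cstdio>

int LowSingleFromDouble(int d_code) {
  assert(d_code >= 0 && d_code < 16);  // only d0-d15 alias S registers
  return 2 * d_code;                   // dN overlaps s(2N) and s(2N+1)
}

int main() {
  for (int d = 0; d < 16; ++d) {
    std::printf("d%d -> s%d\n", d, LowSingleFromDouble(d));
  }
  return 0;
}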
140 152
141
142 namespace { 153 namespace {
143 154
144 class OutOfLineLoadFloat final : public OutOfLineCode { 155 class OutOfLineLoadFloat32 final : public OutOfLineCode {
145 public: 156 public:
146 OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result) 157 OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
147 : OutOfLineCode(gen), result_(result) {} 158 : OutOfLineCode(gen), result_(result) {}
148 159
149 void Generate() final { 160 void Generate() final {
150 // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN. 161 // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
151 __ vmov(result_, -1.0f); 162 __ vmov(result_, -1.0f);
152 __ vsqrt(result_, result_); 163 __ vsqrt(result_, result_);
153 } 164 }
154 165
155 private: 166 private:
156 SwVfpRegister const result_; 167 SwVfpRegister const result_;
(...skipping 910 matching lines...)
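For context on OutOfLineLoadFloat32::Generate() above: taking the square root of -1.0f is a compact way to materialize a quiet single-precision NaN. A small plain-C++ check of that identity (illustrative only, not V8 code):

// Mirrors the vmov(-1.0f) / vsqrt sequence used to produce a quiet NaN.
#include <cmath>
#include <cstdio>

int main() {
  float result = -1.0f;
  result = std::sqrt(result);  // sqrt of a negative value -> quiet NaN
  std::printf("isnan=%d value=%f\n", std::isnan(result) ? 1 : 0, result);
  return 0;
}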
1067 if (instr->InputAt(2)->IsImmediate()) { 1078 if (instr->InputAt(2)->IsImmediate()) {
1068 __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0), 1079 __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
1069 i.InputRegister(1), i.InputInt32(2)); 1080 i.InputRegister(1), i.InputInt32(2));
1070 } else { 1081 } else {
1071 __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0), 1082 __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
1072 i.InputRegister(1), kScratchReg, i.InputRegister(2)); 1083 i.InputRegister(1), kScratchReg, i.InputRegister(2));
1073 } 1084 }
1074 break; 1085 break;
1075 case kArmVcmpF32: 1086 case kArmVcmpF32:
1076 if (instr->InputAt(1)->IsFPRegister()) { 1087 if (instr->InputAt(1)->IsFPRegister()) {
1077 __ VFPCompareAndSetFlags(i.InputFloatRegister(0), 1088 __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
1078 i.InputFloatRegister(1)); 1089 i.InputFloat32Register(1));
1079 } else { 1090 } else {
1080 DCHECK(instr->InputAt(1)->IsImmediate()); 1091 DCHECK(instr->InputAt(1)->IsImmediate());
1081 // 0.0 is the only immediate supported by vcmp instructions. 1092 // 0.0 is the only immediate supported by vcmp instructions.
1082 DCHECK(i.InputFloat32(1) == 0.0f); 1093 DCHECK(i.InputFloat32(1) == 0.0f);
1083 __ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1)); 1094 __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
1084 } 1095 }
1085 DCHECK_EQ(SetCC, i.OutputSBit()); 1096 DCHECK_EQ(SetCC, i.OutputSBit());
1086 break; 1097 break;
1087 case kArmVaddF32: 1098 case kArmVaddF32:
1088 __ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0), 1099 __ vadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
1089 i.InputFloatRegister(1)); 1100 i.InputFloat32Register(1));
1090 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1101 DCHECK_EQ(LeaveCC, i.OutputSBit());
1091 break; 1102 break;
1092 case kArmVsubF32: 1103 case kArmVsubF32:
1093 __ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0), 1104 __ vsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
1094 i.InputFloatRegister(1)); 1105 i.InputFloat32Register(1));
1095 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1106 DCHECK_EQ(LeaveCC, i.OutputSBit());
1096 break; 1107 break;
1097 case kArmVmulF32: 1108 case kArmVmulF32:
1098 __ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0), 1109 __ vmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
1099 i.InputFloatRegister(1)); 1110 i.InputFloat32Register(1));
1100 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1111 DCHECK_EQ(LeaveCC, i.OutputSBit());
1101 break; 1112 break;
1102 case kArmVmlaF32: 1113 case kArmVmlaF32:
1103 __ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1), 1114 __ vmla(i.OutputFloat32Register(), i.InputFloat32Register(1),
1104 i.InputFloatRegister(2)); 1115 i.InputFloat32Register(2));
1105 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1116 DCHECK_EQ(LeaveCC, i.OutputSBit());
1106 break; 1117 break;
1107 case kArmVmlsF32: 1118 case kArmVmlsF32:
1108 __ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1), 1119 __ vmls(i.OutputFloat32Register(), i.InputFloat32Register(1),
1109 i.InputFloatRegister(2)); 1120 i.InputFloat32Register(2));
1110 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1121 DCHECK_EQ(LeaveCC, i.OutputSBit());
1111 break; 1122 break;
1112 case kArmVdivF32: 1123 case kArmVdivF32:
1113 __ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0), 1124 __ vdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
1114 i.InputFloatRegister(1)); 1125 i.InputFloat32Register(1));
1115 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1126 DCHECK_EQ(LeaveCC, i.OutputSBit());
1116 break; 1127 break;
1117 case kArmVsqrtF32: 1128 case kArmVsqrtF32:
1118 __ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0)); 1129 __ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
1119 break; 1130 break;
1120 case kArmVabsF32: 1131 case kArmVabsF32:
1121 __ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0)); 1132 __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
1122 break; 1133 break;
1123 case kArmVnegF32: 1134 case kArmVnegF32:
1124 __ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0)); 1135 __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
1125 break; 1136 break;
1126 case kArmVcmpF64: 1137 case kArmVcmpF64:
1127 if (instr->InputAt(1)->IsFPRegister()) { 1138 if (instr->InputAt(1)->IsFPRegister()) {
1128 __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), 1139 __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
1129 i.InputDoubleRegister(1)); 1140 i.InputDoubleRegister(1));
1130 } else { 1141 } else {
1131 DCHECK(instr->InputAt(1)->IsImmediate()); 1142 DCHECK(instr->InputAt(1)->IsImmediate());
1132 // 0.0 is the only immediate supported by vcmp instructions. 1143 // 0.0 is the only immediate supported by vcmp instructions.
1133 DCHECK(i.InputDouble(1) == 0.0); 1144 DCHECK(i.InputDouble(1) == 0.0);
1134 __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), i.InputDouble(1)); 1145 __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), i.InputDouble(1));
(...skipping 47 matching lines...)
1182 case kArmVsqrtF64: 1193 case kArmVsqrtF64:
1183 __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1194 __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1184 break; 1195 break;
1185 case kArmVabsF64: 1196 case kArmVabsF64:
1186 __ vabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1197 __ vabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1187 break; 1198 break;
1188 case kArmVnegF64: 1199 case kArmVnegF64:
1189 __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1200 __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1190 break; 1201 break;
1191 case kArmVrintmF32: 1202 case kArmVrintmF32:
1192 __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0)); 1203 __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
1193 break; 1204 break;
1194 case kArmVrintmF64: 1205 case kArmVrintmF64:
1195 __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1206 __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1196 break; 1207 break;
1197 case kArmVrintpF32: 1208 case kArmVrintpF32:
1198 __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0)); 1209 __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
1199 break; 1210 break;
1200 case kArmVrintpF64: 1211 case kArmVrintpF64:
1201 __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1212 __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1202 break; 1213 break;
1203 case kArmVrintzF32: 1214 case kArmVrintzF32:
1204 __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0)); 1215 __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
1205 break; 1216 break;
1206 case kArmVrintzF64: 1217 case kArmVrintzF64:
1207 __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1218 __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1208 break; 1219 break;
1209 case kArmVrintaF64: 1220 case kArmVrintaF64:
1210 __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1221 __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1211 break; 1222 break;
1212 case kArmVrintnF32: 1223 case kArmVrintnF32:
1213 __ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0)); 1224 __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
1214 break; 1225 break;
1215 case kArmVrintnF64: 1226 case kArmVrintnF64:
1216 __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1227 __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1217 break; 1228 break;
1218 case kArmVcvtF32F64: { 1229 case kArmVcvtF32F64: {
1219 __ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0)); 1230 __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
1220 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1231 DCHECK_EQ(LeaveCC, i.OutputSBit());
1221 break; 1232 break;
1222 } 1233 }
1223 case kArmVcvtF64F32: { 1234 case kArmVcvtF64F32: {
1224 __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0)); 1235 __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloat32Register(0));
1225 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1236 DCHECK_EQ(LeaveCC, i.OutputSBit());
1226 break; 1237 break;
1227 } 1238 }
1228 case kArmVcvtF32S32: { 1239 case kArmVcvtF32S32: {
1229 SwVfpRegister scratch = kScratchDoubleReg.low(); 1240 SwVfpRegister scratch = kScratchDoubleReg.low();
1230 __ vmov(scratch, i.InputRegister(0)); 1241 __ vmov(scratch, i.InputRegister(0));
1231 __ vcvt_f32_s32(i.OutputFloatRegister(), scratch); 1242 __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
1232 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1243 DCHECK_EQ(LeaveCC, i.OutputSBit());
1233 break; 1244 break;
1234 } 1245 }
1235 case kArmVcvtF32U32: { 1246 case kArmVcvtF32U32: {
1236 SwVfpRegister scratch = kScratchDoubleReg.low(); 1247 SwVfpRegister scratch = kScratchDoubleReg.low();
1237 __ vmov(scratch, i.InputRegister(0)); 1248 __ vmov(scratch, i.InputRegister(0));
1238 __ vcvt_f32_u32(i.OutputFloatRegister(), scratch); 1249 __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
1239 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1250 DCHECK_EQ(LeaveCC, i.OutputSBit());
1240 break; 1251 break;
1241 } 1252 }
1242 case kArmVcvtF64S32: { 1253 case kArmVcvtF64S32: {
1243 SwVfpRegister scratch = kScratchDoubleReg.low(); 1254 SwVfpRegister scratch = kScratchDoubleReg.low();
1244 __ vmov(scratch, i.InputRegister(0)); 1255 __ vmov(scratch, i.InputRegister(0));
1245 __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch); 1256 __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
1246 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1257 DCHECK_EQ(LeaveCC, i.OutputSBit());
1247 break; 1258 break;
1248 } 1259 }
1249 case kArmVcvtF64U32: { 1260 case kArmVcvtF64U32: {
1250 SwVfpRegister scratch = kScratchDoubleReg.low(); 1261 SwVfpRegister scratch = kScratchDoubleReg.low();
1251 __ vmov(scratch, i.InputRegister(0)); 1262 __ vmov(scratch, i.InputRegister(0));
1252 __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch); 1263 __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
1253 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1264 DCHECK_EQ(LeaveCC, i.OutputSBit());
1254 break; 1265 break;
1255 } 1266 }
1256 case kArmVcvtS32F32: { 1267 case kArmVcvtS32F32: {
1257 SwVfpRegister scratch = kScratchDoubleReg.low(); 1268 SwVfpRegister scratch = kScratchDoubleReg.low();
1258 __ vcvt_s32_f32(scratch, i.InputFloatRegister(0)); 1269 __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
1259 __ vmov(i.OutputRegister(), scratch); 1270 __ vmov(i.OutputRegister(), scratch);
1260 // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, 1271 // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
1261 // because INT32_MIN allows easier out-of-bounds detection. 1272 // because INT32_MIN allows easier out-of-bounds detection.
1262 __ cmn(i.OutputRegister(), Operand(1)); 1273 __ cmn(i.OutputRegister(), Operand(1));
1263 __ mov(i.OutputRegister(), Operand(INT32_MIN), SBit::LeaveCC, vs); 1274 __ mov(i.OutputRegister(), Operand(INT32_MIN), SBit::LeaveCC, vs);
1264 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1275 DCHECK_EQ(LeaveCC, i.OutputSBit());
1265 break; 1276 break;
1266 } 1277 }
1267 case kArmVcvtU32F32: { 1278 case kArmVcvtU32F32: {
1268 SwVfpRegister scratch = kScratchDoubleReg.low(); 1279 SwVfpRegister scratch = kScratchDoubleReg.low();
1269 __ vcvt_u32_f32(scratch, i.InputFloatRegister(0)); 1280 __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
1270 __ vmov(i.OutputRegister(), scratch); 1281 __ vmov(i.OutputRegister(), scratch);
1271 // Avoid UINT32_MAX as an overflow indicator and use 0 instead, 1282 // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1272 // because 0 allows easier out-of-bounds detection. 1283 // because 0 allows easier out-of-bounds detection.
1273 __ cmn(i.OutputRegister(), Operand(1)); 1284 __ cmn(i.OutputRegister(), Operand(1));
1274 __ adc(i.OutputRegister(), i.OutputRegister(), Operand::Zero()); 1285 __ adc(i.OutputRegister(), i.OutputRegister(), Operand::Zero());
1275 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1286 DCHECK_EQ(LeaveCC, i.OutputSBit());
1276 break; 1287 break;
1277 } 1288 }
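The two float-to-integer cases above depend on vcvt saturating out-of-range inputs, and then remap the saturated maximum onto the sentinel value the surrounding code expects: cmn(out, 1) sets the overflow flag only when out == INT32_MAX (signed case) and the carry flag only when out == UINT32_MAX (unsigned case). A minimal scalar sketch of those two fixups, assuming only that saturation behaviour (plain C++, not V8 code):

// Signed: mov(..., INT32_MIN, LeaveCC, vs) fires exactly when out + 1
// overflows, i.e. when the conversion saturated to INT32_MAX.
// Unsigned: adc(out, out, 0) adds the carry of out + 1, which wraps
// UINT32_MAX to 0 and leaves every other value unchanged.
#include <cstdint>
#include <cstdio>

int32_t FixupSignedResult(int32_t converted) {
  return converted == INT32_MAX ? INT32_MIN : converted;
}

uint32_t FixupUnsignedResult(uint32_t converted) {
  uint32_t carry = (converted == UINT32_MAX) ? 1u : 0u;
  return converted + carry;  // unsigned wraparound: UINT32_MAX + 1 == 0
}

int main() {
  std::printf("%d %d\n", (int)FixupSignedResult(INT32_MAX), (int)FixupSignedResult(7));
  std::printf("%u %u\n", (unsigned)FixupUnsignedResult(UINT32_MAX),
              (unsigned)FixupUnsignedResult(7u));
  return 0;
}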
1278 case kArmVcvtS32F64: { 1289 case kArmVcvtS32F64: {
1279 SwVfpRegister scratch = kScratchDoubleReg.low(); 1290 SwVfpRegister scratch = kScratchDoubleReg.low();
1280 __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0)); 1291 __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
1281 __ vmov(i.OutputRegister(), scratch); 1292 __ vmov(i.OutputRegister(), scratch);
1282 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1293 DCHECK_EQ(LeaveCC, i.OutputSBit());
1283 break; 1294 break;
1284 } 1295 }
1285 case kArmVcvtU32F64: { 1296 case kArmVcvtU32F64: {
1286 SwVfpRegister scratch = kScratchDoubleReg.low(); 1297 SwVfpRegister scratch = kScratchDoubleReg.low();
1287 __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0)); 1298 __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
1288 __ vmov(i.OutputRegister(), scratch); 1299 __ vmov(i.OutputRegister(), scratch);
1289 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1300 DCHECK_EQ(LeaveCC, i.OutputSBit());
1290 break; 1301 break;
1291 } 1302 }
1292 case kArmVmovU32F32: 1303 case kArmVmovU32F32:
1293 __ vmov(i.OutputRegister(), i.InputFloatRegister(0)); 1304 __ vmov(i.OutputRegister(), i.InputFloat32Register(0));
1294 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1305 DCHECK_EQ(LeaveCC, i.OutputSBit());
1295 break; 1306 break;
1296 case kArmVmovF32U32: 1307 case kArmVmovF32U32:
1297 __ vmov(i.OutputFloatRegister(), i.InputRegister(0)); 1308 __ vmov(i.OutputFloat32Register(), i.InputRegister(0));
1298 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1309 DCHECK_EQ(LeaveCC, i.OutputSBit());
1299 break; 1310 break;
1300 case kArmVmovLowU32F64: 1311 case kArmVmovLowU32F64:
1301 __ VmovLow(i.OutputRegister(), i.InputDoubleRegister(0)); 1312 __ VmovLow(i.OutputRegister(), i.InputDoubleRegister(0));
1302 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1313 DCHECK_EQ(LeaveCC, i.OutputSBit());
1303 break; 1314 break;
1304 case kArmVmovLowF64U32: 1315 case kArmVmovLowF64U32:
1305 __ VmovLow(i.OutputDoubleRegister(), i.InputRegister(1)); 1316 __ VmovLow(i.OutputDoubleRegister(), i.InputRegister(1));
1306 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1317 DCHECK_EQ(LeaveCC, i.OutputSBit());
1307 break; 1318 break;
(...skipping 37 matching lines...)
1345 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1356 DCHECK_EQ(LeaveCC, i.OutputSBit());
1346 break; 1357 break;
1347 case kArmLdr: 1358 case kArmLdr:
1348 __ ldr(i.OutputRegister(), i.InputOffset()); 1359 __ ldr(i.OutputRegister(), i.InputOffset());
1349 break; 1360 break;
1350 case kArmStr: 1361 case kArmStr:
1351 __ str(i.InputRegister(0), i.InputOffset(1)); 1362 __ str(i.InputRegister(0), i.InputOffset(1));
1352 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1363 DCHECK_EQ(LeaveCC, i.OutputSBit());
1353 break; 1364 break;
1354 case kArmVldrF32: { 1365 case kArmVldrF32: {
1355 __ vldr(i.OutputFloatRegister(), i.InputOffset()); 1366 __ vldr(i.OutputFloat32Register(), i.InputOffset());
1356 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1367 DCHECK_EQ(LeaveCC, i.OutputSBit());
1357 break; 1368 break;
1358 } 1369 }
1359 case kArmVstrF32: 1370 case kArmVstrF32:
1360 __ vstr(i.InputFloatRegister(0), i.InputOffset(1)); 1371 __ vstr(i.InputFloat32Register(0), i.InputOffset(1));
1361 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1372 DCHECK_EQ(LeaveCC, i.OutputSBit());
1362 break; 1373 break;
1363 case kArmVldrF64: 1374 case kArmVldrF64:
1364 __ vldr(i.OutputDoubleRegister(), i.InputOffset()); 1375 __ vldr(i.OutputDoubleRegister(), i.InputOffset());
1365 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1376 DCHECK_EQ(LeaveCC, i.OutputSBit());
1366 break; 1377 break;
1367 case kArmVstrF64: 1378 case kArmVstrF64:
1368 __ vstr(i.InputDoubleRegister(0), i.InputOffset(1)); 1379 __ vstr(i.InputDoubleRegister(0), i.InputOffset(1));
1369 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1380 DCHECK_EQ(LeaveCC, i.OutputSBit());
1370 break; 1381 break;
(...skipping 75 matching lines...)
1446 break; 1457 break;
1447 } 1458 }
1448 case kArmPush: 1459 case kArmPush:
1449 if (instr->InputAt(0)->IsFPRegister()) { 1460 if (instr->InputAt(0)->IsFPRegister()) {
1450 LocationOperand* op = LocationOperand::cast(instr->InputAt(0)); 1461 LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1451 if (op->representation() == MachineRepresentation::kFloat64) { 1462 if (op->representation() == MachineRepresentation::kFloat64) {
1452 __ vpush(i.InputDoubleRegister(0)); 1463 __ vpush(i.InputDoubleRegister(0));
1453 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize); 1464 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1454 } else { 1465 } else {
1455 DCHECK_EQ(MachineRepresentation::kFloat32, op->representation()); 1466 DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
1456 __ vpush(i.InputFloatRegister(0)); 1467 __ vpush(i.InputFloat32Register(0));
1457 frame_access_state()->IncreaseSPDelta(1); 1468 frame_access_state()->IncreaseSPDelta(1);
1458 } 1469 }
1459 } else { 1470 } else {
1460 __ push(i.InputRegister(0)); 1471 __ push(i.InputRegister(0));
1461 frame_access_state()->IncreaseSPDelta(1); 1472 frame_access_state()->IncreaseSPDelta(1);
1462 } 1473 }
1463 DCHECK_EQ(LeaveCC, i.OutputSBit()); 1474 DCHECK_EQ(LeaveCC, i.OutputSBit());
1464 break; 1475 break;
1465 case kArmPoke: { 1476 case kArmPoke: {
1466 int const slot = MiscField::decode(instr->opcode()); 1477 int const slot = MiscField::decode(instr->opcode());
(...skipping 10 matching lines...)
1477 case kCheckedLoadInt16: 1488 case kCheckedLoadInt16:
1478 ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh); 1489 ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
1479 break; 1490 break;
1480 case kCheckedLoadUint16: 1491 case kCheckedLoadUint16:
1481 ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh); 1492 ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
1482 break; 1493 break;
1483 case kCheckedLoadWord32: 1494 case kCheckedLoadWord32:
1484 ASSEMBLE_CHECKED_LOAD_INTEGER(ldr); 1495 ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
1485 break; 1496 break;
1486 case kCheckedLoadFloat32: 1497 case kCheckedLoadFloat32:
1487 ASSEMBLE_CHECKED_LOAD_FP(Float); 1498 ASSEMBLE_CHECKED_LOAD_FP(Float32);
1488 break; 1499 break;
1489 case kCheckedLoadFloat64: 1500 case kCheckedLoadFloat64:
1490 ASSEMBLE_CHECKED_LOAD_FP(Double); 1501 ASSEMBLE_CHECKED_LOAD_FP(Double);
1491 break; 1502 break;
1492 case kCheckedStoreWord8: 1503 case kCheckedStoreWord8:
1493 ASSEMBLE_CHECKED_STORE_INTEGER(strb); 1504 ASSEMBLE_CHECKED_STORE_INTEGER(strb);
1494 break; 1505 break;
1495 case kCheckedStoreWord16: 1506 case kCheckedStoreWord16:
1496 ASSEMBLE_CHECKED_STORE_INTEGER(strh); 1507 ASSEMBLE_CHECKED_STORE_INTEGER(strh);
1497 break; 1508 break;
1498 case kCheckedStoreWord32: 1509 case kCheckedStoreWord32:
1499 ASSEMBLE_CHECKED_STORE_INTEGER(str); 1510 ASSEMBLE_CHECKED_STORE_INTEGER(str);
1500 break; 1511 break;
1501 case kCheckedStoreFloat32: 1512 case kCheckedStoreFloat32:
1502 ASSEMBLE_CHECKED_STORE_FP(Float); 1513 ASSEMBLE_CHECKED_STORE_FP(Float32);
1503 break; 1514 break;
1504 case kCheckedStoreFloat64: 1515 case kCheckedStoreFloat64:
1505 ASSEMBLE_CHECKED_STORE_FP(Double); 1516 ASSEMBLE_CHECKED_STORE_FP(Double);
1506 break; 1517 break;
1507 case kCheckedLoadWord64: 1518 case kCheckedLoadWord64:
1508 case kCheckedStoreWord64: 1519 case kCheckedStoreWord64:
1509 UNREACHABLE(); // currently unsupported checked int64 load/store. 1520 UNREACHABLE(); // currently unsupported checked int64 load/store.
1510 break; 1521 break;
1511 1522
1512 case kAtomicLoadInt8: 1523 case kAtomicLoadInt8:
(...skipping 297 matching lines...)
1810 UNREACHABLE(); // TODO(dcarney): loading RPO constants on arm. 1821 UNREACHABLE(); // TODO(dcarney): loading RPO constants on arm.
1811 break; 1822 break;
1812 } 1823 }
1813 if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination)); 1824 if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
1814 } else if (src.type() == Constant::kFloat32) { 1825 } else if (src.type() == Constant::kFloat32) {
1815 if (destination->IsFPStackSlot()) { 1826 if (destination->IsFPStackSlot()) {
1816 MemOperand dst = g.ToMemOperand(destination); 1827 MemOperand dst = g.ToMemOperand(destination);
1817 __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32()))); 1828 __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
1818 __ str(ip, dst); 1829 __ str(ip, dst);
1819 } else { 1830 } else {
1820 SwVfpRegister dst = g.ToFloatRegister(destination); 1831 SwVfpRegister dst = g.ToFloat32Register(destination);
1821 __ vmov(dst, src.ToFloat32()); 1832 __ vmov(dst, src.ToFloat32());
1822 } 1833 }
1823 } else { 1834 } else {
1824 DCHECK_EQ(Constant::kFloat64, src.type()); 1835 DCHECK_EQ(Constant::kFloat64, src.type());
1825 DwVfpRegister dst = destination->IsFPRegister() 1836 DwVfpRegister dst = destination->IsFPRegister()
1826 ? g.ToDoubleRegister(destination) 1837 ? g.ToDoubleRegister(destination)
1827 : kScratchDoubleReg; 1838 : kScratchDoubleReg;
1828 __ vmov(dst, src.ToFloat64(), kScratchReg); 1839 __ vmov(dst, src.ToFloat64(), kScratchReg);
1829 if (destination->IsFPStackSlot()) { 1840 if (destination->IsFPStackSlot()) {
1830 __ vstr(dst, g.ToMemOperand(destination)); 1841 __ vstr(dst, g.ToMemOperand(destination));
1831 } 1842 }
1832 } 1843 }
1833 } else if (source->IsFPRegister()) { 1844 } else if (source->IsFPRegister()) {
1834 MachineRepresentation rep = LocationOperand::cast(source)->representation(); 1845 DwVfpRegister src = g.ToDoubleRegister(source);
1835 if (rep == MachineRepresentation::kFloat64) { 1846 if (destination->IsFPRegister()) {
1836 DwVfpRegister src = g.ToDoubleRegister(source); 1847 DwVfpRegister dst = g.ToDoubleRegister(destination);
1837 if (destination->IsFPRegister()) { 1848 __ Move(dst, src);
1838 DwVfpRegister dst = g.ToDoubleRegister(destination);
1839 __ Move(dst, src);
1840 } else {
1841 DCHECK(destination->IsFPStackSlot());
1842 __ vstr(src, g.ToMemOperand(destination));
1843 }
1844 } else { 1849 } else {
1845 DCHECK_EQ(MachineRepresentation::kFloat32, rep); 1850 DCHECK(destination->IsFPStackSlot());
1846 SwVfpRegister src = g.ToFloatRegister(source); 1851 __ vstr(src, g.ToMemOperand(destination));
1847 if (destination->IsFPRegister()) {
1848 SwVfpRegister dst = g.ToFloatRegister(destination);
1849 __ Move(dst, src);
1850 } else {
1851 DCHECK(destination->IsFPStackSlot());
1852 __ vstr(src, g.ToMemOperand(destination));
1853 }
1854 } 1852 }
1855 } else if (source->IsFPStackSlot()) { 1853 } else if (source->IsFPStackSlot()) {
1856 MemOperand src = g.ToMemOperand(source); 1854 MemOperand src = g.ToMemOperand(source);
1857 MachineRepresentation rep =
1858 LocationOperand::cast(destination)->representation();
1859 if (destination->IsFPRegister()) { 1855 if (destination->IsFPRegister()) {
1860 if (rep == MachineRepresentation::kFloat64) {
1861 __ vldr(g.ToDoubleRegister(destination), src); 1856 __ vldr(g.ToDoubleRegister(destination), src);
1862 } else {
1863 DCHECK_EQ(MachineRepresentation::kFloat32, rep);
1864 __ vldr(g.ToFloatRegister(destination), src);
1865 }
1866 } else { 1857 } else {
1867 DCHECK(destination->IsFPStackSlot()); 1858 DCHECK(destination->IsFPStackSlot());
1868 if (rep == MachineRepresentation::kFloat64) {
1869 DwVfpRegister temp = kScratchDoubleReg; 1859 DwVfpRegister temp = kScratchDoubleReg;
1870 __ vldr(temp, src); 1860 __ vldr(temp, src);
1871 __ vstr(temp, g.ToMemOperand(destination)); 1861 __ vstr(temp, g.ToMemOperand(destination));
1872 } else {
1873 DCHECK_EQ(MachineRepresentation::kFloat32, rep);
1874 SwVfpRegister temp = kScratchDoubleReg.low();
1875 __ vldr(temp, src);
1876 __ vstr(temp, g.ToMemOperand(destination));
1877 }
1878 } 1862 }
1879 } else { 1863 } else {
1880 UNREACHABLE(); 1864 UNREACHABLE();
1881 } 1865 }
1882 } 1866 }
1883 1867
1884 1868
1885 void CodeGenerator::AssembleSwap(InstructionOperand* source, 1869 void CodeGenerator::AssembleSwap(InstructionOperand* source,
1886 InstructionOperand* destination) { 1870 InstructionOperand* destination) {
1887 ArmOperandConverter g(this, nullptr); 1871 ArmOperandConverter g(this, nullptr);
(...skipping 19 matching lines...)
1907 DCHECK(destination->IsStackSlot()); 1891 DCHECK(destination->IsStackSlot());
1908 Register temp_0 = kScratchReg; 1892 Register temp_0 = kScratchReg;
1909 SwVfpRegister temp_1 = kScratchDoubleReg.low(); 1893 SwVfpRegister temp_1 = kScratchDoubleReg.low();
1910 MemOperand src = g.ToMemOperand(source); 1894 MemOperand src = g.ToMemOperand(source);
1911 MemOperand dst = g.ToMemOperand(destination); 1895 MemOperand dst = g.ToMemOperand(destination);
1912 __ ldr(temp_0, src); 1896 __ ldr(temp_0, src);
1913 __ vldr(temp_1, dst); 1897 __ vldr(temp_1, dst);
1914 __ str(temp_0, dst); 1898 __ str(temp_0, dst);
1915 __ vstr(temp_1, src); 1899 __ vstr(temp_1, src);
1916 } else if (source->IsFPRegister()) { 1900 } else if (source->IsFPRegister()) {
1917 MachineRepresentation rep = LocationOperand::cast(source)->representation();
1918 LowDwVfpRegister temp = kScratchDoubleReg; 1901 LowDwVfpRegister temp = kScratchDoubleReg;
1919 if (rep == MachineRepresentation::kFloat64) {
1920 DwVfpRegister src = g.ToDoubleRegister(source); 1902 DwVfpRegister src = g.ToDoubleRegister(source);
1921 if (destination->IsFPRegister()) { 1903 if (destination->IsFPRegister()) {
1922 DwVfpRegister dst = g.ToDoubleRegister(destination); 1904 DwVfpRegister dst = g.ToDoubleRegister(destination);
1923 __ Move(temp, src); 1905 __ Move(temp, src);
1924 __ Move(src, dst); 1906 __ Move(src, dst);
1925 __ Move(dst, temp); 1907 __ Move(dst, temp);
1926 } else { 1908 } else {
1927 DCHECK(destination->IsFPStackSlot()); 1909 DCHECK(destination->IsFPStackSlot());
1928 MemOperand dst = g.ToMemOperand(destination); 1910 MemOperand dst = g.ToMemOperand(destination);
1929 __ Move(temp, src); 1911 __ Move(temp, src);
1930 __ vldr(src, dst); 1912 __ vldr(src, dst);
1931 __ vstr(temp, dst); 1913 __ vstr(temp, dst);
1932 } 1914 }
1933 } else {
1934 DCHECK_EQ(MachineRepresentation::kFloat32, rep);
1935 SwVfpRegister src = g.ToFloatRegister(source);
1936 if (destination->IsFPRegister()) {
1937 SwVfpRegister dst = g.ToFloatRegister(destination);
1938 __ Move(temp.low(), src);
1939 __ Move(src, dst);
1940 __ Move(dst, temp.low());
1941 } else {
1942 DCHECK(destination->IsFPStackSlot());
1943 MemOperand dst = g.ToMemOperand(destination);
1944 __ Move(temp.low(), src);
1945 __ vldr(src, dst);
1946 __ vstr(temp.low(), dst);
1947 }
1948 }
1949 } else if (source->IsFPStackSlot()) { 1915 } else if (source->IsFPStackSlot()) {
1950 DCHECK(destination->IsFPStackSlot()); 1916 DCHECK(destination->IsFPStackSlot());
1951 Register temp_0 = kScratchReg; 1917 Register temp_0 = kScratchReg;
1952 LowDwVfpRegister temp_1 = kScratchDoubleReg; 1918 LowDwVfpRegister temp_1 = kScratchDoubleReg;
1953 MemOperand src0 = g.ToMemOperand(source); 1919 MemOperand src0 = g.ToMemOperand(source);
1954 MemOperand dst0 = g.ToMemOperand(destination); 1920 MemOperand dst0 = g.ToMemOperand(destination);
1955 MachineRepresentation rep = LocationOperand::cast(source)->representation();
1956 if (rep == MachineRepresentation::kFloat64) {
1957 MemOperand src1(src0.rn(), src0.offset() + kPointerSize); 1921 MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
1958 MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize); 1922 MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
1959 __ vldr(temp_1, dst0); // Save destination in temp_1. 1923 __ vldr(temp_1, dst0); // Save destination in temp_1.
1960 __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination. 1924 __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
1961 __ str(temp_0, dst0); 1925 __ str(temp_0, dst0);
1962 __ ldr(temp_0, src1); 1926 __ ldr(temp_0, src1);
1963 __ str(temp_0, dst1); 1927 __ str(temp_0, dst1);
1964 __ vstr(temp_1, src0); 1928 __ vstr(temp_1, src0);
1965 } else {
1966 DCHECK_EQ(MachineRepresentation::kFloat32, rep);
1967 __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
1968 __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
1969 __ str(temp_0, dst0);
1970 __ vstr(temp_1.low(), src0);
1971 }
1972 } else { 1929 } else {
1973 // No other combinations are possible. 1930 // No other combinations are possible.
1974 UNREACHABLE(); 1931 UNREACHABLE();
1975 } 1932 }
1976 } 1933 }
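The kFloat64 stack-slot swap on the new side of AssembleSwap above uses the double scratch register to hold one operand while the other is copied word by word through the general-purpose scratch register. A standalone sketch of that sequence on two 8-byte slots (plain C++, not V8 code; the function name is invented):

// Swap two 8-byte stack slots with one 64-bit temp and one 32-bit temp,
// mirroring the vldr / ldr / str / ldr / str / vstr sequence above.
#include <cstdint>
#include <cstdio>
#include <cstring>

void SwapSlots(uint32_t* src, uint32_t* dst) {
  uint64_t temp_1;               // plays the role of kScratchDoubleReg
  std::memcpy(&temp_1, dst, 8);  // vldr temp_1, dst0: save destination
  uint32_t temp_0 = src[0];      // ldr temp_0, src0
  dst[0] = temp_0;               // str temp_0, dst0
  temp_0 = src[1];               // ldr temp_0, src1
  dst[1] = temp_0;               // str temp_0, dst1
  std::memcpy(src, &temp_1, 8);  // vstr temp_1, src0: write saved value to source
}

int main() {
  uint32_t a[2] = {1, 2};
  uint32_t b[2] = {3, 4};
  SwapSlots(a, b);
  std::printf("%u %u | %u %u\n", a[0], a[1], b[0], b[1]);  // 3 4 | 1 2
  return 0;
}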
1977 1934
1978 1935
1979 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { 1936 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
1980 // On 32-bit ARM we emit the jump tables inline. 1937 // On 32-bit ARM we emit the jump tables inline.
1981 UNREACHABLE(); 1938 UNREACHABLE();
(...skipping 19 matching lines...)
2001 padding_size -= v8::internal::Assembler::kInstrSize; 1958 padding_size -= v8::internal::Assembler::kInstrSize;
2002 } 1959 }
2003 } 1960 }
2004 } 1961 }
2005 1962
2006 #undef __ 1963 #undef __
2007 1964
2008 } // namespace compiler 1965 } // namespace compiler
2009 } // namespace internal 1966 } // namespace internal
2010 } // namespace v8 1967 } // namespace v8