Chromium Code Reviews

Unified diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 1131573006: ARM64: Enable shorten-64-to-32 warning (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 7 months ago
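The change itself is mechanical: every place where a 64-bit value (uint64_t, int64_t, or a size_t such as a container size) was implicitly narrowed to a 32-bit parameter now does the narrowing with an explicit static_cast, so this file compiles cleanly once Clang's -Wshorten-64-to-32 warning is enabled. A minimal standalone sketch of the two recurring cases (the function names are made up for illustration; only the parameter names echo the patch):

  // demo.cc -- shows what -Wshorten-64-to-32 reports and how the patch
  // silences it. Compile with: clang++ -Wshorten-64-to-32 -c demo.cc
  #include <cstdint>
  #include <vector>

  int32_t WidthImmediate(uint64_t mask_width) {
    // return mask_width;                      // implicit 64-to-32 truncation: warning
    return static_cast<int32_t>(mask_width);   // explicit cast: no warning
  }

  int PushCount(const std::vector<int>& pushed_nodes) {
    // size() returns size_t (64-bit on arm64), so the narrowing must be explicit.
    return static_cast<int>(pushed_nodes.size());
  }

The casts do not change behaviour; they document that the truncation is intentional, and they are safe here because the surrounding selector code already bounds the values (mask_width, for example, is a population count and can be at most 64).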
@@ -1,10 +1,10 @@
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"

 namespace v8 {
 namespace internal {
(...skipping 260 matching lines...)
@@ -271,21 +271,21 @@

 template <typename Matcher>
 void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
                  ArchOpcode negate_opcode) {
   Arm64OperandGenerator g(selector);
   Matcher m(node);
   if (m.right().HasValue() && (m.right().Value() < 0) &&
       g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
     selector->Emit(negate_opcode, g.DefineAsRegister(node),
                    g.UseRegister(m.left().node()),
-                   g.TempImmediate(-m.right().Value()));
+                   g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
   } else {
     VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
   }
 }

 }  // namespace


 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
(...skipping 296 matching lines...)
@@ -588,21 +588,22 @@
       if (mleft.right().IsInRange(0, 63)) {
         // Ubfx cannot extract bits past the register size, however since
         // shifting the original value would have introduced some zeros we can
         // still use ubfx with a smaller mask and the remaining bits will be
         // zeros.
         uint64_t lsb = mleft.right().Value();
         if (lsb + mask_width > 64) mask_width = 64 - lsb;

         Emit(kArm64Ubfx, g.DefineAsRegister(node),
              g.UseRegister(mleft.left().node()),
-             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+             g.UseImmediate(mleft.right().node()),
+             g.TempImmediate(static_cast<int32_t>(mask_width)));
         return;
       }
       // Other cases fall through to the normal And operation.
     }
   }
   VisitLogical<Int64BinopMatcher>(
       this, node, &m, kArm64And, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kLogical64Imm);
 }

(...skipping 115 matching lines...)
@@ -724,22 +725,23 @@
     Int64BinopMatcher mleft(m.left().node());
     if (mleft.right().HasValue()) {
       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
       // shifted into the least-significant bits.
       uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
       uint64_t mask_width = base::bits::CountPopulation64(mask);
       uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
       if ((mask_msb + mask_width + lsb) == 64) {
         DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
         Emit(kArm64Ubfx, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-             g.TempImmediate(mask_width));
+             g.UseRegister(mleft.left().node()),
+             g.TempImmediate(static_cast<int32_t>(lsb)),
+             g.TempImmediate(static_cast<int32_t>(mask_width)));
         return;
       }
     }
   }
   VisitRRO(this, kArm64Lsr, node, kShift64Imm);
 }


 void InstructionSelector::VisitWord32Sar(Node* node) {
   if (TryEmitBitfieldExtract32(this, node)) {
(...skipping 476 matching lines...)
@@ -1222,46 +1224,46 @@
   VisitRR(this, kArm64Float64RoundTiesAway, node);
 }


 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
   Arm64OperandGenerator g(this);
   const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);

   FrameStateDescriptor* frame_state_descriptor = nullptr;
   if (descriptor->NeedsFrameState()) {
-    frame_state_descriptor =
-        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+    frame_state_descriptor = GetFrameStateDescriptor(
+        node->InputAt(static_cast<int>(descriptor->InputCount())));
   }

   CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

   // Compute InstructionOperands for inputs and outputs.
   // TODO(turbofan): on ARM64 it's probably better to use the code object in a
   // register if there are multiple uses of it. Improve constant pool and the
   // heuristics in the register allocator for where to emit constants.
   InitializeCallBuffer(node, &buffer, true, false);

   // Push the arguments to the stack.
-  bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
-  int aligned_push_count = buffer.pushed_nodes.size();
+  int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
+  bool pushed_count_uneven = aligned_push_count & 1;
   // TODO(dcarney): claim and poke probably take small immediates,
   // loop here or whatever.
   // Bump the stack pointer(s).
   if (aligned_push_count > 0) {
     // TODO(dcarney): it would be better to bump the csp here only
     // and emit paired stores with increment for non c frames.
     Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
   }
   // Move arguments to the stack.
   {
-    int slot = buffer.pushed_nodes.size() - 1;
+    int slot = aligned_push_count - 1;
     // Emit the uneven pushes.
     if (pushed_count_uneven) {
       Node* input = buffer.pushed_nodes[slot];
       Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
            g.TempImmediate(slot));
       slot--;
     }
     // Now all pushes can be done in pairs.
     for (; slot >= 0; slot -= 2) {
       Emit(kArm64PokePair, g.NoOutput(),
(...skipping 69 matching lines...)
@@ -1337,46 +1339,46 @@
         return;
     }
     opcode |= MiscField::encode(descriptor->flags());

     // Emit the tailcall instruction.
     Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
          &buffer.instruction_args.front());
   } else {
     FrameStateDescriptor* frame_state_descriptor = nullptr;
     if (descriptor->NeedsFrameState()) {
-      frame_state_descriptor =
-          GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+      frame_state_descriptor = GetFrameStateDescriptor(
+          node->InputAt(static_cast<int>(descriptor->InputCount())));
     }

     CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

     // Compute InstructionOperands for inputs and outputs.
     // TODO(turbofan): on ARM64 it's probably better to use the code object in a
     // register if there are multiple uses of it. Improve constant pool and the
     // heuristics in the register allocator for where to emit constants.
     InitializeCallBuffer(node, &buffer, true, false);

     // Push the arguments to the stack.
-    bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
-    int aligned_push_count = buffer.pushed_nodes.size();
+    int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
+    bool pushed_count_uneven = aligned_push_count & 1;
     // TODO(dcarney): claim and poke probably take small immediates,
     // loop here or whatever.
     // Bump the stack pointer(s).
     if (aligned_push_count > 0) {
       // TODO(dcarney): it would be better to bump the csp here only
       // and emit paired stores with increment for non c frames.
       Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
     }
     // Move arguments to the stack.
     {
-      int slot = buffer.pushed_nodes.size() - 1;
+      int slot = aligned_push_count - 1;
       // Emit the uneven pushes.
       if (pushed_count_uneven) {
         Node* input = buffer.pushed_nodes[slot];
         Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
              g.TempImmediate(slot));
         slot--;
       }
       // Now all pushes can be done in pairs.
       for (; slot >= 0; slot -= 2) {
         Emit(kArm64PokePair, g.NoOutput(),
(...skipping 515 matching lines...)
@@ -1898,10 +1900,10 @@
          MachineOperatorBuilder::kFloat64RoundTruncate |
          MachineOperatorBuilder::kFloat64RoundTiesAway |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe;
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
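A related pattern, not used in this change but worth noting: where a 64-to-32 narrowing is not obviously bounded by nearby checks, the cast can be paired with an assertion so an out-of-range value is caught in debug builds. A hypothetical helper, sketched for illustration only:

  #include <cassert>
  #include <cstdint>
  #include <limits>

  // Narrow a 64-bit value to int32_t, asserting in debug builds that no bits
  // are lost. Illustrative only; the patch above uses a bare static_cast
  // because values such as lsb and mask_width are already range-checked
  // (e.g. mleft.right().IsInRange(0, 63)).
  inline int32_t CheckedNarrow(uint64_t value) {
    assert(value <= static_cast<uint64_t>(std::numeric_limits<int32_t>::max()));
    return static_cast<int32_t>(value);
  }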