Chromium Code Reviews

Side by Side Diff: src/arm64/macro-assembler-arm64.cc

Issue 1131573006: ARM64: Enable shorten-64-to-32 warning (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 7 months ago
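
This patch enables Clang's -Wshorten-64-to-32, which flags implicit truncations from 64-bit to 32-bit integer types. The hunks below all follow one of two patterns: widen the variable to the 64-bit source type, or make the truncation explicit with a static_cast. A minimal standalone sketch of the warning and both fixes (illustrative, not V8 code):

    #include <cstddef>
    #include <vector>

    void Example(const std::vector<int>& v) {
      // int count = v.size();                   // warns: size_t (64-bit) -> int (32-bit)
      std::size_t count = v.size();              // fix 1: widen the variable
      int count32 = static_cast<int>(v.size());  // fix 2: make the truncation explicit
      (void)count;
      (void)count32;
    }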
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
(...skipping 908 matching lines...)

void MacroAssembler::PushPopQueue::PushQueued(
    PreambleDirective preamble_directive) {
  if (queued_.empty()) return;

  if (preamble_directive == WITH_PREAMBLE) {
    masm_->PushPreamble(size_);
  }

-  int count = queued_.size();
-  int index = 0;
+  size_t count = queued_.size();
+  size_t index = 0;
  while (index < count) {
    // PushHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}

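The int to size_t change here (and in PopQueued below) works because queued_ is a std::vector, whose size() returns size_t; keeping the count and index in size_t removes the implicit 64-to-32 narrowing without any casts, while batch_index can stay an int since it is bounded by four. A self-contained sketch of the same batching pattern, using a hypothetical Item type in place of CPURegister:

    #include <cstddef>
    #include <vector>

    struct Item { int kind; };  // hypothetical stand-in for CPURegister

    void ProcessQueued(const std::vector<Item>& queued) {
      std::size_t count = queued.size();  // size_t throughout: no narrowing
      std::size_t index = 0;
      while (index < count) {
        Item batch[4];
        int batch_index = 0;  // bounded by 4, so a 32-bit int is fine
        do {
          batch[batch_index++] = queued[index++];
        } while (batch_index < 4 && index < count &&
                 batch[0].kind == queued[index].kind);
        // ... hand the batch_index items in 'batch' to a helper ...
      }
    }
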
void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

-  int count = queued_.size();
-  int index = 0;
+  size_t count = queued_.size();
+  size_t index = 0;
  while (index < count) {
    // PopHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

(...skipping 292 matching lines...)

void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

-  MemOperand tos(csp, -2 * kXRegSize, PreIndex);
+  MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);  // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
(...skipping 3409 matching lines...)
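The cast in PushCalleeSavedRegisters is the subtler pattern. Assuming kXRegSize is an unsigned 64-bit constant (which the cast suggests), -2 * kXRegSize is computed in unsigned arithmetic, producing a huge value that only becomes -16 again when truncated into a signed offset, and that truncation is exactly what the warning flags. Casting to int first keeps the whole expression in signed 32-bit arithmetic. A sketch under that assumption:

    #include <cstdint>

    const uint64_t kSlotSize = 8;  // hypothetical stand-in for kXRegSize

    int32_t PreIndexOffset() {
      // return -2 * kSlotSize;  // warns: uint64_t -> int32_t; the unsigned
      //                         // product is 0xFFFFFFFFFFFFFFF0 before truncation
      return -2 * static_cast<int32_t>(kSlotSize);  // plain signed arithmetic: -16
    }
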
                                                   Register map_in_out,
                                                   Register scratch1,
                                                   Register scratch2,
                                                   Label* no_map_match) {
  // Load the global or builtins object from the current context.
  Ldr(scratch1, GlobalObjectMemOperand());
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
-  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+  int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(scratch2, FieldMemOperand(scratch1, offset));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
}

(...skipping 401 matching lines...)
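This hunk goes the opposite direction, size_t to int. The right-hand side is already a small int expression (an element kind times kPointerSize, plus a header size), and FieldMemOperand evidently takes an int offset, so storing the value in a size_t only introduced a 64-to-32 narrowing at the call site. A sketch with hypothetical constants and a hypothetical accessor:

    const int kPointerSize = 8;  // hypothetical values for illustration
    const int kHeaderSize = 16;

    void Load(int offset) { /* stand-in for a helper taking a 32-bit offset */ }

    void Example(int expected_kind) {
      // size_t offset = expected_kind * kPointerSize + kHeaderSize;
      // Load(offset);  // would warn: size_t (64-bit) -> int (32-bit)
      int offset = expected_kind * kPointerSize + kHeaderSize;  // stays 32-bit
      Load(offset);  // no conversion, no warning
    }
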
                                   const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
-    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
+    uint32_t delta =
+        static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}

InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
-      int reg_code = RegisterBits::decode(payload);
+      uint32_t payload32 = static_cast<uint32_t>(payload);
+      int reg_code = RegisterBits::decode(payload32);
      reg_ = Register::XRegFromCode(reg_code);
-      uint64_t smi_check_delta = DeltaBits::decode(payload);
+      int smi_check_delta = DeltaBits::decode(payload32);
      DCHECK(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}
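
Both patch-site hunks funnel data through RegisterBits and DeltaBits, which (per the comments above) pack a register code into the lowest 5 bits of a uint32_t and the instruction delta into the remaining bits. The casts make the 64-to-32 crossings explicit on both sides: on encode, InstructionsGeneratedSince returns a pointer-difference, and on decode, the payload is read back as a uint64_t. A minimal sketch of the pack/unpack pattern, assuming shift-and-mask BitField semantics like V8's; the 5/27 split follows the comment, the rest is illustrative:

    #include <cstdint>

    template <typename T, int position, int size>
    struct BitField {  // assumed semantics: 'size' bits stored at 'position'
      static const uint32_t kMask = ((1u << size) - 1u) << position;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << position;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> position);
      }
    };

    typedef BitField<unsigned, 0, 5> RegisterBits;  // register code: low 5 bits
    typedef BitField<uint32_t, 5, 27> DeltaBits;    // instruction delta above it

    uint32_t Pack(unsigned reg_code, uint32_t delta) {
      return RegisterBits::encode(reg_code) | DeltaBits::encode(delta);
    }

    uint32_t UnpackDelta(uint64_t payload) {
      // Narrow explicitly before decoding, mirroring the constructor above.
      uint32_t payload32 = static_cast<uint32_t>(payload);
      return DeltaBits::decode(payload32);
    }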


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64
