Chromium Code Reviews

Unified Diff: src/arm/assembler-arm.cc

Issue 2816703002: [arm] Remove embedded constant pool support. (Closed)
Patch Set: [arm] Remove embedded constant pool support. Created 3 years, 8 months ago
 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
 // are met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 309 matching lines...)
 
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo
 
 // static
 const int RelocInfo::kApplyMask = 0;
 
 
 bool RelocInfo::IsCodedSpecially() {
   // The deserializer needs to know whether a pointer is specially coded.  Being
-  // specially coded on ARM means that it is a movw/movt instruction, or is an
-  // embedded constant pool entry.  These only occur if
-  // FLAG_enable_embedded_constant_pool is true.
-  return FLAG_enable_embedded_constant_pool;
+  // specially coded on ARM means that it is a movw/movt instruction. We don't
+  // generate those for relocatable pointers.
+  return false;
 }
 
 
 bool RelocInfo::IsInConstantPool() {
   return Assembler::is_constant_pool_load(pc_);
 }
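
Note on the change above: a movw/movt pair embeds the 32-bit value directly in two 16-bit instruction immediates, while a constant-pool load keeps the value in a data word next to the code, so the serializer has to patch the two cases differently. A minimal standalone sketch of the movw/movt split (our own illustration, not code from this patch):

    #include <cstdint>
    #include <cstdio>

    // How a 32-bit immediate is divided across a movw/movt pair on ARM:
    // movw writes the low 16 bits, movt the high 16 bits.
    int main() {
      uint32_t imm32 = 0xDEADBEEFu;
      uint16_t movw_imm = imm32 & 0xFFFFu;  // movw rd, #0xBEEF
      uint16_t movt_imm = imm32 >> 16;      // movt rd, #0xDEAD
      std::printf("movw #0x%04X / movt #0x%04X\n", movw_imm, movt_imm);
      return 0;
    }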
 
 Address RelocInfo::wasm_memory_reference() {
   DCHECK(IsWasmMemoryReference(rmode_));
   return Assembler::target_address_at(pc_, host_);
(...skipping 152 matching lines...)
 // register r is not encoded.
 const Instr kPushRegPattern =
     al | B26 | 4 | NegPreIndex | Register::kCode_sp * B16;
 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
 // register r is not encoded.
 const Instr kPopRegPattern =
     al | B26 | L | 4 | PostIndex | Register::kCode_sp * B16;
 // ldr rd, [pc, #offset]
 const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
 const Instr kLdrPCImmedPattern = 5 * B24 | L | Register::kCode_pc * B16;
-// ldr rd, [pp, #offset]
-const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpImmedPattern = 5 * B24 | L | Register::kCode_r8 * B16;
-// ldr rd, [pp, rn]
-const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpRegPattern = 7 * B24 | L | Register::kCode_r8 * B16;
 // vldr dd, [pc, #offset]
 const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
 const Instr kVldrDPCPattern = 13 * B24 | L | Register::kCode_pc * B16 | 11 * B8;
-// vldr dd, [pp, #offset]
-const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPpPattern = 13 * B24 | L | Register::kCode_r8 * B16 | 11 * B8;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
 const Instr kBlxIp = al | kBlxRegPattern | ip.code();
 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
 const Instr kMovMvnPattern = 0xd * B21;
 const Instr kMovMvnFlip = B22;
 const Instr kMovLeaveCCMask = 0xdff * B16;
(...skipping 19 matching lines...)
 const Instr kLdrRegFpNegOffsetPattern =
     al | B26 | L | NegOffset | Register::kCode_fp * B16;
 const Instr kStrRegFpNegOffsetPattern =
     al | B26 | NegOffset | Register::kCode_fp * B16;
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
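
Each mask/pattern pair above classifies an instruction word by zeroing its variable fields (Rd, offset, condition) and comparing the fixed opcode bits against a template. A standalone sketch of the idiom, using the values kLdrPCImmedMask and kLdrPCImmedPattern expand to when Bn = 1 << n and L = 1 << 20 (the expansion is ours; the constants are from the code above):

    #include <cstdint>

    using Instr = uint32_t;

    // 15 * B24 | 7 * B20 | 15 * B16 with Bn = 1 << n:
    constexpr Instr kLdrPCImmedMask = 0x0F7F0000;
    // 5 * B24 | L | Register::kCode_pc * B16 with L = 1 << 20, pc = 15:
    constexpr Instr kLdrPCImmedPattern = 0x051F0000;

    // True for any "ldr<cond> rd, [pc, +/- #offset_12]" regardless of which
    // rd, which offset, and which condition code the instruction uses.
    bool IsLdrPcImmediateOffset(Instr instr) {
      return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
    }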
 
 Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
     : AssemblerBase(isolate_data, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
       pending_32_bit_constants_(),
-      pending_64_bit_constants_(),
-      constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits) {
+      pending_64_bit_constants_() {
   pending_32_bit_constants_.reserve(kMinNumPendingConstants);
   pending_64_bit_constants_.reserve(kMinNumPendingConstants);
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
   first_const_pool_32_use_ = -1;
   first_const_pool_64_use_ = -1;
   last_bound_pos_ = 0;
   ClearRecordedAstId();
   if (CpuFeatures::IsSupported(VFP32DREGS)) {
     // Register objects tend to be abstracted and survive between scopes, so
     // it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
     // its use consistent with other features, we always enable it if we can.
     EnableCpuFeature(VFP32DREGS);
   }
 }
 
 
 Assembler::~Assembler() {
   DCHECK(const_pool_blocked_nesting_ == 0);
 }
 
 
 void Assembler::GetCode(CodeDesc* desc) {
   // Emit constant pool if necessary.
   int constant_pool_offset = 0;
-  if (FLAG_enable_embedded_constant_pool) {
-    constant_pool_offset = EmitEmbeddedConstantPool();
-  } else {
-    CheckConstPool(true, false);
-    DCHECK(pending_32_bit_constants_.empty());
-    DCHECK(pending_64_bit_constants_.empty());
-  }
+  CheckConstPool(true, false);
+  DCHECK(pending_32_bit_constants_.empty());
+  DCHECK(pending_64_bit_constants_.empty());
   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
   desc->constant_pool_size =
       (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
   desc->origin = this;
   desc->unwinding_info_size = 0;
   desc->unwinding_info = nullptr;
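
For context on the reloc_size computation: the assembler writes instructions forward from the start of buffer_ while reloc_info_writer fills relocation records backward from the end, so the record size is the distance from the writer's position to the buffer end. A simplified model of that layout (types and names are ours, not V8's):

    #include <cstddef>
    #include <cstdint>

    // One buffer shared by instructions and relocation info:
    // [ instructions --> ......gap...... <-- reloc records ]
    struct BufferModel {
      uint8_t* buffer;      // start of the buffer
      size_t buffer_size;
      uint8_t* pc;          // next instruction byte, grows upward
      uint8_t* reloc_pos;   // last written reloc byte, grows downward

      size_t instr_size() const { return static_cast<size_t>(pc - buffer); }
      size_t reloc_size() const {
        return static_cast<size_t>(buffer + buffer_size - reloc_pos);
      }
    };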
(...skipping 130 matching lines...)
 }
 
 
 Register Assembler::GetRm(Instr instr) {
   Register reg;
   reg.reg_code = Instruction::RmValue(instr);
   return reg;
 }
 
 
-Instr Assembler::GetConsantPoolLoadPattern() {
-  if (FLAG_enable_embedded_constant_pool) {
-    return kLdrPpImmedPattern;
-  } else {
-    return kLdrPCImmedPattern;
-  }
-}
-
-
-Instr Assembler::GetConsantPoolLoadMask() {
-  if (FLAG_enable_embedded_constant_pool) {
-    return kLdrPpImmedMask;
-  } else {
-    return kLdrPCImmedMask;
-  }
-}
-
-
 bool Assembler::IsPush(Instr instr) {
   return ((instr & ~kRdMask) == kPushRegPattern);
 }
 
 
 bool Assembler::IsPop(Instr instr) {
   return ((instr & ~kRdMask) == kPopRegPattern);
 }
 
 
(...skipping 17 matching lines...)
 }
 
 
 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // ldr<cond> <Rd>, [pc +/- offset_12].
   return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
 }
 
 
-bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
-  // Check the instruction is indeed a
-  // ldr<cond> <Rd>, [pp +/- offset_12].
-  return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
-}
-
-
-bool Assembler::IsLdrPpRegOffset(Instr instr) {
-  // Check the instruction is indeed a
-  // ldr<cond> <Rd>, [pp, +/- <Rm>].
-  return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
-}
-
-
-Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
-
-
 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // vldr<cond> <Dd>, [pc +/- offset_10].
   return (instr & kVldrDPCMask) == kVldrDPCPattern;
 }
 
 
-bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
-  // Check the instruction is indeed a
-  // vldr<cond> <Dd>, [pp +/- offset_10].
-  return (instr & kVldrDPpMask) == kVldrDPpPattern;
-}
-
-
 bool Assembler::IsBlxReg(Instr instr) {
   // Check the instruction is indeed a
   // blxcc <Rm>
   return (instr & kBlxRegMask) == kBlxRegPattern;
 }
 
 
 bool Assembler::IsBlxIp(Instr instr) {
   // Check the instruction is indeed a
   // blx ip
(...skipping 323 matching lines...)
   } else if (RelocInfo::IsNone(rmode_)) {
     return false;
   }
   return true;
 }
 
 
 static bool use_mov_immediate_load(const Operand& x,
                                    const Assembler* assembler) {
   DCHECK(assembler != nullptr);
-  if (FLAG_enable_embedded_constant_pool &&
-      !assembler->is_constant_pool_available()) {
-    return true;
-  } else if (x.must_output_reloc_info(assembler)) {
+  if (x.must_output_reloc_info(assembler)) {
     // Prefer constant pool if data is likely to be patched.
     return false;
   } else {
     // Otherwise, use immediate load if movw / movt is available.
     return CpuFeatures::IsSupported(ARMv7);
   }
 }
 
 
 int Operand::instructions_required(const Assembler* assembler,
                                    Instr instr) const {
   DCHECK(assembler != nullptr);
   if (rm_.is_valid()) return 1;
   uint32_t dummy1, dummy2;
   if (must_output_reloc_info(assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
     // constant pool is required. First account for the instructions required
     // for the constant pool or immediate load
     int instructions;
     if (use_mov_immediate_load(*this, assembler)) {
       // A movw / movt or mov / orr immediate load.
       instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
-    } else if (assembler->ConstantPoolAccessIsInOverflow()) {
-      // An overflowed constant pool load.
-      instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
     } else {
       // A small constant pool load.
       instructions = 1;
     }
-
     if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
       // For a mov or mvn instruction which doesn't set the condition
       // code, the constant pool or immediate load is enough, otherwise we need
       // to account for the actual instruction being requested.
       instructions += 1;
     }
     return instructions;
   } else {
     // No use of constant pool and the immediate operand can be encoded as a
     // shifter operand.
     return 1;
   }
 }
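
With the overflow branch gone the count has only a few outcomes, and the remaining "? 2 : 4" arm can in practice only yield 2, because use_mov_immediate_load now returns true only when ARMv7 is supported. A condensed model of the decision (inputs stubbed as booleans; the real code derives them from the Operand and the Assembler):

    // Condensed model of Operand::instructions_required() after this patch.
    int InstructionsRequired(bool is_register_operand, bool needs_reloc,
                             bool fits_shifter_operand, bool has_armv7,
                             bool is_plain_mov) {
      if (is_register_operand) return 1;                  // rm_ is valid
      if (!needs_reloc && fits_shifter_operand) return 1; // shifter operand
      // movw/movt pair, or a single ldr from the inline constant pool.
      bool use_mov = !needs_reloc && has_armv7;  // use_mov_immediate_load()
      int n = use_mov ? 2 : 1;
      if (!is_plain_mov) n += 1;  // the requested instruction itself
      return n;
    }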
 
 
 void Assembler::move_32_bit_immediate(Register rd,
                                       const Operand& x,
                                       Condition cond) {
   uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
   if (x.must_output_reloc_info(this)) {
     RecordRelocInfo(x.rmode_);
   }
 
   if (use_mov_immediate_load(x, this)) {
+    // use_mov_immediate_load should return false when we need to output
+    // relocation info, since we prefer the constant pool for values that
+    // can be patched.
+    DCHECK(!x.must_output_reloc_info(this));
     Register target = rd.code() == pc.code() ? ip : rd;
     if (CpuFeatures::IsSupported(ARMv7)) {
       CpuFeatureScope scope(this, ARMv7);
-      if (!FLAG_enable_embedded_constant_pool &&
-          x.must_output_reloc_info(this)) {
-        // Make sure the movw/movt doesn't get separated.
-        BlockConstPoolFor(2);
-      }
       movw(target, imm32 & 0xffff, cond);
       movt(target, imm32 >> 16, cond);
-    } else {
-      DCHECK(FLAG_enable_embedded_constant_pool);
-      mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
-      orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
-      orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
-      orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
     }
     if (target.code() != rd.code()) {
       mov(rd, target, LeaveCC, cond);
     }
   } else {
-    DCHECK(!FLAG_enable_embedded_constant_pool || is_constant_pool_available());
     ConstantPoolEntry::Access access =
         ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
-    if (access == ConstantPoolEntry::OVERFLOWED) {
-      DCHECK(FLAG_enable_embedded_constant_pool);
-      Register target = rd.code() == pc.code() ? ip : rd;
-      // Emit instructions to load constant pool offset.
-      if (CpuFeatures::IsSupported(ARMv7)) {
-        CpuFeatureScope scope(this, ARMv7);
-        movw(target, 0, cond);
-        movt(target, 0, cond);
-      } else {
-        mov(target, Operand(0), LeaveCC, cond);
-        orr(target, target, Operand(0), LeaveCC, cond);
-        orr(target, target, Operand(0), LeaveCC, cond);
-        orr(target, target, Operand(0), LeaveCC, cond);
-      }
-      // Load from constant pool at offset.
-      ldr(rd, MemOperand(pp, target), cond);
-    } else {
-      DCHECK(access == ConstantPoolEntry::REGULAR);
-      ldr(rd, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0),
-          cond);
-    }
+    DCHECK(access == ConstantPoolEntry::REGULAR);
+    USE(access);
+    ldr(rd, MemOperand(pc, 0), cond);
   }
 }
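
The ldr above is emitted with a zero offset on purpose: the pool entry's final position is not known yet, so the instruction is backpatched when CheckConstPool dumps the pool. A toy model of that emit-now, patch-later scheme (one 32-bit word per instruction; it ignores the ARM convention that pc reads as the instruction address plus 8):

    #include <cstdint>
    #include <vector>

    // Each pool load is emitted as a placeholder and remembered; dumping the
    // pool appends the data words and backpatches the placeholders with the
    // real pc-relative offsets.
    struct ToyPool {
      struct Entry { size_t ldr_index; uint32_t value; };
      std::vector<uint32_t> code;  // one 32-bit word per instruction
      std::vector<Entry> pending;

      void EmitLdrFromPool(uint32_t value) {
        pending.push_back({code.size(), value});
        code.push_back(0);  // stands for "ldr rd, [pc, #0]"
      }

      void DumpPool() {
        for (const Entry& e : pending) {
          size_t slot = code.size();
          code.push_back(e.value);  // the constant lives after the code
          code[e.ldr_index] =
              static_cast<uint32_t>((slot - e.ldr_index) * 4);  // byte offset
        }
        pending.clear();
      }
    };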
 
 
 void Assembler::addrmod1(Instr instr,
                          Register rn,
                          Register rd,
                          const Operand& x) {
   CheckBuffer();
   DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
(...skipping 1494 matching lines...)
   }
 }
 
 
 void Assembler::vmov(const DwVfpRegister dst,
                      double imm,
                      const Register scratch) {
   DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(!scratch.is(ip));
   uint32_t enc;
-  // If the embedded constant pool is disabled, we can use the normal, inline
-  // constant pool. If the embedded constant pool is enabled (via
-  // FLAG_enable_embedded_constant_pool), we can only use it where the pool
-  // pointer (pp) is valid.
-  bool can_use_pool =
-      !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
   if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
     CpuFeatureScope scope(this, VFPv3);
     // The double can be encoded in the instruction.
     //
     // Dd = immediate
     // Instruction details available in ARM DDI 0406C.b, A8-936.
     // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
     // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
     int vd, d;
     dst.split_code(&vd, &d);
     emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
-  } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm &&
-             can_use_pool) {
+  } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm) {
     CpuFeatureScope scope(this, ARMv7);
     // TODO(jfb) Temporarily turned off until we have constant blinding or
     //           some equivalent mitigation: an attacker can otherwise control
     //           generated data which also happens to be executable, a Very Bad
     //           Thing indeed.
     //           Blinding gets tricky because we don't have xor, we probably
     //           need to add/subtract without losing precision, which requires a
     //           cookie value that Lithium is probably better positioned to
     //           choose.
     //           We could also add a few peepholes here like detecting 0.0 and
     //           -0.0 and doing a vmov from the sequestered d14, forcing denorms
     //           to zero (we set flush-to-zero), and normalizing NaN values.
     //           We could also detect redundant values.
     //           The code could also randomize the order of values, though
     //           that's tricky because vldr has a limited reach. Furthermore
     //           it breaks load locality.
     ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
-    if (access == ConstantPoolEntry::OVERFLOWED) {
-      DCHECK(FLAG_enable_embedded_constant_pool);
-      // Emit instructions to load constant pool offset.
-      movw(ip, 0);
-      movt(ip, 0);
-      // Load from constant pool at offset.
-      vldr(dst, MemOperand(pp, ip));
-    } else {
-      DCHECK(access == ConstantPoolEntry::REGULAR);
-      vldr(dst, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0));
-    }
+    DCHECK(access == ConstantPoolEntry::REGULAR);
+    USE(access);
+    vldr(dst, MemOperand(pc, 0));
   } else {
     // Synthesise the double from ARM immediates.
     uint32_t lo, hi;
     DoubleAsTwoUInt32(imm, &lo, &hi);
 
     if (lo == hi) {
       // Move the low and high parts of the double to a D register in one
       // instruction.
       mov(ip, Operand(lo));
       vmov(dst, ip, ip);
(...skipping 2192 matching lines...)
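
The final fallback builds the double from its raw IEEE-754 bits, and the lo == hi special case lets a single mov feed both 32-bit halves of the D register. A sketch of what DoubleAsTwoUInt32 does (our reconstruction, assuming a little-endian target):

    #include <cstdint>
    #include <cstring>

    // Split a double into the two 32-bit words of its IEEE-754 encoding:
    // lo holds the low mantissa bits, hi the sign/exponent/high bits.
    void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // well-defined type pun
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }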
 }
 
 
 ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
                                                           RelocInfo::Mode rmode,
                                                           intptr_t value) {
   DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL &&
          rmode != RelocInfo::NONE64);
   bool sharing_ok = RelocInfo::IsNone(rmode) ||
                     !(serializer_enabled() || rmode < RelocInfo::CELL);
-  if (FLAG_enable_embedded_constant_pool) {
-    return constant_pool_builder_.AddEntry(position, value, sharing_ok);
-  } else {
-    DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
-    if (pending_32_bit_constants_.empty()) {
-      first_const_pool_32_use_ = position;
-    }
-    ConstantPoolEntry entry(position, value, sharing_ok);
-    pending_32_bit_constants_.push_back(entry);
-
-    // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info.
-    BlockConstPoolFor(1);
-    return ConstantPoolEntry::REGULAR;
-  }
+  DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
+  if (pending_32_bit_constants_.empty()) {
+    first_const_pool_32_use_ = position;
+  }
+  ConstantPoolEntry entry(position, value, sharing_ok);
+  pending_32_bit_constants_.push_back(entry);
+
+  // Make sure the constant pool is not emitted in place of the next
+  // instruction for which we just recorded relocation info.
+  BlockConstPoolFor(1);
+  return ConstantPoolEntry::REGULAR;
 }
 
 
 ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
                                                           double value) {
-  if (FLAG_enable_embedded_constant_pool) {
-    return constant_pool_builder_.AddEntry(position, value);
-  } else {
-    DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
-    if (pending_64_bit_constants_.empty()) {
-      first_const_pool_64_use_ = position;
-    }
-    ConstantPoolEntry entry(position, value);
-    pending_64_bit_constants_.push_back(entry);
-
-    // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info.
-    BlockConstPoolFor(1);
-    return ConstantPoolEntry::REGULAR;
-  }
+  DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
+  if (pending_64_bit_constants_.empty()) {
+    first_const_pool_64_use_ = position;
+  }
+  ConstantPoolEntry entry(position, value);
+  pending_64_bit_constants_.push_back(entry);
+
+  // Make sure the constant pool is not emitted in place of the next
+  // instruction for which we just recorded relocation info.
+  BlockConstPoolFor(1);
+  return ConstantPoolEntry::REGULAR;
 }
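
first_const_pool_32_use_ and first_const_pool_64_use_ matter because pc-relative loads have limited reach (roughly 4KB for ldr, 1KB for vldr), so the oldest pending entry fixes a deadline by which the pool must be dumped. A toy version of that deadline test (the limits are illustrative; the real bounds are kMaxDistToIntPool/kMaxDistToFPPool with safety margins):

    // Must the pool be emitted before pc drifts out of reach of the oldest
    // pending pool load?
    bool MustEmitPool(int pc_offset, int first_use_offset, bool is_double) {
      if (first_use_offset < 0) return false;      // no pending entries
      const int kReach = is_double ? 1020 : 4092;  // assumed illustrative limits
      return pc_offset - first_use_offset >= kReach;
    }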
 
 
 void Assembler::BlockConstPoolFor(int instructions) {
-  if (FLAG_enable_embedded_constant_pool) {
-    // Should be a no-op if using an embedded constant pool.
-    DCHECK(pending_32_bit_constants_.empty());
-    DCHECK(pending_64_bit_constants_.empty());
-    return;
-  }
-
   int pc_limit = pc_offset() + instructions * kInstrSize;
   if (no_const_pool_before_ < pc_limit) {
     // Max pool start (if we need a jump and an alignment).
 #ifdef DEBUG
     int start = pc_limit + kInstrSize + 2 * kPointerSize;
     DCHECK(pending_32_bit_constants_.empty() ||
            (start - first_const_pool_32_use_ +
                 pending_64_bit_constants_.size() * kDoubleSize <
             kMaxDistToIntPool));
     DCHECK(pending_64_bit_constants_.empty() ||
            (start - first_const_pool_64_use_ < kMaxDistToFPPool));
 #endif
     no_const_pool_before_ = pc_limit;
   }
 
   if (next_buffer_check_ < no_const_pool_before_) {
     next_buffer_check_ = no_const_pool_before_;
   }
 }
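
BlockConstPoolFor pushes no_const_pool_before_ past a short instruction sequence so a pool dump cannot land in the middle of it (for example, the instruction right after freshly recorded reloc info, as in ConstantPoolAddEntry above). A toy model of the blocking window (the field names echo the real ones; the struct itself is ours):

    // Pool emission is deferred until pc passes no_const_pool_before_.
    struct PoolBlocker {
      static constexpr int kInstrSize = 4;
      int pc_offset = 0;
      int no_const_pool_before = 0;

      void BlockConstPoolFor(int instructions) {
        int pc_limit = pc_offset + instructions * kInstrSize;
        if (no_const_pool_before < pc_limit) no_const_pool_before = pc_limit;
      }
      bool ConstPoolAllowed() const {
        return pc_offset >= no_const_pool_before;
      }
    };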
 
 
 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
-  if (FLAG_enable_embedded_constant_pool) {
-    // Should be a no-op if using an embedded constant pool.
-    DCHECK(pending_32_bit_constants_.empty());
-    DCHECK(pending_64_bit_constants_.empty());
-    return;
-  }
-
   // Some short sequence of instruction mustn't be broken up by constant pool
   // emission, such sequences are protected by calls to BlockConstPoolFor and
   // BlockConstPoolScope.
   if (is_const_pool_blocked()) {
     // Something is wrong if emission is forced and blocked at the same time.
     DCHECK(!force_emit);
     return;
   }
 
   // There is nothing to do if there are no pending constant pool entries.
(...skipping 192 matching lines...)
     if (after_pool.is_linked()) {
       bind(&after_pool);
     }
   }
 
   // Since a constant pool was just emitted, move the check offset forward by
   // the standard interval.
   next_buffer_check_ = pc_offset() + kCheckPoolInterval;
 }
 
-
-void Assembler::PatchConstantPoolAccessInstruction(
-    int pc_offset, int offset, ConstantPoolEntry::Access access,
-    ConstantPoolEntry::Type type) {
-  DCHECK(FLAG_enable_embedded_constant_pool);
-  Address pc = buffer_ + pc_offset;
-
-  // Patch vldr/ldr instruction with correct offset.
-  Instr instr = instr_at(pc);
-  if (access == ConstantPoolEntry::OVERFLOWED) {
-    if (CpuFeatures::IsSupported(ARMv7)) {
-      CpuFeatureScope scope(this, ARMv7);
-      // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
-      Instr next_instr = instr_at(pc + kInstrSize);
-      DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
-      DCHECK((IsMovT(next_instr) &&
-              Instruction::ImmedMovwMovtValue(next_instr) == 0));
-      instr_at_put(pc, PatchMovwImmediate(instr, offset & 0xffff));
-      instr_at_put(pc + kInstrSize,
-                   PatchMovwImmediate(next_instr, offset >> 16));
-    } else {
-      // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
-      Instr instr_2 = instr_at(pc + kInstrSize);
-      Instr instr_3 = instr_at(pc + 2 * kInstrSize);
-      Instr instr_4 = instr_at(pc + 3 * kInstrSize);
-      DCHECK((IsMovImmed(instr) && Instruction::Immed8Value(instr) == 0));
-      DCHECK((IsOrrImmed(instr_2) && Instruction::Immed8Value(instr_2) == 0) &&
-             GetRn(instr_2).is(GetRd(instr_2)));
-      DCHECK((IsOrrImmed(instr_3) && Instruction::Immed8Value(instr_3) == 0) &&
-             GetRn(instr_3).is(GetRd(instr_3)));
-      DCHECK((IsOrrImmed(instr_4) && Instruction::Immed8Value(instr_4) == 0) &&
-             GetRn(instr_4).is(GetRd(instr_4)));
-      instr_at_put(pc, PatchShiftImm(instr, (offset & kImm8Mask)));
-      instr_at_put(pc + kInstrSize,
-                   PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
-      instr_at_put(pc + 2 * kInstrSize,
-                   PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
-      instr_at_put(pc + 3 * kInstrSize,
-                   PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
-    }
-  } else if (type == ConstantPoolEntry::DOUBLE) {
-    // Instruction to patch must be 'vldr rd, [pp, #0]'.
-    DCHECK((IsVldrDPpImmediateOffset(instr) &&
-            GetVldrDRegisterImmediateOffset(instr) == 0));
-    DCHECK(is_uint10(offset));
-    instr_at_put(pc, SetVldrDRegisterImmediateOffset(instr, offset));
-  } else {
-    // Instruction to patch must be 'ldr rd, [pp, #0]'.
-    DCHECK((IsLdrPpImmediateOffset(instr) &&
-            GetLdrRegisterImmediateOffset(instr) == 0));
-    DCHECK(is_uint12(offset));
-    instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset));
-  }
-}
-
 PatchingAssembler::PatchingAssembler(IsolateData isolate_data, byte* address,
                                      int instructions)
     : Assembler(isolate_data, address, instructions * kInstrSize + kGap) {
   DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
 }
 
 PatchingAssembler::~PatchingAssembler() {
   // Check that we don't have any pending constant pools.
   DCHECK(pending_32_bit_constants_.empty());
   DCHECK(pending_64_bit_constants_.empty());
 
   // Check that the code was patched as expected.
   DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
   DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
 }
 
 void PatchingAssembler::Emit(Address addr) {
   emit(reinterpret_cast<Instr>(addr));
 }
 
 void PatchingAssembler::FlushICache(Isolate* isolate) {
   Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
 }
 
 }  // namespace internal
 }  // namespace v8
 
 #endif  // V8_TARGET_ARCH_ARM