Chromium Code Reviews

Unified Diff: src/arm/assembler-arm.cc

Issue 356393003: [Arm]: Enable use of extended out-of-line constant pool for Arm. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fix issue with inline constant pool. Created 6 years, 5 months ago
 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
 // are met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 211 matching lines...)
 bool RelocInfo::IsCodedSpecially() {
   // The deserializer needs to know whether a pointer is specially coded.  Being
   // specially coded on ARM means that it is a movw/movt instruction, or is an
   // out of line constant pool entry.  These only occur if
   // FLAG_enable_ool_constant_pool is true.
   return FLAG_enable_ool_constant_pool;
 }


 bool RelocInfo::IsInConstantPool() {
-  if (FLAG_enable_ool_constant_pool) {
-    return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
-  } else {
-    return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
-  }
+  return Assembler::is_constant_pool_load(pc_);
 }


 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   Instr* pc = reinterpret_cast<Instr*>(pc_);
   Instr* instr = reinterpret_cast<Instr*>(instructions);
   for (int i = 0; i < instruction_count; i++) {
     *(pc + i) = *(instr + i);
   }
(...skipping 160 matching lines...)
 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
 // register r is not encoded.
 const Instr kPopRegPattern =
     al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
 // ldr rd, [pc, #offset]
 const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
 const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
 // ldr rd, [pp, #offset]
 const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
 const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
+// ldr rd, [pp, rn]
+const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
 // vldr dd, [pc, #offset]
 const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
 const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
 // vldr dd, [pp, #offset]
 const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
 const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
 const Instr kBlxIp = al | kBlxRegPattern | ip.code();
 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
 const Instr kMovMvnPattern = 0xd * B21;
 const Instr kMovMvnFlip = B22;
 const Instr kMovLeaveCCMask = 0xdff * B16;
 const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovtPattern = 0x34 * B20;
 const Instr kMovwLeaveCCFlip = 0x5 * B21;
 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
 const Instr kCmpCmnPattern = 0x15 * B20;
 const Instr kCmpCmnFlip = B21;
 const Instr kAddSubFlip = 0x6 * B21;
 const Instr kAndBicFlip = 0xe * B21;

 // A mask for the Rd register for push, pop, ldr, str instructions.
 const Instr kLdrRegFpOffsetPattern =
     al | B26 | L | Offset | kRegister_fp_Code * B16;
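These mask/pattern pairs classify fully encoded instruction words: the mask keeps only the bits that fix the instruction shape (the class bits 27-24, the load bit L at bit 20, and the base register Rn in bits 19-16) and the pattern gives the values those bits must take. As a minimal standalone sketch of the same test, assuming the bit layout from ARM DDI 0406C (the constant names below mirror the diff, but this is illustrative C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    using Instr = uint32_t;

    const Instr kB16 = 1u << 16;
    const Instr kB20 = 1u << 20;
    const Instr kB24 = 1u << 24;
    const int kPpCode = 8;  // r8 is aliased as pp, the constant pool pointer.

    // ldr rd, [pp, #offset]: compare the class bits, the L (load) bit and the
    // base register; ignore the condition, Rd and the 12-bit immediate.
    const Instr kLdrPpImmedMask = 15 * kB24 | 7 * kB20 | 15 * kB16;
    const Instr kLdrPpImmedPattern = 5 * kB24 | kB20 | kPpCode * kB16;

    bool IsLdrPpImmediateOffset(Instr instr) {
      return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
    }

    int main() {
      assert(IsLdrPpImmediateOffset(0xE5980008));   // al ldr r0, [r8, #8]
      assert(!IsLdrPpImmediateOffset(0xE59F0008));  // al ldr r0, [pc, #8]
      return 0;
    }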
(...skipping 14 matching lines...)
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
   num_pending_32_bit_reloc_info_ = 0;
   num_pending_64_bit_reloc_info_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
   first_const_pool_32_use_ = -1;
   first_const_pool_64_use_ = -1;
   last_bound_pos_ = 0;
   constant_pool_available_ = !FLAG_enable_ool_constant_pool;
-  constant_pool_full_ = false;
   ClearRecordedAstId();
 }


 Assembler::~Assembler() {
   ASSERT(const_pool_blocked_nesting_ == 0);
 }


 void Assembler::GetCode(CodeDesc* desc) {
(...skipping 150 matching lines...)

 Instr Assembler::GetConsantPoolLoadPattern() {
   if (FLAG_enable_ool_constant_pool) {
     return kLdrPpImmedPattern;
   } else {
     return kLdrPCImmedPattern;
   }
 }


+Instr Assembler::GetConsantPoolLoadMask() {
+  if (FLAG_enable_ool_constant_pool) {
+    return kLdrPpImmedMask;
+  } else {
+    return kLdrPCImmedMask;
+  }
+}
+
+
 bool Assembler::IsPush(Instr instr) {
   return ((instr & ~kRdMask) == kPushRegPattern);
 }


 bool Assembler::IsPop(Instr instr) {
   return ((instr & ~kRdMask) == kPopRegPattern);
 }

(...skipping 24 matching lines...)
 }


 bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // ldr<cond> <Rd>, [pp +/- offset_12].
   return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
 }


+bool Assembler::IsLdrPpRegOffset(Instr instr) {
+  // Check the instruction is indeed a
+  // ldr<cond> <Rd>, [pp, +/- <Rm>].
+  return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
+}
+
+
+Instr Assembler::GetLdrPpRegOffsetPattern() {
+  return kLdrPpRegPattern;
+}
+
+
 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // vldr<cond> <Dd>, [pc +/- offset_10].
   return (instr & kVldrDPCMask) == kVldrDPCPattern;
 }


 bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // vldr<cond> <Dd>, [pp +/- offset_10].
(...skipping 281 matching lines...)
   // immediate fits, change the opcode.
   if (instr != NULL) {
     if ((*instr & kMovMvnMask) == kMovMvnPattern) {
       if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
         *instr ^= kMovMvnFlip;
         return true;
       } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
         if (CpuFeatures::IsSupported(ARMv7)) {
           if (imm32 < 0x10000) {
             *instr ^= kMovwLeaveCCFlip;
-            *instr |= EncodeMovwImmediate(imm32);
+            *instr |= Assembler::EncodeMovwImmediate(imm32);
             *rotate_imm = *immed_8 = 0;  // Not used for movw.
             return true;
           }
         }
       }
     } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
       if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
         *instr ^= kCmpCmnFlip;
         return true;
       }
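The callers of fits_shifter above exploit paired opcodes: when an immediate cannot be encoded directly but its bitwise complement (mov/mvn) or negation (cmp/cmn, add/sub) can, a single opcode bit is flipped and the transformed immediate encoded instead. A standalone sketch under the assumption that an ARM data-processing immediate is an 8-bit value rotated right by an even amount (FitsShifter here is an illustrative stand-in for V8's fits_shifter):

    #include <cassert>
    #include <cstdint>

    // True if imm equals an 8-bit value rotated right by an even amount,
    // the ARM data-processing immediate form.
    bool FitsShifter(uint32_t imm) {
      for (int rot = 0; rot < 32; rot += 2) {
        // Rotating left by rot undoes a rotate-right by rot.
        uint32_t v = (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
        if (v < 256) return true;
      }
      return false;
    }

    int main() {
      uint32_t imm = 0xffffff00;  // mov rd, #0xffffff00 is not encodable...
      assert(!FitsShifter(imm));
      assert(FitsShifter(~imm));  // ...but mvn rd, #0xff yields the same value.
      return 0;
    }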
(...skipping 28 matching lines...)
     return assembler->serializer_enabled();
   } else if (RelocInfo::IsNone(rmode_)) {
     return false;
   }
   return true;
 }


 static bool use_mov_immediate_load(const Operand& x,
                                    const Assembler* assembler) {
-  if (assembler != NULL && !assembler->can_use_constant_pool()) {
+  if (assembler != NULL && !assembler->is_constant_pool_available()) {
     // If there is no constant pool available, we must use a mov immediate.
     // TODO(rmcilroy): enable ARMv6 support.
     ASSERT(CpuFeatures::IsSupported(ARMv7));
     return true;
   } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
              (assembler == NULL || !assembler->predictable_code_size())) {
     // Prefer movw / movt to constant pool if it is more efficient on the CPU.
     return true;
   } else if (x.must_output_reloc_info(assembler)) {
     // Prefer constant pool if data is likely to be patched.
     return false;
   } else {
     // Otherwise, use immediate load if movw / movt is available.
     return CpuFeatures::IsSupported(ARMv7);
   }
 }

-bool Operand::is_single_instruction(const Assembler* assembler,
-                                    Instr instr) const {
+int Operand::instructions_required(const Assembler* assembler,
+                                   Instr instr) const {

    ulan 2014/07/01 13:29:47: It is possible to accidentally misuse this function...
    rmcilroy 2014/07/02 16:35:54: Yes, this would theoretically be possible. I've added...

-  if (rm_.is_valid()) return true;
+  if (rm_.is_valid()) return 1;
   uint32_t dummy1, dummy2;
   if (must_output_reloc_info(assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
-    // constant pool is required. For a mov instruction not setting the
-    // condition code additional instruction conventions can be used.
-    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      return !use_mov_immediate_load(*this, assembler);
-    } else {
-      // If this is not a mov or mvn instruction there will always an additional
-      // instructions - either mov or ldr. The mov might actually be two
-      // instructions mov or movw followed by movt so including the actual
-      // instruction two or three instructions will be generated.
-      return false;
-    }
+    // constant pool is required. First account for the instructions required
+    // for the constant pool or immediate load.
+    int instructions;
+    if (use_mov_immediate_load(*this, assembler)) {
+      instructions = 2;  // A movw, movt immediate load.
+    } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
+      instructions = 3;  // An extended constant pool load.
+    } else {
+      instructions = 1;  // A small constant pool load.
+    }
+
+    if ((instr & ~kCondMask) != 13*B21) {  // mov, S not set
+      // For a mov or mvn instruction which doesn't set the condition
+      // code, the constant pool or immediate load is enough, otherwise we need
+      // to account for the actual instruction being requested.
+      instructions += 1;
+    }
+    return instructions;
   } else {
     // No use of constant pool and the immediate operand can be encoded as a
     // shifter operand.
-    return true;
+    return 1;
   }
 }

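The counting rule above can be read off directly: materialising the immediate costs two instructions as a movw/movt pair, three as an extended constant pool load (movw, movt, then ldr from pp), or one as a small pool load, plus one more when the consuming instruction is not a plain mov. A hypothetical standalone rendering of that rule (not the V8 API; the names are illustrative):

    #include <cstdio>

    enum LoadKind { kMovwMovt, kExtendedPoolLoad, kSmallPoolLoad };

    int InstructionsRequired(LoadKind kind, bool consumer_is_plain_mov) {
      int n = 0;
      switch (kind) {
        case kMovwMovt:         n = 2; break;  // movw + movt
        case kExtendedPoolLoad: n = 3; break;  // movw + movt + ldr [pp, rx]
        case kSmallPoolLoad:    n = 1; break;  // ldr [pp/pc, #offset]
      }
      if (!consumer_is_plain_mov) n += 1;  // e.g. add rd, rn, <materialized imm>
      return n;
    }

    int main() {
      // add rd, rn, #imm via the extended pool costs 4 instructions.
      printf("%d\n", InstructionsRequired(kExtendedPoolLoad, false));
      return 0;
    }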
 void Assembler::move_32_bit_immediate(Register rd,
                                       const Operand& x,
                                       Condition cond) {
   RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
   if (x.must_output_reloc_info(this)) {
     RecordRelocInfo(rinfo);
   }

   if (use_mov_immediate_load(x, this)) {
     Register target = rd.code() == pc.code() ? ip : rd;
     // TODO(rmcilroy): add ARMv6 support for immediate loads.
     ASSERT(CpuFeatures::IsSupported(ARMv7));
     if (!FLAG_enable_ool_constant_pool &&
         x.must_output_reloc_info(this)) {
       // Make sure the movw/movt doesn't get separated.
       BlockConstPoolFor(2);
     }
     movw(target, static_cast<uint32_t>(x.imm32_ & 0xffff), cond);
     movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
     if (target.code() != rd.code()) {
       mov(rd, target, LeaveCC, cond);
     }
   } else {
-    ASSERT(can_use_constant_pool());
-    ConstantPoolAddEntry(rinfo);
-    ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+    ASSERT(is_constant_pool_available());
+    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+    if (section == ConstantPoolArray::EXTENDED_SECTION) {
+      ASSERT(FLAG_enable_ool_constant_pool);
+      Register target = rd.code() == pc.code() ? ip : rd;
+      // Emit instructions to load constant pool offset.
+      movw(target, 0, cond);
+      movt(target, 0, cond);
+      // Load from constant pool at offset.
+      ldr(rd, MemOperand(pp, target), cond);
+    } else {
+      ASSERT(section == ConstantPoolArray::SMALL_SECTION);
+      ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+    }
   }
 }

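At run time the two branches above behave as follows, with pp holding the constant pool base. A rough sketch, with ordinary C++ standing in for the emitted machine code (pool_base and offset are illustrative; the real offset is patched into the movw/movt pair later by ConstantPoolBuilder::Populate):

    #include <cstdint>
    #include <cstring>

    // Small section: a single 'ldr rd, [pp, #offset]' with a 12-bit immediate.
    uint32_t LoadSmall(const uint8_t* pool_base, uint32_t offset) {
      uint32_t value;
      std::memcpy(&value, pool_base + offset, sizeof(value));
      return value;
    }

    // Extended section: movw/movt first build the full 32-bit offset in a
    // scratch register, then 'ldr rd, [pp, rx]' indexes the pool with it.
    uint32_t LoadExtended(const uint8_t* pool_base, uint32_t offset) {
      uint32_t scratch = offset & 0xffff;  // movw rx, #lo16
      scratch |= offset & 0xffff0000;      // movt rx, #hi16
      uint32_t value;
      std::memcpy(&value, pool_base + scratch, sizeof(value));
      return value;
    }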
 void Assembler::addrmod1(Instr instr,
                          Register rn,
                          Register rd,
                          const Operand& x) {
   CheckBuffer();
   ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
(...skipping 1283 matching lines...)
   if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
     // The double can be encoded in the instruction.
     //
     // Dd = immediate
     // Instruction details available in ARM DDI 0406C.b, A8-936.
     // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
     // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
     int vd, d;
     dst.split_code(&vd, &d);
     emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
-  } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
+  } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
     // TODO(jfb) Temporarily turned off until we have constant blinding or
     // some equivalent mitigation: an attacker can otherwise control
     // generated data which also happens to be executable, a Very Bad
     // Thing indeed.
     // Blinding gets tricky because we don't have xor, we probably
     // need to add/subtract without losing precision, which requires a
     // cookie value that Lithium is probably better positioned to
     // choose.
     // We could also add a few peepholes here like detecting 0.0 and
     // -0.0 and doing a vmov from the sequestered d14, forcing denorms
     // to zero (we set flush-to-zero), and normalizing NaN values.
     // We could also detect redundant values.
     // The code could also randomize the order of values, though
     // that's tricky because vldr has a limited reach. Furthermore
     // it breaks load locality.
     RelocInfo rinfo(pc_, imm);
-    ConstantPoolAddEntry(rinfo);
-    vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+    if (section == ConstantPoolArray::EXTENDED_SECTION) {
+      ASSERT(FLAG_enable_ool_constant_pool);
+      // Emit instructions to load constant pool offset.
+      movw(ip, 0);
+      movt(ip, 0);
+      // Load from constant pool at offset.
+      vldr(dst, MemOperand(pp, ip));
+    } else {
+      ASSERT(section == ConstantPoolArray::SMALL_SECTION);
+      vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+    }
   } else {
     // Synthesise the double from ARM immediates.
     uint32_t lo, hi;
     DoubleAsTwoUInt32(imm, &lo, &hi);

     if (scratch.is(no_reg)) {
       if (dst.code() < 16) {
         const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
         // Move the low part of the double into the lower of the corresponding S
         // registers of D register dst.
(...skipping 594 matching lines...)
   // a type.
   ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   emit(al | 13*B21 | type*B12 | type);
 }


 bool Assembler::IsMovT(Instr instr) {
   instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
              ((kNumRegisters-1)*B12) |            // mask out register
              EncodeMovwImmediate(0xFFFF));        // mask out immediate value
-  return instr == 0x34*B20;
+  return instr == kMovtPattern;
 }


 bool Assembler::IsMovW(Instr instr) {
   instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
              ((kNumRegisters-1)*B12) |            // mask out destination
              EncodeMovwImmediate(0xFFFF));        // mask out immediate value
-  return instr == 0x30*B20;
+  return instr == kMovwPattern;
 }


+Instr Assembler::GetMovTPattern() {
+  return kMovtPattern;
+}
+
+
+Instr Assembler::GetMovWPattern() {
+  return kMovwPattern;
+}
+
+
+Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
+  ASSERT(immediate < 0x10000);
+  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
+Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
+  instruction &= ~EncodeMovwImmediate(0xffff);
+  return instruction | EncodeMovwImmediate(immediate);
+}
+
+
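EncodeMovwImmediate mirrors the movw/movt encoding, where a 16-bit immediate is split into imm4 (bits 19-16) and imm12 (bits 11-0); patching therefore means clearing both fields and OR-ing the freshly split value in. A self-contained round trip of that arithmetic (the word 0xE3000000 is 'movw r0, #0' under the al condition; plain asserts stand in for V8's ASSERT):

    #include <cassert>
    #include <cstdint>

    using Instr = uint32_t;

    Instr EncodeMovwImmediate(uint32_t imm) {
      assert(imm < 0x10000);
      return ((imm & 0xf000) << 4) | (imm & 0xfff);  // imm4:imm12
    }

    Instr PatchMovwImmediate(Instr instr, uint32_t imm) {
      instr &= ~EncodeMovwImmediate(0xffff);  // clear imm4 and imm12
      return instr | EncodeMovwImmediate(imm);
    }

    int main() {
      assert(EncodeMovwImmediate(0xabcd) == 0xa0bcd);  // 0xa -> bits 19-16
      assert(PatchMovwImmediate(0xE3000000, 0xabcd) == 0xE30A0BCD);
      return 0;
    }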
 bool Assembler::IsNop(Instr instr, int type) {
   ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   // Check for mov rx, rx where x = type.
   return instr == (al | 13*B21 | type*B12 | type);
 }


 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
(...skipping 146 matching lines...)
                                          NULL);
     ClearRecordedAstId();
     reloc_info_writer.Write(&reloc_info_with_ast_id);
   } else {
     reloc_info_writer.Write(&rinfo);
   }
 }


-void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
+    const RelocInfo& rinfo) {
   if (FLAG_enable_ool_constant_pool) {
-    constant_pool_builder_.AddEntry(this, rinfo);
+    return constant_pool_builder_.AddEntry(this, rinfo);
   } else {
     if (rinfo.rmode() == RelocInfo::NONE64) {
       ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
       if (num_pending_64_bit_reloc_info_ == 0) {
         first_const_pool_64_use_ = pc_offset();
       }
       pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
     } else {
       ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
       if (num_pending_32_bit_reloc_info_ == 0) {
         first_const_pool_32_use_ = pc_offset();
       }
       pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
     }
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
     BlockConstPoolFor(1);
+    return ConstantPoolArray::SMALL_SECTION;
   }
 }

 void Assembler::BlockConstPoolFor(int instructions) {
   if (FLAG_enable_ool_constant_pool) {
     // Should be a no-op if using an out-of-line constant pool.
     ASSERT(num_pending_32_bit_reloc_info_ == 0);
     ASSERT(num_pending_64_bit_reloc_info_ == 0);
     return;
(...skipping 232 matching lines...)
 }


 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   constant_pool_builder_.Populate(this, constant_pool);
 }


 ConstantPoolBuilder::ConstantPoolBuilder()
     : entries_(),
-      merged_indexes_(),
-      count_of_64bit_(0),
-      count_of_code_ptr_(0),
-      count_of_heap_ptr_(0),
-      count_of_32bit_(0) { }
+      current_section_(ConstantPoolArray::SMALL_SECTION) {}


 bool ConstantPoolBuilder::IsEmpty() {
   return entries_.size() == 0;
 }

-bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
-  return rmode == RelocInfo::NONE64;
+ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
+    RelocInfo::Mode rmode) {
+  if (rmode == RelocInfo::NONE64) {
+    return ConstantPoolArray::INT64;
+  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
+    return ConstantPoolArray::INT32;
+  } else if (RelocInfo::IsCodeTarget(rmode)) {
+    return ConstantPoolArray::CODE_PTR;
+  } else {
+    ASSERT(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
+    return ConstantPoolArray::HEAP_PTR;
+  }
 }

-bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
-  return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
-}
-
-
-bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
-  return RelocInfo::IsCodeTarget(rmode);
-}
-
-
-bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
-  return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
-}
-
-
-void ConstantPoolBuilder::AddEntry(Assembler* assm,
-                                   const RelocInfo& rinfo) {
+ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
+    Assembler* assm, const RelocInfo& rinfo) {
   RelocInfo::Mode rmode = rinfo.rmode();
   ASSERT(rmode != RelocInfo::COMMENT &&
          rmode != RelocInfo::POSITION &&
          rmode != RelocInfo::STATEMENT_POSITION &&
          rmode != RelocInfo::CONST_POOL);


   // Try to merge entries which won't be patched.
   int merged_index = -1;
+  ConstantPoolArray::LayoutSection entry_section = current_section_;
   if (RelocInfo::IsNone(rmode) ||
       (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
     size_t i;
-    std::vector<RelocInfo>::const_iterator it;
+    std::vector<ConstantPoolEntry>::const_iterator it;
     for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
-      if (RelocInfo::IsEqual(rinfo, *it)) {
+      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
+        // Merge with found entry.
         merged_index = i;
+        entry_section = entries_[i].section_;
         break;
       }
     }
   }
-
-  entries_.push_back(rinfo);
-  merged_indexes_.push_back(merged_index);
+  ASSERT(entry_section <= current_section_);
+  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));

   if (merged_index == -1) {
     // Not merged, so update the appropriate count.
-    if (Is64BitEntry(rmode)) {
-      count_of_64bit_++;
-    } else if (Is32BitEntry(rmode)) {
-      count_of_32bit_++;
-    } else if (IsCodePtrEntry(rmode)) {
-      count_of_code_ptr_++;
-    } else {
-      ASSERT(IsHeapPtrEntry(rmode));
-      count_of_heap_ptr_++;
-    }
+    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
   }

-  // Check if we still have room for another entry given Arm's ldr and vldr
-  // immediate offset range.
-  // TODO(rmcilroy): Avoid creating a new object here when we support
-  // extended constant pools.
-  ConstantPoolArray::NumberOfEntries total(count_of_64bit_,
-                                           count_of_code_ptr_,
-                                           count_of_heap_ptr_,
-                                           count_of_32bit_);
-  ConstantPoolArray::NumberOfEntries int64_counts(count_of_64bit_, 0, 0, 0);
-  if (!(is_uint12(ConstantPoolArray::SizeFor(total)) &&
-        is_uint10(ConstantPoolArray::SizeFor(int64_counts)))) {
-    assm->set_constant_pool_full();
+  // Check if we still have room for another entry in the small section
+  // given Arm's ldr and vldr immediate offset range.
+  if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
+      !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
+        is_uint10(ConstantPoolArray::MaxInt64Offset(
+            small_entries()->count_of(ConstantPoolArray::INT64))))) {
+    current_section_ = ConstantPoolArray::EXTENDED_SECTION;
   }
+  return entry_section;
 }

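The overflow test above reflects the reach of the two load forms: ldr's 12-bit immediate spans 4096 bytes, while vldr's 8-bit, word-scaled offset spans only 1020 bytes, so the INT64 entries laid out at the front of the pool must stay within 10 bits even when the pool as a whole still fits in 12. A standalone sketch of that rule, with sizes in bytes (the helper name and the example numbers are illustrative, not V8's):

    #include <cstdint>

    bool FitsSmallSection(uint32_t pool_size_bytes, uint32_t max_int64_offset) {
      bool ldr_reach_ok = pool_size_bytes < (1u << 12);    // is_uint12
      bool vldr_reach_ok = max_int64_offset < (1u << 10);  // is_uint10
      return ldr_reach_ok && vldr_reach_ok;
    }

    int main() {
      // 200 doubles occupy 1600 bytes: ldr still reaches (< 4096), but the
      // last INT64 offset (1592) exceeds vldr's reach, so the builder would
      // switch to the extended section here.
      return FitsSmallSection(1600, 1592) ? 1 : 0;  // returns 0 (false)
    }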
 void ConstantPoolBuilder::Relocate(int pc_delta) {
-  for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
-       rinfo != entries_.end(); rinfo++) {
-    ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
-    rinfo->set_pc(rinfo->pc() + pc_delta);
+  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+       entry != entries_.end(); entry++) {
+    ASSERT(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
+    entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
   }
 }

 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
   if (IsEmpty()) {
     return isolate->factory()->empty_constant_pool_array();
+  } else if (extended_entries()->is_empty()) {
+    return isolate->factory()->NewConstantPoolArray(*small_entries());
   } else {
-    ConstantPoolArray::NumberOfEntries small(count_of_64bit_,
-                                             count_of_code_ptr_,
-                                             count_of_heap_ptr_,
-                                             count_of_32bit_);
-    return isolate->factory()->NewConstantPoolArray(small);
+    ASSERT(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
+    return isolate->factory()->NewExtendedConstantPoolArray(
+        *small_entries(), *extended_entries());
   }
 }

 void ConstantPoolBuilder::Populate(Assembler* assm,
                                    ConstantPoolArray* constant_pool) {
-  ASSERT(count_of_64bit_ == constant_pool->number_of_entries(
-      ConstantPoolArray::INT64, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_code_ptr_ == constant_pool->number_of_entries(
-      ConstantPoolArray::CODE_PTR, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_heap_ptr_ == constant_pool->number_of_entries(
-      ConstantPoolArray::HEAP_PTR, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_32bit_ == constant_pool->number_of_entries(
-      ConstantPoolArray::INT32, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(entries_.size() == merged_indexes_.size());
+  ASSERT_EQ(extended_entries()->is_empty(),
+            !constant_pool->is_extended_layout());
+  ASSERT(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
+      constant_pool, ConstantPoolArray::SMALL_SECTION)));
+  if (constant_pool->is_extended_layout()) {
+    ASSERT(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
+        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
+  }

-  int index_64bit = 0;
-  int index_code_ptr = count_of_64bit_;
-  int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
-  int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
-
-  size_t i;
-  std::vector<RelocInfo>::const_iterator rinfo;
-  for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
-    RelocInfo::Mode rmode = rinfo->rmode();
+  ConstantPoolArray::NumberOfEntries small_idx;
+  ConstantPoolArray::NumberOfEntries extended_idx;
+  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+       entry != entries_.end(); entry++) {
+    RelocInfo rinfo = entry->rinfo_;
+    RelocInfo::Mode rmode = entry->rinfo_.rmode();
+    ConstantPoolArray::Type type = GetConstantPoolType(rmode);

     // Update constant pool if necessary and get the entry's offset.
     int offset;
-    if (merged_indexes_[i] == -1) {
-      if (Is64BitEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
-        constant_pool->set(index_64bit++, rinfo->data64());
-      } else if (Is32BitEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
-        constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
-      } else if (IsCodePtrEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
-            kHeapObjectTag;
-        constant_pool->set(index_code_ptr++,
-                           reinterpret_cast<Address>(rinfo->data()));
-      } else {
-        ASSERT(IsHeapPtrEntry(rmode));
-        offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
-            kHeapObjectTag;
-        constant_pool->set(index_heap_ptr++,
-                           reinterpret_cast<Object *>(rinfo->data()));
-      }
-      merged_indexes_[i] = offset;  // Stash offset for merged entries.
+    if (entry->merged_index_ == -1) {
+      int index;
+      if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
+        index = small_entries()->total_count() +
+                extended_entries()->base_of(type) + extended_idx.count_of(type);
+        extended_idx.increment(type);
+      } else {
+        ASSERT(entry->section_ == ConstantPoolArray::SMALL_SECTION);
+        index = small_entries()->base_of(type) + small_idx.count_of(type);
+        small_idx.increment(type);
+      }
+      if (type == ConstantPoolArray::INT64) {
+        constant_pool->set(index, rinfo.data64());
+      } else if (type == ConstantPoolArray::INT32) {
+        constant_pool->set(index, static_cast<int32_t>(rinfo.data()));
+      } else if (type == ConstantPoolArray::CODE_PTR) {
+        constant_pool->set(index, reinterpret_cast<Address>(rinfo.data()));
+      } else {
+        ASSERT(type == ConstantPoolArray::HEAP_PTR);
+        constant_pool->set(index, reinterpret_cast<Object *>(rinfo.data()));
+      }
+      offset = constant_pool->OffsetOfElementAt(index) - kHeapObjectTag;
+      entry->merged_index_ = offset;  // Stash offset for merged entries.
     } else {
-      size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
-      ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
-      offset = merged_indexes_[merged_index];
+      ASSERT(entry->merged_index_ < (entry - entries_.begin()));
+      offset = entries_[entry->merged_index_].merged_index_;
     }

     // Patch vldr/ldr instruction with correct offset.
-    Instr instr = assm->instr_at(rinfo->pc());
-    if (Is64BitEntry(rmode)) {
+    Instr instr = assm->instr_at(rinfo.pc());
+    if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
+      // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
+      Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+      ASSERT((Assembler::IsMovW(instr) &&
+              Instruction::ImmedMovwMovtValue(instr) == 0));
+      ASSERT((Assembler::IsMovT(next_instr) &&
+              Instruction::ImmedMovwMovtValue(instr) == 0));

    ulan 2014/07/01 13:29:47: Instruction::ImmedMovwMovtValue(next_instr) == 0
    rmcilroy 2014/07/02 16:35:54: Good catch, thanks!

+      assm->instr_at_put(rinfo.pc(),
+          Assembler::PatchMovwImmediate(instr, offset & 0xffff));
+      assm->instr_at_put(rinfo.pc() + Assembler::kInstrSize,
+          Assembler::PatchMovwImmediate(next_instr, offset >> 16));
+    } else if (type == ConstantPoolArray::INT64) {
       // Instruction to patch must be 'vldr rd, [pp, #0]'.
       ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
               Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
       ASSERT(is_uint10(offset));
-      assm->instr_at_put(rinfo->pc(),
+      assm->instr_at_put(rinfo.pc(),
           Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
     } else {
       // Instruction to patch must be 'ldr rd, [pp, #0]'.
       ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
               Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
       ASSERT(is_uint12(offset));
-      assm->instr_at_put(rinfo->pc(),
+      assm->instr_at_put(rinfo.pc(),
           Assembler::SetLdrRegisterImmediateOffset(instr, offset));
     }
   }

-  ASSERT((index_64bit == count_of_64bit_) &&
-         (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
-         (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
-         (index_32bit == (index_heap_ptr + count_of_32bit_)));
+  ASSERT(small_idx.equals(*small_entries()));
+  ASSERT(extended_idx.equals(*extended_entries()));
 }

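The index arithmetic in Populate assumes a fixed type order within each section (INT64 first, then CODE_PTR, HEAP_PTR, INT32), so a type's base slot is simply the sum of the counts of the types laid out before it. A hypothetical reconstruction of that bookkeeping, not the real ConstantPoolArray::NumberOfEntries API:

    enum Type { INT64 = 0, CODE_PTR, HEAP_PTR, INT32, NUMBER_OF_TYPES };

    struct NumberOfEntries {
      int counts[NUMBER_OF_TYPES] = {0, 0, 0, 0};
      void increment(Type t) { counts[t]++; }
      int count_of(Type t) const { return counts[t]; }
      int total_count() const {
        int sum = 0;
        for (int i = 0; i < NUMBER_OF_TYPES; i++) sum += counts[i];
        return sum;
      }
      // Entries of type t start after all entries of the preceding types.
      int base_of(Type t) const {
        int base = 0;
        for (int i = 0; i < t; i++) base += counts[i];
        return base;
      }
    };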
 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
