Chromium Code Reviews

Unified Diff: src/arm/assembler-arm.cc

Issue 356393003: [Arm]: Enable use of extended out-of-line constant pool for Arm. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Formatted with git cl format (created 6 years, 5 months ago)
 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
 // are met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 213 matching lines...)
 bool RelocInfo::IsCodedSpecially() {
   // The deserializer needs to know whether a pointer is specially coded.  Being
   // specially coded on ARM means that it is a movw/movt instruction, or is an
   // out of line constant pool entry.  These only occur if
   // FLAG_enable_ool_constant_pool is true.
   return FLAG_enable_ool_constant_pool;
 }
 
 
 bool RelocInfo::IsInConstantPool() {
-  if (FLAG_enable_ool_constant_pool) {
-    return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
-  } else {
-    return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
-  }
+  return Assembler::is_constant_pool_load(pc_);
 }
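
Note on this hunk: the branch on FLAG_enable_ool_constant_pool moves into Assembler::is_constant_pool_load, which is defined in src/arm/assembler-arm-inl.h (linked above, not shown on this page). A minimal sketch of the dispatch it has to perform, assuming it only needs to recognize the load forms this file deals in; the real helper may differ, e.g. it may also need to recognize the movw/movt-headed extended-pool sequence introduced later in this patch:

    // Hypothetical sketch only; the real definition lives in
    // src/arm/assembler-arm-inl.h and is not part of this diff.
    static bool IsConstantPoolLoadSketch(Address pc) {
      Instr instr = Memory::int32_at(pc);
      if (FLAG_enable_ool_constant_pool) {
        // Out-of-line pool: small-section loads are 'ldr rd, [pp, #off]';
        // extended-section loads end in 'ldr rd, [pp, rn]'.
        return Assembler::IsLdrPpImmediateOffset(instr) ||
               Assembler::IsLdrPpRegOffset(instr);
      }
      // In-line pool: 'ldr rd, [pc, #off]'.
      return Assembler::IsLdrPcImmediateOffset(instr);
    }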
 
 
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   Instr* pc = reinterpret_cast<Instr*>(pc_);
   Instr* instr = reinterpret_cast<Instr*>(instructions);
   for (int i = 0; i < instruction_count; i++) {
     *(pc + i) = *(instr + i);
   }
(...skipping 160 matching lines...)
 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
 // register r is not encoded.
 const Instr kPopRegPattern =
     al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
 // ldr rd, [pc, #offset]
 const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
 const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
 // ldr rd, [pp, #offset]
 const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
 const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
+// ldr rd, [pp, rn]
+const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
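
The new mask/pattern pair follows the classification idiom used throughout this file: mask off the fields that vary between instances (condition, Rd, Rm, the +/- bit) and compare what remains against a fixed pattern. A self-contained sketch, with the B* bit constants and the pp register code (r8) re-declared locally since in V8 they come from src/arm/constants-arm.h:

    #include <cassert>
    #include <cstdint>

    typedef uint32_t Instr;
    const Instr B16 = 1 << 16, B20 = 1 << 20, B24 = 1 << 24;
    const Instr L = B20;              // bit 20: load (1) vs. store (0)
    const int kRegister_r8_Code = 8;  // r8 serves as pp, the pool pointer

    const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
    const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;

    // The mask keeps bits 27-24 (opcode class), 22-20 (B/W/L) and
    // 19-16 (Rn); it ignores the condition, the up/down bit 23, Rd and Rm.
    bool IsLdrPpRegOffset(Instr instr) {
      return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
    }

    int main() {
      // 0xE7980002 encodes 'ldr r0, [pp, +r2]' (al condition).
      assert(IsLdrPpRegOffset(0xE7980002));
      // A pc-relative 'ldr r0, [pc, #8]' (0xE59F0008) must not match.
      assert(!IsLdrPpRegOffset(0xE59F0008));
      return 0;
    }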
 // vldr dd, [pc, #offset]
 const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
 const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
 // vldr dd, [pp, #offset]
 const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
 const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
 const Instr kBlxIp = al | kBlxRegPattern | ip.code();
 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
 const Instr kMovMvnPattern = 0xd * B21;
 const Instr kMovMvnFlip = B22;
 const Instr kMovLeaveCCMask = 0xdff * B16;
 const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovtPattern = 0x34 * B20;
 const Instr kMovwLeaveCCFlip = 0x5 * B21;
 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
 const Instr kCmpCmnPattern = 0x15 * B20;
 const Instr kCmpCmnFlip = B21;
 const Instr kAddSubFlip = 0x6 * B21;
 const Instr kAndBicFlip = 0xe * B21;
 
 // A mask for the Rd register for push, pop, ldr, str instructions.
 const Instr kLdrRegFpOffsetPattern =
     al | B26 | L | Offset | kRegister_fp_Code * B16;
(...skipping 14 matching lines...)
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
   num_pending_32_bit_reloc_info_ = 0;
   num_pending_64_bit_reloc_info_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
   first_const_pool_32_use_ = -1;
   first_const_pool_64_use_ = -1;
   last_bound_pos_ = 0;
   constant_pool_available_ = !FLAG_enable_ool_constant_pool;
-  constant_pool_full_ = false;
   ClearRecordedAstId();
 }
 
 
 Assembler::~Assembler() {
   ASSERT(const_pool_blocked_nesting_ == 0);
 }
 
 
 void Assembler::GetCode(CodeDesc* desc) {
(...skipping 150 matching lines...)
 
 Instr Assembler::GetConsantPoolLoadPattern() {
   if (FLAG_enable_ool_constant_pool) {
     return kLdrPpImmedPattern;
   } else {
     return kLdrPCImmedPattern;
   }
 }
 
 
+Instr Assembler::GetConsantPoolLoadMask() {
+  if (FLAG_enable_ool_constant_pool) {
+    return kLdrPpImmedMask;
+  } else {
+    return kLdrPCImmedMask;
+  }
+}
+
+
 bool Assembler::IsPush(Instr instr) {
   return ((instr & ~kRdMask) == kPushRegPattern);
 }
 
 
 bool Assembler::IsPop(Instr instr) {
   return ((instr & ~kRdMask) == kPopRegPattern);
 }
 
 
(...skipping 24 matching lines...)
 }
 
 
 bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // ldr<cond> <Rd>, [pp +/- offset_12].
   return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
 }
 
 
+bool Assembler::IsLdrPpRegOffset(Instr instr) {
+  // Check the instruction is indeed a
+  // ldr<cond> <Rd>, [pp, +/- <Rm>].
+  return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
+}
+
+
+Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
+
+
 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // vldr<cond> <Dd>, [pc +/- offset_10].
   return (instr & kVldrDPCMask) == kVldrDPCPattern;
 }
 
 
 bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // vldr<cond> <Dd>, [pp +/- offset_10].
(...skipping 281 matching lines...)
   // immediate fits, change the opcode.
   if (instr != NULL) {
     if ((*instr & kMovMvnMask) == kMovMvnPattern) {
       if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
         *instr ^= kMovMvnFlip;
         return true;
       } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
         if (CpuFeatures::IsSupported(ARMv7)) {
           if (imm32 < 0x10000) {
             *instr ^= kMovwLeaveCCFlip;
-            *instr |= EncodeMovwImmediate(imm32);
+            *instr |= Assembler::EncodeMovwImmediate(imm32);
             *rotate_imm = *immed_8 = 0;  // Not used for movw.
             return true;
           }
         }
       }
     } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
       if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
         *instr ^= kCmpCmnFlip;
         return true;
       }
(...skipping 28 matching lines...)
     return assembler->serializer_enabled();
   } else if (RelocInfo::IsNone(rmode_)) {
     return false;
   }
   return true;
 }
 
 
 static bool use_mov_immediate_load(const Operand& x,
                                    const Assembler* assembler) {
-  if (assembler != NULL && !assembler->can_use_constant_pool()) {
+  if (assembler != NULL && !assembler->is_constant_pool_available()) {
     // If there is no constant pool available, we must use a mov immediate.
     // TODO(rmcilroy): enable ARMv6 support.
     ASSERT(CpuFeatures::IsSupported(ARMv7));
     return true;
   } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
              (assembler == NULL || !assembler->predictable_code_size())) {
     // Prefer movw / movt to constant pool if it is more efficient on the CPU.
     return true;
   } else if (x.must_output_reloc_info(assembler)) {
     // Prefer constant pool if data is likely to be patched.
     return false;
   } else {
     // Otherwise, use immediate load if movw / movt is available.
     return CpuFeatures::IsSupported(ARMv7);
   }
 }
 
 
-bool Operand::is_single_instruction(const Assembler* assembler,
-                                    Instr instr) const {
-  if (rm_.is_valid()) return true;
+int Operand::instructions_required(const Assembler* assembler,
+                                   Instr instr) const {
+  if (rm_.is_valid()) return 1;
   uint32_t dummy1, dummy2;
   if (must_output_reloc_info(assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
-    // constant pool is required. For a mov instruction not setting the
-    // condition code, additional instruction conventions can be used.
-    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      return !use_mov_immediate_load(*this, assembler);
+    // constant pool is required. First account for the instructions required
+    // for the constant pool or immediate load.
+    int instructions;
+    if (use_mov_immediate_load(*this, assembler)) {
+      instructions = 2;  // A movw / movt immediate load.
+    } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
+      instructions = 3;  // An extended constant pool load.
     } else {
-      // If this is not a mov or mvn instruction there will always be an
-      // additional instruction - either mov or ldr. The mov might actually be
-      // two instructions (movw followed by movt), so including the actual
-      // instruction two or three instructions will be generated.
-      return false;
+      instructions = 1;  // A small constant pool load.
     }
+
+    if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
+      // For a mov or mvn instruction which doesn't set the condition
+      // code, the constant pool or immediate load is enough, otherwise we need
+      // to account for the actual instruction being requested.
+      instructions += 1;
+    }
+    return instructions;
   } else {
     // No use of constant pool and the immediate operand can be encoded as a
     // shifter operand.
-    return true;
+    return 1;
   }
 }
 
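To make the new return value concrete, here is how the count works out for a few representative cases (a worked sketch, assuming ARMv7 and that use_extended_constant_pool() reports whether the builder has spilled into the extended section):

    // mov r0, #imm  with movw/movt preferred       -> 2  (movw + movt)
    // mov r0, #imm  via small constant pool        -> 1  (ldr r0, [pp, #off])
    // mov r0, #imm  via extended constant pool     -> 3  (movw + movt + ldr)
    // add r0, r1, #imm  via extended pool          -> 4  (the 3 above + the add)
    // add r0, r1, #4  (fits as a shifter operand)  -> 1  (just the add)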
 
 
 void Assembler::move_32_bit_immediate(Register rd,
                                       const Operand& x,
                                       Condition cond) {
   RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
   if (x.must_output_reloc_info(this)) {
     RecordRelocInfo(rinfo);
   }
 
   if (use_mov_immediate_load(x, this)) {
     Register target = rd.code() == pc.code() ? ip : rd;
     // TODO(rmcilroy): add ARMv6 support for immediate loads.
     ASSERT(CpuFeatures::IsSupported(ARMv7));
     if (!FLAG_enable_ool_constant_pool &&
         x.must_output_reloc_info(this)) {
       // Make sure the movw/movt doesn't get separated.
       BlockConstPoolFor(2);
     }
     movw(target, static_cast<uint32_t>(x.imm32_ & 0xffff), cond);
     movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
     if (target.code() != rd.code()) {
       mov(rd, target, LeaveCC, cond);
     }
   } else {
-    ASSERT(can_use_constant_pool());
-    ConstantPoolAddEntry(rinfo);
-    ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+    ASSERT(is_constant_pool_available());
+    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+    if (section == ConstantPoolArray::EXTENDED_SECTION) {
+      ASSERT(FLAG_enable_ool_constant_pool);
+      Register target = rd.code() == pc.code() ? ip : rd;
+      // Emit instructions to load constant pool offset.
+      movw(target, 0, cond);
+      movt(target, 0, cond);
+      // Load from constant pool at offset.
+      ldr(rd, MemOperand(pp, target), cond);
+    } else {
+      ASSERT(section == ConstantPoolArray::SMALL_SECTION);
+      ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+    }
   }
 }
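
The extended-section branch emits a three-instruction sequence whose immediates are placeholders; ConstantPoolBuilder::Populate (further down in this file) patches in the real offset once the pool layout is final. Schematically:

    // Emitted now (offset unknown):     After Populate (offset = off):
    //   movw target, #0                   movw target, #(off & 0xffff)
    //   movt target, #0                   movt target, #(off >> 16)
    //   ldr  rd, [pp, target]             ldr  rd, [pp, target]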
 
 
 void Assembler::addrmod1(Instr instr,
                          Register rn,
                          Register rd,
                          const Operand& x) {
   CheckBuffer();
   ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
(...skipping 1283 matching lines...)
   if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
     // The double can be encoded in the instruction.
     //
     // Dd = immediate
     // Instruction details available in ARM DDI 0406C.b, A8-936.
     // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
     // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
     int vd, d;
     dst.split_code(&vd, &d);
     emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
-  } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
+  } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
     // TODO(jfb) Temporarily turned off until we have constant blinding or
     //           some equivalent mitigation: an attacker can otherwise control
     //           generated data which also happens to be executable, a Very Bad
     //           Thing indeed.
     //           Blinding gets tricky because we don't have xor, we probably
     //           need to add/subtract without losing precision, which requires a
     //           cookie value that Lithium is probably better positioned to
     //           choose.
     //           We could also add a few peepholes here like detecting 0.0 and
     //           -0.0 and doing a vmov from the sequestered d14, forcing denorms
     //           to zero (we set flush-to-zero), and normalizing NaN values.
     //           We could also detect redundant values.
     //           The code could also randomize the order of values, though
     //           that's tricky because vldr has a limited reach. Furthermore
     //           it breaks load locality.
     RelocInfo rinfo(pc_, imm);
-    ConstantPoolAddEntry(rinfo);
-    vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+    if (section == ConstantPoolArray::EXTENDED_SECTION) {
+      ASSERT(FLAG_enable_ool_constant_pool);
+      // Emit instructions to load constant pool offset.
+      movw(ip, 0);
+      movt(ip, 0);
+      // Load from constant pool at offset.
+      vldr(dst, MemOperand(pp, ip));
+    } else {
+      ASSERT(section == ConstantPoolArray::SMALL_SECTION);
+      vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+    }
   } else {
     // Synthesise the double from ARM immediates.
     uint32_t lo, hi;
     DoubleAsTwoUInt32(imm, &lo, &hi);
 
     if (scratch.is(no_reg)) {
       if (dst.code() < 16) {
         const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
         // Move the low part of the double into the lower of the corresponding S
         // registers of D register dst.
(...skipping 594 matching lines...)
   // a type.
   ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   emit(al | 13*B21 | type*B12 | type);
 }
 
 
 bool Assembler::IsMovT(Instr instr) {
   instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
              ((kNumRegisters-1)*B12) |            // mask out register
              EncodeMovwImmediate(0xFFFF));        // mask out immediate value
-  return instr == 0x34*B20;
+  return instr == kMovtPattern;
 }
 
 
 bool Assembler::IsMovW(Instr instr) {
   instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
              ((kNumRegisters-1)*B12) |            // mask out destination
              EncodeMovwImmediate(0xFFFF));        // mask out immediate value
-  return instr == 0x30*B20;
+  return instr == kMovwPattern;
 }
 
 
+Instr Assembler::GetMovTPattern() { return kMovtPattern; }
+
+
+Instr Assembler::GetMovWPattern() { return kMovwPattern; }
+
+
+Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
+  ASSERT(immediate < 0x10000);
+  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
+Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
+  instruction &= ~EncodeMovwImmediate(0xffff);
+  return instruction | EncodeMovwImmediate(immediate);
+}
+
+
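EncodeMovwImmediate mirrors the ARM movw/movt encoding, which splits a 16-bit immediate into imm4 (instruction bits 19-16) and imm12 (bits 11-0). A standalone copy of the two helpers above with a worked example (ASSERT swapped for assert so the snippet compiles outside V8):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t Instr;

    Instr EncodeMovwImmediate(uint32_t immediate) {
      assert(immediate < 0x10000);
      // imm[15:12] -> instruction bits 19-16, imm[11:0] -> bits 11-0.
      return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
    }

    Instr PatchMovwImmediate(Instr instruction, uint32_t immediate) {
      instruction &= ~EncodeMovwImmediate(0xffff);  // clear both immediate fields
      return instruction | EncodeMovwImmediate(immediate);
    }

    int main() {
      // 0xABCD splits into imm4 = 0xA and imm12 = 0xBCD.
      assert(EncodeMovwImmediate(0xABCD) == 0x000A0BCD);
      // Patching 'movw r0, #0' (0xE3000000) leaves cond and Rd untouched.
      assert(PatchMovwImmediate(0xE3000000, 0xABCD) == 0xE30A0BCD);
      return 0;
    }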
 bool Assembler::IsNop(Instr instr, int type) {
   ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   // Check for mov rx, rx where x = type.
   return instr == (al | 13*B21 | type*B12 | type);
 }
 
 
 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
(...skipping 146 matching lines...)
                                        NULL);
     ClearRecordedAstId();
     reloc_info_writer.Write(&reloc_info_with_ast_id);
   } else {
     reloc_info_writer.Write(&rinfo);
   }
 }
 
 
-void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
+    const RelocInfo& rinfo) {
   if (FLAG_enable_ool_constant_pool) {
-    constant_pool_builder_.AddEntry(this, rinfo);
+    return constant_pool_builder_.AddEntry(this, rinfo);
   } else {
     if (rinfo.rmode() == RelocInfo::NONE64) {
       ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
       if (num_pending_64_bit_reloc_info_ == 0) {
         first_const_pool_64_use_ = pc_offset();
       }
       pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
     } else {
       ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
       if (num_pending_32_bit_reloc_info_ == 0) {
         first_const_pool_32_use_ = pc_offset();
       }
       pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
     }
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
     BlockConstPoolFor(1);
+    return ConstantPoolArray::SMALL_SECTION;
   }
 }
 
 
 void Assembler::BlockConstPoolFor(int instructions) {
   if (FLAG_enable_ool_constant_pool) {
     // Should be a no-op if using an out-of-line constant pool.
     ASSERT(num_pending_32_bit_reloc_info_ == 0);
     ASSERT(num_pending_64_bit_reloc_info_ == 0);
     return;
(...skipping 231 matching lines...)
   return constant_pool_builder_.New(isolate);
 }
 
 
 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   constant_pool_builder_.Populate(this, constant_pool);
 }
 
 
 ConstantPoolBuilder::ConstantPoolBuilder()
-    : entries_(),
-      merged_indexes_(),
-      count_of_64bit_(0),
-      count_of_code_ptr_(0),
-      count_of_heap_ptr_(0),
-      count_of_32bit_(0) { }
+    : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
 
 
 bool ConstantPoolBuilder::IsEmpty() {
   return entries_.size() == 0;
 }
 
 
-bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
-  return rmode == RelocInfo::NONE64;
+ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
+    RelocInfo::Mode rmode) {
+  if (rmode == RelocInfo::NONE64) {
+    return ConstantPoolArray::INT64;
+  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
+    return ConstantPoolArray::INT32;
+  } else if (RelocInfo::IsCodeTarget(rmode)) {
+    return ConstantPoolArray::CODE_PTR;
+  } else {
+    ASSERT(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
+    return ConstantPoolArray::HEAP_PTR;
+  }
 }
 
 
-bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
-  return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
-}
-
-
-bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
-  return RelocInfo::IsCodeTarget(rmode);
-}
-
-
-bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
-  return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
-}
-
-
-void ConstantPoolBuilder::AddEntry(Assembler* assm,
-                                   const RelocInfo& rinfo) {
+ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
+    Assembler* assm, const RelocInfo& rinfo) {
   RelocInfo::Mode rmode = rinfo.rmode();
   ASSERT(rmode != RelocInfo::COMMENT &&
          rmode != RelocInfo::POSITION &&
          rmode != RelocInfo::STATEMENT_POSITION &&
         rmode != RelocInfo::CONST_POOL);
 
 
   // Try to merge entries which won't be patched.
   int merged_index = -1;
+  ConstantPoolArray::LayoutSection entry_section = current_section_;
   if (RelocInfo::IsNone(rmode) ||
       (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
     size_t i;
-    std::vector<RelocInfo>::const_iterator it;
+    std::vector<ConstantPoolEntry>::const_iterator it;
     for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
-      if (RelocInfo::IsEqual(rinfo, *it)) {
+      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
+        // Merge with found entry.
         merged_index = i;
+        entry_section = entries_[i].section_;
         break;
       }
     }
   }
-
-  entries_.push_back(rinfo);
-  merged_indexes_.push_back(merged_index);
+  ASSERT(entry_section <= current_section_);
+  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
 
   if (merged_index == -1) {
     // Not merged, so update the appropriate count.
-    if (Is64BitEntry(rmode)) {
-      count_of_64bit_++;
-    } else if (Is32BitEntry(rmode)) {
-      count_of_32bit_++;
-    } else if (IsCodePtrEntry(rmode)) {
-      count_of_code_ptr_++;
-    } else {
-      ASSERT(IsHeapPtrEntry(rmode));
-      count_of_heap_ptr_++;
-    }
+    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
   }
 
-  // Check if we still have room for another entry given Arm's ldr and vldr
-  // immediate offset range.
-  // TODO(rmcilroy): Avoid creating a new object here when we support
-  // extended constant pools.
-  ConstantPoolArray::NumberOfEntries total(count_of_64bit_,
-                                           count_of_code_ptr_,
-                                           count_of_heap_ptr_,
-                                           count_of_32bit_);
-  ConstantPoolArray::NumberOfEntries int64_counts(count_of_64bit_, 0, 0, 0);
-  if (!(is_uint12(ConstantPoolArray::SizeFor(total)) &&
-        is_uint10(ConstantPoolArray::SizeFor(int64_counts)))) {
-    assm->set_constant_pool_full();
-  }
+  // Check if we still have room for another entry in the small section
+  // given Arm's ldr and vldr immediate offset range.
+  if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
+      !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
+        is_uint10(ConstantPoolArray::MaxInt64Offset(
+            small_entries()->count_of(ConstantPoolArray::INT64))))) {
+    current_section_ = ConstantPoolArray::EXTENDED_SECTION;
+  }
+  return entry_section;
 }
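
The section switch at the end of AddEntry is driven by the reach of the single-instruction loads into the small section; once either bound would be exceeded, all later entries go to the extended section and are reached via the movw/movt/ldr sequence instead:

    // ldr  rd, [pp, #imm12]   -> byte offset must fit in 12 bits (< 4096)
    // vldr dd, [pp, #imm8*4]  -> byte offset must fit in 10 bits (< 1024),
    //                            hence the tighter is_uint10 bound on the
    //                            offset of the last INT64 (double) entry.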
 
 
 void ConstantPoolBuilder::Relocate(int pc_delta) {
-  for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
-       rinfo != entries_.end(); rinfo++) {
-    ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
-    rinfo->set_pc(rinfo->pc() + pc_delta);
+  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+       entry != entries_.end(); entry++) {
+    ASSERT(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
+    entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
   }
 }
 
 
 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
   if (IsEmpty()) {
     return isolate->factory()->empty_constant_pool_array();
+  } else if (extended_entries()->is_empty()) {
+    return isolate->factory()->NewConstantPoolArray(*small_entries());
   } else {
-    ConstantPoolArray::NumberOfEntries small(count_of_64bit_,
-                                             count_of_code_ptr_,
-                                             count_of_heap_ptr_,
-                                             count_of_32bit_);
-    return isolate->factory()->NewConstantPoolArray(small);
+    ASSERT(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
+    return isolate->factory()->NewExtendedConstantPoolArray(
+        *small_entries(), *extended_entries());
   }
 }
 
 
 void ConstantPoolBuilder::Populate(Assembler* assm,
                                    ConstantPoolArray* constant_pool) {
-  ASSERT(count_of_64bit_ == constant_pool->number_of_entries(
-      ConstantPoolArray::INT64, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_code_ptr_ == constant_pool->number_of_entries(
-      ConstantPoolArray::CODE_PTR, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_heap_ptr_ == constant_pool->number_of_entries(
-      ConstantPoolArray::HEAP_PTR, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_32bit_ == constant_pool->number_of_entries(
-      ConstantPoolArray::INT32, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(entries_.size() == merged_indexes_.size());
+  ASSERT_EQ(extended_entries()->is_empty(),
+            !constant_pool->is_extended_layout());
+  ASSERT(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
+      constant_pool, ConstantPoolArray::SMALL_SECTION)));
+  if (constant_pool->is_extended_layout()) {
+    ASSERT(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
+        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
+  }
 
-  int index_64bit = 0;
-  int index_code_ptr = count_of_64bit_;
-  int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
-  int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
-
-  size_t i;
-  std::vector<RelocInfo>::const_iterator rinfo;
-  for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
-    RelocInfo::Mode rmode = rinfo->rmode();
+  ConstantPoolArray::NumberOfEntries small_idx;
+  ConstantPoolArray::NumberOfEntries extended_idx;
+  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+       entry != entries_.end(); entry++) {
+    RelocInfo rinfo = entry->rinfo_;
+    RelocInfo::Mode rmode = entry->rinfo_.rmode();
+    ConstantPoolArray::Type type = GetConstantPoolType(rmode);
 
     // Update constant pool if necessary and get the entry's offset.
     int offset;
-    if (merged_indexes_[i] == -1) {
-      if (Is64BitEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
-        constant_pool->set(index_64bit++, rinfo->data64());
-      } else if (Is32BitEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
-        constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
-      } else if (IsCodePtrEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
-            kHeapObjectTag;
-        constant_pool->set(index_code_ptr++,
-                           reinterpret_cast<Address>(rinfo->data()));
-      } else {
-        ASSERT(IsHeapPtrEntry(rmode));
-        offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
-            kHeapObjectTag;
-        constant_pool->set(index_heap_ptr++,
-                           reinterpret_cast<Object *>(rinfo->data()));
-      }
-      merged_indexes_[i] = offset;  // Stash offset for merged entries.
+    if (entry->merged_index_ == -1) {
+      int index;
+      if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
+        index = small_entries()->total_count() +
+                extended_entries()->base_of(type) + extended_idx.count_of(type);
+        extended_idx.increment(type);
+      } else {
+        ASSERT(entry->section_ == ConstantPoolArray::SMALL_SECTION);
+        index = small_entries()->base_of(type) + small_idx.count_of(type);
+        small_idx.increment(type);
+      }
+      if (type == ConstantPoolArray::INT64) {
+        constant_pool->set(index, rinfo.data64());
+      } else if (type == ConstantPoolArray::INT32) {
+        constant_pool->set(index, static_cast<int32_t>(rinfo.data()));
+      } else if (type == ConstantPoolArray::CODE_PTR) {
+        constant_pool->set(index, reinterpret_cast<Address>(rinfo.data()));
+      } else {
+        ASSERT(type == ConstantPoolArray::HEAP_PTR);
+        constant_pool->set(index, reinterpret_cast<Object*>(rinfo.data()));
+      }
+      offset = constant_pool->OffsetOfElementAt(index) - kHeapObjectTag;
+      entry->merged_index_ = offset;  // Stash offset for merged entries.
     } else {
-      size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
-      ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
-      offset = merged_indexes_[merged_index];
+      ASSERT(entry->merged_index_ < (entry - entries_.begin()));
+      offset = entries_[entry->merged_index_].merged_index_;
     }
 
     // Patch vldr/ldr instruction with correct offset.
-    Instr instr = assm->instr_at(rinfo->pc());
-    if (Is64BitEntry(rmode)) {
+    Instr instr = assm->instr_at(rinfo.pc());
+    if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
+      // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
+      Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+      ASSERT((Assembler::IsMovW(instr) &&
+              Instruction::ImmedMovwMovtValue(instr) == 0));
+      ASSERT((Assembler::IsMovT(next_instr) &&
+              Instruction::ImmedMovwMovtValue(next_instr) == 0));
+      assm->instr_at_put(rinfo.pc(),
+                         Assembler::PatchMovwImmediate(instr, offset & 0xffff));
+      assm->instr_at_put(
+          rinfo.pc() + Assembler::kInstrSize,
+          Assembler::PatchMovwImmediate(next_instr, offset >> 16));
+    } else if (type == ConstantPoolArray::INT64) {
       // Instruction to patch must be 'vldr rd, [pp, #0]'.
       ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
               Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
       ASSERT(is_uint10(offset));
-      assm->instr_at_put(rinfo->pc(),
-          Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
+      assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
+                                         instr, offset));
     } else {
       // Instruction to patch must be 'ldr rd, [pp, #0]'.
       ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
               Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
       ASSERT(is_uint12(offset));
-      assm->instr_at_put(rinfo->pc(),
-          Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+      assm->instr_at_put(
+          rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
     }
   }
 
-  ASSERT((index_64bit == count_of_64bit_) &&
-         (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
-         (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
-         (index_32bit == (index_heap_ptr + count_of_32bit_)));
+  ASSERT(small_idx.equals(*small_entries()));
+  ASSERT(extended_idx.equals(*extended_entries()));
 }
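
Worked example of the extended-section patch path in Populate, assuming an entry lands at byte offset 0x12345 from pp:

    // offset = 0x12345
    //   movw target, #0  ->  PatchMovwImmediate(movw, offset & 0xffff)  // #0x2345
    //   movt target, #0  ->  PatchMovwImmediate(movt, offset >> 16)     // #0x0001
    // The trailing 'ldr rd, [pp, target]' needs no patching: at run time
    // target = (0x0001 << 16) | 0x2345 = 0x12345, the offset into the pool.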
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM