Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(521)

Side by Side Diff: src/arm/assembler-arm.cc

Issue 2084017: Version 2.2.11... (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: Created 10 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/assembler-arm.h ('k') | src/arm/assembler-arm-inl.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions 5 // modification, are permitted provided that the following conditions
6 // are met: 6 // are met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 18 matching lines...) Expand all
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE. 31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 32
33 // The original source code covered by the above license has been 33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc. 34 // modified significantly by Google Inc.
35 // Copyright 2010 the V8 project authors. All rights reserved. 35 // Copyright 2010 the V8 project authors. All rights reserved.
36 36
37 #include "v8.h" 37 #include "v8.h"
38 38
39 #if defined(V8_TARGET_ARCH_ARM)
40
39 #include "arm/assembler-arm-inl.h" 41 #include "arm/assembler-arm-inl.h"
40 #include "serialize.h" 42 #include "serialize.h"
41 43
42 namespace v8 { 44 namespace v8 {
43 namespace internal { 45 namespace internal {
44 46
45 // Safe default is no features. 47 // Safe default is no features.
46 unsigned CpuFeatures::supported_ = 0; 48 unsigned CpuFeatures::supported_ = 0;
47 unsigned CpuFeatures::enabled_ = 0; 49 unsigned CpuFeatures::enabled_ = 0;
48 unsigned CpuFeatures::found_by_runtime_probing_ = 0; 50 unsigned CpuFeatures::found_by_runtime_probing_ = 0;
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
99 #endif 101 #endif
100 } 102 }
101 103
102 104
103 // ----------------------------------------------------------------------------- 105 // -----------------------------------------------------------------------------
104 // Implementation of RelocInfo 106 // Implementation of RelocInfo
105 107
106 const int RelocInfo::kApplyMask = 0; 108 const int RelocInfo::kApplyMask = 0;
107 109
108 110
111 bool RelocInfo::IsCodedSpecially() {
112 // The deserializer needs to know whether a pointer is specially coded. Being
113 // specially coded on ARM means that it is a movw/movt instruction. We don't
114 // generate those yet.
115 return false;
116 }
117
118
119
109 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { 120 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
110 // Patch the code at the current address with the supplied instructions. 121 // Patch the code at the current address with the supplied instructions.
111 Instr* pc = reinterpret_cast<Instr*>(pc_); 122 Instr* pc = reinterpret_cast<Instr*>(pc_);
112 Instr* instr = reinterpret_cast<Instr*>(instructions); 123 Instr* instr = reinterpret_cast<Instr*>(instructions);
113 for (int i = 0; i < instruction_count; i++) { 124 for (int i = 0; i < instruction_count; i++) {
114 *(pc + i) = *(instr + i); 125 *(pc + i) = *(instr + i);
115 } 126 }
116 127
117 // Indicate that code has changed. 128 // Indicate that code has changed.
118 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); 129 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
(...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after
261 // mov lr, pc 272 // mov lr, pc
262 const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12; 273 const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
263 // ldr rd, [pc, #offset] 274 // ldr rd, [pc, #offset]
264 const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16; 275 const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
265 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16; 276 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
266 // blxcc rm 277 // blxcc rm
267 const Instr kBlxRegMask = 278 const Instr kBlxRegMask =
268 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; 279 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
269 const Instr kBlxRegPattern = 280 const Instr kBlxRegPattern =
270 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4; 281 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
282 // A mask for the Rd register for push, pop, ldr, str instructions.
283 const Instr kRdMask = 0x0000f000;
284 static const int kRdShift = 12;
285 static const Instr kLdrRegFpOffsetPattern =
286 al | B26 | L | Offset | fp.code() * B16;
287 static const Instr kStrRegFpOffsetPattern =
288 al | B26 | Offset | fp.code() * B16;
289 static const Instr kLdrRegFpNegOffsetPattern =
290 al | B26 | L | NegOffset | fp.code() * B16;
291 static const Instr kStrRegFpNegOffsetPattern =
292 al | B26 | NegOffset | fp.code() * B16;
293 static const Instr kLdrStrInstrTypeMask = 0xffff0000;
294 static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
295 static const Instr kLdrStrOffsetMask = 0x00000fff;
271 296
272 // Spare buffer. 297 // Spare buffer.
273 static const int kMinimalBufferSize = 4*KB; 298 static const int kMinimalBufferSize = 4*KB;
274 static byte* spare_buffer_ = NULL; 299 static byte* spare_buffer_ = NULL;
275 300
276 Assembler::Assembler(void* buffer, int buffer_size) { 301 Assembler::Assembler(void* buffer, int buffer_size) {
277 if (buffer == NULL) { 302 if (buffer == NULL) {
278 // Do our own buffer management. 303 // Do our own buffer management.
279 if (buffer_size <= kMinimalBufferSize) { 304 if (buffer_size <= kMinimalBufferSize) {
280 buffer_size = kMinimalBufferSize; 305 buffer_size = kMinimalBufferSize;
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
388 bool positive = offset >= 0; 413 bool positive = offset >= 0;
389 if (!positive) offset = -offset; 414 if (!positive) offset = -offset;
390 ASSERT(is_uint12(offset)); 415 ASSERT(is_uint12(offset));
391 // Set bit indicating whether the offset should be added. 416 // Set bit indicating whether the offset should be added.
392 instr = (instr & ~B23) | (positive ? B23 : 0); 417 instr = (instr & ~B23) | (positive ? B23 : 0);
393 // Set the actual offset. 418 // Set the actual offset.
394 return (instr & ~Off12Mask) | offset; 419 return (instr & ~Off12Mask) | offset;
395 } 420 }
396 421
397 422
423 Register Assembler::GetRd(Instr instr) {
424 Register reg;
425 reg.code_ = ((instr & kRdMask) >> kRdShift);
426 return reg;
427 }
428
429
430 bool Assembler::IsPush(Instr instr) {
431 return ((instr & ~kRdMask) == kPushRegPattern);
432 }
433
434
435 bool Assembler::IsPop(Instr instr) {
436 return ((instr & ~kRdMask) == kPopRegPattern);
437 }
438
439
440 bool Assembler::IsStrRegFpOffset(Instr instr) {
441 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
442 }
443
444
445 bool Assembler::IsLdrRegFpOffset(Instr instr) {
446 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
447 }
448
449
450 bool Assembler::IsStrRegFpNegOffset(Instr instr) {
451 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
452 }
453
454
455 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
456 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
457 }
458
459
398 // Labels refer to positions in the (to be) generated code. 460 // Labels refer to positions in the (to be) generated code.
399 // There are bound, linked, and unused labels. 461 // There are bound, linked, and unused labels.
400 // 462 //
401 // Bound labels refer to known positions in the already 463 // Bound labels refer to known positions in the already
402 // generated code. pos() is the position the label refers to. 464 // generated code. pos() is the position the label refers to.
403 // 465 //
404 // Linked labels refer to unknown positions in the code 466 // Linked labels refer to unknown positions in the code
405 // to be generated; pos() is the position of the last 467 // to be generated; pos() is the position of the last
406 // instruction using the label. 468 // instruction using the label.
407 469
(...skipping 472 matching lines...) Expand 10 before | Expand all | Expand 10 after
880 942
881 943
882 void Assembler::add(Register dst, Register src1, const Operand& src2, 944 void Assembler::add(Register dst, Register src1, const Operand& src2,
883 SBit s, Condition cond) { 945 SBit s, Condition cond) {
884 addrmod1(cond | 4*B21 | s, src1, dst, src2); 946 addrmod1(cond | 4*B21 | s, src1, dst, src2);
885 947
886 // Eliminate pattern: push(r), pop() 948 // Eliminate pattern: push(r), pop()
887 // str(src, MemOperand(sp, 4, NegPreIndex), al); 949 // str(src, MemOperand(sp, 4, NegPreIndex), al);
888 // add(sp, sp, Operand(kPointerSize)); 950 // add(sp, sp, Operand(kPointerSize));
889 // Both instructions can be eliminated. 951 // Both instructions can be eliminated.
890 int pattern_size = 2 * kInstrSize; 952 if (can_peephole_optimize(2) &&
891 if (FLAG_push_pop_elimination &&
892 last_bound_pos_ <= (pc_offset() - pattern_size) &&
893 reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
894 // Pattern. 953 // Pattern.
895 instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && 954 instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
896 (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) { 955 (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
897 pc_ -= 2 * kInstrSize; 956 pc_ -= 2 * kInstrSize;
898 if (FLAG_print_push_pop_elimination) { 957 if (FLAG_print_peephole_optimization) {
899 PrintF("%x push(reg)/pop() eliminated\n", pc_offset()); 958 PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
900 } 959 }
901 } 960 }
902 } 961 }
903 962
904 963
905 void Assembler::adc(Register dst, Register src1, const Operand& src2, 964 void Assembler::adc(Register dst, Register src1, const Operand& src2,
906 SBit s, Condition cond) { 965 SBit s, Condition cond) {
907 addrmod1(cond | 5*B21 | s, src1, dst, src2); 966 addrmod1(cond | 5*B21 | s, src1, dst, src2);
908 } 967 }
(...skipping 170 matching lines...) Expand 10 before | Expand all | Expand 10 after
1079 } 1138 }
1080 1139
1081 1140
1082 // Load/Store instructions. 1141 // Load/Store instructions.
1083 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { 1142 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1084 if (dst.is(pc)) { 1143 if (dst.is(pc)) {
1085 WriteRecordedPositions(); 1144 WriteRecordedPositions();
1086 } 1145 }
1087 addrmod2(cond | B26 | L, dst, src); 1146 addrmod2(cond | B26 | L, dst, src);
1088 1147
1089 // Eliminate pattern: push(r), pop(r) 1148 // Eliminate pattern: push(ry), pop(rx)
1090 // str(r, MemOperand(sp, 4, NegPreIndex), al) 1149 // str(ry, MemOperand(sp, 4, NegPreIndex), al)
1091 // ldr(r, MemOperand(sp, 4, PostIndex), al) 1150 // ldr(rx, MemOperand(sp, 4, PostIndex), al)
1092 // Both instructions can be eliminated. 1151 // Both instructions can be eliminated if ry = rx.
1093 int pattern_size = 2 * kInstrSize; 1152 // If ry != rx, a register copy from ry to rx is inserted
1094 if (FLAG_push_pop_elimination && 1153 // after eliminating the push and the pop instructions.
1095 last_bound_pos_ <= (pc_offset() - pattern_size) && 1154 Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
1096 reloc_info_writer.last_pc() <= (pc_ - pattern_size) && 1155 Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
1097 // Pattern. 1156
1098 instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) && 1157 if (can_peephole_optimize(2) &&
1099 instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) { 1158 IsPush(push_instr) &&
1100 pc_ -= 2 * kInstrSize; 1159 IsPop(pop_instr)) {
1101 if (FLAG_print_push_pop_elimination) { 1160 if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
1102 PrintF("%x push/pop (same reg) eliminated\n", pc_offset()); 1161 // For consecutive push and pop on different registers,
1162 // we delete both the push & pop and insert a register move.
1163 // push ry, pop rx --> mov rx, ry
1164 Register reg_pushed, reg_popped;
1165 reg_pushed = GetRd(push_instr);
1166 reg_popped = GetRd(pop_instr);
1167 pc_ -= 2 * kInstrSize;
1168 // Insert a mov instruction, which is better than a pair of push & pop
1169 mov(reg_popped, reg_pushed);
1170 if (FLAG_print_peephole_optimization) {
1171 PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset());
1172 }
1173 } else {
1174 // For consecutive push and pop on the same register,
1175 // both the push and the pop can be deleted.
1176 pc_ -= 2 * kInstrSize;
1177 if (FLAG_print_peephole_optimization) {
1178 PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
1179 }
1180 }
1181 }
1182
1183 if (can_peephole_optimize(2)) {
1184 Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
1185 Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
1186
1187 if ((IsStrRegFpOffset(str_instr) &&
1188 IsLdrRegFpOffset(ldr_instr)) ||
1189 (IsStrRegFpNegOffset(str_instr) &&
1190 IsLdrRegFpNegOffset(ldr_instr))) {
1191 if ((ldr_instr & kLdrStrInstrArgumentMask) ==
1192 (str_instr & kLdrStrInstrArgumentMask)) {
1193 // Pattern: Ldr/str same fp+offset, same register.
1194 //
1195 // The following:
1196 // str rx, [fp, #-12]
1197 // ldr rx, [fp, #-12]
1198 //
1199 // Becomes:
1200 // str rx, [fp, #-12]
1201
1202 pc_ -= 1 * kInstrSize;
1203 if (FLAG_print_peephole_optimization) {
1204 PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
1205 }
1206 } else if ((ldr_instr & kLdrStrOffsetMask) ==
1207 (str_instr & kLdrStrOffsetMask)) {
1208 // Pattern: Ldr/str same fp+offset, different register.
1209 //
1210 // The following:
1211 // str rx, [fp, #-12]
1212 // ldr ry, [fp, #-12]
1213 //
1214 // Becomes:
1215 // str rx, [fp, #-12]
1216 // mov ry, rx
1217
1218 Register reg_stored, reg_loaded;
1219 reg_stored = GetRd(str_instr);
1220 reg_loaded = GetRd(ldr_instr);
1221 pc_ -= 1 * kInstrSize;
1222 // Insert a mov instruction, which is better than ldr.
1223 mov(reg_loaded, reg_stored);
1224 if (FLAG_print_peephole_optimization) {
1225 PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
1226 }
1227 }
1228 }
1229 }
1230
1231 if (can_peephole_optimize(3)) {
1232 Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
1233 Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
1234 Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
1235 if (IsPush(mem_write_instr) &&
1236 IsPop(mem_read_instr)) {
1237 if ((IsLdrRegFpOffset(ldr_instr) ||
1238 IsLdrRegFpNegOffset(ldr_instr))) {
1239 if ((mem_write_instr & kRdMask) ==
1240 (mem_read_instr & kRdMask)) {
1241 // Pattern: push & pop from/to same register,
1242 // with a fp+offset ldr in between
1243 //
1244 // The following:
1245 // str rx, [sp, #-4]!
1246 // ldr rz, [fp, #-24]
1247 // ldr rx, [sp], #+4
1248 //
1249 // Becomes:
1250 // if(rx == rz)
1251 // delete all
1252 // else
1253 // ldr rz, [fp, #-24]
1254
1255 if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
1256 pc_ -= 3 * kInstrSize;
1257 } else {
1258 pc_ -= 3 * kInstrSize;
1259 // Reinsert back the ldr rz.
1260 emit(ldr_instr);
1261 }
1262 if (FLAG_print_peephole_optimization) {
1263 PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
1264 }
1265 } else {
1266 // Pattern: push & pop from/to different registers
1267 // with a fp+offset ldr in between
1268 //
1269 // The following:
1270 // str rx, [sp, #-4]!
1271 // ldr rz, [fp, #-24]
1272 // ldr ry, [sp], #+4
1273 //
1274 // Becomes:
1275 // if(ry == rz)
1276 // mov ry, rx;
1277 // else if(rx != rz)
1278 // ldr rz, [fp, #-24]
1279 // mov ry, rx
1280 // else if((ry != rz) || (rx == rz)) becomes:
1281 // mov ry, rx
1282 // ldr rz, [fp, #-24]
1283
1284 Register reg_pushed, reg_popped;
1285 if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
1286 reg_pushed = GetRd(mem_write_instr);
1287 reg_popped = GetRd(mem_read_instr);
1288 pc_ -= 3 * kInstrSize;
1289 mov(reg_popped, reg_pushed);
1290 } else if ((mem_write_instr & kRdMask)
1291 != (ldr_instr & kRdMask)) {
1292 reg_pushed = GetRd(mem_write_instr);
1293 reg_popped = GetRd(mem_read_instr);
1294 pc_ -= 3 * kInstrSize;
1295 emit(ldr_instr);
1296 mov(reg_popped, reg_pushed);
1297 } else if (((mem_read_instr & kRdMask)
1298 != (ldr_instr & kRdMask)) ||
1299 ((mem_write_instr & kRdMask)
1300 == (ldr_instr & kRdMask)) ) {
1301 reg_pushed = GetRd(mem_write_instr);
1302 reg_popped = GetRd(mem_read_instr);
1303 pc_ -= 3 * kInstrSize;
1304 mov(reg_popped, reg_pushed);
1305 emit(ldr_instr);
1306 }
1307 if (FLAG_print_peephole_optimization) {
1308 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
1309 }
1310 }
1311 }
1103 } 1312 }
1104 } 1313 }
1105 } 1314 }
1106 1315
1107 1316
1108 void Assembler::str(Register src, const MemOperand& dst, Condition cond) { 1317 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1109 addrmod2(cond | B26, src, dst); 1318 addrmod2(cond | B26, src, dst);
1110 1319
1111 // Eliminate pattern: pop(), push(r) 1320 // Eliminate pattern: pop(), push(r)
1112 // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al 1321 // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
1113 // -> str r, [sp, 0], al 1322 // -> str r, [sp, 0], al
1114 int pattern_size = 2 * kInstrSize; 1323 if (can_peephole_optimize(2) &&
1115 if (FLAG_push_pop_elimination &&
1116 last_bound_pos_ <= (pc_offset() - pattern_size) &&
1117 reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
1118 // Pattern. 1324 // Pattern.
1119 instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) && 1325 instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
1120 instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) { 1326 instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
1121 pc_ -= 2 * kInstrSize; 1327 pc_ -= 2 * kInstrSize;
1122 emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12); 1328 emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
1123 if (FLAG_print_push_pop_elimination) { 1329 if (FLAG_print_peephole_optimization) {
1124 PrintF("%x pop()/push(reg) eliminated\n", pc_offset()); 1330 PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
1125 } 1331 }
1126 } 1332 }
1127 } 1333 }
1128 1334
1129 1335
1130 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) { 1336 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1131 addrmod2(cond | B26 | B | L, dst, src); 1337 addrmod2(cond | B26 | B | L, dst, src);
1132 } 1338 }
1133 1339
(...skipping 21 matching lines...) Expand all
1155 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { 1361 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1156 addrmod3(cond | L | B7 | S6 | H | B4, dst, src); 1362 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1157 } 1363 }
1158 1364
1159 1365
1160 void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) { 1366 void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
1161 ASSERT(src.rm().is(no_reg)); 1367 ASSERT(src.rm().is(no_reg));
1162 #ifdef CAN_USE_ARMV7_INSTRUCTIONS 1368 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
1163 addrmod3(cond | B7 | B6 | B4, dst, src); 1369 addrmod3(cond | B7 | B6 | B4, dst, src);
1164 #else 1370 #else
1165 ldr(dst, src, cond); 1371 // Generate two ldr instructions if ldrd is not available.
1166 MemOperand src1(src); 1372 MemOperand src1(src);
1167 src1.set_offset(src1.offset() + 4); 1373 src1.set_offset(src1.offset() + 4);
1168 Register dst1(dst); 1374 Register dst1(dst);
1169 dst1.code_ = dst1.code_ + 1; 1375 dst1.set_code(dst1.code() + 1);
1170 ldr(dst1, src1, cond); 1376 if (dst.is(src.rn())) {
1377 ldr(dst1, src1, cond);
1378 ldr(dst, src, cond);
1379 } else {
1380 ldr(dst, src, cond);
1381 ldr(dst1, src1, cond);
1382 }
1171 #endif 1383 #endif
1172 } 1384 }
1173 1385
1174 1386
1175 void Assembler::strd(Register src, const MemOperand& dst, Condition cond) { 1387 void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
1176 ASSERT(dst.rm().is(no_reg)); 1388 ASSERT(dst.rm().is(no_reg));
1177 #ifdef CAN_USE_ARMV7_INSTRUCTIONS 1389 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
1178 addrmod3(cond | B7 | B6 | B5 | B4, src, dst); 1390 addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
1179 #else 1391 #else
1180 str(src, dst, cond); 1392 // Generate two str instructions if strd is not available.
1181 MemOperand dst1(dst); 1393 MemOperand dst1(dst);
1182 dst1.set_offset(dst1.offset() + 4); 1394 dst1.set_offset(dst1.offset() + 4);
1183 Register src1(src); 1395 Register src1(src);
1184 src1.code_ = src1.code_ + 1; 1396 src1.set_code(src1.code() + 1);
1397 str(src, dst, cond);
1185 str(src1, dst1, cond); 1398 str(src1, dst1, cond);
1186 #endif 1399 #endif
1187 } 1400 }
1188 1401
1189 // Load/Store multiple instructions. 1402 // Load/Store multiple instructions.
1190 void Assembler::ldm(BlockAddrMode am, 1403 void Assembler::ldm(BlockAddrMode am,
1191 Register base, 1404 Register base,
1192 RegList dst, 1405 RegList dst,
1193 Condition cond) { 1406 Condition cond) {
1194 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable. 1407 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
(...skipping 14 matching lines...) Expand all
1209 1422
1210 1423
1211 void Assembler::stm(BlockAddrMode am, 1424 void Assembler::stm(BlockAddrMode am,
1212 Register base, 1425 Register base,
1213 RegList src, 1426 RegList src,
1214 Condition cond) { 1427 Condition cond) {
1215 addrmod4(cond | B27 | am, base, src); 1428 addrmod4(cond | B27 | am, base, src);
1216 } 1429 }
1217 1430
1218 1431
1219 // Semaphore instructions.
1220 void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
1221 ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
1222 ASSERT(!dst.is(base) && !src.is(base));
1223 emit(cond | P | base.code()*B16 | dst.code()*B12 |
1224 B7 | B4 | src.code());
1225 }
1226
1227
1228 void Assembler::swpb(Register dst,
1229 Register src,
1230 Register base,
1231 Condition cond) {
1232 ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
1233 ASSERT(!dst.is(base) && !src.is(base));
1234 emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
1235 B7 | B4 | src.code());
1236 }
1237
1238
1239 // Exception-generating instructions and debugging support. 1432 // Exception-generating instructions and debugging support.
1240 void Assembler::stop(const char* msg) { 1433 void Assembler::stop(const char* msg) {
1241 #ifndef __arm__ 1434 #ifndef __arm__
1242 // The simulator handles these special instructions and stops execution. 1435 // The simulator handles these special instructions and stops execution.
1243 emit(15 << 28 | ((intptr_t) msg)); 1436 emit(15 << 28 | ((intptr_t) msg));
1244 #else // def __arm__ 1437 #else // def __arm__
1245 #ifdef CAN_USE_ARMV5_INSTRUCTIONS 1438 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
1246 bkpt(0); 1439 bkpt(0);
1247 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS 1440 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS
1248 swi(0x9f0001); 1441 swi(0x9f0001);
(...skipping 523 matching lines...) Expand 10 before | Expand all | Expand 10 after
1772 1965
1773 1966
1774 // Pseudo instructions. 1967 // Pseudo instructions.
1775 void Assembler::nop(int type) { 1968 void Assembler::nop(int type) {
1776 // This is mov rx, rx. 1969 // This is mov rx, rx.
1777 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. 1970 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
1778 emit(al | 13*B21 | type*B12 | type); 1971 emit(al | 13*B21 | type*B12 | type);
1779 } 1972 }
1780 1973
1781 1974
1782 void Assembler::lea(Register dst,
1783 const MemOperand& x,
1784 SBit s,
1785 Condition cond) {
1786 int am = x.am_;
1787 if (!x.rm_.is_valid()) {
1788 // Immediate offset.
1789 if ((am & P) == 0) // post indexing
1790 mov(dst, Operand(x.rn_), s, cond);
1791 else if ((am & U) == 0) // negative indexing
1792 sub(dst, x.rn_, Operand(x.offset_), s, cond);
1793 else
1794 add(dst, x.rn_, Operand(x.offset_), s, cond);
1795 } else {
1796 // Register offset (shift_imm_ and shift_op_ are 0) or scaled
1797 // register offset the constructors make sure that both shift_imm_
1798 // and shift_op_ are initialized.
1799 ASSERT(!x.rm_.is(pc));
1800 if ((am & P) == 0) // post indexing
1801 mov(dst, Operand(x.rn_), s, cond);
1802 else if ((am & U) == 0) // negative indexing
1803 sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
1804 else
1805 add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
1806 }
1807 }
1808
1809
1810 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { 1975 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
1811 uint32_t dummy1; 1976 uint32_t dummy1;
1812 uint32_t dummy2; 1977 uint32_t dummy2;
1813 return fits_shifter(imm32, &dummy1, &dummy2, NULL); 1978 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
1814 } 1979 }
1815 1980
1816 1981
1817 void Assembler::BlockConstPoolFor(int instructions) { 1982 void Assembler::BlockConstPoolFor(int instructions) {
1818 BlockConstPoolBefore(pc_offset() + instructions * kInstrSize); 1983 BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
1819 } 1984 }
(...skipping 235 matching lines...) Expand 10 before | Expand all | Expand 10 after
2055 bind(&after_pool); 2220 bind(&after_pool);
2056 } 2221 }
2057 2222
2058 // Since a constant pool was just emitted, move the check offset forward by 2223 // Since a constant pool was just emitted, move the check offset forward by
2059 // the standard interval. 2224 // the standard interval.
2060 next_buffer_check_ = pc_offset() + kCheckConstInterval; 2225 next_buffer_check_ = pc_offset() + kCheckConstInterval;
2061 } 2226 }
2062 2227
2063 2228
2064 } } // namespace v8::internal 2229 } } // namespace v8::internal
2230
2231 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/assembler-arm.h ('k') | src/arm/assembler-arm-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698