Chromium Code Reviews

Unified Diff: src/mips/assembler-mips.cc

Issue 7043003: Version 3.3.8 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 9 years, 7 months ago
 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 222 matching lines...)
 const Instr kLwSwOffsetMask = kImm16Mask;


 // Spare buffer.
 static const int kMinimalBufferSize = 4 * KB;


 Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
     : AssemblerBase(arg_isolate),
       positions_recorder_(this),
-      allow_peephole_optimization_(false),
       emit_debug_code_(FLAG_debug_code) {
-  allow_peephole_optimization_ = FLAG_peephole_optimization;
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;

       if (isolate()->assembler_spare_buffer() != NULL) {
         buffer = isolate()->assembler_spare_buffer();
         isolate()->set_assembler_spare_buffer(NULL);
       }
     }
(...skipping 16 matching lines...)
   // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);

   last_trampoline_pool_end_ = 0;
   no_trampoline_pool_before_ = 0;
   trampoline_pool_blocked_nesting_ = 0;
   next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
   internal_trampoline_exception_ = false;
+  last_bound_pos_ = 0;

   ast_id_for_reloc_info_ = kNoASTId;
 }


 Assembler::~Assembler() {
   if (own_buffer_) {
     if (isolate()->assembler_spare_buffer() == NULL &&
         buffer_size_ == kMinimalBufferSize) {
       isolate()->set_assembler_spare_buffer(buffer_);
(...skipping 790 matching lines...)
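
The hunks above drop the allow_peephole_optimization_ field (and its FLAG_peephole_optimization initialization), add last_bound_pos_ = 0, and keep the existing spare-buffer reuse: a minimal-size code buffer is cached on the isolate and handed to the next Assembler instead of being reallocated. As a rough standalone sketch of that caching pattern only (the class and method names below are invented for the example, not the V8 Isolate API):

#include <cstddef>
#include <cstdlib>

// One-slot spare-buffer cache (sketch). V8 keeps the equivalent slot on the
// Isolate; everything here is illustrative.
class SpareBufferCache {
 public:
  static const std::size_t kSpareSize = 4 * 1024;  // Mirrors kMinimalBufferSize.

  // Hand out the cached buffer for minimal-size requests, else allocate.
  void* Acquire(std::size_t size) {
    if (size <= kSpareSize) {
      size = kSpareSize;
      if (spare_ != nullptr) {
        void* buffer = spare_;
        spare_ = nullptr;
        return buffer;
      }
    }
    return std::malloc(size);
  }

  // Keep at most one minimal-size buffer around for reuse; free the rest.
  void Release(void* buffer, std::size_t size) {
    if (spare_ == nullptr && size == kSpareSize) {
      spare_ = buffer;
    } else {
      std::free(buffer);
    }
  }

 private:
  void* spare_ = nullptr;
};

In the real code the single slot is Isolate::assembler_spare_buffer(), written back in ~Assembler() only when the buffer is exactly kMinimalBufferSize.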

 // Arithmetic.

 void Assembler::addu(Register rd, Register rs, Register rt) {
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
 }


 void Assembler::addiu(Register rd, Register rs, int32_t j) {
   GenInstrImmediate(ADDIU, rs, rd, j);
-
-  // Eliminate pattern: push(r), pop().
-  // addiu(sp, sp, Operand(-kPointerSize));
-  // sw(src, MemOperand(sp, 0);
-  // addiu(sp, sp, Operand(kPointerSize));
-  // Both instructions can be eliminated.
-  if (can_peephole_optimize(3) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
-      (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
-    pc_ -= 3 * kInstrSize;
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
-    }
-  }
-
-  // Eliminate pattern: push(ry), pop(rx).
-  // addiu(sp, sp, -kPointerSize)
-  // sw(ry, MemOperand(sp, 0)
-  // lw(rx, MemOperand(sp, 0)
-  // addiu(sp, sp, kPointerSize);
-  // Both instructions can be eliminated if ry = rx.
-  // If ry != rx, a register copy from ry to rx is inserted
-  // after eliminating the push and the pop instructions.
-  if (can_peephole_optimize(4)) {
-    Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
-    Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(push_instr) &&
-        IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
-        post_pop_sp_set == kPopInstruction) {
-      if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
-        // For consecutive push and pop on different registers,
-        // we delete both the push & pop and insert a register move.
-        // push ry, pop rx --> mov rx, ry.
-        Register reg_pushed, reg_popped;
-        reg_pushed = GetRtReg(push_instr);
-        reg_popped = GetRtReg(pop_instr);
-        pc_ -= 4 * kInstrSize;
-        // Insert a mov instruction, which is better than a pair of push & pop.
-        or_(reg_popped, reg_pushed, zero_reg);
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
-                 pc_offset());
-        }
-      } else {
-        // For consecutive push and pop on the same register,
-        // both the push and the pop can be deleted.
-        pc_ -= 4 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
-        }
-      }
-    }
-  }
-
-  if (can_peephole_optimize(5)) {
-    Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
-    Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
-    Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(mem_write_instr) &&
-        pre_push_sp_set == kPushInstruction &&
-        IsPop(mem_read_instr) &&
-        post_pop_sp_set == kPopInstruction) {
-      if ((IsLwRegFpOffset(lw_instr) ||
-           IsLwRegFpNegOffset(lw_instr))) {
-        if ((mem_write_instr & kRtMask) ==
-            (mem_read_instr & kRtMask)) {
-          // Pattern: push & pop from/to same register,
-          // with a fp + offset lw in between.
-          //
-          // The following:
-          // addiu sp, sp, -4
-          // sw rx, [sp, #0]!
-          // lw rz, [fp, #-24]
-          // lw rx, [sp, 0],
-          // addiu sp, sp, 4
-          //
-          // Becomes:
-          // if(rx == rz)
-          // delete all
-          // else
-          // lw rz, [fp, #-24]
-
-          if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
-            pc_ -= 5 * kInstrSize;
-          } else {
-            pc_ -= 5 * kInstrSize;
-            // Reinsert back the lw rz.
-            emit(lw_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop -dead ldr fp + offset in middle\n",
-                   pc_offset());
-          }
-        } else {
-          // Pattern: push & pop from/to different registers
-          // with a fp + offset lw in between.
-          //
-          // The following:
-          // addiu sp, sp ,-4
-          // sw rx, [sp, 0]
-          // lw rz, [fp, #-24]
-          // lw ry, [sp, 0]
-          // addiu sp, sp, 4
-          //
-          // Becomes:
-          // if(ry == rz)
-          // mov ry, rx;
-          // else if(rx != rz)
-          // lw rz, [fp, #-24]
-          // mov ry, rx
-          // else if((ry != rz) || (rx == rz)) becomes:
-          // mov ry, rx
-          // lw rz, [fp, #-24]
-
-          Register reg_pushed, reg_popped;
-          if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
-            reg_pushed = GetRtReg(mem_write_instr);
-            reg_popped = GetRtReg(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-          } else if ((mem_write_instr & kRtMask)
-                     != (lw_instr & kRtMask)) {
-            reg_pushed = GetRtReg(mem_write_instr);
-            reg_popped = GetRtReg(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            emit(lw_instr);
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-          } else if (((mem_read_instr & kRtMask)
-                      != (lw_instr & kRtMask)) ||
-                     ((mem_write_instr & kRtMask)
-                      == (lw_instr & kRtMask)) ) {
-            reg_pushed = GetRtReg(mem_write_instr);
-            reg_popped = GetRtReg(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-            emit(lw_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
-          }
-        }
-      }
-    }
-  }
 }


 void Assembler::subu(Register rd, Register rs, Register rt) {
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
 }


 void Assembler::mul(Register rd, Register rs, Register rt) {
   GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
(...skipping 167 matching lines...)
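
The hunk above removes the peephole pass that addiu() ran after each emit: the removed patterns rewrite push(r)/pop(), push(ry)/pop(rx), and a push/lw/pop triple by rewinding pc_ and, where needed, emitting an or-with-zero register move instead. A minimal standalone sketch of the simplest of these matches follows; the encodings below are assumptions for illustration (the removed code relies on the assembler's own kPushInstruction, kPopInstruction, kPushRegPattern, and kRtMask constants).

#include <cstdint>
#include <cstddef>
#include <vector>

typedef uint32_t Instr;

// Encodings assumed for this sketch only.
const Instr kPushInstruction = 0x27BDFFFC;  // addiu sp, sp, -kPointerSize
const Instr kPopInstruction  = 0x27BD0004;  // addiu sp, sp, kPointerSize
const Instr kPushRegPattern  = 0xAFA00000;  // sw rt, 0(sp), rt field cleared
const Instr kRtMask          = 0x001F0000;  // rt field, bits 16..20

// If the last three emitted words are "push reg; pop", the stored value is
// immediately discarded, so all three instructions can be dropped.
void EliminateDeadPushPop(std::vector<Instr>* buffer) {
  std::size_t n = buffer->size();
  if (n < 3) return;
  Instr sp_down = (*buffer)[n - 3];
  Instr store   = (*buffer)[n - 2];
  Instr sp_up   = (*buffer)[n - 1];
  if (sp_down == kPushInstruction &&
      (store & ~kRtMask) == kPushRegPattern &&
      sp_up == kPopInstruction) {
    buffer->resize(n - 3);  // Rewind, like "pc_ -= 3 * kInstrSize" above.
  }
}

The removed V8 version additionally handled push(ry)/pop(rx) on different registers by replacing the pair with a single or-with-zero move, and a five-instruction variant with an fp-relative lw in the middle.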
 }


 void Assembler::lw(Register rd, const MemOperand& rs) {
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
     LoadRegPlusOffsetToAt(rs);
     GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
   }
-
-  if (can_peephole_optimize(2)) {
-    Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);
-
-    if ((IsSwRegFpOffset(sw_instr) &&
-         IsLwRegFpOffset(lw_instr)) ||
-        (IsSwRegFpNegOffset(sw_instr) &&
-         IsLwRegFpNegOffset(lw_instr))) {
-      if ((lw_instr & kLwSwInstrArgumentMask) ==
-          (sw_instr & kLwSwInstrArgumentMask)) {
-        // Pattern: Lw/sw same fp+offset, same register.
-        //
-        // The following:
-        // sw rx, [fp, #-12]
-        // lw rx, [fp, #-12]
-        //
-        // Becomes:
-        // sw rx, [fp, #-12]
-
-        pc_ -= 1 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
-        }
-      } else if ((lw_instr & kLwSwOffsetMask) ==
-                 (sw_instr & kLwSwOffsetMask)) {
-        // Pattern: Lw/sw same fp+offset, different register.
-        //
-        // The following:
-        // sw rx, [fp, #-12]
-        // lw ry, [fp, #-12]
-        //
-        // Becomes:
-        // sw rx, [fp, #-12]
-        // mov ry, rx
-
-        Register reg_stored, reg_loaded;
-        reg_stored = GetRtReg(sw_instr);
-        reg_loaded = GetRtReg(lw_instr);
-        pc_ -= 1 * kInstrSize;
-        // Insert a mov instruction, which is better than lw.
-        or_(reg_loaded, reg_stored, zero_reg);  // Move instruction.
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset());
-        }
-      }
-    }
-  }
 }


 void Assembler::lwl(Register rd, const MemOperand& rs) {
   GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
 }


 void Assembler::lwr(Register rd, const MemOperand& rs) {
   GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
(...skipping 20 matching lines...)
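
The removed lw() peephole is a small store-to-load forwarding: a load from the same fp-relative slot that was just stored is dropped when the registers match, or turned into a register move when they differ. A rough sketch of the idea, again with assumed encodings rather than the real IsSwRegFpOffset / kLwSwInstrArgumentMask checks:

#include <cstdint>
#include <cstddef>
#include <vector>

typedef uint32_t Instr;

// Encodings assumed for this sketch only.
const Instr kBasePlusOpMask = 0xFFE00000;  // opcode + base register (rs) fields
const Instr kSwFpPattern    = 0xAFC00000;  // sw rt, imm(fp)
const Instr kLwFpPattern    = 0x8FC00000;  // lw rt, imm(fp)
const Instr kRtMask         = 0x001F0000;
const Instr kImm16Mask      = 0x0000FFFF;

// "or rd, rs, zero", i.e. a register move.
static Instr MoveInstr(int rd, int rs) {
  return (Instr(rs) << 21) | (Instr(rd) << 11) | 0x25;  // SPECIAL / OR
}

// sw rx, off(fp) followed by lw ry, off(fp): drop the load (same register)
// or rewrite it as a move (different register).
void ForwardStoreToLoad(std::vector<Instr>* buf) {
  std::size_t n = buf->size();
  if (n < 2) return;
  Instr sw_instr = (*buf)[n - 2];
  Instr lw_instr = (*buf)[n - 1];
  bool same_slot =
      (sw_instr & kBasePlusOpMask) == kSwFpPattern &&
      (lw_instr & kBasePlusOpMask) == kLwFpPattern &&
      (sw_instr & kImm16Mask) == (lw_instr & kImm16Mask);
  if (!same_slot) return;
  int stored = (sw_instr & kRtMask) >> 16;
  int loaded = (lw_instr & kRtMask) >> 16;
  if (stored == loaded) {
    buf->pop_back();                          // Reload of the same register.
  } else {
    buf->back() = MoveInstr(loaded, stored);  // Replace lw with "move ry, rx".
  }
}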
 }


 void Assembler::sw(Register rd, const MemOperand& rs) {
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to store.
     LoadRegPlusOffsetToAt(rs);
     GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
   }
-
-  // Eliminate pattern: pop(), push(r).
-  // addiu sp, sp, Operand(kPointerSize);
-  // addiu sp, sp, Operand(-kPointerSize);
-  // -> sw r, MemOpernad(sp, 0);
-  if (can_peephole_optimize(3) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) ==
-          (kPushRegPattern | (rd.code() << kRtShift)) &&
-      instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
-      instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
-    pc_ -= 3 * kInstrSize;
-    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
-    }
-  }
 }


 void Assembler::swl(Register rd, const MemOperand& rs) {
   GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
 }


 void Assembler::swr(Register rd, const MemOperand& rs) {
   GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
(...skipping 623 matching lines...)
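
The sw() hunk removes the mirror-image rewrite: a pop() immediately followed by push(reg) collapses into a single store, since the two stack-pointer adjustments cancel. A compact sketch under the same assumed encodings:

#include <cstdint>
#include <cstddef>
#include <vector>

typedef uint32_t Instr;

const Instr kPopInstruction  = 0x27BD0004;  // addiu sp, sp, 4   (assumed)
const Instr kPushInstruction = 0x27BDFFFC;  // addiu sp, sp, -4  (assumed)
const Instr kPushRegPattern  = 0xAFA00000;  // sw rt, 0(sp), rt cleared (assumed)
const Instr kRtMask          = 0x001F0000;

// "addiu sp,sp,4; addiu sp,sp,-4; sw r,0(sp)" nets out to "sw r,0(sp)".
void MergePopPush(std::vector<Instr>* buf) {
  std::size_t n = buf->size();
  if (n < 3) return;
  Instr pop   = (*buf)[n - 3];
  Instr push  = (*buf)[n - 2];
  Instr store = (*buf)[n - 1];
  if (pop == kPopInstruction && push == kPushInstruction &&
      (store & ~kRtMask) == kPushRegPattern) {
    (*buf)[n - 3] = store;  // Keep only the store into the existing slot.
    buf->resize(n - 2);
  }
}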
   *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
   *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

   CPU::FlushICache(pc, 2 * sizeof(int32_t));
 }


 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_MIPS
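
The final hunk is unchanged context: it patches a lui/ori pair so a register picks up a new 32-bit target and then flushes the icache for the two rewritten words. A simplified sketch of how the pair encodes the target (opcode values assumed for illustration; the icache flush is omitted):

#include <cstdint>

typedef uint32_t Instr;

const Instr LUI = 0x0FU << 26;           // opcode 001111 (assumed value)
const Instr ORI = 0x0DU << 26;           // opcode 001101 (assumed value)
const uint32_t kHiMask    = 0xFFFF0000;
const uint32_t kImm16Mask = 0x0000FFFF;
const int kLuiShift = 16;

// Rewrite the two-word sequence at p so that register `rt` ends up holding
// `target`: lui rt, hi16(target) followed by ori rt, rt, lo16(target).
void PatchLuiOriPair(Instr* p, int rt, uint32_t target) {
  Instr rt_code = Instr(rt) << 16;                  // rt field, bits 16..20
  p[0] = LUI | rt_code | ((target & kHiMask) >> kLuiShift);
  p[1] = ORI | rt_code | (rt_code << 5) | (target & kImm16Mask);
  // The real code follows this with CPU::FlushICache(pc, 2 * sizeof(int32_t)).
}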