OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/longjump.h" | 9 #include "vm/longjump.h" |
10 #include "vm/runtime_entry.h" | 10 #include "vm/runtime_entry.h" |
(...skipping 439 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
450 } else { | 450 } else { |
451 subu(rd, rs, rt); | 451 subu(rd, rs, rt); |
452 xor_(ro, rd, rs); | 452 xor_(ro, rd, rs); |
453 xor_(TMP, rs, rt); | 453 xor_(TMP, rs, rt); |
454 and_(ro, TMP, ro); | 454 and_(ro, TMP, ro); |
455 } | 455 } |
456 } | 456 } |
457 | 457 |
458 | 458 |
459 void Assembler::Branch(const StubEntry& stub_entry) { | 459 void Assembler::Branch(const StubEntry& stub_entry) { |
460 const ExternalLabel label(stub_entry.EntryPoint()); | 460 ASSERT(!in_delay_slot_); |
461 Branch(&label); | 461 LoadImmediate(TMP, stub_entry.label().address()); |
| 462 jr(TMP); |
462 } | 463 } |
463 | 464 |
464 | 465 |
465 void Assembler::BranchPatchable(const StubEntry& stub_entry) { | 466 void Assembler::BranchPatchable(const StubEntry& stub_entry) { |
466 const ExternalLabel label(stub_entry.EntryPoint()); | 467 ASSERT(!in_delay_slot_); |
467 BranchPatchable(&label); | 468 const ExternalLabel& label = stub_entry.label(); |
| 469 const uint16_t low = Utils::Low16Bits(label.address()); |
| 470 const uint16_t high = Utils::High16Bits(label.address()); |
| 471 lui(T9, Immediate(high)); |
| 472 ori(T9, T9, Immediate(low)); |
| 473 jr(T9); |
| 474 delay_slot_available_ = false; // CodePatcher expects a nop. |
468 } | 475 } |
469 | 476 |
470 | 477 |
471 void Assembler::BranchLink(const StubEntry& stub_entry) { | 478 void Assembler::BranchLink(const ExternalLabel* label) { |
472 const ExternalLabel label(stub_entry.EntryPoint()); | 479 ASSERT(!in_delay_slot_); |
473 BranchLink(&label); | 480 LoadImmediate(T9, label->address()); |
| 481 jalr(T9); |
| 482 } |
| 483 |
| 484 |
| 485 void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) { |
| 486 ASSERT(!in_delay_slot_); |
| 487 const int32_t offset = ObjectPool::element_offset( |
| 488 object_pool_wrapper_.FindExternalLabel(label, patchable)); |
| 489 LoadWordFromPoolOffset(T9, offset - kHeapObjectTag); |
| 490 jalr(T9); |
| 491 if (patchable == kPatchable) { |
| 492 delay_slot_available_ = false; // CodePatcher expects a nop. |
| 493 } |
474 } | 494 } |
475 | 495 |
476 | 496 |
477 void Assembler::BranchLink(const StubEntry& stub_entry, | 497 void Assembler::BranchLink(const StubEntry& stub_entry, |
478 Patchability patchable) { | 498 Patchability patchable) { |
479 const ExternalLabel label(stub_entry.EntryPoint()); | 499 BranchLink(&stub_entry.label(), patchable); |
480 BranchLink(&label, patchable); | |
481 } | 500 } |
482 | 501 |
483 | 502 |
484 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { | 503 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { |
485 const ExternalLabel label(stub_entry.EntryPoint()); | 504 BranchLink(&stub_entry.label(), kPatchable); |
486 BranchLink(&label, kPatchable); | |
487 } | 505 } |
488 | 506 |
489 | 507 |
490 void Assembler::LoadObjectHelper(Register rd, | 508 void Assembler::LoadObjectHelper(Register rd, |
491 const Object& object, | 509 const Object& object, |
492 bool is_unique) { | 510 bool is_unique) { |
493 // Load common VM constants from the thread. This works also in places where | 511 // Load common VM constants from the thread. This works also in places where |
494 // no constant pool is set up (e.g. intrinsic code). | 512 // no constant pool is set up (e.g. intrinsic code). |
495 if (Thread::CanLoadFromThread(object)) { | 513 if (Thread::CanLoadFromThread(object)) { |
496 lw(rd, Address(THR, Thread::OffsetFromThread(object))); | 514 lw(rd, Address(THR, Thread::OffsetFromThread(object))); |
(...skipping 649 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1146 LoadImmediate(TMP, ~(OS::ActivationFrameAlignment() - 1)); | 1164 LoadImmediate(TMP, ~(OS::ActivationFrameAlignment() - 1)); |
1147 and_(SP, SP, TMP); | 1165 and_(SP, SP, TMP); |
1148 } | 1166 } |
1149 } | 1167 } |
1150 | 1168 |
1151 | 1169 |
1152 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { | 1170 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { |
1153 ASSERT(!in_delay_slot_); | 1171 ASSERT(!in_delay_slot_); |
1154 const intptr_t kPushedRegistersSize = | 1172 const intptr_t kPushedRegistersSize = |
1155 kDartVolatileCpuRegCount * kWordSize + | 1173 kDartVolatileCpuRegCount * kWordSize + |
1156 2 * kWordSize + // FP and RA. | 1174 3 * kWordSize + // PP, FP and RA. |
1157 kDartVolatileFpuRegCount * kWordSize; | 1175 kDartVolatileFpuRegCount * kWordSize; |
1158 | 1176 |
1159 SetPrologueOffset(); | 1177 SetPrologueOffset(); |
1160 | 1178 |
1161 Comment("EnterCallRuntimeFrame"); | 1179 Comment("EnterCallRuntimeFrame"); |
1162 | 1180 |
1163 // Save volatile CPU and FPU registers on the stack: | 1181 // Save volatile CPU and FPU registers on the stack: |
1164 // ------------- | 1182 // ------------- |
1165 // FPU Registers | 1183 // FPU Registers |
1166 // CPU Registers | 1184 // CPU Registers |
1167 // RA | 1185 // RA |
1168 // FP | 1186 // FP, PP |
1169 // ------------- | 1187 // ------------- |
1170 // TODO(zra): It may be a problem for walking the stack that FP is below | 1188 // TODO(zra): It may be a problem for walking the stack that FP is below |
1171 // the saved registers. If it turns out to be a problem in the | 1189 // the saved registers. If it turns out to be a problem in the |
1172 // future, try pushing RA and FP before the volatile registers. | 1190 // future, try pushing RA and FP before the volatile registers. |
1173 addiu(SP, SP, Immediate(-kPushedRegistersSize)); | 1191 addiu(SP, SP, Immediate(-kPushedRegistersSize)); |
1174 for (int i = kDartFirstVolatileFpuReg; i <= kDartLastVolatileFpuReg; i++) { | 1192 for (int i = kDartFirstVolatileFpuReg; i <= kDartLastVolatileFpuReg; i++) { |
1175 // These go above the volatile CPU registers. | 1193 // These go above the volatile CPU registers. |
1176 const int slot = | 1194 const int slot = |
1177 (i - kDartFirstVolatileFpuReg) + kDartVolatileCpuRegCount + 2; | 1195 (i - kDartFirstVolatileFpuReg) + kDartVolatileCpuRegCount + 3; |
1178 FRegister reg = static_cast<FRegister>(i); | 1196 FRegister reg = static_cast<FRegister>(i); |
1179 swc1(reg, Address(SP, slot * kWordSize)); | 1197 swc1(reg, Address(SP, slot * kWordSize)); |
1180 } | 1198 } |
1181 for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) { | 1199 for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) { |
1182 // + 2 because FP goes in slot 0. | 1200 // + 2 because FP goes in slot 0. |
1183 const int slot = (i - kDartFirstVolatileCpuReg) + 2; | 1201 const int slot = (i - kDartFirstVolatileCpuReg) + 3; |
1184 Register reg = static_cast<Register>(i); | 1202 Register reg = static_cast<Register>(i); |
1185 sw(reg, Address(SP, slot * kWordSize)); | 1203 sw(reg, Address(SP, slot * kWordSize)); |
1186 } | 1204 } |
1187 sw(RA, Address(SP, 1 * kWordSize)); | 1205 sw(RA, Address(SP, 2 * kWordSize)); |
1188 sw(FP, Address(SP, 0 * kWordSize)); | 1206 sw(FP, Address(SP, 1 * kWordSize)); |
| 1207 sw(PP, Address(SP, 0 * kWordSize)); |
| 1208 LoadPoolPointer(); |
| 1209 |
1189 mov(FP, SP); | 1210 mov(FP, SP); |
1190 | 1211 |
1191 ReserveAlignedFrameSpace(frame_space); | 1212 ReserveAlignedFrameSpace(frame_space); |
1192 } | 1213 } |
1193 | 1214 |
1194 | 1215 |
1195 void Assembler::LeaveCallRuntimeFrame() { | 1216 void Assembler::LeaveCallRuntimeFrame() { |
1196 ASSERT(!in_delay_slot_); | 1217 ASSERT(!in_delay_slot_); |
1197 const intptr_t kPushedRegistersSize = | 1218 const intptr_t kPushedRegistersSize = |
1198 kDartVolatileCpuRegCount * kWordSize + | 1219 kDartVolatileCpuRegCount * kWordSize + |
1199 2 * kWordSize + // FP and RA. | 1220 3 * kWordSize + // PP, FP and RA. |
1200 kDartVolatileFpuRegCount * kWordSize; | 1221 kDartVolatileFpuRegCount * kWordSize; |
1201 | 1222 |
1202 Comment("LeaveCallRuntimeFrame"); | 1223 Comment("LeaveCallRuntimeFrame"); |
1203 | 1224 |
1204 // SP might have been modified to reserve space for arguments | 1225 // SP might have been modified to reserve space for arguments |
1205 // and ensure proper alignment of the stack frame. | 1226 // and ensure proper alignment of the stack frame. |
1206 // We need to restore it before restoring registers. | 1227 // We need to restore it before restoring registers. |
1207 mov(SP, FP); | 1228 mov(SP, FP); |
1208 | 1229 |
1209 // Restore volatile CPU and FPU registers from the stack. | 1230 // Restore volatile CPU and FPU registers from the stack. |
1210 lw(FP, Address(SP, 0 * kWordSize)); | 1231 lw(PP, Address(SP, 0 * kWordSize)); |
1211 lw(RA, Address(SP, 1 * kWordSize)); | 1232 lw(FP, Address(SP, 1 * kWordSize)); |
| 1233 lw(RA, Address(SP, 2 * kWordSize)); |
1212 for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) { | 1234 for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) { |
1213 // + 2 because FP goes in slot 0. | 1235 // + 3 because PP, FP and RA go in slots 0, 1 and 2. |
1214 const int slot = (i - kDartFirstVolatileCpuReg) + 2; | 1236 const int slot = (i - kDartFirstVolatileCpuReg) + 3; |
1215 Register reg = static_cast<Register>(i); | 1237 Register reg = static_cast<Register>(i); |
1216 lw(reg, Address(SP, slot * kWordSize)); | 1238 lw(reg, Address(SP, slot * kWordSize)); |
1217 } | 1239 } |
1218 for (int i = kDartFirstVolatileFpuReg; i <= kDartLastVolatileFpuReg; i++) { | 1240 for (int i = kDartFirstVolatileFpuReg; i <= kDartLastVolatileFpuReg; i++) { |
1219 // These go above the volatile CPU registers. | 1241 // These go above the volatile CPU registers. |
1220 const int slot = | 1242 const int slot = |
1221 (i - kDartFirstVolatileFpuReg) + kDartVolatileCpuRegCount + 2; | 1243 (i - kDartFirstVolatileFpuReg) + kDartVolatileCpuRegCount + 3; |
1222 FRegister reg = static_cast<FRegister>(i); | 1244 FRegister reg = static_cast<FRegister>(i); |
1223 lwc1(reg, Address(SP, slot * kWordSize)); | 1245 lwc1(reg, Address(SP, slot * kWordSize)); |
1224 } | 1246 } |
1225 addiu(SP, SP, Immediate(kPushedRegistersSize)); | 1247 addiu(SP, SP, Immediate(kPushedRegistersSize)); |
1226 } | 1248 } |
1227 | 1249 |
1228 | 1250 |
1229 Address Assembler::ElementAddressForIntIndex(bool is_external, | 1251 Address Assembler::ElementAddressForIntIndex(bool is_external, |
1230 intptr_t cid, | 1252 intptr_t cid, |
1231 intptr_t index_scale, | 1253 intptr_t index_scale, |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1300 Label stop; | 1322 Label stop; |
1301 b(&stop); | 1323 b(&stop); |
1302 Emit(reinterpret_cast<int32_t>(message)); | 1324 Emit(reinterpret_cast<int32_t>(message)); |
1303 Bind(&stop); | 1325 Bind(&stop); |
1304 break_(Instr::kStopMessageCode); | 1326 break_(Instr::kStopMessageCode); |
1305 } | 1327 } |
1306 | 1328 |
1307 } // namespace dart | 1329 } // namespace dart |
1308 | 1330 |
1309 #endif // defined TARGET_ARCH_MIPS | 1331 #endif // defined TARGET_ARCH_MIPS |
OLD | NEW |