| OLD | NEW |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
| 6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
| 7 | 7 |
| 8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
| 9 #include "vm/longjump.h" | 9 #include "vm/longjump.h" |
| 10 #include "vm/runtime_entry.h" | 10 #include "vm/runtime_entry.h" |
| (...skipping 34 matching lines...) |
| 45 | 45 |
| 46 static bool CanEncodeBranchOffset(int32_t offset) { | 46 static bool CanEncodeBranchOffset(int32_t offset) { |
| 47 ASSERT(Utils::IsAligned(offset, 4)); | 47 ASSERT(Utils::IsAligned(offset, 4)); |
| 48 return Utils::IsInt(18, offset); | 48 return Utils::IsInt(18, offset); |
| 49 } | 49 } |
| 50 | 50 |
| 51 | 51 |
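Note: a MIPS I-type branch carries a signed 16-bit offset counted in instruction words, so the reachable range in bytes is 18 signed bits, which is exactly what Utils::IsInt(18, offset) tests above. A stand-alone sketch of the same check (Utils::IsInt is paraphrased and the values are made up):

    #include <cassert>
    #include <cstdint>

    // Paraphrase of Utils::IsInt(18, offset): the word-aligned byte offset
    // must fit in 18 signed bits (a 16-bit field once shifted right by 2).
    static bool CanEncode(int32_t offset) {
      return offset >= -(1 << 17) && offset < (1 << 17);
    }

    int main() {
      assert(CanEncode(131068));   // 0x1fffc, the farthest forward branch
      assert(!CanEncode(131072));  // one instruction out of range
      assert(CanEncode(-131072));  // the farthest backward branch
      return 0;
    }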
| 52 int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t instr) { | 52 int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t instr) { |
| 53 if (!CanEncodeBranchOffset(offset)) { | 53 if (!CanEncodeBranchOffset(offset)) { |
| 54 ASSERT(!use_far_branches()); | 54 ASSERT(!use_far_branches()); |
| 55 Thread::Current()->long_jump_base()->Jump( | 55 Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error()); |
| 56 1, Object::branch_offset_error()); | |
| 57 } | 56 } |
| 58 | 57 |
| 59 // Properly preserve only the bits supported in the instruction. | 58 // Properly preserve only the bits supported in the instruction. |
| 60 offset >>= 2; | 59 offset >>= 2; |
| 61 offset &= kBranchOffsetMask; | 60 offset &= kBranchOffsetMask; |
| 62 return (instr & ~kBranchOffsetMask) | offset; | 61 return (instr & ~kBranchOffsetMask) | offset; |
| 63 } | 62 } |
| 64 | 63 |
| 65 | 64 |
| 66 static intptr_t DecodeBranchOffset(int32_t instr) { | 65 static intptr_t DecodeBranchOffset(int32_t instr) { |
| 67 // Sign-extend, left-shift by 2. | 66 // Sign-extend, left-shift by 2. |
| 68 return (((instr & kBranchOffsetMask) << 16) >> 14); | 67 return (((instr & kBranchOffsetMask) << 16) >> 14); |
| 69 } | 68 } |
| 70 | 69 |
| 71 | 70 |
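Note: the `(x << 16) >> 14` idiom in DecodeBranchOffset relies on arithmetic right shift: the left shift parks the immediate's sign bit at bit 31, and shifting back by 14 sign-extends while leaving a net multiply-by-4 (branch offsets count instruction words). A sketch of an equivalent decode that leans on int16_t sign extension instead, using a made-up instruction word:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kBranchOffsetMask = 0x0000ffff;
      const int32_t instr = 0x1000fffe;  // hypothetical beq with imm16 == -2
      // Sign-extend the low half-word, then scale words to bytes.
      const int32_t offset =
          static_cast<int16_t>(instr & kBranchOffsetMask) * 4;
      assert(offset == -8);  // -2 instructions of 4 bytes each
      return 0;
    }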
| 72 static int32_t DecodeLoadImmediate(int32_t ori_instr, int32_t lui_instr) { | 71 static int32_t DecodeLoadImmediate(int32_t ori_instr, int32_t lui_instr) { |
| 73 return (((lui_instr & kBranchOffsetMask) << 16) | | 72 return (((lui_instr & kBranchOffsetMask) << 16) | |
| 74 (ori_instr & kBranchOffsetMask)); | 73 (ori_instr & kBranchOffsetMask)); |
| 75 } | 74 } |
| 76 | 75 |
| 77 | 76 |
| 78 static int32_t EncodeLoadImmediate(int32_t dest, int32_t instr) { | 77 static int32_t EncodeLoadImmediate(int32_t dest, int32_t instr) { |
| 79 return ((instr & ~kBranchOffsetMask) | (dest & kBranchOffsetMask)); | 78 return ((instr & ~kBranchOffsetMask) | (dest & kBranchOffsetMask)); |
| 80 } | 79 } |
| 81 | 80 |
| 82 | 81 |
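Note: DecodeLoadImmediate and EncodeLoadImmediate treat a lui/ori pair as one split 32-bit constant, lui holding the upper half-word and ori the lower (kBranchOffsetMask is reused here as a plain 16-bit mask). A round-trip sketch with hand-assembled instruction words; the constants below are the standard MIPS encodings of lui t9 and ori t9, t9, written out for illustration:

    #include <cassert>
    #include <cstdint>

    static uint32_t DecodePair(uint32_t ori_instr, uint32_t lui_instr) {
      return ((lui_instr & 0xffffu) << 16) | (ori_instr & 0xffffu);
    }

    int main() {
      const uint32_t value = 0x12345678u;
      const uint32_t lui_t9 = 0x3c190000u | (value >> 16);      // lui t9, 0x1234
      const uint32_t ori_t9 = 0x37390000u | (value & 0xffffu);  // ori t9, t9, 0x5678
      assert(DecodePair(ori_t9, lui_t9) == value);
      return 0;
    }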
| 83 class PatchFarJump : public AssemblerFixup { | 82 class PatchFarJump : public AssemblerFixup { |
| 84 public: | 83 public: |
| 85 PatchFarJump() {} | 84 PatchFarJump() {} |
| 86 | 85 |
| 87 void Process(const MemoryRegion& region, intptr_t position) { | 86 void Process(const MemoryRegion& region, intptr_t position) { |
| 88 const int32_t high = region.Load<int32_t>(position); | 87 const int32_t high = region.Load<int32_t>(position); |
| 89 const int32_t low = region.Load<int32_t>(position + Instr::kInstrSize); | 88 const int32_t low = region.Load<int32_t>(position + Instr::kInstrSize); |
| 90 const int32_t offset = DecodeLoadImmediate(low, high); | 89 const int32_t offset = DecodeLoadImmediate(low, high); |
| 91 const int32_t dest = region.start() + offset; | 90 const int32_t dest = region.start() + offset; |
| 92 | 91 |
| 93 if ((Instr::At(reinterpret_cast<uword>(&high))->OpcodeField() == LUI) && | 92 if ((Instr::At(reinterpret_cast<uword>(&high))->OpcodeField() == LUI) && |
| 94 (Instr::At(reinterpret_cast<uword>(&low))->OpcodeField() == ORI)) { | 93 (Instr::At(reinterpret_cast<uword>(&low))->OpcodeField() == ORI)) { |
| 95 // Change the offset to the absolute address. | 94 // Change the offset to the absolute address. |
| 96 const int32_t encoded_low = | 95 const int32_t encoded_low = |
| 97 EncodeLoadImmediate(dest & kBranchOffsetMask, low); | 96 EncodeLoadImmediate(dest & kBranchOffsetMask, low); |
| 98 const int32_t encoded_high = | 97 const int32_t encoded_high = EncodeLoadImmediate(dest >> 16, high); |
| 99 EncodeLoadImmediate(dest >> 16, high); | |
| 100 | 98 |
| 101 region.Store<int32_t>(position, encoded_high); | 99 region.Store<int32_t>(position, encoded_high); |
| 102 region.Store<int32_t>(position + Instr::kInstrSize, encoded_low); | 100 region.Store<int32_t>(position + Instr::kInstrSize, encoded_low); |
| 103 return; | 101 return; |
| 104 } | 102 } |
| 105 // If the offset loading instructions aren't there, we must have replaced | 103 // If the offset loading instructions aren't there, we must have replaced |
| 106 // the far branch with a near one, and so these instructions should be NOPs. | 104 // the far branch with a near one, and so these instructions should be NOPs. |
| 107 ASSERT((high == Instr::kNopInstruction) && (low == Instr::kNopInstruction)); | 105 ASSERT((high == Instr::kNopInstruction) && (low == Instr::kNopInstruction)); |
| 108 } | 106 } |
| 109 | 107 |
| (...skipping 12 matching lines...) |
| 122 if (link) { | 120 if (link) { |
| 123 EmitRType(SPECIAL, T9, R0, RA, 0, JALR); | 121 EmitRType(SPECIAL, T9, R0, RA, 0, JALR); |
| 124 } else { | 122 } else { |
| 125 EmitRType(SPECIAL, T9, R0, R0, 0, JR); | 123 EmitRType(SPECIAL, T9, R0, R0, 0, JR); |
| 126 } | 124 } |
| 127 } | 125 } |
| 128 | 126 |
| 129 | 127 |
| 130 static Opcode OppositeBranchOpcode(Opcode b) { | 128 static Opcode OppositeBranchOpcode(Opcode b) { |
| 131 switch (b) { | 129 switch (b) { |
| 132 case BEQ: return BNE; | 130 case BEQ: |
| 133 case BNE: return BEQ; | 131 return BNE; |
| 134 case BGTZ: return BLEZ; | 132 case BNE: |
| 135 case BLEZ: return BGTZ; | 133 return BEQ; |
| 136 case BEQL: return BNEL; | 134 case BGTZ: |
| 137 case BNEL: return BEQL; | 135 return BLEZ; |
| 138 case BGTZL: return BLEZL; | 136 case BLEZ: |
| 139 case BLEZL: return BGTZL; | 137 return BGTZ; |
| 138 case BEQL: |
| 139 return BNEL; |
| 140 case BNEL: |
| 141 return BEQL; |
| 142 case BGTZL: |
| 143 return BLEZL; |
| 144 case BLEZL: |
| 145 return BGTZL; |
| 140 default: | 146 default: |
| 141 UNREACHABLE(); | 147 UNREACHABLE(); |
| 142 break; | 148 break; |
| 143 } | 149 } |
| 144 return BNE; | 150 return BNE; |
| 145 } | 151 } |
| 146 | 152 |
| 147 | 153 |
| 148 void Assembler::EmitFarBranch(Opcode b, Register rs, Register rt, | 154 void Assembler::EmitFarBranch(Opcode b, |
| 155 Register rs, |
| 156 Register rt, |
| 149 int32_t offset) { | 157 int32_t offset) { |
| 150 ASSERT(!in_delay_slot_); | 158 ASSERT(!in_delay_slot_); |
| 151 EmitIType(b, rs, rt, 4); | 159 EmitIType(b, rs, rt, 4); |
| 152 nop(); | 160 nop(); |
| 153 EmitFarJump(offset, false); | 161 EmitFarJump(offset, false); |
| 154 } | 162 } |
| 155 | 163 |
| 156 | 164 |
| 157 static RtRegImm OppositeBranchNoLink(RtRegImm b) { | 165 static RtRegImm OppositeBranchNoLink(RtRegImm b) { |
| 158 switch (b) { | 166 switch (b) { |
| 159 case BLTZ: return BGEZ; | 167 case BLTZ: |
| 160 case BGEZ: return BLTZ; | 168 return BGEZ; |
| 161 case BLTZAL: return BGEZ; | 169 case BGEZ: |
| 162 case BGEZAL: return BLTZ; | 170 return BLTZ; |
| 171 case BLTZAL: |
| 172 return BGEZ; |
| 173 case BGEZAL: |
| 174 return BLTZ; |
| 163 default: | 175 default: |
| 164 UNREACHABLE(); | 176 UNREACHABLE(); |
| 165 break; | 177 break; |
| 166 } | 178 } |
| 167 return BLTZ; | 179 return BLTZ; |
| 168 } | 180 } |
| 169 | 181 |
| 170 | 182 |
| 171 void Assembler::EmitFarRegImmBranch(RtRegImm b, Register rs, int32_t offset) { | 183 void Assembler::EmitFarRegImmBranch(RtRegImm b, Register rs, int32_t offset) { |
| 172 ASSERT(!in_delay_slot_); | 184 ASSERT(!in_delay_slot_); |
| (...skipping 57 matching lines...) |
| 230 EmitFarRegImmBranch(b, rs, dest_off); | 242 EmitFarRegImmBranch(b, rs, dest_off); |
| 231 } else { | 243 } else { |
| 232 const uint16_t dest_off = EncodeBranchOffset(label->position_, 0); | 244 const uint16_t dest_off = EncodeBranchOffset(label->position_, 0); |
| 233 EmitRegImmType(REGIMM, rs, b, dest_off); | 245 EmitRegImmType(REGIMM, rs, b, dest_off); |
| 234 } | 246 } |
| 235 label->LinkTo(position); | 247 label->LinkTo(position); |
| 236 } | 248 } |
| 237 } | 249 } |
| 238 | 250 |
| 239 | 251 |
| 240 void Assembler::EmitFpuBranch(bool kind, Label *label) { | 252 void Assembler::EmitFpuBranch(bool kind, Label* label) { |
| 241 ASSERT(!in_delay_slot_); | 253 ASSERT(!in_delay_slot_); |
| 242 const int32_t b16 = kind ? (1 << 16) : 0; // Bit 16 set for branch on true. | 254 const int32_t b16 = kind ? (1 << 16) : 0; // Bit 16 set for branch on true. |
| 243 if (label->IsBound()) { | 255 if (label->IsBound()) { |
| 244 // Relative destination from an instruction after the branch. | 256 // Relative destination from an instruction after the branch. |
| 245 const int32_t dest = | 257 const int32_t dest = |
| 246 label->Position() - (buffer_.Size() + Instr::kInstrSize); | 258 label->Position() - (buffer_.Size() + Instr::kInstrSize); |
| 247 if (use_far_branches() && !CanEncodeBranchOffset(dest)) { | 259 if (use_far_branches() && !CanEncodeBranchOffset(dest)) { |
| 248 EmitFarFpuBranch(kind, label->Position()); | 260 EmitFarFpuBranch(kind, label->Position()); |
| 249 } else { | 261 } else { |
| 250 const uint16_t dest_off = EncodeBranchOffset(dest, 0); | 262 const uint16_t dest_off = EncodeBranchOffset(dest, 0); |
| 251 Emit(COP1 << kOpcodeShift | | 263 Emit(COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | b16 | dest_off); |
| 252 COP1_BC << kCop1SubShift | | |
| 253 b16 | | |
| 254 dest_off); | |
| 255 } | 264 } |
| 256 } else { | 265 } else { |
| 257 const intptr_t position = buffer_.Size(); | 266 const intptr_t position = buffer_.Size(); |
| 258 if (use_far_branches()) { | 267 if (use_far_branches()) { |
| 259 const uint32_t dest_off = label->position_; | 268 const uint32_t dest_off = label->position_; |
| 260 EmitFarFpuBranch(kind, dest_off); | 269 EmitFarFpuBranch(kind, dest_off); |
| 261 } else { | 270 } else { |
| 262 const uint16_t dest_off = EncodeBranchOffset(label->position_, 0); | 271 const uint16_t dest_off = EncodeBranchOffset(label->position_, 0); |
| 263 Emit(COP1 << kOpcodeShift | | 272 Emit(COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | b16 | dest_off); |
| 264 COP1_BC << kCop1SubShift | | |
| 265 b16 | | |
| 266 dest_off); | |
| 267 } | 273 } |
| 268 label->LinkTo(position); | 274 label->LinkTo(position); |
| 269 } | 275 } |
| 270 } | 276 } |
| 271 | 277 |
| 272 | 278 |
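Note: in the COP1 BC word emitted by EmitFpuBranch, bit 16 is the true/false selector, so bc1t and bc1f differ by that single bit and share everything else. A hand-assembly check, assuming the usual MIPS field values (COP1 == 0x11 at kOpcodeShift == 26, COP1_BC == 0x08 at kCop1SubShift == 21):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t COP1 = 0x11, COP1_BC = 0x08;
      const uint32_t kOpcodeShift = 26, kCop1SubShift = 21;
      const uint16_t dest_off = 0x0010;  // made-up encoded branch offset
      const uint32_t bc1f =
          COP1 << kOpcodeShift | COP1_BC << kCop1SubShift | dest_off;
      const uint32_t bc1t = bc1f | (1u << 16);  // b16: branch on true
      assert(bc1f == 0x45000010u);  // matches the standard bc1f encoding
      assert(bc1t == 0x45010010u);  // same word with bit 16 set
      return 0;
    }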
| 273 static int32_t FlipBranchInstruction(int32_t instr) { | 279 static int32_t FlipBranchInstruction(int32_t instr) { |
| 274 Instr* i = Instr::At(reinterpret_cast<uword>(&instr)); | 280 Instr* i = Instr::At(reinterpret_cast<uword>(&instr)); |
| 275 if (i->OpcodeField() == REGIMM) { | 281 if (i->OpcodeField() == REGIMM) { |
| 276 RtRegImm b = OppositeBranchNoLink(i->RegImmFnField()); | 282 RtRegImm b = OppositeBranchNoLink(i->RegImmFnField()); |
| (...skipping 25 matching lines...) |
| 302 // Grab instructions that load the offset. | 308 // Grab instructions that load the offset. |
| 303 const int32_t high = | 309 const int32_t high = |
| 304 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); | 310 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); |
| 305 const int32_t low = | 311 const int32_t low = |
| 306 buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize); | 312 buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize); |
| 307 | 313 |
| 308 // Change from relative to the branch to relative to the assembler buffer. | 314 // Change from relative to the branch to relative to the assembler buffer. |
| 309 dest = buffer_.Size(); | 315 dest = buffer_.Size(); |
| 310 const int32_t encoded_low = | 316 const int32_t encoded_low = |
| 311 EncodeLoadImmediate(dest & kBranchOffsetMask, low); | 317 EncodeLoadImmediate(dest & kBranchOffsetMask, low); |
| 312 const int32_t encoded_high = | 318 const int32_t encoded_high = EncodeLoadImmediate(dest >> 16, high); |
| 313 EncodeLoadImmediate(dest >> 16, high); | |
| 314 | 319 |
| 315 // If the test fails, skip the unconditional far jump by flipping the | 320 // If the test fails, skip the unconditional far jump by flipping the |
| 316 // sense of the branch instruction. | 321 // sense of the branch instruction. |
| 317 buffer_.Store<int32_t>(position, FlipBranchInstruction(branch)); | 322 buffer_.Store<int32_t>(position, FlipBranchInstruction(branch)); |
| 318 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, encoded_high); | 323 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, encoded_high); |
| 319 buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, encoded_low); | 324 buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, encoded_low); |
| 320 label->position_ = DecodeLoadImmediate(low, high); | 325 label->position_ = DecodeLoadImmediate(low, high); |
| 321 } else if (use_far_branches() && CanEncodeBranchOffset(dest)) { | 326 } else if (use_far_branches() && CanEncodeBranchOffset(dest)) { |
| 322 // We assembled a far branch, but we don't need it. Replace with a near | 327 // We assembled a far branch, but we don't need it. Replace with a near |
| 323 // branch. | 328 // branch. |
| 324 | 329 |
| 325 // Grab the link to the next branch. | 330 // Grab the link to the next branch. |
| 326 const int32_t high = | 331 const int32_t high = |
| 327 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); | 332 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize); |
| 328 const int32_t low = | 333 const int32_t low = |
| 329 buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize); | 334 buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize); |
| 330 | 335 |
| 331 // Grab the original branch instruction. | 336 // Grab the original branch instruction. |
| 332 int32_t branch = buffer_.Load<int32_t>(position); | 337 int32_t branch = buffer_.Load<int32_t>(position); |
| 333 | 338 |
| 334 // Clear out the old (far) branch. | 339 // Clear out the old (far) branch. |
| 335 for (int i = 0; i < 5; i++) { | 340 for (int i = 0; i < 5; i++) { |
| 336 buffer_.Store<int32_t>(position + i * Instr::kInstrSize, | 341 buffer_.Store<int32_t>(position + i * Instr::kInstrSize, |
| 337 Instr::kNopInstruction); | 342 Instr::kNopInstruction); |
| 338 } | 343 } |
| 339 | 344 |
| 340 // Calculate the new offset. | 345 // Calculate the new offset. |
| 341 dest = dest - 4 * Instr::kInstrSize; | 346 dest = dest - 4 * Instr::kInstrSize; |
| 342 const int32_t encoded = EncodeBranchOffset(dest, branch); | 347 const int32_t encoded = EncodeBranchOffset(dest, branch); |
| 343 buffer_.Store<int32_t>(position + 4 * Instr::kInstrSize, encoded); | 348 buffer_.Store<int32_t>(position + 4 * Instr::kInstrSize, encoded); |
| 344 label->position_ = DecodeLoadImmediate(low, high); | 349 label->position_ = DecodeLoadImmediate(low, high); |
| 345 } else { | 350 } else { |
| 346 const int32_t next = buffer_.Load<int32_t>(position); | 351 const int32_t next = buffer_.Load<int32_t>(position); |
| 347 const int32_t encoded = EncodeBranchOffset(dest, next); | 352 const int32_t encoded = EncodeBranchOffset(dest, next); |
| (...skipping 22 matching lines...) |
| 370 lui(rd, Immediate(offset_high)); | 375 lui(rd, Immediate(offset_high)); |
| 371 addu(rd, rd, pp); | 376 addu(rd, rd, pp); |
| 372 lw(rd, Address(rd, offset_low)); | 377 lw(rd, Address(rd, offset_low)); |
| 373 } else { | 378 } else { |
| 374 lw(rd, Address(pp, offset_low)); | 379 lw(rd, Address(pp, offset_low)); |
| 375 } | 380 } |
| 376 } | 381 } |
| 377 } | 382 } |
| 378 | 383 |
| 379 | 384 |
| 380 void Assembler::AdduDetectOverflow(Register rd, Register rs, Register rt, | 385 void Assembler::AdduDetectOverflow(Register rd, |
| 381 Register ro, Register scratch) { | 386 Register rs, |
| 387 Register rt, |
| 388 Register ro, |
| 389 Register scratch) { |
| 382 ASSERT(!in_delay_slot_); | 390 ASSERT(!in_delay_slot_); |
| 383 ASSERT(rd != ro); | 391 ASSERT(rd != ro); |
| 384 ASSERT(rd != TMP); | 392 ASSERT(rd != TMP); |
| 385 ASSERT(ro != TMP); | 393 ASSERT(ro != TMP); |
| 386 ASSERT(ro != rs); | 394 ASSERT(ro != rs); |
| 387 ASSERT(ro != rt); | 395 ASSERT(ro != rt); |
| 388 | 396 |
| 389 if ((rs == rt) && (rd == rs)) { | 397 if ((rs == rt) && (rd == rs)) { |
| 390 ASSERT(scratch != kNoRegister); | 398 ASSERT(scratch != kNoRegister); |
| 391 ASSERT(scratch != TMP); | 399 ASSERT(scratch != TMP); |
| 392 ASSERT(rd != scratch); | 400 ASSERT(rd != scratch); |
| 393 ASSERT(ro != scratch); | 401 ASSERT(ro != scratch); |
| 394 ASSERT(rs != scratch); | 402 ASSERT(rs != scratch); |
| 395 ASSERT(rt != scratch); | 403 ASSERT(rt != scratch); |
| 396 mov(scratch, rt); | 404 mov(scratch, rt); |
| 397 rt = scratch; | 405 rt = scratch; |
| 398 } | 406 } |
| 399 | 407 |
| 400 if (rd == rs) { | 408 if (rd == rs) { |
| 401 mov(TMP, rs); // Preserve rs. | 409 mov(TMP, rs); // Preserve rs. |
| 402 addu(rd, rs, rt); // rs is overwritten. | 410 addu(rd, rs, rt); // rs is overwritten. |
| 403 xor_(TMP, rd, TMP); // Original rs. | 411 xor_(TMP, rd, TMP); // Original rs. |
| 404 xor_(ro, rd, rt); | 412 xor_(ro, rd, rt); |
| 405 and_(ro, ro, TMP); | 413 and_(ro, ro, TMP); |
| 406 } else if (rd == rt) { | 414 } else if (rd == rt) { |
| 407 mov(TMP, rt); // Preserve rt. | 415 mov(TMP, rt); // Preserve rt. |
| 408 addu(rd, rs, rt); // rt is overwritten. | 416 addu(rd, rs, rt); // rt is overwritten. |
| 409 xor_(TMP, rd, TMP); // Original rt. | 417 xor_(TMP, rd, TMP); // Original rt. |
| 410 xor_(ro, rd, rs); | 418 xor_(ro, rd, rs); |
| 411 and_(ro, ro, TMP); | 419 and_(ro, ro, TMP); |
| 412 } else { | 420 } else { |
| 413 addu(rd, rs, rt); | 421 addu(rd, rs, rt); |
| 414 xor_(ro, rd, rs); | 422 xor_(ro, rd, rs); |
| 415 xor_(TMP, rd, rt); | 423 xor_(TMP, rd, rt); |
| 416 and_(ro, TMP, ro); | 424 and_(ro, TMP, ro); |
| 417 } | 425 } |
| 418 } | 426 } |
| 419 | 427 |
| 420 | 428 |
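Note: the xor/and sequences in AdduDetectOverflow implement the classic two's-complement rule: an addition overflows exactly when both operands have the same sign and the result's sign differs, so the sign bit of (rd ^ rs) & (rd ^ rt) is the overflow flag (callers presumably branch on the sign of ro). The predicate restated in portable C++:

    #include <cassert>
    #include <cstdint>

    // Sign bit of (sum ^ a) & (sum ^ b) is set iff a + b overflows int32_t.
    static bool AddOverflows(int32_t a, int32_t b) {
      const int32_t sum = static_cast<int32_t>(
          static_cast<uint32_t>(a) + static_cast<uint32_t>(b));  // wraps, no UB
      return ((sum ^ a) & (sum ^ b)) < 0;
    }

    int main() {
      assert(AddOverflows(INT32_MAX, 1));
      assert(AddOverflows(INT32_MIN, -1));
      assert(!AddOverflows(INT32_MAX, -1));
      assert(!AddOverflows(-2, 1));
      return 0;
    }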
| 421 void Assembler::SubuDetectOverflow(Register rd, Register rs, Register rt, | 429 void Assembler::SubuDetectOverflow(Register rd, |
| 430 Register rs, |
| 431 Register rt, |
| 422 Register ro) { | 432 Register ro) { |
| 423 ASSERT(!in_delay_slot_); | 433 ASSERT(!in_delay_slot_); |
| 424 ASSERT(rd != ro); | 434 ASSERT(rd != ro); |
| 425 ASSERT(rd != TMP); | 435 ASSERT(rd != TMP); |
| 426 ASSERT(ro != TMP); | 436 ASSERT(ro != TMP); |
| 427 ASSERT(ro != rs); | 437 ASSERT(ro != rs); |
| 428 ASSERT(ro != rt); | 438 ASSERT(ro != rt); |
| 429 ASSERT(rs != TMP); | 439 ASSERT(rs != TMP); |
| 430 ASSERT(rt != TMP); | 440 ASSERT(rt != TMP); |
| 431 | 441 |
| 432 // This happens with some crankshaft code. Since Subu works fine if | 442 // This happens with some crankshaft code. Since Subu works fine if |
| 433 // left == right, let's not make that restriction here. | 443 // left == right, let's not make that restriction here. |
| 434 if (rs == rt) { | 444 if (rs == rt) { |
| 435 mov(rd, ZR); | 445 mov(rd, ZR); |
| 436 mov(ro, ZR); | 446 mov(ro, ZR); |
| 437 return; | 447 return; |
| 438 } | 448 } |
| 439 | 449 |
| 440 if (rd == rs) { | 450 if (rd == rs) { |
| 441 mov(TMP, rs); // Preserve left. | 451 mov(TMP, rs); // Preserve left. |
| 442 subu(rd, rs, rt); // Left is overwritten. | 452 subu(rd, rs, rt); // Left is overwritten. |
| 443 xor_(ro, rd, TMP); // scratch is original left. | 453 xor_(ro, rd, TMP); // scratch is original left. |
| 444 xor_(TMP, TMP, rt); // Original right. | 454 xor_(TMP, TMP, rt); // Original right. |
| 445 and_(ro, TMP, ro); | 455 and_(ro, TMP, ro); |
| 446 } else if (rd == rt) { | 456 } else if (rd == rt) { |
| 447 mov(TMP, rt); // Preserve right. | 457 mov(TMP, rt); // Preserve right. |
| 448 subu(rd, rs, rt); // Right is overwritten. | 458 subu(rd, rs, rt); // Right is overwritten. |
| 449 xor_(ro, rd, rs); | 459 xor_(ro, rd, rs); |
| 450 xor_(TMP, rs, TMP); // Original right. | 460 xor_(TMP, rs, TMP); // Original right. |
| 451 and_(ro, TMP, ro); | 461 and_(ro, TMP, ro); |
| 452 } else { | 462 } else { |
| 453 subu(rd, rs, rt); | 463 subu(rd, rs, rt); |
| 454 xor_(ro, rd, rs); | 464 xor_(ro, rd, rs); |
| 455 xor_(TMP, rs, rt); | 465 xor_(TMP, rs, rt); |
| 456 and_(ro, TMP, ro); | 466 and_(ro, TMP, ro); |
| 457 } | 467 } |
| 458 } | 468 } |
| 459 | 469 |
| 460 | 470 |
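Note: subtraction flips the rule used above: rs - rt overflows exactly when the operands' signs differ and the result's sign differs from rs, i.e. when the sign bit of (rd ^ rs) & (rs ^ rt) is set, which is what each branch of SubuDetectOverflow computes. The same predicate in portable C++:

    #include <cassert>
    #include <cstdint>

    // Sign bit of (diff ^ a) & (a ^ b) is set iff a - b overflows int32_t.
    static bool SubOverflows(int32_t a, int32_t b) {
      const int32_t diff = static_cast<int32_t>(
          static_cast<uint32_t>(a) - static_cast<uint32_t>(b));
      return ((diff ^ a) & (a ^ b)) < 0;
    }

    int main() {
      assert(SubOverflows(INT32_MIN, 1));
      assert(SubOverflows(INT32_MAX, -1));
      assert(!SubOverflows(INT32_MIN, -1));
      assert(!SubOverflows(0, 0));
      return 0;
    }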
| 461 void Assembler::CheckCodePointer() { | 471 void Assembler::CheckCodePointer() { |
| 462 #ifdef DEBUG | 472 #ifdef DEBUG |
| 463 if (!FLAG_check_code_pointer) { | 473 if (!FLAG_check_code_pointer) { |
| 464 return; | 474 return; |
| 465 } | 475 } |
| 466 Comment("CheckCodePointer"); | 476 Comment("CheckCodePointer"); |
| 467 Label cid_ok, instructions_ok; | 477 Label cid_ok, instructions_ok; |
| 468 Push(CMPRES1); | 478 Push(CMPRES1); |
| 469 Push(CMPRES2); | 479 Push(CMPRES2); |
| 470 LoadClassId(CMPRES1, CODE_REG); | 480 LoadClassId(CMPRES1, CODE_REG); |
| 471 BranchEqual(CMPRES1, Immediate(kCodeCid), &cid_ok); | 481 BranchEqual(CMPRES1, Immediate(kCodeCid), &cid_ok); |
| 472 break_(0); | 482 break_(0); |
| 473 Bind(&cid_ok); | 483 Bind(&cid_ok); |
| 474 GetNextPC(CMPRES1, TMP); | 484 GetNextPC(CMPRES1, TMP); |
| 475 const intptr_t entry_offset = CodeSize() - Instr::kInstrSize + | 485 const intptr_t entry_offset = CodeSize() - Instr::kInstrSize + |
| 476 Instructions::HeaderSize() - kHeapObjectTag; | 486 Instructions::HeaderSize() - kHeapObjectTag; |
| 477 AddImmediate(CMPRES1, CMPRES1, -entry_offset); | 487 AddImmediate(CMPRES1, CMPRES1, -entry_offset); |
| 478 lw(CMPRES2, FieldAddress(CODE_REG, Code::saved_instructions_offset())); | 488 lw(CMPRES2, FieldAddress(CODE_REG, Code::saved_instructions_offset())); |
| 479 BranchEqual(CMPRES1, CMPRES2, &instructions_ok); | 489 BranchEqual(CMPRES1, CMPRES2, &instructions_ok); |
| 480 break_(1); | 490 break_(1); |
| 481 Bind(&instructions_ok); | 491 Bind(&instructions_ok); |
| 482 Pop(CMPRES2); | 492 Pop(CMPRES2); |
| 483 Pop(CMPRES1); | 493 Pop(CMPRES1); |
| 484 #endif | 494 #endif |
| 485 } | 495 } |
| 486 | 496 |
| (...skipping 217 matching lines...) |
| 704 } | 714 } |
| 705 Bind(&done); | 715 Bind(&done); |
| 706 } | 716 } |
| 707 | 717 |
| 708 | 718 |
| 709 void Assembler::StoreIntoObjectOffset(Register object, | 719 void Assembler::StoreIntoObjectOffset(Register object, |
| 710 int32_t offset, | 720 int32_t offset, |
| 711 Register value, | 721 Register value, |
| 712 bool can_value_be_smi) { | 722 bool can_value_be_smi) { |
| 713 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { | 723 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { |
| 714 StoreIntoObject( | 724 StoreIntoObject(object, FieldAddress(object, offset), value, |
| 715 object, FieldAddress(object, offset), value, can_value_be_smi); | 725 can_value_be_smi); |
| 716 } else { | 726 } else { |
| 717 AddImmediate(TMP, object, offset - kHeapObjectTag); | 727 AddImmediate(TMP, object, offset - kHeapObjectTag); |
| 718 StoreIntoObject(object, Address(TMP), value, can_value_be_smi); | 728 StoreIntoObject(object, Address(TMP), value, can_value_be_smi); |
| 719 } | 729 } |
| 720 } | 730 } |
| 721 | 731 |
| 722 | 732 |
| 723 void Assembler::StoreIntoObjectNoBarrier(Register object, | 733 void Assembler::StoreIntoObjectNoBarrier(Register object, |
| 724 const Address& dest, | 734 const Address& dest, |
| 725 Register value) { | 735 Register value) { |
| (...skipping 48 matching lines...) |
| 774 | 784 |
| 775 | 785 |
| 776 void Assembler::LoadIsolate(Register result) { | 786 void Assembler::LoadIsolate(Register result) { |
| 777 lw(result, Address(THR, Thread::isolate_offset())); | 787 lw(result, Address(THR, Thread::isolate_offset())); |
| 778 } | 788 } |
| 779 | 789 |
| 780 | 790 |
| 781 void Assembler::LoadClassId(Register result, Register object) { | 791 void Assembler::LoadClassId(Register result, Register object) { |
| 782 ASSERT(RawObject::kClassIdTagPos == 16); | 792 ASSERT(RawObject::kClassIdTagPos == 16); |
| 783 ASSERT(RawObject::kClassIdTagSize == 16); | 793 ASSERT(RawObject::kClassIdTagSize == 16); |
| 784 const intptr_t class_id_offset = Object::tags_offset() + | 794 const intptr_t class_id_offset = |
| 785 RawObject::kClassIdTagPos / kBitsPerByte; | 795 Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte; |
| 786 lhu(result, FieldAddress(object, class_id_offset)); | 796 lhu(result, FieldAddress(object, class_id_offset)); |
| 787 } | 797 } |
| 788 | 798 |
| 789 | 799 |
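Note: LoadClassId leans on the tag layout checked by the asserts: with the class id occupying bits 16..31 of the 32-bit tags word, the half-word at byte offset tags_offset + kClassIdTagPos / kBitsPerByte is the class id itself on a little-endian target, so a single lhu does the job. A host-side sketch of the same trick (assumes a little-endian host, as on the MIPS configurations this port targeted):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint32_t kClassIdTagPos = 16;                      // bits 16..31
      const uint32_t tags = (1234u << kClassIdTagPos) | 0x7u;  // cid | low tag bits
      uint16_t cid = 0;
      // Read the half-word at byte offset 16 / 8 == 2, as lhu would.
      std::memcpy(&cid,
                  reinterpret_cast<const uint8_t*>(&tags) + kClassIdTagPos / 8,
                  sizeof(cid));
      assert(cid == 1234);
      return 0;
    }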
| 790 void Assembler::LoadClassById(Register result, Register class_id) { | 800 void Assembler::LoadClassById(Register result, Register class_id) { |
| 791 ASSERT(!in_delay_slot_); | 801 ASSERT(!in_delay_slot_); |
| 792 ASSERT(result != class_id); | 802 ASSERT(result != class_id); |
| 793 LoadIsolate(result); | 803 LoadIsolate(result); |
| 794 const intptr_t offset = | 804 const intptr_t offset = |
| 795 Isolate::class_table_offset() + ClassTable::table_offset(); | 805 Isolate::class_table_offset() + ClassTable::table_offset(); |
| (...skipping 93 matching lines...) |
| 889 void Assembler::MaybeTraceAllocation(intptr_t cid, | 899 void Assembler::MaybeTraceAllocation(intptr_t cid, |
| 890 Register temp_reg, | 900 Register temp_reg, |
| 891 Label* trace) { | 901 Label* trace) { |
| 892 ASSERT(cid > 0); | 902 ASSERT(cid > 0); |
| 893 ASSERT(!in_delay_slot_); | 903 ASSERT(!in_delay_slot_); |
| 894 ASSERT(temp_reg != kNoRegister); | 904 ASSERT(temp_reg != kNoRegister); |
| 895 ASSERT(temp_reg != TMP); | 905 ASSERT(temp_reg != TMP); |
| 896 intptr_t state_offset = ClassTable::StateOffsetFor(cid); | 906 intptr_t state_offset = ClassTable::StateOffsetFor(cid); |
| 897 LoadIsolate(temp_reg); | 907 LoadIsolate(temp_reg); |
| 898 intptr_t table_offset = | 908 intptr_t table_offset = |
| 899 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 909 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
| 900 lw(temp_reg, Address(temp_reg, table_offset)); | 910 lw(temp_reg, Address(temp_reg, table_offset)); |
| 901 AddImmediate(temp_reg, state_offset); | 911 AddImmediate(temp_reg, state_offset); |
| 902 lw(temp_reg, Address(temp_reg, 0)); | 912 lw(temp_reg, Address(temp_reg, 0)); |
| 903 andi(CMPRES1, temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); | 913 andi(CMPRES1, temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); |
| 904 bne(CMPRES1, ZR, trace); | 914 bne(CMPRES1, ZR, trace); |
| 905 } | 915 } |
| 906 | 916 |
| 907 | 917 |
| 908 void Assembler::UpdateAllocationStats(intptr_t cid, | 918 void Assembler::UpdateAllocationStats(intptr_t cid, |
| 909 Register temp_reg, | 919 Register temp_reg, |
| (...skipping 17 matching lines...) |
| 927 | 937 |
| 928 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, | 938 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, |
| 929 Register size_reg, | 939 Register size_reg, |
| 930 Register temp_reg, | 940 Register temp_reg, |
| 931 Heap::Space space) { | 941 Heap::Space space) { |
| 932 ASSERT(!in_delay_slot_); | 942 ASSERT(!in_delay_slot_); |
| 933 ASSERT(temp_reg != kNoRegister); | 943 ASSERT(temp_reg != kNoRegister); |
| 934 ASSERT(cid > 0); | 944 ASSERT(cid > 0); |
| 935 ASSERT(temp_reg != TMP); | 945 ASSERT(temp_reg != TMP); |
| 936 const uword class_offset = ClassTable::ClassOffsetFor(cid); | 946 const uword class_offset = ClassTable::ClassOffsetFor(cid); |
| 937 const uword count_field_offset = (space == Heap::kNew) ? | 947 const uword count_field_offset = |
| 938 ClassHeapStats::allocated_since_gc_new_space_offset() : | 948 (space == Heap::kNew) |
| 939 ClassHeapStats::allocated_since_gc_old_space_offset(); | 949 ? ClassHeapStats::allocated_since_gc_new_space_offset() |
| 940 const uword size_field_offset = (space == Heap::kNew) ? | 950 : ClassHeapStats::allocated_since_gc_old_space_offset(); |
| 941 ClassHeapStats::allocated_size_since_gc_new_space_offset() : | 951 const uword size_field_offset = |
| 942 ClassHeapStats::allocated_size_since_gc_old_space_offset(); | 952 (space == Heap::kNew) |
| 953 ? ClassHeapStats::allocated_size_since_gc_new_space_offset() |
| 954 : ClassHeapStats::allocated_size_since_gc_old_space_offset(); |
| 943 LoadIsolate(temp_reg); | 955 LoadIsolate(temp_reg); |
| 944 intptr_t table_offset = | 956 intptr_t table_offset = |
| 945 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 957 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
| 946 lw(temp_reg, Address(temp_reg, table_offset)); | 958 lw(temp_reg, Address(temp_reg, table_offset)); |
| 947 AddImmediate(temp_reg, class_offset); | 959 AddImmediate(temp_reg, class_offset); |
| 948 lw(TMP, Address(temp_reg, count_field_offset)); | 960 lw(TMP, Address(temp_reg, count_field_offset)); |
| 949 AddImmediate(TMP, 1); | 961 AddImmediate(TMP, 1); |
| 950 sw(TMP, Address(temp_reg, count_field_offset)); | 962 sw(TMP, Address(temp_reg, count_field_offset)); |
| 951 lw(TMP, Address(temp_reg, size_field_offset)); | 963 lw(TMP, Address(temp_reg, size_field_offset)); |
| 952 addu(TMP, TMP, size_reg); | 964 addu(TMP, TMP, size_reg); |
| (...skipping 177 matching lines...) |
| 1130 AddImmediate(SP, -frame_space); | 1142 AddImmediate(SP, -frame_space); |
| 1131 if (OS::ActivationFrameAlignment() > 1) { | 1143 if (OS::ActivationFrameAlignment() > 1) { |
| 1132 LoadImmediate(TMP, ~(OS::ActivationFrameAlignment() - 1)); | 1144 LoadImmediate(TMP, ~(OS::ActivationFrameAlignment() - 1)); |
| 1133 and_(SP, SP, TMP); | 1145 and_(SP, SP, TMP); |
| 1134 } | 1146 } |
| 1135 } | 1147 } |
| 1136 | 1148 |
| 1137 | 1149 |
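Note: ReserveAlignedFrameSpace uses the standard power-of-two trick: after subtracting the requested space, and-ing SP with ~(alignment - 1) rounds it down to an alignment boundary, which can only grow the reservation, never shrink it. A tiny model of the arithmetic with invented numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kAlign = 8;         // stand-in for OS::ActivationFrameAlignment()
      const uint32_t sp0 = 0x7fff0014u;  // hypothetical incoming stack pointer
      uint32_t sp = sp0 - 37;            // AddImmediate(SP, -frame_space)
      sp &= ~(kAlign - 1);               // and_(SP, SP, TMP)
      assert(sp % kAlign == 0);
      assert(sp <= sp0 - 37);            // never cuts into the reserved space
      return 0;
    }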
| 1138 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { | 1150 void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) { |
| 1139 ASSERT(!in_delay_slot_); | 1151 ASSERT(!in_delay_slot_); |
| 1140 const intptr_t kPushedRegistersSize = | 1152 const intptr_t kPushedRegistersSize = kDartVolatileCpuRegCount * kWordSize + |
| 1141 kDartVolatileCpuRegCount * kWordSize + | 1153 3 * kWordSize + // PP, FP and RA. |
| 1142 3 * kWordSize + // PP, FP and RA. | 1154 kDartVolatileFpuRegCount * kWordSize; |
| 1143 kDartVolatileFpuRegCount * kWordSize; | |
| 1144 | 1155 |
| 1145 SetPrologueOffset(); | 1156 SetPrologueOffset(); |
| 1146 | 1157 |
| 1147 Comment("EnterCallRuntimeFrame"); | 1158 Comment("EnterCallRuntimeFrame"); |
| 1148 | 1159 |
| 1149 // Save volatile CPU and FPU registers on the stack: | 1160 // Save volatile CPU and FPU registers on the stack: |
| 1150 // ------------- | 1161 // ------------- |
| 1151 // FPU Registers | 1162 // FPU Registers |
| 1152 // CPU Registers | 1163 // CPU Registers |
| 1153 // RA | 1164 // RA |
| (...skipping 22 matching lines...) |
| 1176 LoadPoolPointer(); | 1187 LoadPoolPointer(); |
| 1177 | 1188 |
| 1178 mov(FP, SP); | 1189 mov(FP, SP); |
| 1179 | 1190 |
| 1180 ReserveAlignedFrameSpace(frame_space); | 1191 ReserveAlignedFrameSpace(frame_space); |
| 1181 } | 1192 } |
| 1182 | 1193 |
| 1183 | 1194 |
| 1184 void Assembler::LeaveCallRuntimeFrame() { | 1195 void Assembler::LeaveCallRuntimeFrame() { |
| 1185 ASSERT(!in_delay_slot_); | 1196 ASSERT(!in_delay_slot_); |
| 1186 const intptr_t kPushedRegistersSize = | 1197 const intptr_t kPushedRegistersSize = kDartVolatileCpuRegCount * kWordSize + |
| 1187 kDartVolatileCpuRegCount * kWordSize + | 1198 3 * kWordSize + // PP, FP and RA. |
| 1188 3 * kWordSize + // PP, FP and RA. | 1199 kDartVolatileFpuRegCount * kWordSize; |
| 1189 kDartVolatileFpuRegCount * kWordSize; | |
| 1190 | 1200 |
| 1191 Comment("LeaveCallRuntimeFrame"); | 1201 Comment("LeaveCallRuntimeFrame"); |
| 1192 | 1202 |
| 1193 // SP might have been modified to reserve space for arguments | 1203 // SP might have been modified to reserve space for arguments |
| 1194 // and ensure proper alignment of the stack frame. | 1204 // and ensure proper alignment of the stack frame. |
| 1195 // We need to restore it before restoring registers. | 1205 // We need to restore it before restoring registers. |
| 1196 mov(SP, FP); | 1206 mov(SP, FP); |
| 1197 | 1207 |
| 1198 // Restore volatile CPU and FPU registers from the stack. | 1208 // Restore volatile CPU and FPU registers from the stack. |
| 1199 lw(PP, Address(SP, 0 * kWordSize)); | 1209 lw(PP, Address(SP, 0 * kWordSize)); |
| (...skipping 14 matching lines...) Expand all Loading... |
| 1214 } | 1224 } |
| 1215 addiu(SP, SP, Immediate(kPushedRegistersSize)); | 1225 addiu(SP, SP, Immediate(kPushedRegistersSize)); |
| 1216 } | 1226 } |
| 1217 | 1227 |
| 1218 | 1228 |
| 1219 Address Assembler::ElementAddressForIntIndex(bool is_external, | 1229 Address Assembler::ElementAddressForIntIndex(bool is_external, |
| 1220 intptr_t cid, | 1230 intptr_t cid, |
| 1221 intptr_t index_scale, | 1231 intptr_t index_scale, |
| 1222 Register array, | 1232 Register array, |
| 1223 intptr_t index) const { | 1233 intptr_t index) const { |
| 1224 const int64_t offset = index * index_scale + | 1234 const int64_t offset = |
| 1235 index * index_scale + |
| 1225 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | 1236 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
| 1226 ASSERT(Utils::IsInt(32, offset)); | 1237 ASSERT(Utils::IsInt(32, offset)); |
| 1227 ASSERT(Address::CanHoldOffset(offset)); | 1238 ASSERT(Address::CanHoldOffset(offset)); |
| 1228 return Address(array, static_cast<int32_t>(offset)); | 1239 return Address(array, static_cast<int32_t>(offset)); |
| 1229 } | 1240 } |
| 1230 | 1241 |
| 1231 | 1242 |
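Note: the element address folds together the scaled index, the class-specific payload offset, and a -1 that cancels the heap-object tag (Dart boxes object pointers by setting the low bit, with kHeapObjectTag == 1); external typed data has no object header, so only the scaled index remains. The arithmetic with invented constants:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t kHeapObjectTag = 1;
      const int64_t data_offset = 12;  // hypothetical Instance::DataOffsetFor(cid)
      const int64_t index = 10, index_scale = 4;
      const int64_t internal =
          index * index_scale + (data_offset - kHeapObjectTag);
      const int64_t external = index * index_scale;  // is_external case
      assert(internal == 51 && external == 40);
      return 0;
    }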
| 1232 void Assembler::LoadElementAddressForIntIndex(Register address, | 1243 void Assembler::LoadElementAddressForIntIndex(Register address, |
| 1233 bool is_external, | 1244 bool is_external, |
| 1234 intptr_t cid, | 1245 intptr_t cid, |
| 1235 intptr_t index_scale, | 1246 intptr_t index_scale, |
| 1236 Register array, | 1247 Register array, |
| 1237 intptr_t index) { | 1248 intptr_t index) { |
| 1238 const int64_t offset = index * index_scale + | 1249 const int64_t offset = |
| 1250 index * index_scale + |
| 1239 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | 1251 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
| 1240 AddImmediate(address, array, offset); | 1252 AddImmediate(address, array, offset); |
| 1241 } | 1253 } |
| 1242 | 1254 |
| 1243 | 1255 |
| 1244 Address Assembler::ElementAddressForRegIndex(bool is_load, | 1256 Address Assembler::ElementAddressForRegIndex(bool is_load, |
| 1245 bool is_external, | 1257 bool is_external, |
| 1246 intptr_t cid, | 1258 intptr_t cid, |
| 1247 intptr_t index_scale, | 1259 intptr_t index_scale, |
| 1248 Register array, | 1260 Register array, |
| (...skipping 100 matching lines...) |
| 1349 srl(tmp, src, 8); | 1361 srl(tmp, src, 8); |
| 1350 sb(tmp, Address(addr, 1)); | 1362 sb(tmp, Address(addr, 1)); |
| 1351 srl(tmp, src, 16); | 1363 srl(tmp, src, 16); |
| 1352 sb(tmp, Address(addr, 2)); | 1364 sb(tmp, Address(addr, 2)); |
| 1353 srl(tmp, src, 24); | 1365 srl(tmp, src, 24); |
| 1354 sb(tmp, Address(addr, 3)); | 1366 sb(tmp, Address(addr, 3)); |
| 1355 } | 1367 } |
| 1356 | 1368 |
| 1357 | 1369 |
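Note: the srl/sb ladder that closes the elided function above stores a 32-bit value one byte at a time, least-significant byte first, which is how an unaligned little-endian word store is done without lwl/lwr-style help; the opening sb of the low byte presumably sits in the elided lines. A host-side equivalent:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t mem[8] = {0};
      uint8_t* addr = mem + 3;  // deliberately unaligned
      const uint32_t src = 0xa1b2c3d4u;
      addr[0] = src & 0xffu;          // assumed sb src, 0(addr) in the elision
      addr[1] = (src >> 8) & 0xffu;   // srl tmp, src, 8;  sb tmp, 1(addr)
      addr[2] = (src >> 16) & 0xffu;  // srl tmp, src, 16; sb tmp, 2(addr)
      addr[3] = (src >> 24) & 0xffu;  // srl tmp, src, 24; sb tmp, 3(addr)
      assert(addr[0] == 0xd4 && addr[3] == 0xa1);
      return 0;
    }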
| 1358 static const char* cpu_reg_names[kNumberOfCpuRegisters] = { | 1370 static const char* cpu_reg_names[kNumberOfCpuRegisters] = { |
| 1359 "zr", "tmp", "v0", "v1", "a0", "a1", "a2", "a3", | 1371 "zr", "tmp", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2", |
| 1360 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", | 1372 "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5", |
| 1361 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", | 1373 "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra", |
| 1362 "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra", | |
| 1363 }; | 1374 }; |
| 1364 | 1375 |
| 1365 | 1376 |
| 1366 const char* Assembler::RegisterName(Register reg) { | 1377 const char* Assembler::RegisterName(Register reg) { |
| 1367 ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters)); | 1378 ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters)); |
| 1368 return cpu_reg_names[reg]; | 1379 return cpu_reg_names[reg]; |
| 1369 } | 1380 } |
| 1370 | 1381 |
| 1371 | 1382 |
| 1372 static const char* fpu_reg_names[kNumberOfFpuRegisters] = { | 1383 static const char* fpu_reg_names[kNumberOfFpuRegisters] = { |
| 1373 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", | 1384 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", |
| 1374 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", | 1385 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", |
| 1375 }; | 1386 }; |
| 1376 | 1387 |
| 1377 | 1388 |
| 1378 const char* Assembler::FpuRegisterName(FpuRegister reg) { | 1389 const char* Assembler::FpuRegisterName(FpuRegister reg) { |
| 1379 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); | 1390 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); |
| 1380 return fpu_reg_names[reg]; | 1391 return fpu_reg_names[reg]; |
| 1381 } | 1392 } |
| 1382 | 1393 |
| 1383 | 1394 |
| 1384 void Assembler::Stop(const char* message) { | 1395 void Assembler::Stop(const char* message) { |
| 1385 if (FLAG_print_stop_message) { | 1396 if (FLAG_print_stop_message) { |
| 1386 UNIMPLEMENTED(); | 1397 UNIMPLEMENTED(); |
| 1387 } | 1398 } |
| 1388 Label stop; | 1399 Label stop; |
| 1389 b(&stop); | 1400 b(&stop); |
| 1390 Emit(reinterpret_cast<int32_t>(message)); | 1401 Emit(reinterpret_cast<int32_t>(message)); |
| 1391 Bind(&stop); | 1402 Bind(&stop); |
| 1392 break_(Instr::kStopMessageCode); | 1403 break_(Instr::kStopMessageCode); |
| 1393 } | 1404 } |
| 1394 | 1405 |
| 1395 } // namespace dart | 1406 } // namespace dart |
| 1396 | 1407 |
| 1397 #endif // defined TARGET_ARCH_MIPS | 1408 #endif // defined TARGET_ARCH_MIPS |