OLD | NEW |
(Empty) | |
| 1 // Copyright 2011-2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 #include "v8.h" |
| 29 |
| 30 #if defined(V8_TARGET_ARCH_SH4) |
| 31 |
| 32 #include "sh4/assembler-sh4-inl.h" |
| 33 #include "disassembler.h" |
| 34 #include "macro-assembler.h" |
| 35 #include "serialize.h" |
| 36 |
| 37 #include "checks-sh4.h" |
| 38 |
| 39 namespace v8 { |
| 40 namespace internal { |
| 41 |
// Static CpuFeatures state, filled in once by CpuFeatures::Probe().
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;  // Guards use before Probe() in debug.
#endif
// Bit set of available features (bit index = feature enum value).
unsigned CpuFeatures::supported_ = 0;
// Subset of supported_ discovered by runtime probing rather than implied
// by the build configuration.
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
| 47 |
| 48 |
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // CAN_USE_FPU_INSTRUCTIONS

#ifdef __sh__
  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if(defined(__SH_FPU_ANY__) && __SH_FPU_ANY__ != 0)
  answer |= 1u << FPU;
#endif  // defined(__SH_FPU_ANY__) && __SH_FPU_ANY__ != 0
#endif  // def __sh__

  return answer;
}
| 70 |
| 71 |
// Determine the set of usable CPU features. Features implied by the platform
// and compiler are always enabled; runtime-probed features are only added
// when not serializing (a snapshot must stay portable).
void CpuFeatures::Probe() {
  unsigned standard_features = static_cast<unsigned>(
      OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
  // Probe() may run more than once, but the baseline must not change.
  ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
    return;
  }

#ifndef __sh__
  // For the simulator=sh4 build, use FPU when FLAG_enable_fpu is enabled.
  if (FLAG_enable_fpu) {
    supported_ |= 1u << FPU;
  }
#else  // def __sh__
  // Probe for additional features not already known to be available.
  if (!IsSupported(FPU) && OS::SHCpuHasFeature(FPU)) {
    // This implementation also sets the FPU flags if runtime
    // detection of FPU returns true.
    supported_ |= 1u << FPU;
    found_by_runtime_probing_ |= 1u << FPU;
  }
#endif
}
| 105 |
| 106 |
| 107 // ----------------------------------------------------------------------------- |
| 108 // Implementation of Operand and MemOperand |
| 109 // See assembler-sh4-inl.h for inlined constructors |
| 110 |
// Build an Operand from a handle: heap objects are recorded as relocatable
// embedded objects (via the handle location), smis are encoded directly.
Operand::Operand(Handle<Object> handle) {
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!HEAP->InNewSpace(obj));
  if (obj->IsHeapObject()) {
    // Store the handle cell address; the GC can update it, so mark the
    // operand as an embedded object for relocation.
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}
| 124 |
| 125 |
// Emit an inline byte-copy sequence ("count" bytes from src to dst).
// The sequence clobbers r0 (saved in scratch4 and restored at the end),
// copies two bytes per loop iteration and handles an odd trailing byte.
// NOTE(review): relies on the exact SH4 addressing modes of the movb_*
// helpers (r0-relative load, pre-decrement store) and on branch delay
// slots (bfs_/bts_) — verify offsets against assembler-sh4-inl.h before
// touching any instruction here.
void Assembler::memcpy(Register dst, Register src, Register count,
                       Register scratch1, Register scratch2,
                       Register scratch3, Register scratch4) {
  align();
  mov_(r0, scratch4);   // Save r0; it is used as the store pointer below.

  mov_(dst, r0);
  add_(count, r0);      // r0 = dst + count (one past the destination end).

  sub_(dst, src);       // src -= dst: delta used for r0-relative loads.
  add_imm_(-1, src);

  shlr_(count);         // count >>= 1; T = shifted-out bit (odd byte count).
  mov_(src, scratch3);

  movb_dispR0Rs_(src, scratch1);
  bfs_(8);  // odd

  add_imm_(-1, scratch3);
  tst_(count, count);   // T set when there are no byte pairs to copy.

  bts_(12);  // end
  movb_decRd_(scratch1, r0);

  // even:
  movb_dispR0Rs_(src, scratch1);
  // odd:
  movb_dispR0Rs_(scratch3, scratch2);
  dt_(count);           // --count; T set when it reaches zero.

  movb_decRd_(scratch1, r0);
  bfs_(-12);  // even
  movb_decRd_(scratch2, r0);

  mov_(scratch4, r0);   // Restore r0.
}
| 162 |
// Emit an inline byte-compare loop over "length" bytes at left/right.
// Branches to not_equal on the first mismatch. left and right are
// post-incremented and length is decremented, so all three registers are
// clobbered. Assumes length > 0 (the loop body runs at least once).
void Assembler::memcmp(Register left, Register right, Register length,
                       Register scratch1, Register scratch2, Label *not_equal) {
  Label loop;
  bind(&loop);
  movb_incRs_(left, scratch1);    // scratch1 = *left++
  movb_incRs_(right, scratch2);   // scratch2 = *right++
  cmpeq(scratch1, scratch2);
  bf(not_equal);                  // Bytes differ: bail out.
  dt_(length);                    // --length; T set when zero.
  bf(&loop);                      // Loop while length != 0.
}
| 174 |
| 175 void Assembler::Align(int m) { |
| 176 ASSERT(m >= 4 && IsPowerOf2(m)); |
| 177 while ((pc_offset() & (m - 1)) != 0) { |
| 178 nop_(); |
| 179 } |
| 180 } |
| 181 |
| 182 |
| 183 static const int kMinimalBufferSize = 4*KB; |
| 184 |
| 185 |
// Construct an assembler over either a caller-supplied buffer (not owned)
// or an internally allocated one (owned, possibly recycled from the
// isolate's spare-buffer cache). Reloc info grows downward from the end
// of the buffer while instructions grow upward from the start.
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
    : AssemblerBase(arg_isolate),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this),
      emit_debug_code_(FLAG_debug_code),
      predictable_code_size_(false) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the isolate's cached spare buffer when available.
      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Fill the buffer with 0 so it will normally crash if we jump into it
  if (own_buffer_) {
    memset(buffer_, 0x00, buffer_size);
  }

  // Set up buffer pointers.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);

  ClearRecordedAstId();
}
| 230 |
| 231 |
// Fill in a CodeDesc describing the generated code: instructions at the
// start of the buffer, reloc info packed at the end.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit the constant pool if needed
  // FIXME(STM)

  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info is written backwards from the buffer end.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}
| 242 |
// Debugging.
// Mark the current pc as a JS return site (used by the debugger to patch
// in breakpoints at function exits).
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
| 249 |
// Attach a code comment to the current pc via reloc info, shown by the
// disassembler when --code-comments is on.
// NOTE(review): the 'force' parameter is ignored here — confirm whether any
// caller expects it to emit the comment without FLAG_code_comments.
void Assembler::RecordComment(const char* msg, bool force) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
| 256 |
| 257 |
// Grow the internally owned code buffer, relocating both the instruction
// stream (at the start) and the reloc info (at the end) into the new
// buffer. Fatal if the buffer was supplied externally.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: double up to 1MB, then grow linearly.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions keep their offset from the buffer start;
  // reloc info keeps its offset from the buffer end.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);
}
| 293 |
| 294 |
// Append a reloc info entry for the current pc. External references are
// skipped unless serializing or emitting debug code; CODE_TARGET_WITH_ID
// entries additionally carry (and consume) the recorded AST id.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  ASSERT(rmode != RelocInfo::NONE);
  // Don't record external references unless the heap will be serialized.
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif
    if (!Serializer::enabled() && !emit_debug_code()) {
      return;
    }
  }
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId().ToInt(), NULL);
    ClearRecordedAstId();
    reloc_info_writer.Write(&reloc_info_with_ast_id);
  } else {
    // we do not try to reuse pool constants
    RelocInfo rinfo(pc_, rmode, data, NULL);
    reloc_info_writer.Write(&rinfo);
  }
}
| 318 |
| 319 |
| 320 void Assembler::add(Register Rd, const Operand& imm, Register rtmp) { |
| 321 if (imm.is_int8()) { |
| 322 add_imm_(imm.imm32_, Rd); |
| 323 } else { |
| 324 ASSERT(!Rd.is(rtmp)); |
| 325 mov(rtmp, imm); |
| 326 add_(rtmp, Rd); |
| 327 } |
| 328 } |
| 329 |
| 330 |
| 331 void Assembler::add(Register Rd, Register Rs, const Operand& imm, |
| 332 Register rtmp) { |
| 333 if (Rs.code() != Rd.code()) { |
| 334 mov(Rd, imm); |
| 335 add_(Rs, Rd); |
| 336 } else { |
| 337 add(Rd, imm, rtmp); |
| 338 } |
| 339 } |
| 340 |
| 341 |
| 342 void Assembler::add(Register Rd, Register Rs, Register Rt) { |
| 343 if (Rs.code() == Rd.code()) |
| 344 add_(Rt, Rd); |
| 345 else if (Rt.code() == Rd.code()) { |
| 346 add_(Rs, Rd); |
| 347 } else { |
| 348 ASSERT(!Rs.is(Rd) && !Rt.is(Rd)); |
| 349 mov_(Rs, Rd); |
| 350 add_(Rt, Rd); |
| 351 } |
| 352 } |
| 353 |
| 354 |
| 355 void Assembler::sub(Register Rd, Register Rs, const Operand& imm, |
| 356 Register rtmp) { |
| 357 mov(rtmp, imm); |
| 358 if (Rs.code() == Rd.code()) { |
| 359 sub_(rtmp, Rd); |
| 360 } else { |
| 361 mov_(Rs, Rd); |
| 362 sub_(rtmp, Rd); |
| 363 } |
| 364 } |
| 365 |
| 366 |
| 367 void Assembler::sub(Register Rd, Register Rs, Register Rt) { |
| 368 if (Rs.code() == Rd.code()) { |
| 369 sub_(Rt, Rd); |
| 370 } else if (Rt.code() == Rd.code()) { |
| 371 ASSERT(!Rs.is(Rd)); |
| 372 neg_(Rt, Rd); |
| 373 add_(Rs, Rd); |
| 374 } else { |
| 375 ASSERT(!Rs.is(Rd) && !Rt.is(Rd)); |
| 376 mov_(Rs, Rd); |
| 377 sub_(Rt, Rd); |
| 378 } |
| 379 } |
| 380 |
| 381 |
| 382 void Assembler::addv(Register Rd, Register Rs, Register Rt) { |
| 383 if (Rs.code() == Rd.code()) |
| 384 addv_(Rt, Rd); |
| 385 else if (Rt.code() == Rd.code()) { |
| 386 addv_(Rs, Rd); |
| 387 } else { |
| 388 ASSERT(!Rs.is(Rd) && !Rt.is(Rd)); |
| 389 mov_(Rs, Rd); |
| 390 addv_(Rt, Rd); |
| 391 } |
| 392 } |
| 393 |
| 394 |
// Rd = Rs + imm with overflow detection; the immediate goes through rtmp.
// rtmp must not alias Rs or Rd (enforced in the register overload).
void Assembler::addv(Register Rd, Register Rs, const Operand& imm,
                     Register rtmp) {
  mov(rtmp, imm);
  addv(Rd, Rs, rtmp);
}
| 400 |
// Rd = Rs + Rt using add-with-carry. addc consumes the T bit as carry-in,
// so T is cleared first; on return T holds the carry-out.
void Assembler::addc(Register Rd, Register Rs, Register Rt) {
  // Clear T bit before using addc
  clrt_();
  if (Rs.code() == Rd.code())
    addc_(Rt, Rd);
  else if (Rt.code() == Rd.code()) {
    // addc is commutative, so Rd (== Rt) += Rs works the same.
    addc_(Rs, Rd);
  } else {
    ASSERT(!Rs.is(Rd) && !Rt.is(Rd));
    mov_(Rs, Rd);
    addc_(Rt, Rd);
  }
}
| 414 |
| 415 |
// Rd = Rs - Rt with overflow detection (subv sets the T bit on overflow).
// subv is not commutative, so when Rd aliases Rt the result is computed in
// rtmp and moved into Rd afterwards.
void Assembler::subv(Register Rd, Register Rs, Register Rt, Register rtmp) {
  if (Rs.code() == Rd.code())
    subv_(Rt, Rd);
  else if (Rt.code() == Rd.code()) {
    ASSERT(!Rs.is(rtmp) && !Rt.is(rtmp));
    mov_(Rs, rtmp);
    subv_(Rt, rtmp);
    mov_(rtmp, Rd);
  } else {
    ASSERT(!Rs.is(Rd) && !Rt.is(Rd));
    mov_(Rs, Rd);
    subv_(Rt, Rd);
  }
}
| 430 |
| 431 |
| 432 void Assembler::subc(Register Rd, Register Rs, Register Rt, Register rtmp) { |
| 433 // Clear T bit before using subc |
| 434 clrt_(); |
| 435 if (Rs.code() == Rd.code()) |
| 436 subc_(Rt, Rd); |
| 437 else if (Rt.code() == Rd.code()) { |
| 438 ASSERT(!Rs.is(rtmp) && !Rt.is(rtmp)); |
| 439 mov_(Rs, rtmp); |
| 440 subc_(Rt, rtmp); |
| 441 mov_(rtmp, Rd); |
| 442 } else { |
| 443 ASSERT(!Rs.is(Rd) && !Rt.is(Rd)); |
| 444 mov_(Rs, Rd); |
| 445 subc_(Rt, Rd); |
| 446 } |
| 447 } |
| 448 |
// TODO(stm): check why asl is useful? Is it like lsl?
// Rd = Rs << imm (arithmetic shift left). Shift of 1 uses shal; any other
// amount goes through shad with the count in rtmp.
void Assembler::asl(Register Rd, Register Rs, const Operand& imm,
                    Register rtmp) {
  ASSERT(imm.imm32_ >= 0 && imm.imm32_ < 32);
  if (Rs.code() != Rd.code())
    mov_(Rs, Rd);
  if (imm.imm32_ == 1) {
    shal_(Rd);
  } else {
    ASSERT(!Rs.is(rtmp) && !Rd.is(rtmp));
    mov_imm_(imm.imm32_, rtmp);
    shad_(rtmp, Rd);  // positive count => left shift
  }
}
| 463 |
| 464 |
// Rd = Rs >> Rt (arithmetic). shad shifts right for negative counts, so the
// count is negated into rtmp. When !in_range, counts > 31 are clamped to 31
// (propagating the sign bit); the T bit is saved on the stack around the
// clamping compare and restored via cmppl at the end.
// NOTE(review): the save/restore and delay-slot layout make statement order
// significant — do not reorder.
void Assembler::asr(Register Rd, Register Rs, Register Rt, bool in_range,
                    Register rtmp) {
  ASSERT(!Rs.is(rtmp) && !Rd.is(rtmp) && !Rt.is(rtmp));
  // If !in_range, we must clamp shift value to 31 max
  if (!in_range) {
    movt_(rtmp);               // Save T...
    push(rtmp);                // ...on the stack.
    cmphi(Rt, Operand(31), rtmp);
  }
  neg_(Rt, rtmp);              // rtmp = -Rt: right shift for shad.
  if (!in_range) {
    bf_(0);                    // Skip the clamp when Rt <= 31.
    mov_imm_(-31, rtmp);       // Clamp: shift right by 31.
  }
  if (Rs.code() != Rd.code()) {
    mov_(Rs, Rd);
  }
  shad_(rtmp, Rd);
  if (!in_range) {
    pop(rtmp);
    cmppl_(rtmp);  // gives back t bit
  }
}
| 488 |
| 489 |
| 490 void Assembler::asr(Register Rd, Register Rs, const Operand& imm, |
| 491 Register rtmp) { |
| 492 ASSERT(imm.imm32_ >= 0 && imm.imm32_ < 32); |
| 493 if (Rs.code() != Rd.code()) |
| 494 mov_(Rs, Rd); |
| 495 if (imm.imm32_ == 1) { |
| 496 shar_(Rd); |
| 497 } else { |
| 498 ASSERT(!Rs.is(rtmp) && !Rd.is(rtmp)); |
| 499 mov_imm_(-imm.imm32_, rtmp); |
| 500 shad_(rtmp, Rd); |
| 501 } |
| 502 } |
| 503 |
| 504 |
| 505 void Assembler::lsl(Register Rd, Register Rs, const Operand& imm, |
| 506 Register rtmp) { |
| 507 ASSERT(imm.imm32_ >= 0 && imm.imm32_ < 32); |
| 508 if (Rs.code() != Rd.code()) |
| 509 mov_(Rs, Rd); |
| 510 if (imm.imm32_ == 1) { |
| 511 shll_(Rd); |
| 512 } else if (imm.imm32_ == 2) { |
| 513 shll2_(Rd); |
| 514 } else { |
| 515 ASSERT(!Rs.is(rtmp) && !Rd.is(rtmp)); |
| 516 mov_imm_(imm.imm32_, rtmp); |
| 517 shld_(rtmp, Rd); |
| 518 } |
| 519 } |
| 520 |
// Rd = Rs << Rt (logical). When !wrap, shift amounts >= 32 produce 0 (the
// clamping compare's T bit is saved on the stack and restored via cmppl).
// When Rd aliases Rt and differs from Rs, the shift count is first copied
// into rtmp so mov_(Rs, Rd) does not clobber it.
// NOTE(review): relies on bf_'s delay-slot-free skip over the following
// instruction — do not reorder.
void Assembler::lsl(Register Rd, Register Rs, Register Rt, bool wrap,
                    Register rtmp) {
  ASSERT(!Rs.is(rtmp) && !Rd.is(rtmp) && !Rt.is(rtmp));
  Register rshift = Rt;
  // If !in_range, we must flush in case of shift value >= 32
  if (!wrap) {
    movt_(rtmp);                   // Save T...
    push(rtmp);                    // ...on the stack.
    cmphi(Rt, Operand(31), rtmp);
  }
  if (Rs.code() != Rd.code()) {
    if (Rt.is(Rd)) {
      // Rd would clobber the count: shift via rtmp instead.
      rshift = rtmp;
      mov_(Rt, rtmp);
    }
    mov_(Rs, Rd);
  }
  shld_(rshift, Rd);
  if (!wrap) {
    bf_(0);                        // Skip the flush when Rt <= 31.
    // Nullify result for shift amount >= 32
    mov_imm_(0, Rd);
    pop(rtmp);
    cmppl_(rtmp);  // gives back t bit
  }
}
| 547 |
| 548 |
| 549 void Assembler::lsr(Register Rd, Register Rs, const Operand& imm, |
| 550 Register rtmp) { |
| 551 ASSERT(imm.imm32_ >= 0 && imm.imm32_ < 32); |
| 552 if (Rs.code() != Rd.code()) |
| 553 mov_(Rs, Rd); |
| 554 if (imm.imm32_ == 1) { |
| 555 shlr_(Rd); |
| 556 } else if (imm.imm32_ == 2) { |
| 557 shlr2_(Rd); |
| 558 } else { |
| 559 ASSERT(!Rs.is(rtmp) && !Rd.is(rtmp)); |
| 560 mov_imm_(-imm.imm32_, rtmp); |
| 561 shld_(rtmp, Rd); |
| 562 } |
| 563 } |
| 564 |
// Rd = Rs >> Rt (logical). The count is negated into rtmp for shld
// (negative count => right shift). When !in_range, shift amounts >= 32
// produce 0; the caller's T bit is saved on the stack around the clamping
// compare and restored via cmppl.
// NOTE(review): relies on bf_'s skip over the following instruction —
// do not reorder.
void Assembler::lsr(Register Rd, Register Rs, Register Rt, bool in_range,
                    Register rtmp) {
  ASSERT(!Rs.is(rtmp) && !Rd.is(rtmp) && !Rt.is(rtmp));
  // If !in_range, we must flush in case of shift value >= 32
  if (!in_range) {
    movt_(rtmp);                   // Save T...
    push(rtmp);                    // ...on the stack.
    cmphi(Rt, Operand(31), rtmp);
  }
  neg_(Rt, rtmp);                  // rtmp = -Rt: right shift for shld.
  if (Rs.code() != Rd.code()) {
    mov_(Rs, Rd);
  }
  shld_(rtmp, Rd);
  if (!in_range) {
    bf_(0);                        // Skip the flush when Rt <= 31.
    // Nullify result for shift amount >= 32
    mov_imm_(0, Rd);
    pop(rtmp);
    cmppl_(rtmp);  // gives back t bit
  }
}
| 587 |
| 588 |
// Rd = Rs & imm. Uses the and-immediate-to-r0 encoding when it applies
// (Rd == Rs == r0 and the immediate fits); otherwise materializes the
// immediate in rtmp and defers to the register form.
void Assembler::land(Register Rd, Register Rs, const Operand& imm,
                     Register rtmp) {
  if (Rd.is(r0) && Rd.is(Rs) && FITS_SH4_and_imm_R0(imm.imm32_)) {
    and_imm_R0_(imm.imm32_);
  } else {
    ASSERT(!Rs.is(rtmp));
    mov(rtmp, imm);
    land(Rd, Rs, rtmp);
  }
}
| 599 |
| 600 |
| 601 void Assembler::land(Register Rd, Register Rs, Register Rt) { |
| 602 if (Rs.code() == Rd.code()) |
| 603 and_(Rt, Rd); |
| 604 else if (Rt.code() == Rd.code()) { |
| 605 and_(Rs, Rd); |
| 606 } else { |
| 607 ASSERT(!Rt.is(Rd) && !Rs.is(Rd)); |
| 608 mov_(Rs, Rd); |
| 609 and_(Rt, Rd); |
| 610 } |
| 611 } |
| 612 |
| 613 |
// Rd = Rs | imm. Uses the or-immediate-to-r0 encoding when it applies
// (Rd == Rs == r0 and the immediate fits); otherwise materializes the
// immediate in rtmp and defers to the register form.
void Assembler::lor(Register Rd, Register Rs, const Operand& imm,
                    Register rtmp) {
  if (Rd.is(r0) && Rd.is(Rs) && FITS_SH4_or_imm_R0(imm.imm32_)) {
    or_imm_R0_(imm.imm32_);
  } else {
    ASSERT(!Rs.is(rtmp));
    mov(rtmp, imm);
    lor(Rd, Rs, rtmp);
  }
}
| 624 |
| 625 void Assembler::lor(Register Rd, Register Rs, Register Rt) { |
| 626 if (Rs.code() == Rd.code()) |
| 627 or_(Rt, Rd); |
| 628 else if (Rt.code() == Rd.code()) { |
| 629 or_(Rs, Rd); |
| 630 } else { |
| 631 ASSERT(!Rt.is(Rd) && !Rs.is(Rd)); |
| 632 mov_(Rs, Rd); |
| 633 or_(Rt, Rd); |
| 634 } |
| 635 } |
| 636 |
// Conditional Rd = Rs | Rt: only performed when the T bit matches cond
// (eq => T set, ne => T clear); otherwise the or is skipped.
void Assembler::lor(Register Rd, Register Rs, Register Rt, Condition cond) {
  ASSERT(cond == ne || cond == eq);
  Label end;
  if (cond == eq)
    bf_near(&end);  // Jump after sequence if T bit is false
  else
    bt_near(&end);  // Jump after sequence if T bit is true
  lor(Rd, Rs, Rt);
  bind(&end);
}
| 647 |
| 648 |
// Conditional Rd = Rs | imm: only performed when the T bit matches cond
// (eq => T set, ne => T clear); otherwise the or is skipped.
void Assembler::lor(Register Rd, Register Rs, const Operand& imm,
                    Condition cond, Register rtmp) {
  ASSERT(cond == ne || cond == eq);
  Label end;
  if (cond == eq)
    bf_near(&end);  // Jump after sequence if T bit is false
  else
    bt_near(&end);  // Jump after sequence if T bit is true
  lor(Rd, Rs, imm, rtmp);
  bind(&end);
}
| 660 |
| 661 |
// Rd = Rs ^ imm. Uses the xor-immediate-to-r0 encoding when it applies
// (Rd == Rs == r0 and the immediate fits); otherwise materializes the
// immediate in rtmp and defers to the register form.
void Assembler::lxor(Register Rd, Register Rs, const Operand& imm,
                     Register rtmp) {
  if (Rd.is(r0) && Rd.is(Rs) && FITS_SH4_xor_imm_R0(imm.imm32_)) {
    xor_imm_R0_(imm.imm32_);
  } else {
    ASSERT(!Rs.is(rtmp));
    mov(rtmp, imm);
    lxor(Rd, Rs, rtmp);
  }
}
| 672 |
| 673 void Assembler::lxor(Register Rd, Register Rs, Register Rt) { |
| 674 if (Rs.code() == Rd.code()) |
| 675 xor_(Rt, Rd); |
| 676 else if (Rt.code() == Rd.code()) { |
| 677 xor_(Rs, Rd); |
| 678 } else { |
| 679 ASSERT(!Rt.is(Rd) && !Rs.is(Rd)); |
| 680 mov_(Rs, Rd); |
| 681 xor_(Rt, Rd); |
| 682 } |
| 683 } |
| 684 |
| 685 |
// Set T iff (Rd & imm) == 0. The immediate is materialized in rtmp, which
// therefore must not alias Rd.
void Assembler::tst(Register Rd, const Operand& imm, Register rtmp) {
  ASSERT(!Rd.is(rtmp));
  mov(rtmp, imm);
  tst_(Rd, rtmp);
}
| 691 |
// Call to a label: alias for jsr (jump-to-subroutine, links the return pc).
void Assembler::call(Label* L) {
  jsr(L);
}
| 695 |
| 696 |
// Emit a raw byte at the current pc.
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
void Assembler::db(uint8_t data) {
  // TODO(STM): constant pool
  // ASSERT(num_pending_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
| 707 |
| 708 |
// Emit a raw 16-bit value at the current pc.
// No relocation info should be pending while using dw: it writes pure data
// with no pointers, and the constant pool should be emitted beforehand.
void Assembler::dw(uint16_t data) {
  // TODO(STM): constant pool
  // ASSERT(num_pending_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint16_t*>(pc_) = data;
  pc_ += sizeof(uint16_t);
}
| 719 |
| 720 |
// Emit a raw 32-bit value at the current pc.
// No relocation info should be pending while using dd: it writes pure data
// with no pointers, and the constant pool should be emitted beforehand.
void Assembler::dd(uint32_t data) {
  // TODO(STM): constant pool
  // ASSERT(num_pending_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
| 731 |
| 732 |
// Extract the Rn field (bits 8..11) from a "cmp/xx Rm, Rn" or
// "mov #ii, Rn" instruction.
Register Assembler::GetRn(Instr instr) {
  ASSERT(IsCmpRegister(instr) || IsMovImmediate(instr));
  Register reg;
  reg.code_ = (instr & 0x0F00) >> 8;
  return reg;
}
| 740 |
| 741 |
// Extract the Rm field (bits 4..7) from a "cmp/xx Rm, Rn" instruction.
// (The original comment said "Rn"; this accessor reads the Rm nibble.)
Register Assembler::GetRm(Instr instr) {
  ASSERT(IsCmpRegister(instr) || IsMovImmediate(instr));
  Register reg;
  reg.code_ = (instr & 0x00F0) >> 4;
  return reg;
}
| 749 |
| 750 |
| 751 bool Assembler::IsMovImmediate(Instr instr) { |
| 752 // mov #ii, Rn |
| 753 return (instr & 0xF000) == 0xE000; |
| 754 } |
| 755 |
| 756 |
// True for the conditional branches bt, bf, bt/s and bf/s
// (all match 1000 1x x1 under the 0xF900 mask).
bool Assembler::IsBranch(Instr instr) {
  // bt|bf|bt/s|bf/s instrs.
  return (instr & 0xF900) == 0x8900;
}
| 761 |
| 762 |
| 763 Condition Assembler::GetCondition(Instr instr) { |
| 764 ASSERT(IsBranch(instr)); |
| 765 return (instr & 0x200) == 0x200 ? |
| 766 ne : // bf| bf/s |
| 767 eq; // bt|bt/s |
| 768 } |
| 769 |
| 770 |
// True for "cmp/eq Rm, Rn" (pattern 0011nnnn mmmm0000).
bool Assembler::IsCmpRegister(Instr instr) {
  // cmp/eq Rm, Rn
  return (instr & 0xF00F) == 0x3000;
}
| 775 |
| 776 |
// True for "cmp/eq #ii, R0" (pattern 10001000 iiiiiiii).
bool Assembler::IsCmpImmediate(Instr instr) {
  // cmp/eq #ii, R0
  return (instr & 0xFF00) == 0x8800;
}
| 781 |
| 782 |
// The register operand of cmp/eq-immediate is architecturally fixed to r0.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  // The instruction is cmpeq #ii, r0, return r0
  return r0;
}
| 788 |
| 789 |
| 790 int Assembler::GetCmpImmediateAsUnsigned(Instr instr) { |
| 791 ASSERT(IsCmpImmediate(instr)); |
| 792 // The instruction is cmpeq #ii, r0, return 8-bit #ii as unsigned |
| 793 return (instr & 0xFF); |
| 794 } |
| 795 |
| 796 |
// Bind a label to the current pc and patch every branch previously linked
// to it. Near links store a 16-bit tagged slot (low bits encode the branch
// kind); far links store a 32-bit slot. "Backtrack" far links (LSB == 1,
// used by the regexp engine) get an absolute code offset instead of a
// branch displacement.
// NOTE(review): next() must be called before patching, since patching
// overwrites the chain link stored in the slot.
void Assembler::bind(Label* L) {
  // label can only be bound once
  ASSERT(!L->is_bound());

  // Jump directly to the current PC
  int target_pos = reinterpret_cast<int>(pc_);
  int is_near_linked = L->is_near_linked();
  Label::Distance distance = is_near_linked ? Label::kNear : Label::kFar;

  // List the links to patch
  while (L->is_linked()) {
    // Compute the current position
    // L->pos() is the offset from the begining of the buffer
    uint16_t* p_pos = reinterpret_cast<uint16_t*>(L->pos() + buffer_);
    // Near slots always carry a non-zero kind tag in the low two bits.
    ASSERT((is_near_linked && (int)(*p_pos) % 4 != 0) || !is_near_linked);

    // Compute the next before the patch
    next(L, distance);

    // Patching
    // Is it a backtrack label or a classical one ?
    // In this case the LSB is set to 1
    if (!is_near_linked && (((signed)*p_pos) & 0x3) == 1) {
      ASSERT(((*reinterpret_cast<int16_t*>(p_pos)) & ~0x3) == kEndOfChain);
      // Backtrack: store the absolute offset of the target inside the
      // Code object (header included).
      *reinterpret_cast<uint32_t*>(p_pos) = target_pos - (unsigned)buffer_ +
          (Code::kHeaderSize - kHeapObjectTag);
    } else {
      patchBranchOffset(target_pos, p_pos, is_near_linked);
    }
  }
  L->bind_to(pc_offset());
  L->UnuseNear();

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pc_offset() > last_bound_pos_)
    last_bound_pos_ = pc_offset();
}
| 835 |
| 836 |
// Advance label L to the next link in its chain, or Unuse() it when the
// current slot holds the kEndOfChain sentinel. Near chains are stored in
// 16-bit slots whose low two bits tag the branch kind (masked off here);
// far chains are stored in 32-bit slots.
void Assembler::next(Label* L, Label::Distance distance) {
  if (distance == Label::kNear) {
    ASSERT(L->is_near_linked());
    int16_t link = (*reinterpret_cast<uint16_t*>(L->pos() + buffer_)) & ~0x3;
    if (link > 0) {
      L->link_to(link & ~0x3);
    } else {
      // Negative/zero link is the end-of-chain sentinel.
      ASSERT(link == kEndOfChain);
      L->Unuse();
    }
  } else {
    ASSERT(L->is_linked());
    int link = *reinterpret_cast<uint32_t*>(L->pos() + buffer_);
    if (link > 0) {
      L->link_to(link);
    } else {
      // Far slots may carry tag bits; compare against the sentinel
      // with the low bits masked off.
      ASSERT((link & ~0x3) == kEndOfChain);
      L->Unuse();
    }
  }
}
| 858 |
| 859 |
// Load the (future) address of unbound label L into r0 and add this site
// to the label's link chain. The +0x1 tags the 32-bit constant slot as a
// "backtrack" link, recognized and resolved by bind().
void Assembler::load_label(Label* L) {
  ASSERT(!L->is_bound());
  int offset = L->is_linked() ? L->pos() : kEndOfChain;
  ASSERT((offset % 4) == 0);  // Tag bits must be free in the stored link.
  mov(r0, Operand(offset + 0x1), true);
  L->link_to(pc_offset() - sizeof(uint32_t));  // Link the constant slot.
}
| 867 |
| 868 |
// Emit a branch of the given type to label L. Bound labels get a direct
// pc-relative branch; unbound labels emit a patchable sequence and are
// linked through the slot just emitted (16-bit for near, 32-bit constant
// for far), to be resolved in bind().
void Assembler::branch(Label* L, Register rtmp, branch_type type,
                       Label::Distance distance) {
  // when bound both near and far labels are represented the same way
  if (L->is_bound()) {
    ASSERT(L->pos() != kEndOfChain);
    branch(L->pos() - pc_offset(), rtmp, type, distance, false);
  } else {
    // The only difference between Near and far label is in the
    // is_near_linked function.
    // For this reason, we set the near_link_pos to 1 and then we use the
    // generic link_to to know the position
    // Moreover the position of the linked branch will be altered when dumped
    // on memory to carry the type of branch to write when patching back
    if (distance == Label::kNear)
      L->link_to(1, Label::kNear);

    if (L->is_linked()) {
      ASSERT(L->pos() != kEndOfChain);
      branch(L->pos(), rtmp, type, distance, true);
    } else {
      branch(kEndOfChain, rtmp, type, distance, true);  // Patched later on
    }

    int pos;
    if (distance == Label::kFar) {
      // Compensate the place of the constant (sizeof(uint32_t))
      // Constant pool is always emited last in the sequence
      // the position is defined as an offset from the begining of the buffer
      pos = pc_offset() - sizeof(uint32_t);
    } else {
      pos = pc_offset() - sizeof(uint16_t);
    }
    L->link_to(pos);  // Link to the constant
  }
}
| 904 |
| 905 |
// Indirect jump through Rd (nop fills the branch delay slot).
void Assembler::jmp(Register Rd) {
  // Record position of a jmp to code
  positions_recorder()->WriteRecordedPositions();
  jmp_indRd_(Rd);
  nop_();  // delay slot
}
| 912 |
// Indirect call through Rd (nop fills the branch delay slot).
void Assembler::jsr(Register Rd) {
  // Record position of a jsr to code
  positions_recorder()->WriteRecordedPositions();
  jsr_indRd_(Rd);
  nop_();  // delay slot
}
| 919 |
// Jump to a code object: load its (relocatable) address into rtmp, then
// jump indirectly through it.
void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode, Register rtmp) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // Record position of a jmp to code
  positions_recorder()->WriteRecordedPositions();
  // TODO(stm): make a faster sequence where the constant pool is
  // after the branch
  mov(rtmp, Operand(reinterpret_cast<intptr_t>(code.location()), rmode));
  jmp_indRd_(rtmp);
  nop_();  // delay slot
}
| 930 |
// Call a code object: load its (relocatable) address into rtmp, then call
// indirectly through it.
void Assembler::jsr(Handle<Code> code, RelocInfo::Mode rmode, Register rtmp) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // Record position of a jsr to code
  positions_recorder()->WriteRecordedPositions();
  mov(rtmp, Operand(reinterpret_cast<intptr_t>(code.location()), rmode));
  jsr_indRd_(rtmp);
  nop_();  // delay slot
}
| 939 |
// Dispatch an offset-based branch to the emitter for its type.
// 'patched_later' marks sequences whose target is still unknown and will
// be fixed up when the label is bound.
void Assembler::branch(int offset, Register rtmp, branch_type type,
                       Label::Distance distance, bool patched_later) {
  switch (type) {
    case branch_true:     // taken when T is set
      conditional_branch(offset, rtmp, distance, patched_later, true);
      break;
    case branch_false:    // taken when T is clear
      conditional_branch(offset, rtmp, distance, patched_later, false);
      break;
    case branch_unconditional:
      jmp(offset, rtmp, distance, patched_later);
      break;
    case branch_subroutine:
      jsr(offset, rtmp, patched_later);
      break;
  }
}
| 957 |
| 958 |
// Rewrite a linked branch site so it reaches target_pos (an absolute pc).
// Near sites hold a 16-bit slot whose low two bits encode the branch kind
// (1 = bt, 2 = bf, 3 = bra); the slot is replaced by the real instruction.
// Far sites hold a 32-bit constant that receives a pc-relative
// displacement; jsr sequences (detected by the 0xa002 opcode two slots
// back) need a +4 bias relative to the other far branches.
// NOTE(review): the hand-encoded opcodes below must match the SH4
// bt/bf/bra encodings — verify against the ISA manual before changing.
void Assembler::patchBranchOffset(int target_pos, uint16_t *p_constant,
                                  int is_near_linked) {
  if (is_near_linked) {
    // The two least significant bits represent the type of branch (1, 2 or 3)
    ASSERT((*p_constant & 0x3) == 1 ||   // bt
           (*p_constant & 0x3) == 2 ||   // bf
           (*p_constant & 0x3) == 3);    // jmp
    int disp = target_pos - (unsigned)p_constant - 4;

    // select the right branch type
    switch ((*p_constant & 0x3)) {
      case 1:
        ASSERT(FITS_SH4_bt(disp));
        // bt: 1000 1001 dddd dddd
        *p_constant = (0x8 << 12) | (0x9 << 8) | (((disp & 0x1FE) >> 1) << 0);
        break;
      case 2:
        ASSERT(FITS_SH4_bf(disp));
        // bf: 1000 1011 dddd dddd
        *p_constant = (0x8 << 12) | (0xB << 8) | (((disp & 0x1FE) >> 1) << 0);
        break;
      case 3:
        ASSERT(FITS_SH4_bra(disp + 2));
        // The slot before must be the placeholder nop (0x9): the bra goes
        // there and the tagged slot becomes the nop.
        ASSERT(*(p_constant - 1) == 0x9);
        *(p_constant - 1) = (0xA << 12) | ((((disp + 2) & 0x1FFE) >> 1) << 0);
        *(p_constant) = 0x9;
        break;
      default:
        UNREACHABLE();
    }

  } else {
    // Patch the constant
    ASSERT(*(p_constant - 1) == 0x09);  // nop before the constant slot
    // Is it a jsr or any other branch ?
    if (*(p_constant - 2) == 0xa002)
      *reinterpret_cast<uint32_t*>(p_constant) = target_pos -
          (unsigned)p_constant + 4;
    else
      *reinterpret_cast<uint32_t*>(p_constant) = target_pos -
          (unsigned)p_constant;
  }
}
| 1000 |
| 1001 |
// Emits a conditional branch of 'offset' bytes relative to this site.
// 'type' true branches when the T bit is set (bt), false when clear (bf).
// When 'patched_later' is set, the final offset is unknown and a placeholder
// is emitted for patchBranchOffset() to rewrite at bind time.
void Assembler::conditional_branch(int offset, Register rtmp,
                                   Label::Distance distance, bool patched_later,
                                   bool type) {
  if (patched_later) {
    if (distance == Label::kNear) {
      align();
      // Use the 2 least significant bits to store the type of branch
      // (0x1 = bt, 0x2 = bf, decoded by patchBranchOffset()).
      // We assume (and assert) that they always are null
      ASSERT((offset % 4) == 0);
      dw(offset + (type ? 0x1 : 0x2));
    } else {
      align();
      // Far form: emit the *inverted* short branch to skip the 12-byte
      // braf sequence below when the condition does not hold.
      type ? bf_(12) : bt_(12);
      nop_();
      movl_dispPC_(4, rtmp);
      nop_();
      braf_(rtmp);
      nop_();
      dd(offset);  // placeholder constant, patched later
    }
  } else {
    // Offset known now: use the direct short form when it fits.
    if (FITS_SH4_bt(offset - 4)) {
      type ? bt_(offset - 4) : bf_(offset - 4);
      nop_();
    } else {
      // Same inverted-branch + braf sequence as above; the stored constant
      // accounts for the alignment nops and the sequence length.
      int nop_count = align();
      type ? bf_(12) : bt_(12);
      nop_();
      movl_dispPC_(4, rtmp);
      nop_();
      braf_(rtmp);
      nop_();
      dd(offset - 4 - 8 - 2 * nop_count);
    }
  }
}
| 1038 |
| 1039 |
// Emits an unconditional jump of 'offset' bytes relative to this site.
void Assembler::jmp(int offset, Register rtmp, Label::Distance distance,
                    bool patched_later) {
  positions_recorder()->WriteRecordedPositions();

  // Is it going to be patched later on ?
  if (patched_later) {
    if (distance == Label::kNear) {
      // Near placeholder: a word tagged 0x3 (unconditional, see
      // patchBranchOffset()) rewritten into a bra at bind time.
      misalign();
      nop();
      ASSERT((offset % 4) == 0);
      dw(offset + 0x3);
    } else {
      // There is no way to know the size of the offset: take the worst case
      align();
      movl_dispPC_(4, rtmp);
      nop();
      braf_(rtmp);
      nop_();
      dd(offset);
    }
  } else {
    // Does it fit in a bra offset ?
    if (FITS_SH4_bra(offset - 4)) {
      bra_(offset - 4);
      nop_();
    } else {
      // Long jump: load the offset from a constant pool and branch via braf;
      // the constant compensates for the alignment nops and sequence length.
      int nop_count = align();
      movl_dispPC_(4, rtmp);
      nop();
      braf_(rtmp);
      nop_();
      dd(offset - 4 - 4 - 2 * nop_count);
    }
  }
}
| 1075 |
// Emits a subroutine call (sets pr) of 'offset' bytes relative to this site.
void Assembler::jsr(int offset, Register rtmp, bool patched_later) {
  positions_recorder()->WriteRecordedPositions();

  // Is it going to be patched later on ?
  if (patched_later) {
    // There is no way to know the size of the offset: take the worst case
    align();
    movl_dispPC_(8, rtmp);
    nop();
    bsrf_(rtmp);
    nop_();
    // On return from the callee, skip over the constant-pool word below.
    bra_(4);
    nop_();
    dd(offset);
  } else {
    // Does it fit in a bsr offset ?
    if (FITS_SH4_bsr(offset - 4)) {
      bsr_(offset - 4);
      nop_();
    } else {
      // Long call via bsrf; the stored constant compensates for the
      // alignment nops and the sequence length.
      int nop_count = align();
      movl_dispPC_(8, rtmp);
      nop();
      bsrf_(rtmp);
      nop_();
      bra_(4);
      nop_();
      dd(offset - 4 - 4 - 2 * nop_count);
    }
  }
}
| 1107 |
| 1108 |
// Loads the 32-bit immediate 'imm' into Rd.
// With force == false a short mov #imm8 is used when the value fits and
// needs no relocation; otherwise a fixed-length PC-relative constant-pool
// sequence is emitted (required for patchable/relocated immediates).
void Assembler::mov(Register Rd, const Operand& imm, bool force) {
  // FIXME(STM): Internal ref not handled
  ASSERT(imm.rmode_ != RelocInfo::INTERNAL_REFERENCE);

  // Move based on immediates can only be 8 bits long
  if (force == false && (imm.is_int8() && imm.rmode_ == RelocInfo::NONE)) {
    mov_imm_(imm.imm32_, Rd);
  } else {
    // Use a tiny constant pool and jump above
    align();
#ifdef DEBUG
    int instr_address = pc_offset();
#endif
    // Record the relocation location (after the align).
    // Actually we record the PC of the instruction,
    // though the target address is encoded in the constant pool below.
    // If the code sequence changes, one must update
    // Assembler::target_address_address_at().
    if (imm.rmode_ != RelocInfo::NONE) RecordRelocInfo(imm.rmode_);
    movl_dispPC_(4, Rd);
    nop_();
    bra_(4);
    nop_();
#ifdef DEBUG
    if (imm.rmode_ != RelocInfo::NONE) {
      Address target_address = pc_;
      // Verify that target_address_address_at() is actually returning
      // the address where the target address for the instruction is stored.
      ASSERT(target_address ==
             target_pointer_address_at(
                 reinterpret_cast<byte*>(buffer_ + instr_address)));
    }
#endif
    dd(imm.imm32_);
  }
}
| 1145 |
| 1146 |
// Computes Rd = pc + offset, where pc is the address right after this
// code sequence.
// SH4 has no direct pc-read instruction, so we bsr to the next
// instruction and read back the link register.
void Assembler::addpc(Register Rd, int offset, Register Pr) {
  // Pr must be the link register: the bsr below clobbers it.
  ASSERT(Pr.is(pr));
  bsr_(0);
  nop_();
  sts_PR_(Rd);             // Rd = pr (return address of the bsr)
  add_imm_(4+offset, Rd);  // adjust past the remainder of the sequence
}
| 1157 |
| 1158 |
// Conditionally moves Rs into Rd.
// eq: move when the T bit is set; ne: move when it is clear; al: always.
void Assembler::mov(Register Rd, Register Rs, Condition cond) {
  ASSERT(cond == ne || cond == eq || cond == al);
  // If cond is eq, we move Rs into Rd, otherwise, nop
  if (cond == eq)
    bf_(0); // Jump after sequence if T bit is false
  else if (cond == ne)
    bt_(0); // Jump after sequence if T bit is true
  // pr cannot be addressed by the plain mov encoding: use sts/lds instead.
  if (Rs.is(pr)) {
    ASSERT(Rd.is_valid());
    sts_PR_(Rd);
  } else if (Rd.is(pr)) {
    ASSERT(Rs.is_valid());
    lds_PR_(Rs);
  } else {
    ASSERT(Rs.is_valid());
    ASSERT(Rd.is_valid());
    mov_(Rs, Rd);
  }
}
| 1178 |
| 1179 |
// Conditionally loads immediate 'imm' into Rd.
// eq: load when the T bit is set; ne: load when it is clear.
void Assembler::mov(Register Rd, const Operand& imm, Condition cond) {
  ASSERT(cond == ne || cond == eq);
  if (FITS_SH4_mov_imm(imm.imm32_)) {
    // Single-instruction load: skip it with the inverse 0-displacement branch.
    if (cond == eq)
      bf_(0); // Jump after sequence if T bit is false
    else
      bt_(0); // Jump after sequence if T bit is true
    mov_imm_(imm.imm32_, Rd);
  } else {
    // Long immediate: the constant-pool sequence emitted by mov() is too
    // long for a 0-displacement branch, so branch around it via a label.
    Label skip;
    if (cond == eq)
      bf_near(&skip);
    else
      bt_near(&skip);
    mov(Rd, imm);
    bind(&skip);
  }
}
| 1199 |
| 1200 |
| 1201 void Assembler::mov(Register Rd, const MemOperand& src, Register rtmp) { |
| 1202 ASSERT(src.mode_ == Offset); |
| 1203 if (src.rn_.is_valid()) { |
| 1204 ASSERT(rtmp.is_valid()); |
| 1205 add(rtmp, src.rm_, src.rn_); |
| 1206 movl_indRs_(rtmp, Rd); |
| 1207 } else { |
| 1208 if (src.offset_ == 0) { |
| 1209 movl_indRs_(src.rm_, Rd); |
| 1210 } else if (FITS_SH4_movl_dispRs(src.offset_)) { |
| 1211 movl_dispRs_(src.offset_, src.rm_, Rd); |
| 1212 } else { |
| 1213 ASSERT(rtmp.is_valid()); |
| 1214 add(rtmp, src.rm_, Operand(src.offset_)); |
| 1215 movl_indRs_(rtmp, Rd); |
| 1216 } |
| 1217 } |
| 1218 } |
| 1219 |
| 1220 |
| 1221 void Assembler::movb(Register Rd, const MemOperand& src, Register rtmp) { |
| 1222 ASSERT(src.mode_ == Offset); |
| 1223 if (src.rn_.is_valid()) { |
| 1224 add(rtmp, src.rm_, src.rn_); |
| 1225 movb_indRs_(rtmp, Rd); |
| 1226 } else { |
| 1227 if (src.offset_ == 0) { |
| 1228 movb_indRs_(src.rm_, Rd); |
| 1229 } else { |
| 1230 add(rtmp, src.rm_, Operand(src.offset_)); |
| 1231 movb_indRs_(rtmp, Rd); |
| 1232 } |
| 1233 } |
| 1234 extub_(Rd, Rd); // zero extension |
| 1235 } |
| 1236 |
| 1237 |
| 1238 void Assembler::movw(Register Rd, const MemOperand& src, Register rtmp) { |
| 1239 ASSERT(src.mode_ == Offset); |
| 1240 if (src.rn_.is_valid()) { |
| 1241 add(rtmp, src.rm_, src.rn_); |
| 1242 movw_indRs_(rtmp, Rd); |
| 1243 } else { |
| 1244 if (src.offset_ == 0) { |
| 1245 movw_indRs_(src.rm_, Rd); |
| 1246 } else { |
| 1247 add(rtmp, src.rm_, Operand(src.offset_)); |
| 1248 movw_indRs_(rtmp, Rd); |
| 1249 } |
| 1250 } |
| 1251 // Zero extension |
| 1252 extuw_(Rd, Rd); |
| 1253 } |
| 1254 |
| 1255 |
// Transfers the register pair (Rs1, Rs2) into double register Dd by
// round-tripping through the stack.
// NOTE(review): after push Rs1 then Rs2, the post-increment pops load
// Rs2 into Dd.low() and Rs1 into Dd.high() -- confirm this matches the
// intended word order for the target's double layout.
void Assembler::movd(DwVfpRegister Dd, Register Rs1, Register Rs2) {
  align();
  push(Rs1);
  push(Rs2);
  fmov_incRs_(sp, Dd.low());
  fmov_incRs_(sp, Dd.high());
}
| 1263 |
| 1264 |
// Transfers double register Ds into the register pair (Rd1, Rd2) by
// round-tripping through the stack (inverse sequence of the pair-to-double
// movd() overload: low/high words are pushed, then popped into Rd1/Rd2).
void Assembler::movd(Register Rd1, Register Rd2, DwVfpRegister Ds) {
  align();
  fmov_decRd_(Ds.low(), sp);
  fmov_decRd_(Ds.high(), sp);
  pop(Rd1);
  pop(Rd2);
}
| 1272 |
| 1273 |
| 1274 void Assembler::ldrsb(Register Rd, const MemOperand& src, Register rtmp) { |
| 1275 ASSERT(src.mode_ == Offset); |
| 1276 if (src.rn_.is_valid()) { |
| 1277 add(rtmp, src.rm_, src.rn_); |
| 1278 movb_indRs_(rtmp, Rd); |
| 1279 } else { |
| 1280 if (src.offset_ == 0) { |
| 1281 movb_indRs_(src.rm_, Rd); |
| 1282 } else { |
| 1283 add(rtmp, src.rm_, Operand(src.offset_)); |
| 1284 movb_indRs_(rtmp, Rd); |
| 1285 } |
| 1286 } |
| 1287 } |
| 1288 |
| 1289 |
| 1290 void Assembler::ldrsh(Register Rd, const MemOperand& src, Register rtmp) { |
| 1291 ASSERT(src.mode_ == Offset); |
| 1292 if (src.rn_.is_valid()) { |
| 1293 add(rtmp, src.rm_, src.rn_); |
| 1294 movw_indRs_(rtmp, Rd); |
| 1295 } else { |
| 1296 if (src.offset_ == 0) { |
| 1297 movw_indRs_(src.rm_, Rd); |
| 1298 } else { |
| 1299 add(rtmp, src.rm_, Operand(src.offset_)); |
| 1300 movw_indRs_(rtmp, Rd); |
| 1301 } |
| 1302 } |
| 1303 } |
| 1304 |
| 1305 |
| 1306 void Assembler::fldr(SwVfpRegister dst, const MemOperand& src, Register rtmp) { |
| 1307 if (src.rn_.is_valid()) { |
| 1308 add(rtmp, src.rm_, src.rn_); |
| 1309 fmov_indRs_(rtmp, dst); |
| 1310 } else { |
| 1311 if (src.offset_ == 0) { |
| 1312 fmov_indRs_(src.rm_, dst); |
| 1313 } else { |
| 1314 ASSERT(src.rn_.is(no_reg)); |
| 1315 add(rtmp, src.rm_, Operand(src.offset_)); |
| 1316 fmov_indRs_(rtmp, dst); |
| 1317 } |
| 1318 } |
| 1319 } |
| 1320 |
| 1321 |
// Loads a double from 'src' as two single loads: high half from src,
// low half from src + 4.
// NOTE(review): this word order implies a specific double layout in
// memory -- confirm against the target ABI/endianness.
void Assembler::dldr(DwVfpRegister dst, const MemOperand& src, Register rtmp) {
  if (src.rn_.is_valid()) {
    UNIMPLEMENTED();  // register + register addressing not supported yet
  } else {
    fldr(dst.high(), src, rtmp);
    fldr(dst.low(), MemOperand(src.rm_, src.offset_ + 4), rtmp);
  }
}
| 1330 |
| 1331 |
| 1332 void Assembler::fstr(SwVfpRegister src, const MemOperand& dst, Register rtmp) { |
| 1333 if (dst.rn_.is_valid()) { |
| 1334 add(rtmp, dst.rm_, dst.rn_); |
| 1335 fmov_indRd_(src, rtmp); |
| 1336 } else { |
| 1337 if (dst.offset_ == 0) { |
| 1338 fmov_indRd_(src, dst.rm_); |
| 1339 } else { |
| 1340 ASSERT(dst.rn_.is(no_reg)); |
| 1341 add(rtmp, dst.rm_, Operand(dst.offset_)); |
| 1342 fmov_indRd_(src, rtmp); |
| 1343 } |
| 1344 } |
| 1345 } |
| 1346 |
| 1347 |
// Stores a double to 'dst' as two single stores: high half at dst,
// low half at dst + 4 (mirror of dldr()'s word order).
void Assembler::dstr(DwVfpRegister src, const MemOperand& dst, Register rtmp) {
  if (dst.rn_.is_valid()) {
    UNIMPLEMENTED();  // register + register addressing not supported yet
  } else {
    fstr(src.high(), dst, rtmp);
    fstr(src.low(), MemOperand(dst.rm_, dst.offset_ + 4), rtmp);
  }
}
| 1356 |
| 1357 |
// Converts the 32-bit immediate 'imm' (interpreted as a signed integer)
// to a double in Dd; rtmp is clobbered to stage the immediate.
void Assembler::dfloat(DwVfpRegister Dd, const Operand &imm, Register rtmp) {
  mov(rtmp, imm);
  dfloat(Dd, rtmp);
}
| 1362 |
| 1363 |
// Converts the signed 32-bit integer in Rs to a double in Dd,
// going through the FPUL staging register.
void Assembler::dfloat(DwVfpRegister Dd, Register Rs) {
  lds_FPUL_(Rs);
  float_FPUL_double_(Dd);
}
| 1368 |
| 1369 |
// Converts the *unsigned* 32-bit integer in Rs to a double in Dd.
// Only a signed int -> double conversion is available, so values with the
// sign bit set need a +2^32 correction. drtmp and rtmp are clobbered.
void Assembler::dufloat(DwVfpRegister Dd, Register Rs, DwVfpRegister drtmp,
                        Register rtmp) {
  Label too_large, end;

  // Test the sign bit to see if the conversion from unsigned to signed is safe
  tst(Rs, Operand(0x80000000), rtmp);
  bf(&too_large);

  // The unsigned integer is small enough to be used as a signed one
  lds_FPUL_(Rs);
  float_FPUL_double_(Dd);
  b(&end);

  // Sign bit set: the signed conversion yields value - 2^32, so add 2^32
  // back. 2^32 is built exactly as 2 * (0x7fffffff + 1) in drtmp.
  bind(&too_large);
  dfloat(drtmp, Operand(0x7fffffff), rtmp);
  dfloat(Dd, Operand(1), rtmp);
  fadd(drtmp, Dd);     // drtmp = 2^31
  fadd(drtmp, drtmp);  // drtmp = 2^32
  dfloat(Dd, Rs);      // Dd = (double)(signed)Rs, negative here
  fadd(Dd, drtmp);     // Dd += 2^32 -> correct unsigned value

  bind(&end);
}
| 1394 |
| 1395 |
// Truncates the double in Ds to a signed 32-bit integer in Rd (via FPUL).
// If 'fpscr' is a real register, the FPSCR value is copied there as well
// so the caller can inspect the conversion's status flags.
void Assembler::idouble(Register Rd, DwVfpRegister Ds, Register fpscr) {
  ftrc_double_FPUL_(Ds);
  if (!fpscr.is(no_reg))
    sts_FPSCR_(fpscr);
  sts_FPUL_(Rd);
}
| 1402 |
| 1403 |
// Moves single-precision register Fs into Rd via FPUL.
// NOTE(review): flds transfers the value without a truncating conversion
// (no ftrc here, unlike idouble()) -- confirm callers expect the raw
// bit pattern rather than an integer conversion.
void Assembler::isingle(Register Rd, SwVfpRegister Fs) {
  flds_FPUL_(Fs);
  sts_FPUL_(Rd);
}
| 1408 |
| 1409 |
| 1410 void Assembler::mov(const MemOperand& dst, Register Rd, Register rtmp) { |
| 1411 ASSERT(dst.mode_ == Offset); |
| 1412 if (dst.rn_.is_valid()) { |
| 1413 add(rtmp, dst.rm_, dst.rn_); |
| 1414 movl_indRd_(Rd, rtmp); |
| 1415 } else { |
| 1416 if (dst.offset_ == 0) { |
| 1417 movl_indRd_(Rd, dst.rm_); |
| 1418 } else { |
| 1419 if (FITS_SH4_movl_dispRd(dst.offset_)) { |
| 1420 movl_dispRd_(Rd, dst.offset_, dst.rm_); |
| 1421 } else { |
| 1422 ASSERT(!Rd.is(rtmp)); |
| 1423 add(rtmp, dst.rm_, Operand(dst.offset_)); |
| 1424 movl_indRd_(Rd, rtmp); |
| 1425 } |
| 1426 } |
| 1427 } |
| 1428 } |
| 1429 |
| 1430 |
| 1431 void Assembler::movb(const MemOperand& dst, Register Rd, Register rtmp) { |
| 1432 ASSERT(dst.mode_ == Offset); |
| 1433 if (dst.rn_.is_valid()) { |
| 1434 add(rtmp, dst.rm_, dst.rn_); |
| 1435 movb_indRd_(Rd, rtmp); |
| 1436 } else { |
| 1437 if (dst.offset_ == 0) { |
| 1438 movb_indRd_(Rd, dst.rm_); |
| 1439 } else { |
| 1440 ASSERT(!Rd.is(rtmp)); |
| 1441 add(rtmp, dst.rm_, Operand(dst.offset_)); |
| 1442 movb_indRd_(Rd, rtmp); |
| 1443 } |
| 1444 } |
| 1445 } |
| 1446 |
| 1447 |
| 1448 void Assembler::movw(const MemOperand& dst, Register Rd, Register rtmp) { |
| 1449 ASSERT(dst.mode_ == Offset); |
| 1450 if (dst.rn_.is_valid()) { |
| 1451 add(rtmp, dst.rm_, dst.rn_); |
| 1452 movw_indRd_(Rd, rtmp); |
| 1453 } else { |
| 1454 if (dst.offset_ == 0) { |
| 1455 movw_indRd_(Rd, dst.rm_); |
| 1456 } else { |
| 1457 ASSERT(!Rd.is(rtmp)); |
| 1458 add(rtmp, dst.rm_, Operand(dst.offset_)); |
| 1459 movw_indRd_(Rd, rtmp); |
| 1460 } |
| 1461 } |
| 1462 } |
| 1463 |
| 1464 |
// Rd = Rs * Rt, keeping only the low 32 bits of the product.
void Assembler::mul(Register Rd, Register Rs, Register Rt) {
  mull_(Rs, Rt);   // result lands in the MACL system register
  sts_MACL_(Rd);
}
| 1469 |
| 1470 |
// Signed 32 x 32 -> 64-bit multiply:
// dstL receives the low word (MACL), dstH the high word (MACH).
void Assembler::dmuls(Register dstL, Register dstH, Register src1,
                      Register src2) {
  dmulsl_(src1, src2);
  sts_MACL_(dstL);
  sts_MACH_(dstH);
}
| 1477 |
| 1478 |
| 1479 void Assembler::pop(Register dst) { |
| 1480 if (dst.is(pr)) |
| 1481 ldsl_incRd_PR_(sp); |
| 1482 else |
| 1483 movl_incRs_(sp, dst); |
| 1484 } |
| 1485 |
| 1486 |
// Pops a double-precision register as two single-precision moves
// (the double aliases the single registers code and code+1).
void Assembler::pop(DwVfpRegister dst) {
  fmov_incRs_(sp, SwVfpRegister::from_code(dst.code()));
  fmov_incRs_(sp, SwVfpRegister::from_code(dst.code()+1));
}
| 1491 |
| 1492 |
| 1493 void Assembler::push(Register src) { |
| 1494 if (src.is(pr)) |
| 1495 stsl_PR_decRd_(sp); |
| 1496 else |
| 1497 movl_decRd_(src, sp); |
| 1498 } |
| 1499 |
| 1500 |
// Pushes a double-precision register as two single-precision moves
// (the double aliases the single registers code and code+1).
void Assembler::push(DwVfpRegister src) {
  fmov_decRd_(SwVfpRegister::from_code(src.code()), sp);
  fmov_decRd_(SwVfpRegister::from_code(src.code()+1), sp);
}
| 1505 |
| 1506 |
// Pushes an immediate operand, materializing it in rtmp first.
void Assembler::push(const Operand& op, Register rtmp) {
  mov(rtmp, op);
  push(rtmp);
}
| 1511 |
| 1512 |
| 1513 void Assembler::pushm(RegList dst, bool doubles) { |
| 1514 if (!doubles) { |
| 1515 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) { |
| 1516 if ((dst & (1 << i)) != 0) { |
| 1517 push(Register::from_code(i)); |
| 1518 } |
| 1519 } |
| 1520 } else { |
| 1521 for (int16_t i = DwVfpRegister::kNumRegisters - 1; i >= 0; i -= 2) { |
| 1522 if ((dst & (1 << i)) != 0) { |
| 1523 push(DwVfpRegister::from_code(i)); |
| 1524 } |
| 1525 } |
| 1526 } |
| 1527 } |
| 1528 |
| 1529 |
| 1530 void Assembler::popm(RegList src, bool doubles) { |
| 1531 if (!doubles) { |
| 1532 for (uint16_t i = 0; i < Register::kNumRegisters; i++) { |
| 1533 if ((src & (1 << i)) != 0) { |
| 1534 pop(Register::from_code(i)); |
| 1535 } |
| 1536 } |
| 1537 } else { |
| 1538 for (uint16_t i = 0; i < Register::kNumRegisters; i += 2) { |
| 1539 if ((src & (1 << i)) != 0) { |
| 1540 pop(DwVfpRegister::from_code(i)); |
| 1541 } |
| 1542 } |
| 1543 } |
| 1544 } |
| 1545 |
| 1546 |
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-arm.h .
void Assembler::stop(const char* msg) {
  // TODO(stm): handle simulator based stuff (ref to ARM code)
  // 'msg' is currently unused; we simply trap.
  bkpt();
}
| 1554 |
| 1555 |
// Emits a breakpoint/trap for debugging.
void Assembler::bkpt() {
  // Use a privileged instruction.
  // Will generate an illegal instruction exception code: 0x180.
  ldtlb_();
}
| 1561 |
| 1562 |
#ifdef SH4_DUMP_BUFFER
// Number of code buffers dumped so far; used to name the dump files.
static int buffer_count = 0;
#endif
| 1566 |
| 1567 Assembler::~Assembler() { |
| 1568 #ifdef SH4_DUMP_BUFFER |
| 1569 // dump the buffer on the disk |
| 1570 printf("dumping a buffer %i\n", buffer_count++); |
| 1571 char *psz_filename; |
| 1572 asprintf(&psz_filename, "buffer-%d.st40", buffer_count); |
| 1573 FILE *dump = fopen(psz_filename, "w"); |
| 1574 if (dump) { |
| 1575 fwrite(buffer_, buffer_size_, 1, dump); |
| 1576 fclose(dump); |
| 1577 } |
| 1578 free(psz_filename); |
| 1579 #endif |
| 1580 |
| 1581 if (own_buffer_) { |
| 1582 if (isolate()->assembler_spare_buffer() == NULL && |
| 1583 buffer_size_ == kMinimalBufferSize) { |
| 1584 isolate()->set_assembler_spare_buffer(buffer_); |
| 1585 } else { |
| 1586 DeleteArray(buffer_); |
| 1587 } |
| 1588 } |
| 1589 } |
| 1590 |
| 1591 |
// No relocation modes require processing by the GC/serializer apply pass
// for this port.
const int RelocInfo::kApplyMask = 0;

bool RelocInfo::IsCodedSpecially() {
  // Not implemented for the SH4 port yet; the return value below is only
  // reached if UNIMPLEMENTED() does not abort.
  UNIMPLEMENTED();
  return false;
}
| 1598 |
| 1599 |
| 1600 } } // namespace v8::internal |
| 1601 |
| 1602 #endif // V8_TARGET_ARCH_SH4 |
OLD | NEW |