OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 12 matching lines...) |
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | 23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | 24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | 25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | 26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | 27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
30 | 30 |
31 // The original source code covered by the above license has been | 31 // The original source code covered by the above license has been |
32 // modified significantly by Google Inc. | 32 // modified significantly by Google Inc. |
33 // Copyright 2010 the V8 project authors. All rights reserved. | 33 // Copyright 2011 the V8 project authors. All rights reserved. |
34 | 34 |
35 | 35 |
36 #include "v8.h" | 36 #include "v8.h" |
37 | 37 |
38 #if defined(V8_TARGET_ARCH_MIPS) | 38 #if defined(V8_TARGET_ARCH_MIPS) |
39 | 39 |
40 #include "mips/assembler-mips-inl.h" | 40 #include "mips/assembler-mips-inl.h" |
41 #include "serialize.h" | 41 #include "serialize.h" |
42 | 42 |
43 namespace v8 { | 43 namespace v8 { |
44 namespace internal { | 44 namespace internal { |
45 | 45 |
46 CpuFeatures::CpuFeatures() | 46 #ifdef DEBUG |
47 : supported_(0), | 47 bool CpuFeatures::initialized_ = false; |
48 enabled_(0), | 48 #endif |
49 found_by_runtime_probing_(0) { | 49 unsigned CpuFeatures::supported_ = 0; |
50 } | 50 unsigned CpuFeatures::found_by_runtime_probing_ = 0; |
51 | 51 |
52 void CpuFeatures::Probe(bool portable) { | 52 void CpuFeatures::Probe() { |
| 53 ASSERT(!initialized_); |
| 54 #ifdef DEBUG |
| 55 initialized_ = true; |
| 56 #endif |
53 // If the compiler is allowed to use the FPU, then we can use it in our | 57 // If the compiler is allowed to use the FPU, then we can use it in our |
54 // code generation. | 58 // code generation. |
55 #if !defined(__mips__) | 59 #if !defined(__mips__) |
56 // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled. | 60 // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled. |
57 if (FLAG_enable_fpu) { | 61 if (FLAG_enable_fpu) { |
58 supported_ |= 1u << FPU; | 62 supported_ |= 1u << FPU; |
59 } | 63 } |
60 #else | 64 #else |
61 if (portable && Serializer::enabled()) { | 65 if (Serializer::enabled()) { |
62 supported_ |= OS::CpuFeaturesImpliedByPlatform(); | 66 supported_ |= OS::CpuFeaturesImpliedByPlatform(); |
63 return; // No features if we might serialize. | 67 return; // No features if we might serialize. |
64 } | 68 } |
65 | 69 |
66 if (OS::MipsCpuHasFeature(FPU)) { | 70 if (OS::MipsCpuHasFeature(FPU)) { |
67 // This implementation also sets the FPU flags if | 71 // This implementation also sets the FPU flags if |
68 // runtime detection of FPU returns true. | 72 // runtime detection of FPU returns true. |
69 supported_ |= 1u << FPU; | 73 supported_ |= 1u << FPU; |
70 found_by_runtime_probing_ |= 1u << FPU; | 74 found_by_runtime_probing_ |= 1u << FPU; |
71 } | 75 } |
72 | |
73 if (!portable) found_by_runtime_probing_ = 0; | |
74 #endif | 76 #endif |
75 } | 77 } |
76 | 78 |
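For readers following the CL: the NEW side turns CpuFeatures into process-wide static state guarded by a one-shot Probe(). A minimal standalone sketch of that probe-once pattern (the class name and feature-bit value here are illustrative, not the real V8 declarations, which live in assembler-mips.h):

    #include <cassert>

    class CpuFeaturesSketch {
     public:
      static const unsigned kFPU = 1;  // Hypothetical bit index for FPU.
      static void Probe(bool has_fpu) {
        assert(!initialized_);  // Mirrors the one-shot ASSERT above.
        initialized_ = true;
        if (has_fpu) supported_ |= 1u << kFPU;
      }
      static bool IsSupported(unsigned bit) {
        return (supported_ & (1u << bit)) != 0;
      }
     private:
      static bool initialized_;
      static unsigned supported_;
    };

    bool CpuFeaturesSketch::initialized_ = false;
    unsigned CpuFeaturesSketch::supported_ = 0;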
77 | 79 |
78 int ToNumber(Register reg) { | 80 int ToNumber(Register reg) { |
79 ASSERT(reg.is_valid()); | 81 ASSERT(reg.is_valid()); |
80 const int kNumbers[] = { | 82 const int kNumbers[] = { |
81 0, // zero_reg | 83 0, // zero_reg |
82 1, // at | 84 1, // at |
83 2, // v0 | 85 2, // v0 |
(...skipping 144 matching lines...) |
228 const Instr kRtMask = kRtFieldMask; | 230 const Instr kRtMask = kRtFieldMask; |
229 const Instr kLwSwInstrTypeMask = 0xffe00000; | 231 const Instr kLwSwInstrTypeMask = 0xffe00000; |
230 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; | 232 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; |
231 const Instr kLwSwOffsetMask = kImm16Mask; | 233 const Instr kLwSwOffsetMask = kImm16Mask; |
232 | 234 |
233 | 235 |
234 // Spare buffer. | 236 // Spare buffer. |
235 static const int kMinimalBufferSize = 4 * KB; | 237 static const int kMinimalBufferSize = 4 * KB; |
236 | 238 |
237 | 239 |
238 Assembler::Assembler(void* buffer, int buffer_size) | 240 Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) |
239 : AssemblerBase(Isolate::Current()), | 241 : AssemblerBase(arg_isolate), |
240 positions_recorder_(this), | 242 positions_recorder_(this), |
241 allow_peephole_optimization_(false) { | 243 allow_peephole_optimization_(false), |
242 // BUG(3245989): disable peephole optimization if crankshaft is enabled. | 244 emit_debug_code_(FLAG_debug_code) { |
243 allow_peephole_optimization_ = FLAG_peephole_optimization; | 245 allow_peephole_optimization_ = FLAG_peephole_optimization; |
244 if (buffer == NULL) { | 246 if (buffer == NULL) { |
245 // Do our own buffer management. | 247 // Do our own buffer management. |
246 if (buffer_size <= kMinimalBufferSize) { | 248 if (buffer_size <= kMinimalBufferSize) { |
247 buffer_size = kMinimalBufferSize; | 249 buffer_size = kMinimalBufferSize; |
248 | 250 |
249 if (isolate()->assembler_spare_buffer() != NULL) { | 251 if (isolate()->assembler_spare_buffer() != NULL) { |
250 buffer = isolate()->assembler_spare_buffer(); | 252 buffer = isolate()->assembler_spare_buffer(); |
251 isolate()->set_assembler_spare_buffer(NULL); | 253 isolate()->set_assembler_spare_buffer(NULL); |
252 } | 254 } |
(...skipping 16 matching lines...) |
269 | 271 |
270 // Setup buffer pointers. | 272 // Setup buffer pointers. |
271 ASSERT(buffer_ != NULL); | 273 ASSERT(buffer_ != NULL); |
272 pc_ = buffer_; | 274 pc_ = buffer_; |
273 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); | 275 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); |
274 | 276 |
275 last_trampoline_pool_end_ = 0; | 277 last_trampoline_pool_end_ = 0; |
276 no_trampoline_pool_before_ = 0; | 278 no_trampoline_pool_before_ = 0; |
277 trampoline_pool_blocked_nesting_ = 0; | 279 trampoline_pool_blocked_nesting_ = 0; |
278 next_buffer_check_ = kMaxBranchOffset - kTrampolineSize; | 280 next_buffer_check_ = kMaxBranchOffset - kTrampolineSize; |
| 281 internal_trampoline_exception_ = false; |
| 282 |
| 283 ast_id_for_reloc_info_ = kNoASTId; |
279 } | 284 } |
280 | 285 |
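The constructor and destructor around this point implement a small spare-buffer cache: one minimal-size buffer is parked on the isolate and recycled between assemblers instead of being freed. A standalone sketch of that recycling, with the isolate's slot replaced by a hypothetical static pointer:

    #include <cstddef>

    static const int kMinimalBufferSize = 4 * 1024;
    static char* spare_buffer = NULL;  // Stand-in for the isolate's slot.

    char* AcquireBuffer(int* size) {
      *size = kMinimalBufferSize;
      if (spare_buffer != NULL) {  // Reuse the cached buffer if present.
        char* b = spare_buffer;
        spare_buffer = NULL;
        return b;
      }
      return new char[kMinimalBufferSize];
    }

    void ReleaseBuffer(char* buffer, int size) {
      if (spare_buffer == NULL && size == kMinimalBufferSize) {
        spare_buffer = buffer;  // Cache for the next assembler.
      } else {
        delete[] buffer;
      }
    }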
281 | 286 |
282 Assembler::~Assembler() { | 287 Assembler::~Assembler() { |
283 if (own_buffer_) { | 288 if (own_buffer_) { |
284 if (isolate()->assembler_spare_buffer() == NULL && | 289 if (isolate()->assembler_spare_buffer() == NULL && |
285 buffer_size_ == kMinimalBufferSize) { | 290 buffer_size_ == kMinimalBufferSize) { |
286 isolate()->set_assembler_spare_buffer(buffer_); | 291 isolate()->set_assembler_spare_buffer(buffer_); |
287 } else { | 292 } else { |
288 DeleteArray(buffer_); | 293 DeleteArray(buffer_); |
289 } | 294 } |
290 } | 295 } |
291 } | 296 } |
292 | 297 |
293 | 298 |
294 void Assembler::GetCode(CodeDesc* desc) { | 299 void Assembler::GetCode(CodeDesc* desc) { |
295 ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. | 300 ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. |
(...skipping 13 matching lines...) |
309 } | 314 } |
310 | 315 |
311 | 316 |
312 void Assembler::CodeTargetAlign() { | 317 void Assembler::CodeTargetAlign() { |
313 // No advantage to aligning branch/call targets to more than | 318 // No advantage to aligning branch/call targets to more than |
314 // a single instruction, as far as I am aware. | 319 // a single instruction, as far as I am aware. |
315 Align(4); | 320 Align(4); |
316 } | 321 } |
317 | 322 |
318 | 323 |
319 Register Assembler::GetRt(Instr instr) { | 324 Register Assembler::GetRtReg(Instr instr) { |
320 Register rt; | 325 Register rt; |
321 rt.code_ = (instr & kRtMask) >> kRtShift; | 326 rt.code_ = (instr & kRtFieldMask) >> kRtShift; |
322 return rt; | 327 return rt; |
323 } | 328 } |
324 | 329 |
325 | 330 |
| 331 Register Assembler::GetRsReg(Instr instr) { |
| 332 Register rs; |
| 333 rs.code_ = (instr & kRsFieldMask) >> kRsShift; |
| 334 return rs; |
| 335 } |
| 336 |
| 337 |
| 338 Register Assembler::GetRdReg(Instr instr) { |
| 339 Register rd; |
| 340 rd.code_ = (instr & kRdFieldMask) >> kRdShift; |
| 341 return rd; |
| 342 } |
| 343 |
| 344 |
| 345 uint32_t Assembler::GetRt(Instr instr) { |
| 346 return (instr & kRtFieldMask) >> kRtShift; |
| 347 } |
| 348 |
| 349 |
| 350 uint32_t Assembler::GetRtField(Instr instr) { |
| 351 return instr & kRtFieldMask; |
| 352 } |
| 353 |
| 354 |
| 355 uint32_t Assembler::GetRs(Instr instr) { |
| 356 return (instr & kRsFieldMask) >> kRsShift; |
| 357 } |
| 358 |
| 359 |
| 360 uint32_t Assembler::GetRsField(Instr instr) { |
| 361 return instr & kRsFieldMask; |
| 362 } |
| 363 |
| 364 |
| 365 uint32_t Assembler::GetRd(Instr instr) { |
| 366 return (instr & kRdFieldMask) >> kRdShift; |
| 367 } |
| 368 |
| 369 |
| 370 uint32_t Assembler::GetRdField(Instr instr) { |
| 371 return instr & kRdFieldMask; |
| 372 } |
| 373 |
| 374 |
| 375 uint32_t Assembler::GetSa(Instr instr) { |
| 376 return (instr & kSaFieldMask) >> kSaShift; |
| 377 } |
| 378 |
| 379 |
| 380 uint32_t Assembler::GetSaField(Instr instr) { |
| 381 return instr & kSaFieldMask; |
| 382 } |
| 383 |
| 384 |
| 385 uint32_t Assembler::GetOpcodeField(Instr instr) { |
| 386 return instr & kOpcodeMask; |
| 387 } |
| 388 |
| 389 |
| 390 uint32_t Assembler::GetImmediate16(Instr instr) { |
| 391 return instr & kImm16Mask; |
| 392 } |
| 393 |
| 394 |
| 395 uint32_t Assembler::GetLabelConst(Instr instr) { |
| 396 return instr & ~kImm16Mask; |
| 397 } |
| 398 |
| 399 |
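The new Get*/Get*Field accessors added above are thin wrappers over mask-and-shift field extraction on the 32-bit instruction word. A self-contained worked example using the standard MIPS R-type layout (opcode 6, rs 5, rt 5, rd 5, sa 5, funct 6 bits); the shift constants are written out here rather than taken from constants-mips.h:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kRsShift = 21, kRtShift = 16, kRdShift = 11, kSaShift = 6;
      // sll $t0, $t1, 4 -> opcode=0, rs=0, rt=$t1(9), rd=$t0(8), sa=4, funct=0.
      uint32_t instr = (9u << kRtShift) | (8u << kRdShift) | (4u << kSaShift);
      printf("rs=%u rt=%u rd=%u sa=%u\n",
             (instr >> kRsShift) & 0x1F,   // GetRs -> 0
             (instr >> kRtShift) & 0x1F,   // GetRt -> 9
             (instr >> kRdShift) & 0x1F,   // GetRd -> 8
             (instr >> kSaShift) & 0x1F);  // GetSa -> 4
      return 0;
    }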
326 bool Assembler::IsPop(Instr instr) { | 400 bool Assembler::IsPop(Instr instr) { |
327 return (instr & ~kRtMask) == kPopRegPattern; | 401 return (instr & ~kRtMask) == kPopRegPattern; |
328 } | 402 } |
329 | 403 |
330 | 404 |
331 bool Assembler::IsPush(Instr instr) { | 405 bool Assembler::IsPush(Instr instr) { |
332 return (instr & ~kRtMask) == kPushRegPattern; | 406 return (instr & ~kRtMask) == kPushRegPattern; |
333 } | 407 } |
334 | 408 |
335 | 409 |
(...skipping 31 matching lines...) |
367 | 441 |
368 // The link chain is terminated by an offset of -1 in the instruction, | 442 // The link chain is terminated by an offset of -1 in the instruction, |
369 // which is an otherwise illegal value (a branch to -1 is an infinite loop). | 443 // which is an otherwise illegal value (a branch to -1 is an infinite loop). |
370 // The instruction's 16-bit offset field addresses 32-bit words, but in | 444 // The instruction's 16-bit offset field addresses 32-bit words, but in |
371 // code it is converted to an 18-bit value addressing bytes, hence the -4. | 445 // code it is converted to an 18-bit value addressing bytes, hence the -4. |
372 | 446 |
373 const int kEndOfChain = -4; | 447 const int kEndOfChain = -4; |
374 | 448 |
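To see why kEndOfChain is -4: the terminating offset field holds -1 (0xFFFF), and target_at() below widens the 16-bit field to an 18-bit byte offset by shifting left 16 then arithmetic-shifting right 14 (sign-extend, then scale by the 4-byte instruction size). A tiny check of that arithmetic, reusing the same expression target_at() uses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t kImm16Mask = 0xFFFF;
      int32_t instr = -1 & kImm16Mask;  // Terminator: offset field = 0xFFFF.
      // Same shift pair as in target_at(): sign-extend, then multiply by 4.
      int32_t imm18 = ((instr & kImm16Mask) << 16) >> 14;
      printf("imm18 = %d\n", imm18);  // Prints -4, i.e. kEndOfChain.
      return 0;
    }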
375 | 449 |
376 bool Assembler::IsBranch(Instr instr) { | 450 bool Assembler::IsBranch(Instr instr) { |
377 uint32_t opcode = ((instr & kOpcodeMask)); | 451 uint32_t opcode = GetOpcodeField(instr); |
378 uint32_t rt_field = ((instr & kRtFieldMask)); | 452 uint32_t rt_field = GetRtField(instr); |
379 uint32_t rs_field = ((instr & kRsFieldMask)); | 453 uint32_t rs_field = GetRsField(instr); |
380 uint32_t label_constant = (instr & ~kImm16Mask); | 454 uint32_t label_constant = GetLabelConst(instr); |
381 // Checks if the instruction is a branch. | 455 // Checks if the instruction is a branch. |
382 return opcode == BEQ || | 456 return opcode == BEQ || |
383 opcode == BNE || | 457 opcode == BNE || |
384 opcode == BLEZ || | 458 opcode == BLEZ || |
385 opcode == BGTZ || | 459 opcode == BGTZ || |
386 opcode == BEQL || | 460 opcode == BEQL || |
387 opcode == BNEL || | 461 opcode == BNEL || |
388 opcode == BLEZL || | 462 opcode == BLEZL || |
389 opcode == BGTZL|| | 463 opcode == BGTZL || |
390 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || | 464 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || |
391 rt_field == BLTZAL || rt_field == BGEZAL)) || | 465 rt_field == BLTZAL || rt_field == BGEZAL)) || |
392 (opcode == COP1 && rs_field == BC1) || // Coprocessor branch. | 466 (opcode == COP1 && rs_field == BC1) || // Coprocessor branch. |
393 label_constant == 0; // Emitted label const in reg-exp engine. | 467 label_constant == 0; // Emitted label const in reg-exp engine. |
394 } | 468 } |
395 | 469 |
396 | 470 |
| 471 bool Assembler::IsBeq(Instr instr) { |
| 472 return GetOpcodeField(instr) == BEQ; |
| 473 } |
| 474 |
| 475 |
| 476 bool Assembler::IsBne(Instr instr) { |
| 477 return GetOpcodeField(instr) == BNE; |
| 478 } |
| 479 |
| 480 |
397 bool Assembler::IsNop(Instr instr, unsigned int type) { | 481 bool Assembler::IsNop(Instr instr, unsigned int type) { |
398 // See Assembler::nop(type). | 482 // See Assembler::nop(type). |
399 ASSERT(type < 32); | 483 ASSERT(type < 32); |
400 uint32_t opcode = ((instr & kOpcodeMask)); | 484 uint32_t opcode = GetOpcodeField(instr); |
401 uint32_t rt = ((instr & kRtFieldMask) >> kRtShift); | 485 uint32_t rt = GetRt(instr); |
402 uint32_t rs = ((instr & kRsFieldMask) >> kRsShift); | 486 uint32_t rs = GetRs(instr); |
403 uint32_t sa = ((instr & kSaFieldMask) >> kSaShift); | 487 uint32_t sa = GetSa(instr); |
404 | 488 |
405 // nop(type) == sll(zero_reg, zero_reg, type); | 489 // nop(type) == sll(zero_reg, zero_reg, type); |
406 // Technically all these values will be 0, but | 490 // Technically all these values will be 0, but |
407 // this makes more sense to the reader. | 491 // this makes more sense to the reader. |
408 | 492 |
409 bool ret = (opcode == SLL && | 493 bool ret = (opcode == SLL && |
410 rt == static_cast<uint32_t>(ToNumber(zero_reg)) && | 494 rt == static_cast<uint32_t>(ToNumber(zero_reg)) && |
411 rs == static_cast<uint32_t>(ToNumber(zero_reg)) && | 495 rs == static_cast<uint32_t>(ToNumber(zero_reg)) && |
412 sa == type); | 496 sa == type); |
413 | 497 |
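Since zero_reg is register 0 and SLL's opcode and funct fields are both 0, a typed nop is literally just the type value placed in the sa field; everything IsNop() checks collapses to one shifted constant. A quick illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kSaShift = 6;
      const uint32_t kSaFieldMask = 0x1Fu << kSaShift;
      for (uint32_t type = 0; type < 4; ++type) {
        uint32_t nop_instr = type << kSaShift;  // sll zero_reg, zero_reg, type.
        printf("nop(%u) = 0x%08x, sa = %u\n", type, nop_instr,
               (nop_instr & kSaFieldMask) >> kSaShift);
      }
      return 0;
    }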
(...skipping 44 matching lines...) |
458 return ((instr & kOpcodeMask) == ADDIU); | 542 return ((instr & kOpcodeMask) == ADDIU); |
459 } | 543 } |
460 | 544 |
461 | 545 |
462 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { | 546 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { |
463 ASSERT(IsAddImmediate(instr)); | 547 ASSERT(IsAddImmediate(instr)); |
464 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); | 548 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); |
465 } | 549 } |
466 | 550 |
467 | 551 |
| 552 bool Assembler::IsAndImmediate(Instr instr) { |
| 553 return GetOpcodeField(instr) == ANDI; |
| 554 } |
| 555 |
| 556 |
468 int Assembler::target_at(int32_t pos) { | 557 int Assembler::target_at(int32_t pos) { |
469 Instr instr = instr_at(pos); | 558 Instr instr = instr_at(pos); |
470 if ((instr & ~kImm16Mask) == 0) { | 559 if ((instr & ~kImm16Mask) == 0) { |
471 // Emitted label constant, not part of a branch. | 560 // Emitted label constant, not part of a branch. |
472 if (instr == 0) { | 561 if (instr == 0) { |
473 return kEndOfChain; | 562 return kEndOfChain; |
474 } else { | 563 } else { |
475 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | 564 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; |
476 return (imm18 + pos); | 565 return (imm18 + pos); |
477 } | 566 } |
(...skipping 61 matching lines...) |
539 | 628 |
540 void Assembler::bind_to(Label* L, int pos) { | 629 void Assembler::bind_to(Label* L, int pos) { |
541 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. | 630 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. |
542 while (L->is_linked()) { | 631 while (L->is_linked()) { |
543 int32_t fixup_pos = L->pos(); | 632 int32_t fixup_pos = L->pos(); |
544 int32_t dist = pos - fixup_pos; | 633 int32_t dist = pos - fixup_pos; |
545 next(L); // Call next before overwriting link with target at fixup_pos. | 634 next(L); // Call next before overwriting link with target at fixup_pos. |
546 if (dist > kMaxBranchOffset) { | 635 if (dist > kMaxBranchOffset) { |
547 do { | 636 do { |
548 int32_t trampoline_pos = get_trampoline_entry(fixup_pos); | 637 int32_t trampoline_pos = get_trampoline_entry(fixup_pos); |
| 638 if (kInvalidSlotPos == trampoline_pos) { |
| 639 // Internal error. |
| 640 return; |
| 641 } |
549 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); | 642 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); |
550 target_at_put(fixup_pos, trampoline_pos); | 643 target_at_put(fixup_pos, trampoline_pos); |
551 fixup_pos = trampoline_pos; | 644 fixup_pos = trampoline_pos; |
552 dist = pos - fixup_pos; | 645 dist = pos - fixup_pos; |
553 } while (dist > kMaxBranchOffset); | 646 } while (dist > kMaxBranchOffset); |
554 } else if (dist < -kMaxBranchOffset) { | 647 } else if (dist < -kMaxBranchOffset) { |
555 do { | 648 do { |
556 int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false); | 649 int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false); |
| 650 if (kInvalidSlotPos == trampoline_pos) { |
| 651 // Internal error. |
| 652 return; |
| 653 } |
557 ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset); | 654 ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset); |
558 target_at_put(fixup_pos, trampoline_pos); | 655 target_at_put(fixup_pos, trampoline_pos); |
559 fixup_pos = trampoline_pos; | 656 fixup_pos = trampoline_pos; |
560 dist = pos - fixup_pos; | 657 dist = pos - fixup_pos; |
561 } while (dist < -kMaxBranchOffset); | 658 } while (dist < -kMaxBranchOffset); |
562 } | 659 } |
563 target_at_put(fixup_pos, pos); | 660 target_at_put(fixup_pos, pos); |
564 } | 661 } |
565 L->bind_to(pos); | 662 L->bind_to(pos); |
566 | 663 |
(...skipping 78 matching lines...) |
645 } | 742 } |
646 | 743 |
647 | 744 |
648 void Assembler::GenInstrRegister(Opcode opcode, | 745 void Assembler::GenInstrRegister(Opcode opcode, |
649 SecondaryField fmt, | 746 SecondaryField fmt, |
650 FPURegister ft, | 747 FPURegister ft, |
651 FPURegister fs, | 748 FPURegister fs, |
652 FPURegister fd, | 749 FPURegister fd, |
653 SecondaryField func) { | 750 SecondaryField func) { |
654 ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); | 751 ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); |
655 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); | 752 ASSERT(CpuFeatures::IsEnabled(FPU)); |
656 Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) | 753 Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) |
657 | (fd.code() << kFdShift) | func; | 754 | (fd.code() << kFdShift) | func; |
658 emit(instr); | 755 emit(instr); |
659 } | 756 } |
660 | 757 |
661 | 758 |
662 void Assembler::GenInstrRegister(Opcode opcode, | 759 void Assembler::GenInstrRegister(Opcode opcode, |
663 SecondaryField fmt, | 760 SecondaryField fmt, |
664 Register rt, | 761 Register rt, |
665 FPURegister fs, | 762 FPURegister fs, |
666 FPURegister fd, | 763 FPURegister fd, |
667 SecondaryField func) { | 764 SecondaryField func) { |
668 ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); | 765 ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); |
669 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); | 766 ASSERT(CpuFeatures::IsEnabled(FPU)); |
670 Instr instr = opcode | fmt | (rt.code() << kRtShift) | 767 Instr instr = opcode | fmt | (rt.code() << kRtShift) |
671 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; | 768 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; |
672 emit(instr); | 769 emit(instr); |
673 } | 770 } |
674 | 771 |
675 | 772 |
676 void Assembler::GenInstrRegister(Opcode opcode, | 773 void Assembler::GenInstrRegister(Opcode opcode, |
677 SecondaryField fmt, | 774 SecondaryField fmt, |
678 Register rt, | 775 Register rt, |
679 FPUControlRegister fs, | 776 FPUControlRegister fs, |
680 SecondaryField func) { | 777 SecondaryField func) { |
681 ASSERT(fs.is_valid() && rt.is_valid()); | 778 ASSERT(fs.is_valid() && rt.is_valid()); |
682 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); | 779 ASSERT(CpuFeatures::IsEnabled(FPU)); |
683 Instr instr = | 780 Instr instr = |
684 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; | 781 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; |
685 emit(instr); | 782 emit(instr); |
686 } | 783 } |
687 | 784 |
688 | 785 |
689 // Instructions with immediate value. | 786 // Instructions with immediate value. |
690 // Registers are in the order of the instruction encoding, from left to right. | 787 // Registers are in the order of the instruction encoding, from left to right. |
691 void Assembler::GenInstrImmediate(Opcode opcode, | 788 void Assembler::GenInstrImmediate(Opcode opcode, |
692 Register rs, | 789 Register rs, |
(...skipping 14 matching lines...) |
707 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); | 804 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); |
708 emit(instr); | 805 emit(instr); |
709 } | 806 } |
710 | 807 |
711 | 808 |
712 void Assembler::GenInstrImmediate(Opcode opcode, | 809 void Assembler::GenInstrImmediate(Opcode opcode, |
713 Register rs, | 810 Register rs, |
714 FPURegister ft, | 811 FPURegister ft, |
715 int32_t j) { | 812 int32_t j) { |
716 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); | 813 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); |
717 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); | 814 ASSERT(CpuFeatures::IsEnabled(FPU)); |
718 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | 815 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
719 | (j & kImm16Mask); | 816 | (j & kImm16Mask); |
720 emit(instr); | 817 emit(instr); |
721 } | 818 } |
722 | 819 |
723 | 820 |
724 // Registers are in the order of the instruction encoding, from left to right. | 821 // Registers are in the order of the instruction encoding, from left to right. |
725 void Assembler::GenInstrJump(Opcode opcode, | 822 void Assembler::GenInstrJump(Opcode opcode, |
726 uint32_t address) { | 823 uint32_t address) { |
727 BlockTrampolinePoolScope block_trampoline_pool(this); | 824 BlockTrampolinePoolScope block_trampoline_pool(this); |
(...skipping 25 matching lines...) |
753 } | 850 } |
754 } | 851 } |
755 } | 852 } |
756 return label_entry; | 853 return label_entry; |
757 } | 854 } |
758 | 855 |
759 | 856 |
760 // Returns the next free trampoline entry from the next trampoline pool. | 857 // Returns the next free trampoline entry from the next trampoline pool. |
761 int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) { | 858 int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) { |
762 int trampoline_count = trampolines_.length(); | 859 int trampoline_count = trampolines_.length(); |
763 int32_t trampoline_entry = 0; | 860 int32_t trampoline_entry = kInvalidSlotPos; |
764 ASSERT(trampoline_count > 0); | 861 ASSERT(trampoline_count > 0); |
765 | 862 |
766 if (next_pool) { | 863 if (!internal_trampoline_exception_) { |
767 for (int i = 0; i < trampoline_count; i++) { | 864 if (next_pool) { |
768 if (trampolines_[i].start() > pos) { | 865 for (int i = 0; i < trampoline_count; i++) { |
769 trampoline_entry = trampolines_[i].take_slot(); | 866 if (trampolines_[i].start() > pos) { |
770 break; | 867 trampoline_entry = trampolines_[i].take_slot(); |
| 868 break; |
| 869 } |
| 870 } |
| 871 } else { // Caller needs a trampoline entry from the previous pool. |
| 872 for (int i = trampoline_count-1; i >= 0; i--) { |
| 873 if (trampolines_[i].end() < pos) { |
| 874 trampoline_entry = trampolines_[i].take_slot(); |
| 875 break; |
| 876 } |
771 } | 877 } |
772 } | 878 } |
773 } else { // Caller needs a trampoline entry from the previous pool. | 879 if (kInvalidSlotPos == trampoline_entry) { |
774 for (int i = trampoline_count-1; i >= 0; i--) { | 880 internal_trampoline_exception_ = true; |
775 if (trampolines_[i].end() < pos) { | |
776 trampoline_entry = trampolines_[i].take_slot(); | |
777 break; | |
778 } | |
779 } | 881 } |
780 } | 882 } |
781 return trampoline_entry; | 883 return trampoline_entry; |
782 } | 884 } |
783 | 885 |
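The reworked get_trampoline_entry() replaces the old silent fallthrough (returning slot 0) with an explicit kInvalidSlotPos sentinel plus a latched internal_trampoline_exception_ flag that callers such as bind_to() and branch_offset() now check. A standalone sketch of that search-with-sentinel shape; the pool layout and field names are simplified stand-ins, and the real take_slot() also consumes the slot, which is elided here:

    #include <cstddef>
    #include <vector>

    struct Pool { int start, end, next_slot; };
    const int kInvalidSlotPos = -1;
    static bool internal_trampoline_exception = false;

    int GetTrampolineEntry(const std::vector<Pool>& pools, int pos,
                           bool next_pool) {
      int entry = kInvalidSlotPos;
      if (!internal_trampoline_exception) {
        if (next_pool) {  // First pool that starts after pos.
          for (size_t i = 0; i < pools.size(); ++i)
            if (pools[i].start > pos) { entry = pools[i].next_slot; break; }
        } else {          // Last pool that ends before pos.
          for (int i = static_cast<int>(pools.size()) - 1; i >= 0; --i)
            if (pools[i].end < pos) { entry = pools[i].next_slot; break; }
        }
        if (entry == kInvalidSlotPos) internal_trampoline_exception = true;
      }
      return entry;
    }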
784 | 886 |
785 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { | 887 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { |
786 int32_t target_pos; | 888 int32_t target_pos; |
787 int32_t pc_offset_v = pc_offset(); | 889 int32_t pc_offset_v = pc_offset(); |
788 | 890 |
789 if (L->is_bound()) { | 891 if (L->is_bound()) { |
790 target_pos = L->pos(); | 892 target_pos = L->pos(); |
791 int32_t dist = pc_offset_v - target_pos; | 893 int32_t dist = pc_offset_v - target_pos; |
792 if (dist > kMaxBranchOffset) { | 894 if (dist > kMaxBranchOffset) { |
793 do { | 895 do { |
794 int32_t trampoline_pos = get_trampoline_entry(target_pos); | 896 int32_t trampoline_pos = get_trampoline_entry(target_pos); |
| 897 if (kInvalidSlotPos == trampoline_pos) { |
| 898 // Internal error. |
| 899 return 0; |
| 900 } |
795 ASSERT((trampoline_pos - target_pos) > 0); | 901 ASSERT((trampoline_pos - target_pos) > 0); |
796 ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset); | 902 ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset); |
797 target_at_put(trampoline_pos, target_pos); | 903 target_at_put(trampoline_pos, target_pos); |
798 target_pos = trampoline_pos; | 904 target_pos = trampoline_pos; |
799 dist = pc_offset_v - target_pos; | 905 dist = pc_offset_v - target_pos; |
800 } while (dist > kMaxBranchOffset); | 906 } while (dist > kMaxBranchOffset); |
801 } else if (dist < -kMaxBranchOffset) { | 907 } else if (dist < -kMaxBranchOffset) { |
802 do { | 908 do { |
803 int32_t trampoline_pos = get_trampoline_entry(target_pos, false); | 909 int32_t trampoline_pos = get_trampoline_entry(target_pos, false); |
| 910 if (kInvalidSlotPos == trampoline_pos) { |
| 911 // Internal error. |
| 912 return 0; |
| 913 } |
804 ASSERT((target_pos - trampoline_pos) > 0); | 914 ASSERT((target_pos - trampoline_pos) > 0); |
805 ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset); | 915 ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset); |
806 target_at_put(trampoline_pos, target_pos); | 916 target_at_put(trampoline_pos, target_pos); |
807 target_pos = trampoline_pos; | 917 target_pos = trampoline_pos; |
808 dist = pc_offset_v - target_pos; | 918 dist = pc_offset_v - target_pos; |
809 } while (dist < -kMaxBranchOffset); | 919 } while (dist < -kMaxBranchOffset); |
810 } | 920 } |
811 } else { | 921 } else { |
812 if (L->is_linked()) { | 922 if (L->is_linked()) { |
813 target_pos = L->pos(); // L's link. | 923 target_pos = L->pos(); // L's link. |
(...skipping 197 matching lines...) |
1011 Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize); | 1121 Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize); |
1012 | 1122 |
1013 if (IsPush(push_instr) && | 1123 if (IsPush(push_instr) && |
1014 IsPop(pop_instr) && pre_push_sp_set == kPushInstruction && | 1124 IsPop(pop_instr) && pre_push_sp_set == kPushInstruction && |
1015 post_pop_sp_set == kPopInstruction) { | 1125 post_pop_sp_set == kPopInstruction) { |
1016 if ((pop_instr & kRtMask) != (push_instr & kRtMask)) { | 1126 if ((pop_instr & kRtMask) != (push_instr & kRtMask)) { |
1017 // For consecutive push and pop on different registers, | 1127 // For consecutive push and pop on different registers, |
1018 // we delete both the push & pop and insert a register move. | 1128 // we delete both the push & pop and insert a register move. |
1019 // push ry, pop rx --> mov rx, ry. | 1129 // push ry, pop rx --> mov rx, ry. |
1020 Register reg_pushed, reg_popped; | 1130 Register reg_pushed, reg_popped; |
1021 reg_pushed = GetRt(push_instr); | 1131 reg_pushed = GetRtReg(push_instr); |
1022 reg_popped = GetRt(pop_instr); | 1132 reg_popped = GetRtReg(pop_instr); |
1023 pc_ -= 4 * kInstrSize; | 1133 pc_ -= 4 * kInstrSize; |
1024 // Insert a mov instruction, which is better than a pair of push & pop. | 1134 // Insert a mov instruction, which is better than a pair of push & pop. |
1025 or_(reg_popped, reg_pushed, zero_reg); | 1135 or_(reg_popped, reg_pushed, zero_reg); |
1026 if (FLAG_print_peephole_optimization) { | 1136 if (FLAG_print_peephole_optimization) { |
1027 PrintF("%x push/pop (diff reg) replaced by a reg move\n", | 1137 PrintF("%x push/pop (diff reg) replaced by a reg move\n", |
1028 pc_offset()); | 1138 pc_offset()); |
1029 } | 1139 } |
1030 } else { | 1140 } else { |
1031 // For consecutive push and pop on the same register, | 1141 // For consecutive push and pop on the same register, |
1032 // both the push and the pop can be deleted. | 1142 // both the push and the pop can be deleted. |
(...skipping 14 matching lines...) |
1047 | 1157 |
1048 if (IsPush(mem_write_instr) && | 1158 if (IsPush(mem_write_instr) && |
1049 pre_push_sp_set == kPushInstruction && | 1159 pre_push_sp_set == kPushInstruction && |
1050 IsPop(mem_read_instr) && | 1160 IsPop(mem_read_instr) && |
1051 post_pop_sp_set == kPopInstruction) { | 1161 post_pop_sp_set == kPopInstruction) { |
1052 if ((IsLwRegFpOffset(lw_instr) || | 1162 if ((IsLwRegFpOffset(lw_instr) || |
1053 IsLwRegFpNegOffset(lw_instr))) { | 1163 IsLwRegFpNegOffset(lw_instr))) { |
1054 if ((mem_write_instr & kRtMask) == | 1164 if ((mem_write_instr & kRtMask) == |
1055 (mem_read_instr & kRtMask)) { | 1165 (mem_read_instr & kRtMask)) { |
1056 // Pattern: push & pop from/to same register, | 1166 // Pattern: push & pop from/to same register, |
1057 // with a fp+offset lw in between. | 1167 // with a fp + offset lw in between. |
1058 // | 1168 // |
1059 // The following: | 1169 // The following: |
1060 // addiu sp, sp, -4 | 1170 // addiu sp, sp, -4 |
1061 // sw rx, [sp, #0]! | 1171 // sw rx, [sp, #0]! |
1062 // lw rz, [fp, #-24] | 1172 // lw rz, [fp, #-24] |
1063 // lw rx, [sp, 0], | 1173 // lw rx, [sp, 0], |
1064 // addiu sp, sp, 4 | 1174 // addiu sp, sp, 4 |
1065 // | 1175 // |
1066 // Becomes: | 1176 // Becomes: |
1067 // if(rx == rz) | 1177 // if(rx == rz) |
1068 // delete all | 1178 // delete all |
1069 // else | 1179 // else |
1070 // lw rz, [fp, #-24] | 1180 // lw rz, [fp, #-24] |
1071 | 1181 |
1072 if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) { | 1182 if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) { |
1073 pc_ -= 5 * kInstrSize; | 1183 pc_ -= 5 * kInstrSize; |
1074 } else { | 1184 } else { |
1075 pc_ -= 5 * kInstrSize; | 1185 pc_ -= 5 * kInstrSize; |
1076 // Reinsert back the lw rz. | 1186 // Reinsert back the lw rz. |
1077 emit(lw_instr); | 1187 emit(lw_instr); |
1078 } | 1188 } |
1079 if (FLAG_print_peephole_optimization) { | 1189 if (FLAG_print_peephole_optimization) { |
1080 PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset()); | 1190 PrintF("%x push/pop -dead ldr fp + offset in middle\n", |
| 1191 pc_offset()); |
1081 } | 1192 } |
1082 } else { | 1193 } else { |
1083 // Pattern: push & pop from/to different registers | 1194 // Pattern: push & pop from/to different registers |
1084 // with a fp + offset lw in between. | 1195 // with a fp + offset lw in between. |
1085 // | 1196 // |
1086 // The following: | 1197 // The following: |
1087 // addiu sp, sp, -4 | 1198 // addiu sp, sp, -4 |
1088 // sw rx, [sp, 0] | 1199 // sw rx, [sp, 0] |
1089 // lw rz, [fp, #-24] | 1200 // lw rz, [fp, #-24] |
1090 // lw ry, [sp, 0] | 1201 // lw ry, [sp, 0] |
1091 // addiu sp, sp, 4 | 1202 // addiu sp, sp, 4 |
1092 // | 1203 // |
1093 // Becomes: | 1204 // Becomes: |
1094 // if(ry == rz) | 1205 // if(ry == rz) |
1095 // mov ry, rx; | 1206 // mov ry, rx; |
1096 // else if(rx != rz) | 1207 // else if(rx != rz) |
1097 // lw rz, [fp, #-24] | 1208 // lw rz, [fp, #-24] |
1098 // mov ry, rx | 1209 // mov ry, rx |
1099 // else if((ry != rz) || (rx == rz)) becomes: | 1210 // else if((ry != rz) || (rx == rz)) becomes: |
1100 // mov ry, rx | 1211 // mov ry, rx |
1101 // lw rz, [fp, #-24] | 1212 // lw rz, [fp, #-24] |
1102 | 1213 |
1103 Register reg_pushed, reg_popped; | 1214 Register reg_pushed, reg_popped; |
1104 if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) { | 1215 if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) { |
1105 reg_pushed = GetRt(mem_write_instr); | 1216 reg_pushed = GetRtReg(mem_write_instr); |
1106 reg_popped = GetRt(mem_read_instr); | 1217 reg_popped = GetRtReg(mem_read_instr); |
1107 pc_ -= 5 * kInstrSize; | 1218 pc_ -= 5 * kInstrSize; |
1108 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. | 1219 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. |
1109 } else if ((mem_write_instr & kRtMask) | 1220 } else if ((mem_write_instr & kRtMask) |
1110 != (lw_instr & kRtMask)) { | 1221 != (lw_instr & kRtMask)) { |
1111 reg_pushed = GetRt(mem_write_instr); | 1222 reg_pushed = GetRtReg(mem_write_instr); |
1112 reg_popped = GetRt(mem_read_instr); | 1223 reg_popped = GetRtReg(mem_read_instr); |
1113 pc_ -= 5 * kInstrSize; | 1224 pc_ -= 5 * kInstrSize; |
1114 emit(lw_instr); | 1225 emit(lw_instr); |
1115 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. | 1226 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. |
1116 } else if (((mem_read_instr & kRtMask) | 1227 } else if (((mem_read_instr & kRtMask) |
1117 != (lw_instr & kRtMask)) || | 1228 != (lw_instr & kRtMask)) || |
1118 ((mem_write_instr & kRtMask) | 1229 ((mem_write_instr & kRtMask) |
1119 == (lw_instr & kRtMask)) ) { | 1230 == (lw_instr & kRtMask)) ) { |
1120 reg_pushed = GetRt(mem_write_instr); | 1231 reg_pushed = GetRtReg(mem_write_instr); |
1121 reg_popped = GetRt(mem_read_instr); | 1232 reg_popped = GetRtReg(mem_read_instr); |
1122 pc_ -= 5 * kInstrSize; | 1233 pc_ -= 5 * kInstrSize; |
1123 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. | 1234 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. |
1124 emit(lw_instr); | 1235 emit(lw_instr); |
1125 } | 1236 } |
1126 if (FLAG_print_peephole_optimization) { | 1237 if (FLAG_print_peephole_optimization) { |
1127 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset()); | 1238 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset()); |
1128 } | 1239 } |
1129 } | 1240 } |
1130 } | 1241 } |
1131 } | 1242 } |
(...skipping 215 matching lines...) |
1347 // | 1458 // |
1348 // The following: | 1459 // The following: |
1349 // sw rx, [fp, #-12] | 1460 // sw rx, [fp, #-12] |
1350 // lw ry, [fp, #-12] | 1461 // lw ry, [fp, #-12] |
1351 // | 1462 // |
1352 // Becomes: | 1463 // Becomes: |
1353 // sw rx, [fp, #-12] | 1464 // sw rx, [fp, #-12] |
1354 // mov ry, rx | 1465 // mov ry, rx |
1355 | 1466 |
1356 Register reg_stored, reg_loaded; | 1467 Register reg_stored, reg_loaded; |
1357 reg_stored = GetRt(sw_instr); | 1468 reg_stored = GetRtReg(sw_instr); |
1358 reg_loaded = GetRt(lw_instr); | 1469 reg_loaded = GetRtReg(lw_instr); |
1359 pc_ -= 1 * kInstrSize; | 1470 pc_ -= 1 * kInstrSize; |
1360 // Insert a mov instruction, which is better than lw. | 1471 // Insert a mov instruction, which is better than lw. |
1361 or_(reg_loaded, reg_stored, zero_reg); // Move instruction. | 1472 or_(reg_loaded, reg_stored, zero_reg); // Move instruction. |
1362 if (FLAG_print_peephole_optimization) { | 1473 if (FLAG_print_peephole_optimization) { |
1363 PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset()); | 1474 PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset()); |
1364 } | 1475 } |
1365 } | 1476 } |
1366 } | 1477 } |
1367 } | 1478 } |
1368 } | 1479 } |
(...skipping 169 matching lines...) |
1538 } | 1649 } |
1539 | 1650 |
1540 | 1651 |
1541 void Assembler::movn(Register rd, Register rs, Register rt) { | 1652 void Assembler::movn(Register rd, Register rs, Register rt) { |
1542 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN); | 1653 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN); |
1543 } | 1654 } |
1544 | 1655 |
1545 | 1656 |
1546 void Assembler::movt(Register rd, Register rs, uint16_t cc) { | 1657 void Assembler::movt(Register rd, Register rs, uint16_t cc) { |
1547 Register rt; | 1658 Register rt; |
1548 rt.code_ = (cc & 0x0003) << 2 | 1; | 1659 rt.code_ = (cc & 0x0007) << 2 | 1; |
1549 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); | 1660 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); |
1550 } | 1661 } |
1551 | 1662 |
1552 | 1663 |
1553 void Assembler::movf(Register rd, Register rs, uint16_t cc) { | 1664 void Assembler::movf(Register rd, Register rs, uint16_t cc) { |
1554 Register rt; | 1665 Register rt; |
1555 rt.code_ = (cc & 0x0003) << 2 | 0; | 1666 rt.code_ = (cc & 0x0007) << 2 | 0; |
1556 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); | 1667 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); |
1557 } | 1668 } |
1558 | 1669 |
1559 | 1670 |
1560 // Bit twiddling. | 1671 // Bit twiddling. |
1561 void Assembler::clz(Register rd, Register rs) { | 1672 void Assembler::clz(Register rd, Register rs) { |
1562 // Clz instr requires same GPR number in 'rd' and 'rt' fields. | 1673 // Clz instr requires same GPR number in 'rd' and 'rt' fields. |
1563 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); | 1674 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); |
1564 } | 1675 } |
1565 | 1676 |
(...skipping 243 matching lines...) |
1809 | 1920 |
1810 | 1921 |
1811 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { | 1922 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { |
1812 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); | 1923 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); |
1813 } | 1924 } |
1814 | 1925 |
1815 | 1926 |
1816 // Conditions. | 1927 // Conditions. |
1817 void Assembler::c(FPUCondition cond, SecondaryField fmt, | 1928 void Assembler::c(FPUCondition cond, SecondaryField fmt, |
1818 FPURegister fs, FPURegister ft, uint16_t cc) { | 1929 FPURegister fs, FPURegister ft, uint16_t cc) { |
1819 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); | 1930 ASSERT(CpuFeatures::IsEnabled(FPU)); |
1820 ASSERT(is_uint3(cc)); | 1931 ASSERT(is_uint3(cc)); |
1821 ASSERT((fmt & ~(31 << kRsShift)) == 0); | 1932 ASSERT((fmt & ~(31 << kRsShift)) == 0); |
1822 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift | 1933 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift |
1823 | cc << 8 | 3 << 4 | cond; | 1934 | cc << 8 | 3 << 4 | cond; |
1824 emit(instr); | 1935 emit(instr); |
1825 } | 1936 } |
1826 | 1937 |
1827 | 1938 |
1828 void Assembler::fcmp(FPURegister src1, const double src2, | 1939 void Assembler::fcmp(FPURegister src1, const double src2, |
1829 FPUCondition cond) { | 1940 FPUCondition cond) { |
1830 ASSERT(isolate()->cpu_features()->IsSupported(FPU)); | 1941 ASSERT(CpuFeatures::IsEnabled(FPU)); |
1831 ASSERT(src2 == 0.0); | 1942 ASSERT(src2 == 0.0); |
1832 mtc1(zero_reg, f14); | 1943 mtc1(zero_reg, f14); |
1833 cvt_d_w(f14, f14); | 1944 cvt_d_w(f14, f14); |
1834 c(cond, D, src1, f14, 0); | 1945 c(cond, D, src1, f14, 0); |
1835 } | 1946 } |
1836 | 1947 |
1837 | 1948 |
1838 void Assembler::bc1f(int16_t offset, uint16_t cc) { | 1949 void Assembler::bc1f(int16_t offset, uint16_t cc) { |
1839 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); | 1950 ASSERT(CpuFeatures::IsEnabled(FPU)); |
1840 ASSERT(is_uint3(cc)); | 1951 ASSERT(is_uint3(cc)); |
1841 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); | 1952 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); |
1842 emit(instr); | 1953 emit(instr); |
1843 } | 1954 } |
1844 | 1955 |
1845 | 1956 |
1846 void Assembler::bc1t(int16_t offset, uint16_t cc) { | 1957 void Assembler::bc1t(int16_t offset, uint16_t cc) { |
1847 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); | 1958 ASSERT(CpuFeatures::IsEnabled(FPU)); |
1848 ASSERT(is_uint3(cc)); | 1959 ASSERT(is_uint3(cc)); |
1849 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); | 1960 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); |
1850 emit(instr); | 1961 emit(instr); |
1851 } | 1962 } |
1852 | 1963 |
1853 | 1964 |
1854 // Debugging. | 1965 // Debugging. |
1855 void Assembler::RecordJSReturn() { | 1966 void Assembler::RecordJSReturn() { |
1856 positions_recorder()->WriteRecordedPositions(); | 1967 positions_recorder()->WriteRecordedPositions(); |
1857 CheckBuffer(); | 1968 CheckBuffer(); |
(...skipping 84 matching lines...) |
1942 // These modes do not need an entry in the constant pool. | 2053 // These modes do not need an entry in the constant pool. |
1943 } | 2054 } |
1944 if (rinfo.rmode() != RelocInfo::NONE) { | 2055 if (rinfo.rmode() != RelocInfo::NONE) { |
1945 // Don't record external references unless the heap will be serialized. | 2056 // Don't record external references unless the heap will be serialized. |
1946 if (rmode == RelocInfo::EXTERNAL_REFERENCE && | 2057 if (rmode == RelocInfo::EXTERNAL_REFERENCE && |
1947 !Serializer::enabled() && | 2058 !Serializer::enabled() && |
1948 !FLAG_debug_code) { | 2059 !FLAG_debug_code) { |
1949 return; | 2060 return; |
1950 } | 2061 } |
1951 ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. | 2062 ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. |
1952 reloc_info_writer.Write(&rinfo); | 2063 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { |
| 2064 ASSERT(ast_id_for_reloc_info_ != kNoASTId); |
| 2065 RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_); |
| 2066 ast_id_for_reloc_info_ = kNoASTId; |
| 2067 reloc_info_writer.Write(&reloc_info_with_ast_id); |
| 2068 } else { |
| 2069 reloc_info_writer.Write(&rinfo); |
| 2070 } |
1953 } | 2071 } |
1954 } | 2072 } |
1955 | 2073 |
1956 | 2074 |
1957 void Assembler::BlockTrampolinePoolFor(int instructions) { | 2075 void Assembler::BlockTrampolinePoolFor(int instructions) { |
1958 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); | 2076 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); |
1959 } | 2077 } |
1960 | 2078 |
1961 | 2079 |
1962 void Assembler::CheckTrampolinePool(bool force_emit) { | 2080 void Assembler::CheckTrampolinePool(bool force_emit) { |
(...skipping 47 matching lines...) |
2010 // move the check offset forward by the standard interval. | 2128 // move the check offset forward by the standard interval. |
2011 next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools; | 2129 next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools; |
2012 } | 2130 } |
2013 return; | 2131 return; |
2014 } | 2132 } |
2015 | 2133 |
2016 | 2134 |
2017 Address Assembler::target_address_at(Address pc) { | 2135 Address Assembler::target_address_at(Address pc) { |
2018 Instr instr1 = instr_at(pc); | 2136 Instr instr1 = instr_at(pc); |
2019 Instr instr2 = instr_at(pc + kInstrSize); | 2137 Instr instr2 = instr_at(pc + kInstrSize); |
2020 // Check we have 2 instructions generated by li. | 2138 // Interpret 2 instructions generated by li: lui/ori |
2021 ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) || | 2139 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) { |
2022 ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI || | 2140 // Assemble the 32 bit value. |
2023 (instr2 & kOpcodeMask) == ORI || | |
2024 (instr2 & kOpcodeMask) == LUI))); | |
2025 // Interpret these 2 instructions. | |
2026 if (instr1 == nopInstr) { | |
2027 if ((instr2 & kOpcodeMask) == ADDI) { | |
2028 return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16); | |
2029 } else if ((instr2 & kOpcodeMask) == ORI) { | |
2030 return reinterpret_cast<Address>(instr2 & kImm16Mask); | |
2031 } else if ((instr2 & kOpcodeMask) == LUI) { | |
2032 return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16); | |
2033 } | |
2034 } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) { | |
2035 // 32 bit value. | |
2036 return reinterpret_cast<Address>( | 2141 return reinterpret_cast<Address>( |
2037 (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask)); | 2142 (GetImmediate16(instr1) << 16) | GetImmediate16(instr2)); |
2038 } | 2143 } |
2039 | 2144 |
2040 // We should never get here. | 2145 // We should never get here; force a bad address if we do. |
2041 UNREACHABLE(); | 2146 UNREACHABLE(); |
2042 return (Address)0x0; | 2147 return (Address)0x0; |
2043 } | 2148 } |
2044 | 2149 |
2045 | 2150 |
2046 void Assembler::set_target_address_at(Address pc, Address target) { | 2151 void Assembler::set_target_address_at(Address pc, Address target) { |
2047 // On MIPS we need to patch the code to generate. | 2152 // On MIPS we patch the address into lui/ori instruction pair. |
2048 | 2153 |
2049 // First check we have a li. | 2154 // First check we have an li (lui/ori pair). |
2050 Instr instr2 = instr_at(pc + kInstrSize); | 2155 Instr instr2 = instr_at(pc + kInstrSize); |
2051 #ifdef DEBUG | 2156 #ifdef DEBUG |
2052 Instr instr1 = instr_at(pc); | 2157 Instr instr1 = instr_at(pc); |
2053 | 2158 |
2054 // Check we have indeed the result from a li with MustUseReg true. | 2159 // Check we have indeed the result from a li with MustUseReg true. |
2055 CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) || | 2160 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI)); |
2056 ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU || | |
2057 (instr2 & kOpcodeMask)== ORI || | |
2058 (instr2 & kOpcodeMask)== LUI))); | |
2059 #endif | 2161 #endif |
2060 | 2162 |
2061 uint32_t rt_code = (instr2 & kRtFieldMask); | 2163 uint32_t rt_code = GetRtField(instr2); |
2062 uint32_t* p = reinterpret_cast<uint32_t*>(pc); | 2164 uint32_t* p = reinterpret_cast<uint32_t*>(pc); |
2063 uint32_t itarget = reinterpret_cast<uint32_t>(target); | 2165 uint32_t itarget = reinterpret_cast<uint32_t>(target); |
2064 | 2166 |
2065 if (is_int16(itarget)) { | 2167 // lui rt, high-16. |
2066 // nop. | 2168 // ori rt rt, low-16. |
2067 // addiu rt zero_reg j. | 2169 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); |
2068 *p = nopInstr; | 2170 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); |
2069 *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask); | |
2070 } else if (!(itarget & kHiMask)) { | |
2071 // nop. | |
2072 // ori rt zero_reg j. | |
2073 *p = nopInstr; | |
2074 *(p+1) = ORI | rt_code | (itarget & kImm16Mask); | |
2075 } else if (!(itarget & kImm16Mask)) { | |
2076 // nop. | |
2077 // lui rt (kHiMask & itarget) >> kLuiShift. | |
2078 *p = nopInstr; | |
2079 *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); | |
2080 } else { | |
2081 // lui rt (kHiMask & itarget) >> kLuiShift. | |
2082 // ori rt rt, (kImm16Mask & itarget). | |
2083 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); | |
2084 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); | |
2085 } | |
2086 | 2171 |
2087 CPU::FlushICache(pc, 2 * sizeof(int32_t)); | 2172 CPU::FlushICache(pc, 2 * sizeof(int32_t)); |
2088 } | 2173 } |
2089 | 2174 |
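set_target_address_at() now always patches a full lui/ori pair instead of special-casing small targets, so the two half-words are unconditional: the high 16 bits go into the lui immediate and the low 16 into the ori. A worked split of an example address (the address value is arbitrary):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kHiMask = 0xFFFF0000, kImm16Mask = 0x0000FFFF;
      const int kLuiShift = 16;
      uint32_t itarget = 0x12345678;                        // Example target.
      uint32_t lui_imm = (itarget & kHiMask) >> kLuiShift;  // 0x1234
      uint32_t ori_imm = itarget & kImm16Mask;              // 0x5678
      printf("lui rt, 0x%04x ; ori rt, rt, 0x%04x\n", lui_imm, ori_imm);
      return 0;
    }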
2090 | 2175 |
2091 } } // namespace v8::internal | 2176 } } // namespace v8::internal |
2092 | 2177 |
2093 #endif // V8_TARGET_ARCH_MIPS | 2178 #endif // V8_TARGET_ARCH_MIPS |