Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(486)

Side by Side Diff: src/mips/assembler-mips.cc

Issue 549079: Support for MIPS in architecture independent files.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
Søren Thygesen Gjesse 2010/01/19 22:59:12 Please use the copyright header from src/mips/asse
Alexandre 2010/01/22 23:08:42 Replaced the copyright. On 2010/01/19 22:59:12, Sø
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28
29 #include "v8.h"
30 #include "mips/assembler-mips-inl.h"
31 #include "serialize.h"
32
33
34 namespace v8 {
35 namespace internal {
36
37
38
// General-purpose register instances.  The initializer is the MIPS hardware
// register number; the symbolic names follow the standard MIPS mnemonics
// (at = assembler temporary, v = values, a = arguments, t = temporaries,
// s = saved, k = kernel-reserved, gp/sp/ra = globals/stack/return address).
const Register no_reg = { -1 };  // Sentinel: "no register".

const Register zero_reg = { 0 };  // Always reads as zero.
const Register at = { 1 };  // Assembler temporary; clobbered by pseudo-ops.
const Register v0 = { 2 };
const Register v1 = { 3 };
const Register a0 = { 4 };
const Register a1 = { 5 };
const Register a2 = { 6 };
const Register a3 = { 7 };
const Register t0 = { 8 };
const Register t1 = { 9 };
const Register t2 = { 10 };
const Register t3 = { 11 };
const Register t4 = { 12 };
const Register t5 = { 13 };
const Register t6 = { 14 };
const Register t7 = { 15 };
const Register s0 = { 16 };
const Register s1 = { 17 };
const Register s2 = { 18 };
const Register s3 = { 19 };
const Register s4 = { 20 };
const Register s5 = { 21 };
const Register s6 = { 22 };
const Register s7 = { 23 };
const Register t8 = { 24 };
const Register t9 = { 25 };
const Register k0 = { 26 };
const Register k1 = { 27 };
const Register gp = { 28 };
const Register sp = { 29 };
const Register s8_fp = { 30 };  // s8, conventionally usable as frame pointer.
const Register ra = { 31 };  // Return address.
73
74
// Coprocessor register instances f0..f31 (FPU register file).
const CRegister no_creg = { -1 };  // Sentinel: "no coprocessor register".

const CRegister f0 = { 0 };
const CRegister f1 = { 1 };
const CRegister f2 = { 2 };
const CRegister f3 = { 3 };
const CRegister f4 = { 4 };
const CRegister f5 = { 5 };
const CRegister f6 = { 6 };
const CRegister f7 = { 7 };
const CRegister f8 = { 8 };
const CRegister f9 = { 9 };
const CRegister f10 = { 10 };
const CRegister f11 = { 11 };
const CRegister f12 = { 12 };
const CRegister f13 = { 13 };
const CRegister f14 = { 14 };
const CRegister f15 = { 15 };
const CRegister f16 = { 16 };
const CRegister f17 = { 17 };
const CRegister f18 = { 18 };
const CRegister f19 = { 19 };
const CRegister f20 = { 20 };
const CRegister f21 = { 21 };
const CRegister f22 = { 22 };
const CRegister f23 = { 23 };
const CRegister f24 = { 24 };
const CRegister f25 = { 25 };
const CRegister f26 = { 26 };
const CRegister f27 = { 27 };
const CRegister f28 = { 28 };
const CRegister f29 = { 29 };
const CRegister f30 = { 30 };
const CRegister f31 = { 31 };
109
110 int ToNumber(Register reg) {
111 ASSERT(reg.is_valid());
112 const int kNumbers[] = {
113 0, // zero_reg
114 1, // at
115 2, // v0
116 3, // v1
117 4, // a0
118 5, // a1
119 6, // a2
120 7, // a3
121 8, // t0
122 9, // t1
123 10, // t2
124 11, // t3
125 12, // t4
126 13, // t5
127 14, // t6
128 15, // t7
129 16, // s0
130 17, // s1
131 18, // s2
132 19, // s3
133 20, // s4
134 21, // s5
135 22, // s6
136 23, // s7
137 24, // t8
138 25, // t9
139 26, // k0
140 27, // k1
141 28, // gp
142 29, // sp
143 30, // s8_fp
144 31, // ra
145 };
146 return kNumbers[reg.code()];
147 }
148
149 Register ToRegister(int num) {
150 ASSERT(num >= 0 && num < kNumRegisters);
151 const Register kRegisters[] = {
152 zero_reg,
153 at,
154 v0, v1,
155 a0, a1, a2, a3,
156 t0, t1, t2, t3, t4, t5, t6, t7,
157 s0, s1, s2, s3, s4, s5, s6, s7,
158 t8, t9,
159 k0, k1,
160 gp,
161 sp,
162 s8_fp,
163 ra
164 };
165 return kRegisters[num];
166 }
167
168 // -----------------------------------------------------------------------------
169 // Implementation of RelocInfo
170
// Mask of relocation modes whose targets must be fixed up by
// RelocInfo::apply() when code moves (see the mode list in assembler.h).
// No modes are affected on MIPS so far.  NOTE(review): revisit once jumps
// with embedded code targets are implemented — confirm none need apply().
const int RelocInfo::kApplyMask = 0;
173
174
175 // Patch the code at the current address with the supplied instructions.
176 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
177 Instr* pc = reinterpret_cast<Instr*>(pc_);
178 Instr* instr = reinterpret_cast<Instr*>(instructions);
179 for (int i = 0; i < instruction_count; i++) {
180 *(pc + i) = *(instr + i);
181 }
182
183 // Indicate that code has changed.
184 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
185 }
186
187
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
// Not yet implemented for MIPS.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}
194
195
196
197 // -----------------------------------------------------------------------------
198 // Implementation of Operand and MemOperand
199 // See assembler-mips-inl.h for inlined constructors
200
// Builds an immediate Operand from a handle.  Heap objects are referenced
// indirectly through the handle location and tagged EMBEDDED_OBJECT so the
// GC can relocate them; other values (non-heap-objects) are embedded
// directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed: embed the value itself.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}
215
// Memory operand: base register |rm| plus a signed 16-bit |offset| — the
// addressing form taken by MIPS load/store instructions.
MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
  offset_ = offset;
}
219
220
221 // -----------------------------------------------------------------------------
222 // Implementation of Assembler
223
// Minimum size of a self-managed code buffer; buffers of exactly this size
// are recycled through spare_buffer_ (see the constructor and destructor).
static const int kMinimalBufferSize = 4*KB;
// One cached minimal-size buffer, reused across Assembler instances to
// avoid repeated allocation.
static byte* spare_buffer_ = NULL;
226
// Creates an Assembler emitting into |buffer|.  When |buffer| is NULL the
// assembler manages its own buffer: it reuses the shared spare buffer when
// available, otherwise allocates at least kMinimalBufferSize bytes.
Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Grab the cached spare buffer if one is available.
      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers: code grows up from the start of the buffer,
  // relocation info grows down from its end.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}
263
264
265 Assembler::~Assembler() {
266 if (own_buffer_) {
267 if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
268 spare_buffer_ = buffer_;
269 } else {
270 DeleteArray(buffer_);
271 }
272 }
273 }
274
275
276 void Assembler::GetCode(CodeDesc* desc) {
277 ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
278 // setup desc
279 desc->buffer = buffer_;
280 desc->buffer_size = buffer_size_;
281 desc->instr_size = pc_offset();
282 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
283 }
284
285
286 void Assembler::Align(int m) {
287 ASSERT(m >= 4 && IsPowerOf2(m));
288 while ((pc_offset() & (m - 1)) != 0) {
289 nop();
290 }
291 }
292
293
294 // Labels refer to positions in the (to be) generated code.
295 // There are bound, linked, and unused labels.
296 //
297 // Bound labels refer to known positions in the already
298 // generated code. pos() is the position the label refers to.
299 //
300 // Linked labels refer to unknown positions in the code
301 // to be generated; pos() is the position of the last
302 // instruction using the label.
303
304
// The label link chain is terminated by a negative code position.  It must
// look like an aligned instruction position, hence -4 (one instruction
// before the buffer).
const int kEndOfChain = -4;
307
308 bool Assembler::is_branch(Instr instr) {
309 uint32_t opcode = (( instr & OpcodeMask )) ;
310 uint32_t rt_field = (( instr & rtFieldMask ));
311 uint32_t rs_field = (( instr & rsFieldMask ));
312 // Checks if the instruction is a branch or a jump (jr and jalr excluded)
antonm 2010/01/21 13:10:45 I think this line should be indented as well.
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
313 return opcode == BEQ
314 || opcode == BNE
315 || opcode == BLEZ
316 || opcode == BGTZ
317 || opcode == BEQL
318 || opcode == BNEL
319 || opcode == BLEZL
320 || opcode == BGTZL
321 || ( opcode == REGIMM && ( rt_field == BLTZ
322 || rt_field == BGEZ
323 || rt_field == BLTZAL
324 || rt_field == BGEZAL ) )
325 || ( opcode == COP1 && rs_field == BC1 ); // Coprocessor branch
326 }
327
328 int Assembler::target_at(int32_t pos) {
329 Instr instr = instr_at(pos);
330 if ((instr & ~Imm16Mask) == 0) {
331 // Emitted label constant, not part of a branch.
332 return instr - (Code::kHeaderSize - kHeapObjectTag);
333 }
334 // Check we have a branch instruction. (or jump when implemented)
335 ASSERT(is_branch(instr));
336 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
337 // the compiler uses arithmectic shifts for signed integers.
338 int32_t imm18 = (((int32_t)instr & (int32_t)Imm16Mask) << 16) >>14;
antonm 2010/01/21 13:10:45 nit: a space between >> and 14
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
339
340 return pos + kBranchPCOffset + imm18;
341 }
342
343
344 void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
345 Instr instr = instr_at(pos);
346 if ((instr & ~Imm16Mask) == 0) {
347 ASSERT(target_pos == kEndOfChain || target_pos >= 0);
348 // Emitted label constant, not part of a branch.
349 // Make label relative to Code* of generated Code object.
350 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
351 return;
352 }
353
354 ASSERT(is_branch(instr));
355 int32_t imm18 = target_pos - (pos + kBranchPCOffset);
356 ASSERT( (imm18 & 3) == 0 );
357
358 instr &= ~Imm16Mask;
359 int32_t imm16 = imm18 >> 2;
360 ASSERT(is_int16(imm16));
361
362 instr_at_put(pos, instr | (imm16 & Imm16Mask));
363 }
364
365 // TODO : Upgrade this.
366 void Assembler::print(Label* L) {
367 if (L->is_unused()) {
368 PrintF("unused label\n");
369 } else if (L->is_bound()) {
370 PrintF("bound label to %d\n", L->pos());
371 } else if (L->is_linked()) {
372 Label l = *L;
373 PrintF("unbound label");
374 while (l.is_linked()) {
375 PrintF("@ %d ", l.pos());
376 Instr instr = instr_at(l.pos());
377 if ((instr & ~Imm16Mask) == 0) {
378 PrintF("value\n");
379 } else {
380 PrintF("%d\n", instr);
381 }
382 next(&l);
383 }
384 } else {
385 PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
386 }
387 }
388
389
390 void Assembler::bind_to(Label* L, int pos) {
391 ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
392 while (L->is_linked()) {
393 int32_t fixup_pos = L->pos();
394 next(L); // call next before overwriting link with target at fixup_pos
395 target_at_put(fixup_pos, pos);
396 }
397 L->bind_to(pos);
398
399 // Keep track of the last bound label so we don't eliminate any instructions
400 // before a bound label.
401 if (pos > last_bound_pos_)
402 last_bound_pos_ = pos;
403 }
404
405
406 void Assembler::link_to(Label* L, Label* appendix) {
407 if (appendix->is_linked()) {
408 if (L->is_linked()) {
409 // append appendix to L's list
410 int fixup_pos;
411 int link = L->pos();
412 do {
413 fixup_pos = link;
414 link = target_at(fixup_pos);
415 } while (link > 0);
416 ASSERT(link == kEndOfChain);
417 target_at_put(fixup_pos, appendix->pos());
418 } else {
419 // L is empty, simply use appendix
420 *L = *appendix;
421 }
422 }
423 appendix->Unuse(); // appendix should not be used anymore
424 }
425
426
// Binds |L| to the current code position.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
431
432
433 void Assembler::next(Label* L) {
434 ASSERT(L->is_linked());
435 int link = target_at(L->pos());
436 if (link > 0) {
437 L->link_to(link);
438 } else {
439 ASSERT(link == kEndOfChain);
440 L->Unuse();
441 }
442 }
443
444
445
446 // We have to use the temporary register for things that can be relocated even
447 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
448 // space. There is no guarantee that the relocated location can be similarly
449 // encoded.
450 static bool MustUse_at(RelocInfo::Mode rmode) {
451 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
452 return Serializer::enabled();
453 } else if (rmode == RelocInfo::NONE) {
454 return false;
455 }
456 return true;
457 }
458
459
// Emits a register-type (R-format) instruction:
//   opcode | r1 (rs) | r2 (rt) | r3 (rd) | sa | func
void Assembler::instrmod1( Opcode opcode,
                           Register r1,
                           Register r2,
                           Register r3,
                           uint16_t sa,
                           SecondaryField func) {
  ASSERT(is_uint5(sa));
  Instr instr = opcode | (r1.code() << 21) | (r2.code() << 16)
      | (r3.code() << 11) | (sa << 6) | func;
  emit(instr);
}

// Emits a coprocessor register-type instruction:
//   opcode | fmt | ft | fs | fd | func
void Assembler::instrmod1( Opcode opcode,
                           SecondaryField fmt,
                           CRegister ft,
                           CRegister fs,
                           CRegister fd,
                           SecondaryField func) {
  Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << 11)
      | (fd.code() << 6) | func;
  emit(instr);
}

// Emits a mixed GPR/coprocessor register-type instruction (a general
// register in the rt slot, coprocessor registers in fs/fd).
void Assembler::instrmod1( Opcode opcode,
                           SecondaryField fmt,
                           Register rt,
                           CRegister fs,
                           CRegister fd,
                           SecondaryField func) {
  Instr instr = opcode | fmt | (rt.code() << 16) | (fs.code() << 11)
      | (fd.code() << 6) | func;
  emit(instr);
}
491
// Instructions with immediate value (I-format).
// Registers are in the order of the instruction encoding, from left to right.

// opcode | rs | rt | signed 16-bit immediate.
void Assembler::instrmod2( Opcode opcode,
                           Register rs,
                           Register rt,
                           int16_t j) {
  Instr instr = opcode | (rs.code() << rs_o)
      | (rt.code() << rt_o) | (j & Imm16Mask);
  emit(instr);
}

// opcode | rs | rt | unsigned 16-bit immediate.
void Assembler::instrmod2( Opcode opcode,
                           Register rs,
                           Register rt,
                           uint16_t j) {
  Instr instr = opcode | (rs.code() << rs_o)
      | (rt.code() << rt_o) | (j & Imm16Mask);
  emit(instr);
}

// The rt slot carries a fixed secondary opcode instead of a register (used
// by the REGIMM branches such as BGEZ/BLTZAL); signed immediate.
void Assembler::instrmod2( Opcode opcode,
                           Register rs,
                           SecondaryField SF,
                           int16_t j) {
  Instr instr = opcode | (rs.code() << rs_o) | SF | (j & Imm16Mask);
  emit(instr);
}

// Same as above, with an unsigned immediate.
void Assembler::instrmod2( Opcode opcode,
                           Register rs,
                           SecondaryField SF,
                           uint16_t j) {
  Instr instr = opcode | (rs.code() << rs_o) | SF | (j & Imm16Mask);
  emit(instr);
}

// Coprocessor register in the rt slot; signed immediate.
void Assembler::instrmod2( Opcode opcode,
                           Register rs,
                           CRegister rt,
                           int16_t j) {
  Instr instr = opcode | (rs.code() << rs_o)
      | (rt.code() << rt_o) | (j & Imm16Mask);
  emit(instr);
}
534
535 // Registers are in the order of the instruction encoding, from left to right.
536 void Assembler::instrmod3( Opcode opcode,
537 Register r1,
538 uint32_t address) {
539 ASSERT(is_uint26(address));
540 Instr instr = opcode | r1.code() <<21 | address;
541 emit(instr);
542
543 }
544
545
// Returns the byte offset from the branch delay slot to the target of |L|,
// linking the label at the current pc when it is not bound yet (the branch
// being emitted becomes the new head of the link chain, so this must be
// called immediately before emitting the branch).
// |jump_elimination_allowed| is currently unused on MIPS.
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    // Side effect: this branch becomes the head of L's link chain.
    L->link_to(pc_offset());
  }

  // Offsets are relative to the address of the branch delay slot.
  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  return offset;
}
562
563
564 void Assembler::label_at_put(Label* L, int at_offset) {
565 int target_pos;
566 if (L->is_bound()) {
567 target_pos = L->pos();
568 } else {
569 if (L->is_linked()) {
570 target_pos = L->pos(); // L's link
571 } else {
572 target_pos = kEndOfChain;
573 }
574 L->link_to(at_offset);
575 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
576 }
577 }
578
579
580
581 //------- Branch and jump instructions --------
582
583 // Emulated condtional branches do not emit a nop in the branch delay slot.
584
585 // Trashes the at register if no scratch register is provided.
586 void Assembler::bcond(Condition cond, int16_t offset, Register rs,
587 const Operand& rt, Register scratch) {
588 Register r2;
589 if(rt.is_reg()) {
antonm 2010/01/21 13:10:45 nit: a space between if and (
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
590 // We don't want any other register but scratch clobbered.
591 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
592 r2 = rt.rm_;
593 } else {
594 // We don't want any other register but scratch clobbered.
595 ASSERT(!scratch.is(rs));
596 r2 = scratch;
597 li(r2, rt);
598 }
599
600 switch(cond) {
601 case cc_always:
602 b(offset);
603 break;
604 case eq:
605 beq(rs, r2, offset);
606 break;
607 case ne:
608 bne(rs, r2, offset);
609 break;
610
611 // Signed comparison
612 case greater:
613 slt(scratch, r2, rs);
614 bne(scratch, zero_reg, offset);
615 break;
616 case greater_equal:
617 slt(scratch, rs, r2);
618 beq(scratch, zero_reg, offset);
619 break;
620 case less:
621 slt(scratch, rs, r2);
622 bne(scratch, zero_reg, offset);
623 break;
624 case less_equal:
625 slt(scratch, r2, rs);
626 beq(scratch, zero_reg, offset);
627 break;
628
629 // Unsigned comparison.
630 case Ugreater:
631 sltu(scratch, r2, rs);
632 bne(scratch, zero_reg, offset);
633 break;
634 case Ugreater_equal:
635 sltu(scratch, rs, r2);
636 beq(scratch, zero_reg, offset);
637 break;
638 case Uless:
639 sltu(scratch, rs, r2);
640 bne(scratch, zero_reg, offset);
641 break;
642 case Uless_equal:
643 sltu(scratch, r2, rs);
644 beq(scratch, zero_reg, offset);
645 break;
646
647 default:
648 UNREACHABLE();
649 }
650 }
651 void Assembler::bcond(Condition cond, Label* L, Register rs,
652 const Operand& rt, Register scratch) {
653 Register r2;
654 if(rt.is_reg()) {
antonm 2010/01/21 13:10:45 nit: space between if and (
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
655 r2 = rt.rm_;
656 } else {
657 r2 = scratch;
658 li(r2, rt);
659 }
660
661 // We use branch_offset as an argument for the branch instructions to be sure
662 // it is called just before generating the branch instruction, as needed.
663
664 switch(cond) {
665 case cc_always:
666 b(branch_offset(L, false)>>2);
667 break;
668 case eq:
669 beq(rs, r2, branch_offset(L, false)>>2);
670 break;
671 case ne:
672 bne(rs, r2, branch_offset(L, false)>>2);
673 break;
674
675 // Signed comparison
antonm 2010/01/21 13:10:45 nit: -2 spaces
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
676 case greater:
677 slt(scratch, r2, rs);
678 bne(scratch, zero_reg, branch_offset(L, false)>>2);
679 break;
680 case greater_equal:
681 slt(scratch, rs, r2);
682 beq(scratch, zero_reg, branch_offset(L, false)>>2);
683 break;
684 case less:
685 slt(scratch, rs, r2);
686 bne(scratch, zero_reg, branch_offset(L, false)>>2);
687 break;
688 case less_equal:
689 slt(scratch, r2, rs);
690 beq(scratch, zero_reg, branch_offset(L, false)>>2);
691 break;
692
693 // Unsigned comparison.
antonm 2010/01/21 13:10:45 ditto
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
694 case Ugreater:
695 sltu(scratch, r2, rs);
696 bne(scratch, zero_reg, branch_offset(L, false)>>2);
697 break;
698 case Ugreater_equal:
699 sltu(scratch, rs, r2);
700 beq(scratch, zero_reg, branch_offset(L, false)>>2);
701 break;
702 case Uless:
703 sltu(scratch, rs, r2);
704 bne(scratch, zero_reg, branch_offset(L, false)>>2);
705 break;
706 case Uless_equal:
707 sltu(scratch, r2, rs);
708 beq(scratch, zero_reg, branch_offset(L, false)>>2);
709 break;
710
711 default:
712 UNREACHABLE();
713 }
714 }
715
716 // Trashes the at register if no scratch register is provided.
717 // We need to use a bgezal or bltzal, but they can't be used directly with the
718 // slt instructions. We could use sub or add instead but we would miss overflow
719 // cases, so we keep slt and add an intermediate third instruction.
720 void Assembler::blcond(Condition cond, int16_t offset, Register rs,
721 const Operand& rt, Register scratch) {
722 Register r2;
723 if(rt.is_reg()) {
724 r2 = rt.rm_;
725 } else {
726 r2 = scratch;
727 li(r2, rt);
728 }
729
730 switch(cond) {
731 case cc_always:
732 bal(offset);
733 break;
734 case eq:
735 bne(rs, r2, 2);
736 nop();
737 bal(offset);
738 break;
739 case ne:
740 beq(rs, r2, 2);
741 nop();
742 bal(offset);
743 break;
744
745 // Signed comparison
antonm 2010/01/21 13:10:45 ditto
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
746 case greater:
747 slt(scratch, r2, rs);
748 addiu(scratch, scratch, Operand(-1));
749 bgezal(scratch, offset);
750 break;
751 case greater_equal:
752 slt(scratch, rs, r2);
753 addiu(scratch, scratch, Operand(-1));
754 bltzal(scratch, offset);
755 break;
756 case less:
757 slt(scratch, rs, r2);
758 addiu(scratch, scratch, Operand(-1));
759 bgezal(scratch, offset);
760 break;
761 case less_equal:
762 slt(scratch, r2, rs);
763 addiu(scratch, scratch, Operand(-1));
764 bltzal(scratch, offset);
765 break;
766
767 // Unsigned comparison.
antonm 2010/01/21 13:10:45 ditto
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
768 case Ugreater:
769 sltu(scratch, r2, rs);
770 addiu(scratch, scratch, Operand(-1));
771 bgezal(scratch, offset);
772 break;
773 case Ugreater_equal:
774 sltu(scratch, rs, r2);
775 addiu(scratch, scratch, Operand(-1));
776 bltzal(scratch, offset);
777 break;
778 case Uless:
779 sltu(scratch, rs, r2);
780 addiu(scratch, scratch, Operand(-1));
781 bgezal(scratch, offset);
782 break;
783 case Uless_equal:
784 sltu(scratch, r2, rs);
785 addiu(scratch, scratch, Operand(-1));
786 bltzal(scratch, offset);
787 break;
788
789 default:
790 UNREACHABLE();
791 }
792 }
793
794 void Assembler::blcond(Condition cond, Label* L, Register rs,
795 const Operand& rt, Register scratch) {
796 Register r2;
797 if(rt.is_reg()) {
antonm 2010/01/21 13:10:45 if (
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
798 r2 = rt.rm_;
799 } else {
800 r2 = scratch;
801 li(r2, rt);
802 }
803
804 switch(cond) {
805 case cc_always:
806 bal(branch_offset(L, false)>>2);
807 break;
808 case eq:
809 bne(rs, r2, 2);
810 nop();
811 bal(branch_offset(L, false)>>2);
812 break;
813 case ne:
814 beq(rs, r2, 2);
815 nop();
816 bal(branch_offset(L, false)>>2);
817 break;
818
819 // Signed comparison
antonm 2010/01/21 13:10:45 ditto
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
820 case greater:
821 slt(scratch, r2, rs);
822 addiu(scratch, scratch, Operand(-1));
823 bgezal(scratch, branch_offset(L, false)>>2);
824 break;
825 case greater_equal:
826 slt(scratch, rs, r2);
827 addiu(scratch, scratch, Operand(-1));
828 bltzal(scratch, branch_offset(L, false)>>2);
829 break;
830 case less:
831 slt(scratch, rs, r2);
832 addiu(scratch, scratch, Operand(-1));
833 bgezal(scratch, branch_offset(L, false)>>2);
834 break;
835 case less_equal:
836 slt(scratch, r2, rs);
837 addiu(scratch, scratch, Operand(-1));
838 bltzal(scratch, branch_offset(L, false)>>2);
839 break;
840
841 // Unsigned comparison.
antonm 2010/01/21 13:10:45 ditto
842 case Ugreater:
843 sltu(scratch, r2, rs);
844 addiu(scratch, scratch, Operand(-1));
845 bgezal(scratch, branch_offset(L, false)>>2);
846 break;
847 case Ugreater_equal:
848 sltu(scratch, rs, r2);
849 addiu(scratch, scratch, Operand(-1));
850 bltzal(scratch, branch_offset(L, false)>>2);
851 break;
852 case Uless:
853 sltu(scratch, rs, r2);
854 addiu(scratch, scratch, Operand(-1));
855 bgezal(scratch, branch_offset(L, false)>>2);
856 break;
857 case Uless_equal:
858 sltu(scratch, r2, rs);
859 addiu(scratch, scratch, Operand(-1));
860 bltzal(scratch, branch_offset(L, false)>>2);
861 break;
862
863 default:
864 UNREACHABLE();
865 }
866 }
867
// Unconditional branch, encoded as beq with both operands zero_reg.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}

// Unconditional branch-and-link (bgezal on zero_reg is always taken).
void Assembler::bal(int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  bgezal(zero_reg, offset);
}

// Branch if equal.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  instrmod2(BEQ, rs, rt, offset);
}

// Branch if greater than or equal to zero (REGIMM-encoded).
void Assembler::bgez(Register rs, int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  instrmod2(REGIMM, rs, BGEZ, offset);
}

// Branch and link if greater than or equal to zero.
void Assembler::bgezal(Register rs, int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  instrmod2(REGIMM, rs, BGEZAL, offset);
}

// Branch if greater than zero.
void Assembler::bgtz(Register rs, int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  instrmod2(BGTZ, rs, zero_reg, offset);
}

// Branch if less than or equal to zero.
void Assembler::blez(Register rs, int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  instrmod2(BLEZ, rs, zero_reg, offset);
}

// Branch if less than zero (REGIMM-encoded).
void Assembler::bltz(Register rs, int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  instrmod2(REGIMM, rs, BLTZ, offset);
}

// Branch and link if less than zero.
void Assembler::bltzal(Register rs, int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  instrmod2(REGIMM, rs, BLTZAL, offset);
}

// Branch-likely variants, kept for reference but not enabled yet:
//void Assembler::beql(Register rs, Register rt, int16_t offset) {
//  instrmod2(BEQL, rs, rt, offset);
//}

//void Assembler::blezl(Register rs, int16_t offset) {
//  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
//  instrmod2(BLEZL, rs, zero_reg, offset);
//}

//void Assembler::bgtzl(Register rs, int16_t offset) {
//  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
//  instrmod2(BGTZL, rs, zero_reg, offset);
//}

// Branch if not equal.
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
  instrmod2(BNE, rs, rt, offset);
}

//void Assembler::bnel(Register rs, Register rt, int16_t offset) {
//  ASSERT(is_int16(offset));  // We check the offset can be used in a branch.
//  instrmod2(BNEL, rs, rt, offset);
//}
935
936
937 void Assembler::j(const Operand& rt) {
938 ASSERT(!rt.is_reg() && is_uint26(rt.imm32_));
939 if(!MustUse_at(rt.rmode_)) {
antonm 2010/01/21 13:10:45 if (. And below
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
940 emit(J | rt.imm32_);
antonm 2010/01/21 13:10:45 +2 spaces
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
941 } else {
942 li(at, rt);
943 jr(Operand(at));
944 }
945 }
946
// Jump register.
void Assembler::jr(Register rs) {
  instrmod1(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
}
// Operand overload; the operand must wrap a register.
void Assembler::jr(const Operand& rs) {
  ASSERT(rs.is_reg());
  jr(rs.rm());
}
954
955 void Assembler::jal(const Operand& rt) {
956 ASSERT(!rt.is_reg() && is_uint26(rt.imm32_));
957 if(!MustUse_at(rt.rmode_)) {
958 emit(JAL | rt.imm32_);
959 } else {
960 li(at, rt);
961 jalr(Operand(at));
962 }
963 }
964
// Jump-and-link register; the return address is written to |rd|.
void Assembler::jalr(Register rs, Register rd) {
  instrmod1(SPECIAL, rs, zero_reg, rd, 0, JALR);
}
// Operand overload; the operand must wrap a register.
void Assembler::jalr(const Operand& rs, Register rd) {
  ASSERT(rs.is_reg());
  jalr(rs.rm(), rd);
}
972
973 void Assembler::jcond(const Operand& target,
974 Condition cond, Register rs, const Operand& rt) {
975 if(target.is_reg()) {
976 if(cond == cc_always) {
977 jr(target);
978 } else {
979 bcond(NegateCondition(cond), 2, rs, rt);
980 nop();
981 jr(target);
982 }
983 } else { // !target.is_reg()
984 if(cond == cc_always) {
985 j(target);
986 } else {
987 if(!MustUse_at(rt.rmode_)) {
988 bcond(NegateCondition(cond), 2, rs, rt);
989 nop();
990 j(target); // will generate only one instruction.
991 } else {
992 bcond(NegateCondition(cond), 4, rs, rt);
993 nop();
994 j(target); // will generate exactly 3 instructions.
995 }
996 }
997 }
998 }
999 void Assembler::jalcond(const Operand& target,
1000 Condition cond, Register rs, const Operand& rt) {
1001 if(target.is_reg()) {
1002 if(cond == cc_always) {
1003 jalr(target);
1004 } else {
1005 bcond(NegateCondition(cond), 2, rs, rt);
1006 nop();
1007 jalr(target);
1008 }
1009 } else { // !target.is_reg()
1010 if(cond == cc_always) {
1011 jal(target);
1012 } else {
1013 if(!MustUse_at(rt.rmode_)) {
1014 bcond(NegateCondition(cond), 2, rs, rt);
1015 nop();
1016 jal(target); // will generate only one instruction.
1017 } else {
1018 bcond(NegateCondition(cond), 4, rs, rt);
1019 nop();
1020 jal(target); // will generate exactly 3 instructions.
1021 }
1022 }
1023 }
1024 }
1025
1026
1027 //-------Data-processing-instructions---------
1028
1029 // Arithmetic
1030
// Add (the trapping-on-overflow ADD per the MIPS ISA, as opposed to ADDU).
// Immediate operands are delegated to addi.
void Assembler::add(Register rd, Register rs, const Operand& rt) {
  if (!rt.is_reg()) {
    addi(rd, rs, rt);
  } else {
    instrmod1(SPECIAL, rs, rt.rm(), rd, 0, ADD);
  }
}
1038 void Assembler::addi(Register rd, Register rs, const Operand& rt) {
1039 if(is_int16(rt.imm32_)) {
1040 instrmod2(ADDI, rs, rd, (int16_t) rt.imm32_);
1041 } else {
1042 CHECK(!rs.is(at));
1043 li(at, rt);
1044 add(rd, rs, at);
1045 }
1046 }
1047
// Add unsigned (non-trapping variant).  Immediate operands go to addiu.
void Assembler::addu(Register rd, Register rs, const Operand& rt) {
  if (!rt.is_reg()) {
    addiu(rd, rs, rt);
  } else {
    instrmod1(SPECIAL, rs, rt.rm(), rd, 0, ADDU);
  }
}
1055 void Assembler::addiu(Register rd, Register rs, const Operand& rt) {
1056 if(is_int16(rt.imm32_)) {
1057 instrmod2(ADDIU, rs, rd, (int16_t) rt.imm32_);
1058 } else {
1059 CHECK(!rs.is(at));
1060 li(at, rt);
1061 addu(rd, rs, at);
1062 }
1063 }
1064
// Subtract (trapping variant).  Only register operands are supported —
// there is no subtract-immediate on MIPS.
void Assembler::sub(Register rd, Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  instrmod1(SPECIAL, rs, rt.rm(), rd, 0, SUB);
}
// Subtract unsigned (non-trapping variant); register operands only.
void Assembler::subu(Register rd, Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  instrmod1(SPECIAL, rs, rt.rm(), rd, 0, SUBU);
}
1073
// Signed multiply; per the MIPS ISA the result goes to the hi/lo registers
// (hence the zero_reg destination field).  Immediates go through at.
void Assembler::mult(Register rs, const Operand& rt) {
  if(!rt.is_reg()) {
    li(at, Operand(rt));
    instrmod1(SPECIAL, rs, at, zero_reg, 0, MULT);
  } else {
    instrmod1(SPECIAL, rs, rt.rm(), zero_reg, 0, MULT);
  }
}
// Unsigned multiply into hi/lo.
void Assembler::multu(Register rs, const Operand& rt) {
  if(!rt.is_reg()) {
    li(at, Operand(rt));
    instrmod1(SPECIAL, rs, at, zero_reg, 0, MULTU);
  } else {
    instrmod1(SPECIAL, rs, rt.rm(), zero_reg, 0, MULTU);
  }
}
1090
1091 void Assembler::div(Register rs, const Operand& rt) {
1092 if(!rt.is_reg()) {
1093 li(at, Operand(rt));
1094 instrmod1(SPECIAL, rs, at, zero_reg, 0, DIV);
1095 } else {
1096 instrmod1(SPECIAL, rs, rt.rm(), zero_reg, 0, DIV);
1097 }
1098 }
1099 void Assembler::divu(Register rs, const Operand& rt) {
1100 if(!rt.is_reg()) {
1101 li(at, Operand(rt));
1102 instrmod1(SPECIAL, rs, at, zero_reg, 0, DIVU);
1103 } else {
1104 instrmod1(SPECIAL, rs, rt.rm(), zero_reg, 0, DIVU);
1105 }
1106 }
1107
1108 void Assembler::mul(Register rd, Register rs, const Operand& rt) {
1109 if(!rt.is_reg()) {
1110 li(at, Operand(rt));
1111 instrmod1(SPECIAL2, rs, at, rd, 0, MUL);
1112 } else {
1113 instrmod1(SPECIAL2, rs, rt.rm(), rd, 0, MUL);
1114 }
1115 }
1116
1117 // Logical
1118
1119 void Assembler::and_(Register rd, Register rs, const Operand& rt) {
1120 if (!rt.is_reg()) {
1121 andi(rd, rs, rt);
1122 } else {
1123 instrmod1(SPECIAL, rs, rt.rm(), rd, 0, AND);
1124 }
1125 }
1126 void Assembler::andi(Register rd, Register rs, const Operand& rt) {
1127 if(is_uint16(rt.imm32_)) {
1128 instrmod2(ANDI, rs, rd, (int16_t) rt.imm32_);
1129 } else {
1130 CHECK(!rs.is(at));
1131 li(at, rt);
1132 and_(rd, rs, Operand(at));
1133 }
1134 }
1135
1136 void Assembler::or_(Register rd, Register rs, const Operand& rt) {
1137 if (!rt.is_reg()) {
1138 ori(rd, rs, rt);
1139 } else {
1140 instrmod1(SPECIAL, rs, rt.rm(), rd, 0, OR);
1141 }
1142 }
1143 void Assembler::ori(Register rt, Register rs, const Operand& j) {
1144 if(is_uint16(j.imm32_)) {
1145 instrmod2(ORI, rs, rt, (int16_t) j.imm32_);
1146 } else {
1147 CHECK(!rs.is(at));
1148 li(at, j);
1149 or_(rt, rs, Operand(at));
1150 }
1151 }
1152
1153 void Assembler::xor_(Register rd, Register rs, const Operand& rt) {
1154 if (!rt.is_reg()) {
1155 xori(rd, rs, rt);
1156 } else {
1157 instrmod1(SPECIAL, rs, rt.rm(), rd, 0, XOR);
1158 }
1159 }
1160 void Assembler::xori(Register rd, Register rs, const Operand& rt) {
1161 if(is_uint16(rt.imm32_)) {
1162 instrmod2(XORI, rs, rd, (int16_t) rt.imm32_);
1163 } else {
1164 CHECK(!rs.is(at));
1165 li(at, rt);
1166 xor_(rd, rs, Operand(at));
1167 }
1168 }
1169
1170 void Assembler::nor(Register rd, Register rs, const Operand& rt) {
1171 instrmod1(SPECIAL, rs, rt.rm(), rd, 0, NOR);
1172 }
1173
1174 // Shifts
// Shift left logical by immediate: rd = rt << sa.
void Assembler::sll(Register rd, Register rt, uint16_t sa) {
  instrmod1(SPECIAL, zero_reg, rt, rd, sa, SLL);
}
// Shift left logical by register: rd = rt << rs.
void Assembler::sllv(Register rd, Register rt, Register rs) {
  instrmod1(SPECIAL, rs, rt, rd, 0, SLLV);
}
1181
// Shift right logical (zero-fill) by immediate: rd = rt >> sa.
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  instrmod1(SPECIAL, zero_reg, rt, rd, sa, SRL);
}
// Shift right logical (zero-fill) by register: rd = rt >> rs.
void Assembler::srlv(Register rd, Register rt, Register rs) {
  instrmod1(SPECIAL, rs, rt, rd, 0, SRLV);
}
1188
// Shift right arithmetic (sign-fill) by immediate: rd = rt >> sa.
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  instrmod1(SPECIAL, zero_reg, rt, rd, sa, SRA);
}
// Shift right arithmetic (sign-fill) by register: rd = rt >> rs.
void Assembler::srav(Register rd, Register rt, Register rs) {
  instrmod1(SPECIAL, rs, rt, rd, 0, SRAV);
}
1195
1196
1197 //------------Memory-instructions-------------
1198
// Load byte (sign-extended) from memory operand rs into rd.
void Assembler::lb(Register rd, const MemOperand& rs) {
  instrmod2(LB, rs.rm(), rd, rs.offset_);
}
1202
// Load byte unsigned (zero-extended) from memory operand rs into rd.
void Assembler::lbu(Register rd, const MemOperand& rs) {
  instrmod2(LBU, rs.rm(), rd, rs.offset_);
}
1206
// Load 32-bit word from memory operand rs into rd.
void Assembler::lw(Register rd, const MemOperand& rs) {
  instrmod2(LW, rs.rm(), rd, rs.offset_);
}
1210
// Store low byte of rd to memory operand rs.
void Assembler::sb(Register rd, const MemOperand& rs) {
  instrmod2(SB, rs.rm(), rd, rs.offset_);
}
1214
// Store 32-bit word rd to memory operand rs.
void Assembler::sw(Register rd, const MemOperand& rs) {
  instrmod2(SW, rs.rm(), rd, rs.offset_);
}
1218
// Load upper immediate: rd = j << 16 (low half cleared).
void Assembler::lui(Register rd, uint16_t j) {
  instrmod2(LUI, zero_reg, rd, j);
}
1222
1223 //-------------Misc-instructions--------------
1224
1225 // Break / Trap instructions
// Emit a break instruction carrying a 20-bit code in bits 6..25; the
// code can be inspected by a debugger or trap handler.
void Assembler::break_(uint32_t code) {
  ASSERT( (code & ~0xfffff) == 0);  // Code must fit in 20 bits.
  Instr break_instr = SPECIAL | BREAK | (code<<6);
  emit(break_instr);
}
1231
// Trap if rs >= rt (signed), with a 10-bit code in bits 6..15.
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code()<<21 | rt.code()<<16 | code<<6;
  emit(instr);
}
1237
// Trap if rs >= rt (unsigned), with a 10-bit code in bits 6..15.
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code()<<21 | rt.code()<<16 | code<<6;
  emit(instr);
}
1243
// Trap if rs < rt (signed), with a 10-bit code in bits 6..15.
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TLT | rs.code()<<21 | rt.code()<<16 | code<<6;
  emit(instr);
}
1249
// Trap if rs < rt (unsigned), with a 10-bit code in bits 6..15.
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TLTU | rs.code()<<21 | rt.code()<<16 | code<<6;
  emit(instr);
}
1255
1256
// Trap if rs == rt, with a 10-bit code in bits 6..15.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TEQ | rs.code()<<21 | rt.code()<<16 | code<<6;
  emit(instr);
}
1262
// Trap if rs != rt, with a 10-bit code in bits 6..15.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TNE | rs.code()<<21 | rt.code()<<16 | code<<6;
  emit(instr);
}
1268
1269 // Move from HI/LO register
1270
// Move the HI register (high half of multiply / divide remainder) to rd.
void Assembler::mfhi(Register rd) {
  instrmod1(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}
// Move the LO register (low half of multiply / divide quotient) to rd.
void Assembler::mflo(Register rd) {
  instrmod1(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
1277
1278 // Set on less than instructions
1279 void Assembler::slt(Register rd, Register rs, const Operand& rt) {
1280 if (!rt.is_reg()) {
1281 slti(rd, rs, rt);
1282 } else {
1283 instrmod1(SPECIAL, rs, rt.rm(), rd, 0, SLT);
1284 }
1285 }
1286
1287 void Assembler::sltu(Register rd, Register rs, const Operand& rt) {
1288 if (!rt.is_reg()) {
1289 sltiu(rd, rs, rt);
1290 } else {
1291 instrmod1(SPECIAL, rs, rt.rm(), rd, 0, SLTU);
1292 }
1293 }
1294
1295 void Assembler::slti(Register rd, Register rs, const Operand& rt) {
1296 ASSERT(!rt.is_reg());
1297 if(is_uint16(rt.imm32_)) {
1298 instrmod2(SLTI, rs, rd, (int16_t) rt.imm32_);
1299 } else {
1300 CHECK(!rs.is(at));
1301 li(at, rt);
1302 slt(rd, rs, Operand(at));
1303 }
1304 }
1305
1306 void Assembler::sltiu(Register rd, Register rs, const Operand& rt) {
1307 ASSERT(!rt.is_reg());
1308 if(is_uint16(rt.imm32_)) {
1309 instrmod2(SLTIU, rs, rd, (int16_t) rt.imm32_);
1310 } else {
1311 CHECK(!rs.is(at));
1312 li(at, rt);
1313 sltu(rd, rs, Operand(at));
1314 }
1315 }
1316
1317
1318 //--------Coprocessor-instructions----------------
1319
1320 // Load, store, move
antonm 2010/01/21 13:10:45 why additional indenting?
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
// Load a 32-bit word from memory into FPU register fd.
void Assembler::lwc1(CRegister fd, const MemOperand& src) {
  instrmod2(LWC1, src.rm(), fd, src.offset_ );
}
1324
// Load a 64-bit double from memory into FPU register fd, tolerating
// 4-byte-aligned addresses.
void Assembler::ldc1(CRegister fd, const MemOperand& src) {
  // TODO(MIPS.6)
  // MIPS architecture expect doubles to be 8-bytes aligned. However v8 has
  // currently everything aligned to 4 bytes.
  // The current solution is to use intermediate general purpose registers
  // when the value is not 8-byte aligned.
  // NOTE(review): the branch offsets below (7 and 2) count the emitted
  // instructions between the branch and its target; keep them in sync if
  // instructions are added or removed.

  // Load the address into at and test its low 3 bits for 8-byte alignment.
  addiu(at, src.rm(), Operand(src.offset_));
  andi(at, at, Operand(7));
  beq(at, zero_reg, 7);
  nop();

  // Address is not 8-byte aligned. Load manually.
  // mtc1 must be executed first. (see MIPS32 ISA)

  // Load first half of the double
  lw(at, MemOperand(src.rm(), src.offset_ + kPointerSize));
  mtc1(fd, at);
  // Load second half of the double
  lw(at, MemOperand(src.rm(), src.offset_));
  mthc1(fd, at);
  b(2);
  nop();

  // Address is 8-byte aligned: a single ldc1 suffices.
  instrmod2(LDC1, src.rm(), fd, src.offset_ );
}
1353
// Store the 32-bit word in FPU register fd to memory.
void Assembler::swc1(CRegister fd, const MemOperand& src) {
  instrmod2(SWC1, src.rm(), fd, src.offset_ );
}
1357
// Store the 64-bit double in FPU register fd to memory, tolerating
// 4-byte-aligned addresses (mirror of ldc1 above).
void Assembler::sdc1(CRegister fd, const MemOperand& src) {
  // TODO(MIPS.6)
  // MIPS architecture expect doubles to be 8-bytes aligned. However v8 has
  // currently everything aligned to 4 bytes.
  // The current solution is to use intermediate general purpose registers
  // when the value is not 8-byte aligned.
  // NOTE(review): the branch offsets below (7 and 2) count the emitted
  // instructions between the branch and its target; keep them in sync if
  // instructions are added or removed.

  // Load the address into at and test its low 3 bits for 8-byte alignment.
  addiu(at, src.rm(), Operand(src.offset_));
  andi(at, at, Operand(7));
  beq(at, zero_reg, 7);
  //nop(); The following mfhc1 is harmless in the delay slot, so no nop.

  // Address is not 8-byte aligned. Store manually.
  // Store first half of the double
  mfhc1(fd, at);
  sw(at, MemOperand(src.rm(), src.offset_));
  // Store second half of the double
  mfc1(fd, at);
  sw(at, MemOperand(src.rm(), src.offset_ + kPointerSize));
  b(2);
  nop();

  // Address is 8-byte aligned: a single sdc1 suffices.
  instrmod2(SDC1, src.rm(), fd, src.offset_ );
}
1384
// Move word from GPR rt to FPU register fs.
void Assembler::mtc1(CRegister fs, Register rt) {
  instrmod1(COP1, MTC1, rt, fs, f0 );
}
1388
// Move word from GPR rt to the high half of FPU register pair fs.
void Assembler::mthc1(CRegister fs, Register rt) {
  instrmod1(COP1, MTHC1, rt, fs, f0 );
}
1392
// Move word from FPU register fs to GPR rt.
void Assembler::mfc1(CRegister fs, Register rt) {
  instrmod1(COP1, MFC1, rt, fs, f0 );
}
1396
// Move word from the high half of FPU register pair fs to GPR rt.
void Assembler::mfhc1(CRegister fs, Register rt) {
  instrmod1(COP1, MFHC1, rt, fs, f0 );
}
1400
1401 // Conversions
1402
// Convert single-precision fs to 32-bit integer word in fd.
void Assembler::cvt_w_s(CRegister fd, CRegister fs) {
  instrmod1(COP1, S, f0, fs, fd, CVT_W_S);
}
1406
// Convert double-precision fs to 32-bit integer word in fd.
void Assembler::cvt_w_d(CRegister fd, CRegister fs) {
  instrmod1(COP1, D, f0, fs, fd, CVT_W_D);
}
1410
// Convert single-precision fs to 64-bit integer in fd.
void Assembler::cvt_l_s(CRegister fd, CRegister fs) {
  instrmod1(COP1, S, f0, fs, fd, CVT_L_S);
}
1414
// Convert double-precision fs to 64-bit integer in fd.
void Assembler::cvt_l_d(CRegister fd, CRegister fs) {
  instrmod1(COP1, D, f0, fs, fd, CVT_L_D);
}
1418
// Convert 32-bit integer word fs to single-precision in fd.
void Assembler::cvt_s_w(CRegister fd, CRegister fs) {
  instrmod1(COP1, W, f0, fs, fd, CVT_S_W);
}
1422
// Convert 64-bit integer fs to single-precision in fd.
void Assembler::cvt_s_l(CRegister fd, CRegister fs) {
  instrmod1(COP1, L, f0, fs, fd, CVT_S_L);
}
1426
// Convert double-precision fs to single-precision in fd.
void Assembler::cvt_s_d(CRegister fd, CRegister fs) {
  instrmod1(COP1, D, f0, fs, fd, CVT_S_D);
}
1430
// Convert 32-bit integer word fs to double-precision in fd.
void Assembler::cvt_d_w(CRegister fd, CRegister fs) {
  instrmod1(COP1, W, f0, fs, fd, CVT_D_W);
}
1434
// Convert 64-bit integer fs to double-precision in fd.
void Assembler::cvt_d_l(CRegister fd, CRegister fs) {
  instrmod1(COP1, L, f0, fs, fd, CVT_D_L);
}
1438
// Convert single-precision fs to double-precision in fd.
void Assembler::cvt_d_s(CRegister fd, CRegister fs) {
  instrmod1(COP1, S, f0, fs, fd, CVT_D_S);
}
1442
1443 // Conditions
// Emit a floating-point compare (c.cond.fmt) of fs against ft, writing
// condition flag cc (0..7). 'fmt' selects single or double precision.
void Assembler::c(C_Condition cond, SecondaryField fmt,
                  CRegister ft, CRegister fs, uint16_t cc) {
  ASSERT(is_uint3(cc));               // Only 8 FP condition flags exist.
  ASSERT((fmt & ~(31<<21)) == 0);     // fmt must occupy bits 21..25 only.
  Instr instr = COP1 | fmt | ft.code()<<16 | fs.code()<<11 | cc<<8 | 3<<4 | cond;
  emit(instr);
}
1451
// Branch (by 16-bit instruction offset) if FP condition flag cc is false.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc<<18 | 0<<16 | (offset & Imm16Mask);
  emit(instr);
}
1457
// Branch (by 16-bit instruction offset) if FP condition flag cc is true.
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc<<18 | 1<<16 | (offset & Imm16Mask);
  emit(instr);
}
1463
1464 //------------Pseudo-instructions-------------
1465
// NOTE(review): despite the name, this emits a bitwise NOT of rt into rd
// (xor against all ones) and clobbers 'at'; it does NOT emit the MIPS
// movn (conditional-move-on-nonzero) instruction. TODO: confirm intent
// and rename or reimplement accordingly.
void Assembler::movn(Register rd, Register rt) {
  addiu(at, zero_reg, Operand(-1)); // Fill at with ones.
  xor_(rd, rt, Operand(at));
}
1470
1471 // load wartd in a register
1472 void Assembler::li(Register rd, Operand j, bool gen2instr) {
1473 ASSERT(!j.is_reg());
1474
1475
1476 if(!MustUse_at(j.rmode_) && !gen2instr) {
1477 // Normal load of an immediate value which does not need Relocation Info.
1478 if(is_int16(j.imm32_))
1479 addiu(rd, zero_reg, j);
1480 else if(!(j.imm32_ & HIMask))
1481 ori(rd, zero_reg, j);
1482 else if(!(j.imm32_ & LOMask))
1483 lui(rd, (HIMask & j.imm32_)>>16);
1484 else {
1485 lui(rd, (HIMask & j.imm32_)>>16);
1486 ori(rd, rd, (LOMask & j.imm32_));
1487 }
1488 } else if ( MustUse_at(j.rmode_) ) {
1489 // We need Relocation Information here.
1490 RecordRelocInfo(j.rmode_, j.imm32_);
1491 // We need always the same number of instructions as we may need to patch
1492 // this code to load another value which may need 2 instructions to load.
1493 if(is_int16(j.imm32_)){
1494 nop();
1495 addiu(rd, zero_reg, j);
1496 } else if(!(j.imm32_ & HIMask)) {
1497 nop();
1498 ori(rd, zero_reg, j);
1499 } else if(!(j.imm32_ & LOMask)) {
1500 nop();
1501 lui(rd, (HIMask & j.imm32_)>>16);
1502 } else {
1503 lui(rd, (HIMask & j.imm32_)>>16);
1504 ori(rd, rd, (LOMask & j.imm32_));
1505 }
1506 } else if ( gen2instr ) {
1507 // We need always the same number of instructions as we may need to patch
1508 // this code to load another value which may need 2 instructions to load.
1509 if(is_int16(j.imm32_)){
1510 nop();
1511 addiu(rd, zero_reg, j);
1512 } else if(!(j.imm32_ & HIMask)) {
1513 nop();
1514 ori(rd, zero_reg, j);
1515 } else if(!(j.imm32_ & LOMask)) {
1516 nop();
1517 lui(rd, (HIMask & j.imm32_)>>16);
1518 } else {
1519 lui(rd, (HIMask & j.imm32_)>>16);
1520 ori(rd, rd, (LOMask & j.imm32_));
1521 }
1522 }
1523 }
1524
1525
1526 void Assembler::multi_push(RegList regs) {
1527 int16_t NumSaved = 0;
1528 int16_t NumToPush = NumBitsSet(regs);
1529
1530 addiu(sp, sp, Operand(-4*NumToPush));
1531 for (int16_t i = 0; i< kNumRegisters; i++) {
1532 if((regs & (1<<i)) != 0 ) {
1533 sw(ToRegister(i),
1534 MemOperand(sp, 4*(NumToPush - ++NumSaved)));
1535 }
1536 }
1537 }
// Push every register in 'regs' in one stack adjustment with the layout
// reversed relative to multi_push: higher-numbered registers end up at
// higher addresses (mirror of multi_pop_reversed).
void Assembler::multi_push_reversed(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumBitsSet(regs);

  addiu(sp, sp, Operand(-4*NumToPush));
  for (int16_t i = kNumRegisters; --i>=0;) {
    if((regs & (1<<i)) != 0 ) {
      sw(ToRegister(i),
          MemOperand(sp, 4*(NumToPush - ++NumSaved)));
    }
  }
}
1550
// Pop every register in 'regs', matching the stack layout produced by
// multi_push, then release the stack space in one adjustment.
void Assembler::multi_pop(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = kNumRegisters; --i>=0;) {
    if((regs & (1<<i)) != 0 ) {
      lw(ToRegister(i), MemOperand(sp, 4*(NumSaved++)));
    }
  }
  addiu(sp, sp, Operand(4*NumSaved));
}
// Pop every register in 'regs', matching the stack layout produced by
// multi_push_reversed, then release the stack space in one adjustment.
void Assembler::multi_pop_reversed(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = 0; i< kNumRegisters; i++) {
    if((regs & (1<<i)) != 0 ) {
      lw(ToRegister(i), MemOperand(sp, 4*(NumSaved++)));
    }
  }
  addiu(sp, sp, Operand(4*NumSaved));
}
1571
1572 // Exception-generating instructions and debugging support
// Halt generated code. Currently just emits a break with a magic code
// that is easy to spot when reading memory; 'msg' is ignored for now.
void Assembler::stop(const char* msg) {
  // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
  // We use the 0x54321 value to be able to find it easily when reading memory.
  break_(0x54321);
}
1578
1579
1580
1581
1582 // Debugging
// Mark the current pc as a JS return site: flush pending source
// positions, then record a JS_RETURN reloc entry.
void Assembler::RecordJSReturn() {
  WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
1588
1589
// Attach a comment to the current pc via reloc info; only emitted when
// --debug-code is on. The msg pointer must stay valid for the code's
// lifetime since only the pointer is recorded.
void Assembler::RecordComment(const char* msg) {
  if (FLAG_debug_code) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
1596
1597
// Remember the current source position; it is written out lazily by
// WriteRecordedPositions. kNoPosition is ignored.
void Assembler::RecordPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_position_ = pos;
}
1603
1604
// Remember the current statement position; it is written out lazily by
// WriteRecordedPositions. kNoPosition is ignored.
void Assembler::RecordStatementPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_statement_position_ = pos;
}
1610
1611
// Flush any pending statement/expression positions to the reloc info
// stream, skipping values already written.
void Assembler::WriteRecordedPositions() {
  // Write the statement position if it is different from what was written last
  // time.
  if (current_statement_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
    written_statement_position_ = current_statement_position_;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (current_position_ != written_position_ &&
      current_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::POSITION, current_position_);
    written_position_ = current_position_;
  }
}
1630
1631
// Grow the code buffer: allocate a larger backing store, copy the
// instruction stream (at the start) and the reloc info (at the end),
// and rebase the internal pointers by the relocation deltas.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: 4KB minimum, doubling up to 1MB, then
  // growing linearly by 1MB.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data: instructions grow up from the start, reloc info grows
  // down from the end, so each is shifted by its own delta.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);


  // On ia32 or ARM pc relative addressing is used, and we thus need to apply a
  // shift by pc_delta. But on MIPS the target address is directly loaded, so
  // we do not need to relocate here.

  ASSERT(!overflow());
}
1674
1675
// Append a reloc info entry for the current pc, skipping NONE entries
// and external references when the heap will not be serialized.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // these modes do not need an entry in the constant pool
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    reloc_info_writer.Write(&rinfo);
  }
}
1696
1697
1698 Address Assembler::target_address_at(Address pc) {
1699 // return Memory::Address_at(target_address_address_at(pc));
antonm 2010/01/21 13:10:45 remove?
Alexandre 2010/01/22 23:08:42 Removed. On 2010/01/21 13:10:45, antonm wrote:
1700 Instr instr1 = instr_at(pc);
1701 Instr instr2 = instr_at(pc + kInstrSize);
1702 // Check we have 2 instructions geneerated by li.
1703 ASSERT( ((instr1 & OpcodeMask)==LUI && (instr2 & OpcodeMask)==ORI) ||
1704 ((instr1==0) && ((instr2 & OpcodeMask)== ADDI ||
1705 (instr2 & OpcodeMask)== ORI ||
1706 (instr2 & OpcodeMask)== LUI ))
1707 );
1708 // Interpret these 2 instructions.
1709 if(instr1==0) {
1710 if((instr2 & OpcodeMask)== ADDI) {
1711 return (Address)(((instr2 & Imm16Mask)<<16)>>16);
1712 } else if ((instr2 & OpcodeMask)== ORI) {
1713 return (Address)(instr2 & Imm16Mask);
1714 } else if ((instr2 & OpcodeMask)== LUI) {
1715 return(Address)((instr2 & Imm16Mask)<<16);
1716 }
1717 } else if((instr1 & OpcodeMask)==LUI && (instr2 & OpcodeMask)==ORI) {
1718 // 32 bits value.
1719 return (Address)((instr1 & Imm16Mask)<<16 | (instr2 & Imm16Mask));
1720 }
1721
1722 // We should never get here.
antonm 2010/01/21 13:10:45 UNREACHABLE?
Alexandre 2010/01/22 23:08:42 Added UNREACHABLE() macro. On 2010/01/21 13:10:45,
1723 return (Address)0x0;
1724
antonm 2010/01/21 13:10:45 remove empty line?
Alexandre 2010/01/22 23:08:42 Style issue fixed. On 2010/01/21 13:10:45, antonm
1725 }
1726
1727
1728 void Assembler::set_target_address_at(Address pc, Address target) {
1729 // Memory::Address_at(target_address_address_at(pc)) = target;
1730 // On MIPS we need to patch the code to generate.
1731
1732 // First check we have a li
1733 // We use #define because using Instr would fail when building the release as
1734 // instr1 would be unused.
1735 // Instr instr1 = instr_at(pc);
1736 // Instr instr2 = instr_at(pc + kInstrSize);
1737 #define instr1 instr_at(pc)
1738 #define instr2 instr_at(pc + kInstrSize)
1739
1740 // Check we have indeed the result from a li with MustUse_at true.
1741 ASSERT( ((instr1 & OpcodeMask)==LUI && (instr2 & OpcodeMask)==ORI) ||
1742 ((instr1==0) && ((instr2 & OpcodeMask)== ADDIU ||
1743 (instr2 & OpcodeMask)== ORI ||
1744 (instr2 & OpcodeMask)== LUI ))
1745 );
1746
1747
1748 //______________________________________________________________________________
1749 uint32_t rd_code = (instr2 & (31<<16));
1750 uint32_t* p = reinterpret_cast<uint32_t*>(pc);
1751 uint32_t itarget = (uint32_t)(target);
1752
1753 if(is_int16(itarget)){
1754 // nop();
1755 // addiu(rd, zero_reg, j);
1756 *p = 0x0;
1757 *(p+1) = ADDIU | rd_code | (itarget&LOMask);
1758 } else if(!(itarget & HIMask)) {
1759 // nop();
1760 // ori(rd, zero_reg, j);
1761 *p = 0x0;
1762 *(p+1) = ORI | rd_code | (itarget&LOMask);
1763 } else if(!(itarget & LOMask)) {
1764 // nop();
1765 // lui(rd, (HIMask & itarget)>>16);
1766 *p = 0x0;
1767 *(p+1) = LUI | rd_code | ((itarget&HIMask)>>16);
1768 } else {
1769 // lui(rd, (HIMask & itarget)>>16);
1770 // ori(rd, rd, (LOMask & itarget));
1771 *p = LUI | rd_code | ((itarget&HIMask)>>16);
1772 *(p+1) = ORI | rd_code | (rd_code<<5) | (itarget&LOMask);
1773 }
1774 //______________________________________________________________________________
1775 // Using this leads to an error in Object** HandleScope::Extend().
1776 // Disabling it for now.
1777 // static const int kLoadCodeSize = 2;
1778 //
1779 // // Get the target register code.
1780 // // Currently all possible 2nd instructions encode the destination register a t
1781 // // bit 16. (ori, addiu, lui)
1782 // uint16_t rd_code = (instr2 & 31<<16)>>16;
1783 //
1784 // // Create a code patcher
1785 // CodePatcher patcher(pc, kLoadCodeSize);
1786 //
1787 // // Add a label for checking the size of the code used for returning.
1788 ////#ifdef DEBUG
1789 //// Label check_codesize;
1790 //// patcher.masm()->bind(&check_codesize);
1791 ////#endif
1792 //
1793 // // Patch the code.
1794 // patcher.masm()->li(RegisterAllocator::ToRegister(rd_code),
1795 // Operand((uint32_t)target), true);
1796 //
1797 // // Check that the size of the code generated is as expected.
1798 // // The code seems to be patched correctly but the ASSERT fails.
1799 // // TODO: Have the ASSERT work or correct it.
1800 //// ASSERT_EQ(kLoadCodeSize,
1801 //// patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
1802 //______________________________________________________________________________
1803
1804 CPU::FlushICache(pc, 2* sizeof(int32_t));
1805 }
1806
1807
1808 } } // namespace v8::internal
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698