Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(106)

Side by Side Diff: src/mips/macro-assembler-mips.cc

Issue 1320006: Updates and fixes for MIPS support. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 28 matching lines...) Expand all
39 39
// Construct a macro assembler emitting into the given code buffer.
// Stub-call bookkeeping starts permissive (allow_stub_calls_) and the
// code object is lazily replaced once real code is associated.
MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      unresolved_(0),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {
}
47 47
48 48
49 // Arguments macros
50 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
51 #define COND_ARGS cond, r1, r2
49 52
50 void MacroAssembler::Jump(Register target, Condition cond, 53 #define REGISTER_TARGET_BODY(Name) \
51 Register r1, const Operand& r2) { 54 void MacroAssembler::Name(Register target, \
52 Jump(Operand(target), cond, r1, r2); 55 bool ProtectBranchDelaySlot) { \
Søren Thygesen Gjesse 2010/05/25 09:00:56 ProtectBranchDelaySlot -> protect_branch_delay_slo
56 Name(Operand(target), ProtectBranchDelaySlot); \
57 } \
58 void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
59 bool ProtectBranchDelaySlot) { \
60 Name(Operand(target), COND_ARGS, ProtectBranchDelaySlot); \
61 }
62
63 #define INT_PTR_TARGET_BODY(Name) \
64 void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
65 bool ProtectBranchDelaySlot) { \
66 Name(Operand(target, rmode), ProtectBranchDelaySlot); \
67 } \
68 void MacroAssembler::Name(intptr_t target, \
69 RelocInfo::Mode rmode, \
70 COND_TYPED_ARGS, \
71 bool ProtectBranchDelaySlot) { \
72 Name(Operand(target, rmode), COND_ARGS, ProtectBranchDelaySlot); \
73 }
74
75 #define BYTE_PTR_TARGET_BODY(Name) \
76 void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
77 bool ProtectBranchDelaySlot) { \
78 Name(reinterpret_cast<intptr_t>(target), rmode, ProtectBranchDelaySlot); \
79 } \
80 void MacroAssembler::Name(byte* target, \
81 RelocInfo::Mode rmode, \
82 COND_TYPED_ARGS, \
83 bool ProtectBranchDelaySlot) { \
84 Name(reinterpret_cast<intptr_t>(target), \
85 rmode, \
86 COND_ARGS, \
87 ProtectBranchDelaySlot); \
88 }
89
90 #define CODE_TARGET_BODY(Name) \
91 void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
92 bool ProtectBranchDelaySlot) { \
93 Name(reinterpret_cast<intptr_t>(target.location()), \
94 rmode, ProtectBranchDelaySlot); \
95 } \
96 void MacroAssembler::Name(Handle<Code> target, \
97 RelocInfo::Mode rmode, \
98 COND_TYPED_ARGS, \
99 bool ProtectBranchDelaySlot) { \
100 Name(reinterpret_cast<intptr_t>(target.location()), \
101 rmode, \
102 COND_ARGS, \
103 ProtectBranchDelaySlot); \
104 }
105
106 REGISTER_TARGET_BODY(Jump)
107 REGISTER_TARGET_BODY(Call)
108 INT_PTR_TARGET_BODY(Jump)
109 INT_PTR_TARGET_BODY(Call)
110 BYTE_PTR_TARGET_BODY(Jump)
111 BYTE_PTR_TARGET_BODY(Call)
112 CODE_TARGET_BODY(Jump)
113 CODE_TARGET_BODY(Call)
114
115 #undef COND_TYPED_ARGS
116 #undef COND_ARGS
117 #undef REGISTER_TARGET_BODY
118 #undef BYTE_PTR_TARGET_BODY
119 #undef CODE_TARGET_BODY
120
121
// Unconditional return: jump through the link register (ra).
void MacroAssembler::Ret(bool ProtectBranchDelaySlot) {
  Jump(Operand(ra), ProtectBranchDelaySlot);
}
54 125
55 126
// Conditional return: jump through ra when (r1 cond r2) holds.
void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
                         bool ProtectBranchDelaySlot) {
  Jump(Operand(ra), cond, r1, r2, ProtectBranchDelaySlot);
}
105 131
106 132
// Load a value from the root list; s6 holds the roots array base pointer.
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
111 137
// Conditionally load a root-list value: branch over the load when the
// condition does NOT hold.
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  // NOTE(review): the branch skips a fixed 2-instruction window, which
  // assumes the lw below expands to a single instruction plus delay slot —
  // consider asserting the emitted size (raised in code review).
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
119 145
120 146
// Write-barrier record for the GC — not yet implemented on MIPS.
void MacroAssembler::RecordWrite(Register object,
                                 Register offset,
                                 Register scratch) {
  UNIMPLEMENTED_MIPS();
}
125 152
126 153
127 // --------------------------------------------------------------------------- 154 // ---------------------------------------------------------------------------
128 // Instruction macros 155 // Instruction macros
129 156
130 void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
131 if (rt.is_reg()) {
132 add(rd, rs, rt.rm());
133 } else {
134 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
135 addi(rd, rs, rt.imm32_);
136 } else {
137 // li handles the relocation.
138 ASSERT(!rs.is(at));
139 li(at, rt);
140 add(rd, rs, at);
141 }
142 }
143 }
144
145
146 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { 157 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
147 if (rt.is_reg()) { 158 if (rt.is_reg()) {
148 addu(rd, rs, rt.rm()); 159 addu(rd, rs, rt.rm());
149 } else { 160 } else {
150 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 161 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
151 addiu(rd, rs, rt.imm32_); 162 addiu(rd, rs, rt.imm32_);
152 } else { 163 } else {
153 // li handles the relocation. 164 // li handles the relocation.
154 ASSERT(!rs.is(at)); 165 ASSERT(!rs.is(at));
155 li(at, rt); 166 li(at, rt);
156 addu(rd, rs, at); 167 addu(rd, rs, at);
157 } 168 }
158 } 169 }
159 } 170 }
171
172
173 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
174 if (rt.is_reg()) {
175 subu(rd, rs, rt.rm());
176 } else {
177 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
178 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
179 } else {
180 // li handles the relocation.
181 ASSERT(!rs.is(at));
182 li(at, rt);
183 subu(rd, rs, at);
184 }
185 }
186 }
160 187
161 188
162 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { 189 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
163 if (rt.is_reg()) { 190 if (rt.is_reg()) {
164 mul(rd, rs, rt.rm()); 191 mul(rd, rs, rt.rm());
165 } else { 192 } else {
166 // li handles the relocation. 193 // li handles the relocation.
167 ASSERT(!rs.is(at)); 194 ASSERT(!rs.is(at));
168 li(at, rt); 195 li(at, rt);
169 mul(rd, rs, at); 196 mul(rd, rs, at);
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
216 li(at, rt); 243 li(at, rt);
217 divu(rs, at); 244 divu(rs, at);
218 } 245 }
219 } 246 }
220 247
221 248
222 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { 249 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
223 if (rt.is_reg()) { 250 if (rt.is_reg()) {
224 and_(rd, rs, rt.rm()); 251 and_(rd, rs, rt.rm());
225 } else { 252 } else {
226 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 253 if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
227 andi(rd, rs, rt.imm32_); 254 andi(rd, rs, rt.imm32_);
228 } else { 255 } else {
229 // li handles the relocation. 256 // li handles the relocation.
230 ASSERT(!rs.is(at)); 257 ASSERT(!rs.is(at));
231 li(at, rt); 258 li(at, rt);
232 and_(rd, rs, at); 259 and_(rd, rs, at);
233 } 260 }
234 } 261 }
235 } 262 }
236 263
237 264
238 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { 265 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
239 if (rt.is_reg()) { 266 if (rt.is_reg()) {
240 or_(rd, rs, rt.rm()); 267 or_(rd, rs, rt.rm());
241 } else { 268 } else {
242 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 269 if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
243 ori(rd, rs, rt.imm32_); 270 ori(rd, rs, rt.imm32_);
244 } else { 271 } else {
245 // li handles the relocation. 272 // li handles the relocation.
246 ASSERT(!rs.is(at)); 273 ASSERT(!rs.is(at));
247 li(at, rt); 274 li(at, rt);
248 or_(rd, rs, at); 275 or_(rd, rs, at);
249 } 276 }
250 } 277 }
251 } 278 }
252 279
253 280
254 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { 281 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
255 if (rt.is_reg()) { 282 if (rt.is_reg()) {
256 xor_(rd, rs, rt.rm()); 283 xor_(rd, rs, rt.rm());
257 } else { 284 } else {
258 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 285 if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
259 xori(rd, rs, rt.imm32_); 286 xori(rd, rs, rt.imm32_);
260 } else { 287 } else {
261 // li handles the relocation. 288 // li handles the relocation.
262 ASSERT(!rs.is(at)); 289 ASSERT(!rs.is(at));
263 li(at, rt); 290 li(at, rt);
264 xor_(rd, rs, at); 291 xor_(rd, rs, at);
265 } 292 }
266 } 293 }
267 } 294 }
268 295
(...skipping 23 matching lines...) Expand all
292 slt(rd, rs, at); 319 slt(rd, rs, at);
293 } 320 }
294 } 321 }
295 } 322 }
296 323
297 324
298 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { 325 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
299 if (rt.is_reg()) { 326 if (rt.is_reg()) {
300 sltu(rd, rs, rt.rm()); 327 sltu(rd, rs, rt.rm());
301 } else { 328 } else {
302 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 329 if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
303 sltiu(rd, rs, rt.imm32_); 330 sltiu(rd, rs, rt.imm32_);
304 } else { 331 } else {
305 // li handles the relocation. 332 // li handles the relocation.
306 ASSERT(!rs.is(at)); 333 ASSERT(!rs.is(at));
307 li(at, rt); 334 li(at, rt);
308 sltu(rd, rs, at); 335 sltu(rd, rs, at);
309 } 336 }
310 } 337 }
311 } 338 }
312 339
313 340
314 //------------Pseudo-instructions------------- 341 //------------Pseudo-instructions-------------
315 342
316 void MacroAssembler::movn(Register rd, Register rt) {
317 addiu(at, zero_reg, -1); // Fill at with ones.
318 xor_(rd, rt, at);
319 }
320
321
322 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) { 343 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
323 ASSERT(!j.is_reg()); 344 ASSERT(!j.is_reg());
324 345
325 if (!MustUseAt(j.rmode_) && !gen2instr) { 346 if (!MustUseAt(j.rmode_) && !gen2instr) {
326 // Normal load of an immediate value which does not need Relocation Info. 347 // Normal load of an immediate value which does not need Relocation Info.
327 if (is_int16(j.imm32_)) { 348 if (is_int16(j.imm32_)) {
328 addiu(rd, zero_reg, j.imm32_); 349 addiu(rd, zero_reg, j.imm32_);
329 } else if (!(j.imm32_ & HIMask)) { 350 } else if (!(j.imm32_ & HIMask)) {
330 ori(rd, zero_reg, j.imm32_); 351 ori(rd, zero_reg, j.imm32_);
331 } else if (!(j.imm32_ & LOMask)) { 352 } else if (!(j.imm32_ & LOMask)) {
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
409 for (int16_t i = kNumRegisters; i > 0; i--) { 430 for (int16_t i = kNumRegisters; i > 0; i--) {
410 if ((regs & (1 << i)) != 0) { 431 if ((regs & (1 << i)) != 0) {
411 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); 432 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
412 } 433 }
413 } 434 }
414 addiu(sp, sp, 4 * NumSaved); 435 addiu(sp, sp, 4 * NumSaved);
415 } 436 }
416 437
417 438
// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct:
// an unconditional branch must pass the dummy zero_reg operands, and a
// conditional one must pass at least one real register.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
419 445
420 // Trashes the at register if no scratch register is provided. 446 void MacroAssembler::Branch(int16_t offset,
421 void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs, 447 bool ProtectBranchDelaySlot) {
422 const Operand& rt, Register scratch) { 448 b(offset);
449
450 // Emit a nop in the branch delay slot if required.
451 if (ProtectBranchDelaySlot)
452 nop();
453 }
454
455
456 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
457 const Operand& rt,
458 bool ProtectBranchDelaySlot) {
459 BRANCH_ARGS_CHECK(cond, rs, rt);
460 ASSERT(!rs.is(zero_reg));
423 Register r2 = no_reg; 461 Register r2 = no_reg;
462 Register scratch = at;
424 if (rt.is_reg()) { 463 if (rt.is_reg()) {
425 // We don't want any other register but scratch clobbered. 464 // We don't want any other register but scratch clobbered.
426 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_)); 465 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
427 r2 = rt.rm_; 466 r2 = rt.rm_;
428 } else if (cond != cc_always) { 467 } else if (cond != cc_always) {
429 // We don't want any other register but scratch clobbered. 468 // We don't want any other register but scratch clobbered.
430 ASSERT(!scratch.is(rs)); 469 ASSERT(!scratch.is(rs));
431 r2 = scratch; 470 r2 = scratch;
432 li(r2, rt); 471 li(r2, rt);
433 } 472 }
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
475 bne(scratch, zero_reg, offset); 514 bne(scratch, zero_reg, offset);
476 break; 515 break;
477 case Uless_equal: 516 case Uless_equal:
478 sltu(scratch, r2, rs); 517 sltu(scratch, r2, rs);
479 beq(scratch, zero_reg, offset); 518 beq(scratch, zero_reg, offset);
480 break; 519 break;
481 520
482 default: 521 default:
483 UNREACHABLE(); 522 UNREACHABLE();
484 } 523 }
485 // Emit a nop in the branch delay slot. 524 // Emit a nop in the branch delay slot if required.
486 nop(); 525 if (ProtectBranchDelaySlot)
Søren Thygesen Gjesse 2010/05/25 09:00:56 Either use {}'s or place the nop() on the same lin
526 nop();
487 } 527 }
488 528
489 529
490 void MacroAssembler::Branch(Condition cond, Label* L, Register rs, 530 void MacroAssembler::Branch(Label* L,
491 const Operand& rt, Register scratch) { 531 bool ProtectBranchDelaySlot) {
532 // We use branch_offset as an argument for the branch instructions to be sure
533 // it is called just before generating the branch instruction, as needed.
534
535 b(shifted_branch_offset(L, false));
536
537 // Emit a nop in the branch delay slot if required.
538 if (ProtectBranchDelaySlot)
539 nop();
540 }
541
542
543 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
544 const Operand& rt,
545 bool ProtectBranchDelaySlot) {
546 BRANCH_ARGS_CHECK(cond, rs, rt);
547
548 int32_t offset;
492 Register r2 = no_reg; 549 Register r2 = no_reg;
550 Register scratch = at;
493 if (rt.is_reg()) { 551 if (rt.is_reg()) {
494 r2 = rt.rm_; 552 r2 = rt.rm_;
495 } else if (cond != cc_always) { 553 } else if (cond != cc_always) {
496 r2 = scratch; 554 r2 = scratch;
497 li(r2, rt); 555 li(r2, rt);
498 } 556 }
499 557
500 // We use branch_offset as an argument for the branch instructions to be sure 558 // Be careful to always use shifted_branch_offset only just before the branch
501 // it is called just before generating the branch instruction, as needed. 559 // instruction, as the location will be remember for patching the target.
Søren Thygesen Gjesse 2010/05/25 09:00:56 remember -> remembered
502 560
503 switch (cond) { 561 switch (cond) {
504 case cc_always: 562 case cc_always:
505 b(shifted_branch_offset(L, false)); 563 offset = shifted_branch_offset(L, false);
564 b(offset);
506 break; 565 break;
507 case eq: 566 case eq:
508 beq(rs, r2, shifted_branch_offset(L, false)); 567 offset = shifted_branch_offset(L, false);
568 beq(rs, r2, offset);
509 break; 569 break;
510 case ne: 570 case ne:
511 bne(rs, r2, shifted_branch_offset(L, false)); 571 offset = shifted_branch_offset(L, false);
572 bne(rs, r2, offset);
512 break; 573 break;
513 574
514 // Signed comparison 575 // Signed comparison
515 case greater: 576 case greater:
516 slt(scratch, r2, rs); 577 slt(scratch, r2, rs);
517 bne(scratch, zero_reg, shifted_branch_offset(L, false)); 578 offset = shifted_branch_offset(L, false);
579 bne(scratch, zero_reg, offset);
518 break; 580 break;
519 case greater_equal: 581 case greater_equal:
520 slt(scratch, rs, r2); 582 slt(scratch, rs, r2);
521 beq(scratch, zero_reg, shifted_branch_offset(L, false)); 583 offset = shifted_branch_offset(L, false);
584 beq(scratch, zero_reg, offset);
522 break; 585 break;
523 case less: 586 case less:
524 slt(scratch, rs, r2); 587 slt(scratch, rs, r2);
525 bne(scratch, zero_reg, shifted_branch_offset(L, false)); 588 offset = shifted_branch_offset(L, false);
589 bne(scratch, zero_reg, offset);
526 break; 590 break;
527 case less_equal: 591 case less_equal:
528 slt(scratch, r2, rs); 592 slt(scratch, r2, rs);
529 beq(scratch, zero_reg, shifted_branch_offset(L, false)); 593 offset = shifted_branch_offset(L, false);
594 beq(scratch, zero_reg, offset);
530 break; 595 break;
531 596
532 // Unsigned comparison. 597 // Unsigned comparison.
533 case Ugreater: 598 case Ugreater:
534 sltu(scratch, r2, rs); 599 sltu(scratch, r2, rs);
535 bne(scratch, zero_reg, shifted_branch_offset(L, false)); 600 offset = shifted_branch_offset(L, false);
601 bne(scratch, zero_reg, offset);
536 break; 602 break;
537 case Ugreater_equal: 603 case Ugreater_equal:
538 sltu(scratch, rs, r2); 604 sltu(scratch, rs, r2);
539 beq(scratch, zero_reg, shifted_branch_offset(L, false)); 605 offset = shifted_branch_offset(L, false);
606 beq(scratch, zero_reg, offset);
540 break; 607 break;
541 case Uless: 608 case Uless:
542 sltu(scratch, rs, r2); 609 sltu(scratch, rs, r2);
543 bne(scratch, zero_reg, shifted_branch_offset(L, false)); 610 offset = shifted_branch_offset(L, false);
611 bne(scratch, zero_reg, offset);
544 break; 612 break;
545 case Uless_equal: 613 case Uless_equal:
546 sltu(scratch, r2, rs); 614 sltu(scratch, r2, rs);
547 beq(scratch, zero_reg, shifted_branch_offset(L, false)); 615 offset = shifted_branch_offset(L, false);
616 beq(scratch, zero_reg, offset);
548 break; 617 break;
549 618
550 default: 619 default:
551 UNREACHABLE(); 620 UNREACHABLE();
552 } 621 }
553 // Emit a nop in the branch delay slot. 622
554 nop(); 623 // Check that offset could actually hold on an int16_t.
624 ASSERT(is_int16(offset));
625
626 // Emit a nop in the branch delay slot if required.
627 if (ProtectBranchDelaySlot)
628 nop();
555 } 629 }
556 630
557 631
558 // Trashes the at register if no scratch register is provided.
559 // We need to use a bgezal or bltzal, but they can't be used directly with the 632 // We need to use a bgezal or bltzal, but they can't be used directly with the
560 // slt instructions. We could use sub or add instead but we would miss overflow 633 // slt instructions. We could use sub or add instead but we would miss overflow
561 // cases, so we keep slt and add an intermediate third instruction. 634 // cases, so we keep slt and add an intermediate third instruction.
562 void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs, 635 void MacroAssembler::BranchAndLink(int16_t offset,
563 const Operand& rt, Register scratch) { 636 bool ProtectBranchDelaySlot) {
637 bal(offset);
638
639 // Emit a nop in the branch delay slot if required.
640 if (ProtectBranchDelaySlot)
641 nop();
642 }
643
644
645 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
646 const Operand& rt,
647 bool ProtectBranchDelaySlot) {
648 BRANCH_ARGS_CHECK(cond, rs, rt);
564 Register r2 = no_reg; 649 Register r2 = no_reg;
650 Register scratch = at;
565 if (rt.is_reg()) { 651 if (rt.is_reg()) {
566 r2 = rt.rm_; 652 r2 = rt.rm_;
567 } else if (cond != cc_always) { 653 } else if (cond != cc_always) {
568 r2 = scratch; 654 r2 = scratch;
569 li(r2, rt); 655 li(r2, rt);
570 } 656 }
571 657
572 switch (cond) { 658 switch (cond) {
573 case cc_always: 659 case cc_always:
574 bal(offset); 660 bal(offset);
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
624 break; 710 break;
625 case Uless_equal: 711 case Uless_equal:
626 sltu(scratch, r2, rs); 712 sltu(scratch, r2, rs);
627 addiu(scratch, scratch, -1); 713 addiu(scratch, scratch, -1);
628 bltzal(scratch, offset); 714 bltzal(scratch, offset);
629 break; 715 break;
630 716
631 default: 717 default:
632 UNREACHABLE(); 718 UNREACHABLE();
633 } 719 }
634 // Emit a nop in the branch delay slot. 720 // Emit a nop in the branch delay slot if required.
635 nop(); 721 if (ProtectBranchDelaySlot)
722 nop();
636 } 723 }
637 724
638 725
639 void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs, 726 void MacroAssembler::BranchAndLink(Label* L,
640 const Operand& rt, Register scratch) { 727 bool ProtectBranchDelaySlot) {
728 bal(shifted_branch_offset(L, false));
729
730 // Emit a nop in the branch delay slot if required.
731 if (ProtectBranchDelaySlot)
732 nop();
733 }
734
735
736 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
737 const Operand& rt,
738 bool ProtectBranchDelaySlot) {
739 BRANCH_ARGS_CHECK(cond, rs, rt);
740
741 int32_t offset;
641 Register r2 = no_reg; 742 Register r2 = no_reg;
743 Register scratch = at;
642 if (rt.is_reg()) { 744 if (rt.is_reg()) {
643 r2 = rt.rm_; 745 r2 = rt.rm_;
644 } else if (cond != cc_always) { 746 } else if (cond != cc_always) {
645 r2 = scratch; 747 r2 = scratch;
646 li(r2, rt); 748 li(r2, rt);
647 } 749 }
648 750
649 switch (cond) { 751 switch (cond) {
650 case cc_always: 752 case cc_always:
651 bal(shifted_branch_offset(L, false)); 753 offset = shifted_branch_offset(L, false);
754 bal(offset);
652 break; 755 break;
653 case eq: 756 case eq:
654 bne(rs, r2, 2); 757 bne(rs, r2, 2);
655 nop(); 758 nop();
656 bal(shifted_branch_offset(L, false)); 759 offset = shifted_branch_offset(L, false);
760 bal(offset);
657 break; 761 break;
658 case ne: 762 case ne:
659 beq(rs, r2, 2); 763 beq(rs, r2, 2);
660 nop(); 764 nop();
661 bal(shifted_branch_offset(L, false)); 765 offset = shifted_branch_offset(L, false);
766 bal(offset);
662 break; 767 break;
663 768
664 // Signed comparison 769 // Signed comparison
665 case greater: 770 case greater:
666 slt(scratch, r2, rs); 771 slt(scratch, r2, rs);
667 addiu(scratch, scratch, -1); 772 addiu(scratch, scratch, -1);
668 bgezal(scratch, shifted_branch_offset(L, false)); 773 offset = shifted_branch_offset(L, false);
774 bgezal(scratch, offset);
669 break; 775 break;
670 case greater_equal: 776 case greater_equal:
671 slt(scratch, rs, r2); 777 slt(scratch, rs, r2);
672 addiu(scratch, scratch, -1); 778 addiu(scratch, scratch, -1);
673 bltzal(scratch, shifted_branch_offset(L, false)); 779 offset = shifted_branch_offset(L, false);
780 bltzal(scratch, offset);
674 break; 781 break;
675 case less: 782 case less:
676 slt(scratch, rs, r2); 783 slt(scratch, rs, r2);
677 addiu(scratch, scratch, -1); 784 addiu(scratch, scratch, -1);
678 bgezal(scratch, shifted_branch_offset(L, false)); 785 offset = shifted_branch_offset(L, false);
786 bgezal(scratch, offset);
679 break; 787 break;
680 case less_equal: 788 case less_equal:
681 slt(scratch, r2, rs); 789 slt(scratch, r2, rs);
682 addiu(scratch, scratch, -1); 790 addiu(scratch, scratch, -1);
683 bltzal(scratch, shifted_branch_offset(L, false)); 791 offset = shifted_branch_offset(L, false);
792 bltzal(scratch, offset);
684 break; 793 break;
685 794
686 // Unsigned comparison. 795 // Unsigned comparison.
687 case Ugreater: 796 case Ugreater:
688 sltu(scratch, r2, rs); 797 sltu(scratch, r2, rs);
689 addiu(scratch, scratch, -1); 798 addiu(scratch, scratch, -1);
690 bgezal(scratch, shifted_branch_offset(L, false)); 799 offset = shifted_branch_offset(L, false);
800 bgezal(scratch, offset);
691 break; 801 break;
692 case Ugreater_equal: 802 case Ugreater_equal:
693 sltu(scratch, rs, r2); 803 sltu(scratch, rs, r2);
694 addiu(scratch, scratch, -1); 804 addiu(scratch, scratch, -1);
695 bltzal(scratch, shifted_branch_offset(L, false)); 805 offset = shifted_branch_offset(L, false);
806 bltzal(scratch, offset);
696 break; 807 break;
697 case Uless: 808 case Uless:
698 sltu(scratch, rs, r2); 809 sltu(scratch, rs, r2);
699 addiu(scratch, scratch, -1); 810 addiu(scratch, scratch, -1);
700 bgezal(scratch, shifted_branch_offset(L, false)); 811 offset = shifted_branch_offset(L, false);
812 bgezal(scratch, offset);
701 break; 813 break;
702 case Uless_equal: 814 case Uless_equal:
703 sltu(scratch, r2, rs); 815 sltu(scratch, r2, rs);
704 addiu(scratch, scratch, -1); 816 addiu(scratch, scratch, -1);
705 bltzal(scratch, shifted_branch_offset(L, false)); 817 offset = shifted_branch_offset(L, false);
818 bltzal(scratch, offset);
706 break; 819 break;
707 820
708 default: 821 default:
709 UNREACHABLE(); 822 UNREACHABLE();
710 } 823 }
711 // Emit a nop in the branch delay slot. 824
712 nop(); 825 // Check that offset could actually hold on an int16_t.
826 ASSERT(is_int16(offset));
827
828 // Emit a nop in the branch delay slot if required.
829 if (ProtectBranchDelaySlot)
830 nop();
713 } 831 }
714 832
715 833
716 void MacroAssembler::Jump(const Operand& target, 834 void MacroAssembler::Jump(const Operand& target,
717 Condition cond, Register rs, const Operand& rt) { 835 bool ProtectBranchDelaySlot) {
836 if (target.is_reg()) {
837 jr(target.rm());
838 } else { // !target.is_reg()
839 if (!MustUseAt(target.rmode_)) {
840 j(target.imm32_);
841 } else { // MustUseAt(target)
842 li(at, target);
843 jr(at);
844 }
845 }
846 // Emit a nop in the branch delay slot if required.
847 if (ProtectBranchDelaySlot)
848 nop();
849 }
850
851
852 void MacroAssembler::Jump(const Operand& target,
853 Condition cond, Register rs, const Operand& rt,
854 bool ProtectBranchDelaySlot) {
855 BRANCH_ARGS_CHECK(cond, rs, rt);
718 if (target.is_reg()) { 856 if (target.is_reg()) {
719 if (cond == cc_always) { 857 if (cond == cc_always) {
720 jr(target.rm()); 858 jr(target.rm());
721 } else { 859 } else {
722 Branch(NegateCondition(cond), 2, rs, rt); 860 Branch(2, NegateCondition(cond), rs, rt);
Søren Thygesen Gjesse 2010/05/25 09:00:56 Please use use either labels to jump or assert the
723 jr(target.rm()); 861 jr(target.rm());
724 } 862 }
725 } else { // !target.is_reg() 863 } else { // !target.is_reg()
726 if (!MustUseAt(target.rmode_)) { 864 if (!MustUseAt(target.rmode_)) {
727 if (cond == cc_always) { 865 if (cond == cc_always) {
728 j(target.imm32_); 866 j(target.imm32_);
729 } else { 867 } else {
730 Branch(NegateCondition(cond), 2, rs, rt); 868 Branch(2, NegateCondition(cond), rs, rt);
731 j(target.imm32_); // Will generate only one instruction. 869 j(target.imm32_); // Will generate only one instruction.
732 } 870 }
733 } else { // MustUseAt(target) 871 } else { // MustUseAt(target)
734 li(at, target); 872 li(at, target);
735 if (cond == cc_always) { 873 if (cond == cc_always) {
736 jr(at); 874 jr(at);
737 } else { 875 } else {
738 Branch(NegateCondition(cond), 2, rs, rt); 876 Branch(2, NegateCondition(cond), rs, rt);
739 jr(at); // Will generate only one instruction. 877 jr(at); // Will generate only one instruction.
740 } 878 }
741 } 879 }
742 } 880 }
743 // Emit a nop in the branch delay slot. 881 // Emit a nop in the branch delay slot if required.
744 nop(); 882 if (ProtectBranchDelaySlot)
883 nop();
745 } 884 }
746 885
747 886
748 void MacroAssembler::Call(const Operand& target, 887 void MacroAssembler::Call(const Operand& target,
749 Condition cond, Register rs, const Operand& rt) { 888 bool ProtectBranchDelaySlot) {
889 if (target.is_reg()) {
890 jalr(target.rm());
891 } else { // !target.is_reg()
892 if (!MustUseAt(target.rmode_)) {
893 jal(target.imm32_);
894 } else { // MustUseAt(target)
895 li(at, target);
896 jalr(at);
897 }
898 }
899 // Emit a nop in the branch delay slot if required.
900 if (ProtectBranchDelaySlot)
901 nop();
902 }
903
904
905 void MacroAssembler::Call(const Operand& target,
906 Condition cond, Register rs, const Operand& rt,
907 bool ProtectBranchDelaySlot) {
908 BRANCH_ARGS_CHECK(cond, rs, rt);
750 if (target.is_reg()) { 909 if (target.is_reg()) {
751 if (cond == cc_always) { 910 if (cond == cc_always) {
752 jalr(target.rm()); 911 jalr(target.rm());
753 } else { 912 } else {
754 Branch(NegateCondition(cond), 2, rs, rt); 913 Branch(2, NegateCondition(cond), rs, rt);
755 jalr(target.rm()); 914 jalr(target.rm());
756 } 915 }
757 } else { // !target.is_reg() 916 } else { // !target.is_reg()
758 if (!MustUseAt(target.rmode_)) { 917 if (!MustUseAt(target.rmode_)) {
759 if (cond == cc_always) { 918 if (cond == cc_always) {
760 jal(target.imm32_); 919 jal(target.imm32_);
761 } else { 920 } else {
762 Branch(NegateCondition(cond), 2, rs, rt); 921 Branch(2, NegateCondition(cond), rs, rt);
763 jal(target.imm32_); // Will generate only one instruction. 922 jal(target.imm32_); // Will generate only one instruction.
764 } 923 }
765 } else { // MustUseAt(target) 924 } else { // MustUseAt(target)
766 li(at, target); 925 li(at, target);
767 if (cond == cc_always) { 926 if (cond == cc_always) {
768 jalr(at); 927 jalr(at);
769 } else { 928 } else {
770 Branch(NegateCondition(cond), 2, rs, rt); 929 Branch(2, NegateCondition(cond), rs, rt);
771 jalr(at); // Will generate only one instruction. 930 jalr(at); // Will generate only one instruction.
772 } 931 }
773 } 932 }
774 } 933 }
775 // Emit a nop in the branch delay slot. 934 // Emit a nop in the branch delay slot if required.
776 nop(); 935 if (ProtectBranchDelaySlot)
936 nop();
777 } 937 }
778 938
939
779 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) { 940 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
780 UNIMPLEMENTED_MIPS(); 941 UNIMPLEMENTED_MIPS();
781 } 942 }
782 943
783 944
784 void MacroAssembler::Drop(int count, Condition cond) { 945 void MacroAssembler::Drop(int count, Condition cond) {
785 UNIMPLEMENTED_MIPS(); 946 UNIMPLEMENTED_MIPS();
786 } 947 }
787 948
788 949
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after
871 // Activation frames 1032 // Activation frames
872 1033
873 void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) { 1034 void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
874 Label extra_push, end; 1035 Label extra_push, end;
875 1036
876 andi(scratch, sp, 7); 1037 andi(scratch, sp, 7);
877 1038
878 // We check for args and receiver size on the stack, all of them word sized. 1039 // We check for args and receiver size on the stack, all of them word sized.
879 // We add one for sp, that we also want to store on the stack. 1040 // We add one for sp, that we also want to store on the stack.
880 if (((arg_count + 1) % kPointerSizeLog2) == 0) { 1041 if (((arg_count + 1) % kPointerSizeLog2) == 0) {
881 Branch(ne, &extra_push, at, Operand(zero_reg)); 1042 Branch(&extra_push, ne, scratch, Operand(zero_reg));
882 } else { // ((arg_count + 1) % 2) == 1 1043 } else { // ((arg_count + 1) % 2) == 1
883 Branch(eq, &extra_push, at, Operand(zero_reg)); 1044 Branch(&extra_push, eq, scratch, Operand(zero_reg));
884 } 1045 }
885 1046
886 // Save sp on the stack. 1047 // Save sp on the stack.
887 mov(scratch, sp); 1048 mov(scratch, sp);
888 Push(scratch); 1049 Push(scratch);
889 b(&end); 1050 jmp(&end);
890 1051
891 // Align before saving sp on the stack. 1052 // Align before saving sp on the stack.
892 bind(&extra_push); 1053 bind(&extra_push);
893 mov(scratch, sp); 1054 mov(scratch, sp);
894 addiu(sp, sp, -8); 1055 addiu(sp, sp, -8);
895 sw(scratch, MemOperand(sp)); 1056 sw(scratch, MemOperand(sp));
896 1057
897 // The stack is aligned and sp is stored on the top. 1058 // The stack is aligned and sp is stored on the top.
898 bind(&end); 1059 bind(&end);
899 } 1060 }
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
941 // Don't worry about adapting arguments for builtins that 1102 // Don't worry about adapting arguments for builtins that
942 // don't want that done. Skip adaption code by making it look 1103 // don't want that done. Skip adaption code by making it look
943 // like we have a match between expected and actual number of 1104 // like we have a match between expected and actual number of
944 // arguments. 1105 // arguments.
945 definitely_matches = true; 1106 definitely_matches = true;
946 } else { 1107 } else {
947 li(a2, Operand(expected.immediate())); 1108 li(a2, Operand(expected.immediate()));
948 } 1109 }
949 } 1110 }
950 } else if (actual.is_immediate()) { 1111 } else if (actual.is_immediate()) {
951 Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate())); 1112 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
952 li(a0, Operand(actual.immediate())); 1113 li(a0, Operand(actual.immediate()));
953 } else { 1114 } else {
954 Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg())); 1115 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
955 } 1116 }
956 1117
957 if (!definitely_matches) { 1118 if (!definitely_matches) {
958 if (!code_constant.is_null()) { 1119 if (!code_constant.is_null()) {
959 li(a3, Operand(code_constant)); 1120 li(a3, Operand(code_constant));
960 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); 1121 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
961 } 1122 }
962 1123
963 ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline); 1124 ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
964 if (flag == CALL_FUNCTION) { 1125 if (flag == CALL_FUNCTION) {
965 CallBuiltin(adaptor); 1126 CallBuiltin(adaptor);
966 b(done); 1127 jmp(done);
967 nop();
968 } else { 1128 } else {
969 JumpToBuiltin(adaptor); 1129 JumpToBuiltin(adaptor);
970 } 1130 }
971 bind(&regular_invoke); 1131 bind(&regular_invoke);
972 } 1132 }
973 } 1133 }
974 1134
975 void MacroAssembler::InvokeCode(Register code, 1135 void MacroAssembler::InvokeCode(Register code,
976 const ParameterCount& expected, 1136 const ParameterCount& expected,
977 const ParameterCount& actual, 1137 const ParameterCount& actual,
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
1044 1204
1045 1205
1046 void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) { 1206 void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
1047 // Load builtin address. 1207 // Load builtin address.
1048 LoadExternalReference(t9, builtin_entry); 1208 LoadExternalReference(t9, builtin_entry);
1049 lw(t9, MemOperand(t9)); // Deref address. 1209 lw(t9, MemOperand(t9)); // Deref address.
1050 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); 1210 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1051 // Call and allocate arguments slots. 1211 // Call and allocate arguments slots.
1052 jalr(t9); 1212 jalr(t9);
1053 // Use the branch delay slot to allocated argument slots. 1213 // Use the branch delay slot to allocated argument slots.
1054 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); 1214 addiu(sp, sp, -StandardFrameConstants::kBArgsSlotsSize);
Søren Thygesen Gjesse 2010/05/25 09:00:56 Please add a comment that the second addiu will be
1055 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); 1215 addiu(sp, sp, StandardFrameConstants::kBArgsSlotsSize);
1056 } 1216 }
1057 1217
1058 1218
1059 void MacroAssembler::CallBuiltin(Register target) { 1219 void MacroAssembler::CallBuiltin(Register target) {
1060 // Target already holds target address. 1220 // Target already holds target address.
1061 // Call and allocate arguments slots. 1221 // Call and allocate arguments slots.
1062 jalr(target); 1222 jalr(target);
1063 // Use the branch delay slot to allocated argument slots. 1223 // Use the branch delay slot to allocated argument slots.
1064 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); 1224 addiu(sp, sp, -StandardFrameConstants::kBArgsSlotsSize);
Søren Thygesen Gjesse 2010/05/25 09:00:56 Ditto.
1065 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); 1225 addiu(sp, sp, StandardFrameConstants::kBArgsSlotsSize);
1066 } 1226 }
1067 1227
1068 1228
1069 void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) { 1229 void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
1070 // Load builtin address. 1230 // Load builtin address.
1071 LoadExternalReference(t9, builtin_entry); 1231 LoadExternalReference(t9, builtin_entry);
1072 lw(t9, MemOperand(t9)); // Deref address. 1232 lw(t9, MemOperand(t9)); // Deref address.
1073 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); 1233 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1074 // Call and allocate arguments slots. 1234 // Call and allocate arguments slots.
1075 jr(t9); 1235 jr(t9);
1076 // Use the branch delay slot to allocated argument slots. 1236 // Use the branch delay slot to allocated argument slots.
1077 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); 1237 addiu(sp, sp, -StandardFrameConstants::kBArgsSlotsSize);
1078 } 1238 }
1079 1239
1080 1240
1081 void MacroAssembler::JumpToBuiltin(Register target) { 1241 void MacroAssembler::JumpToBuiltin(Register target) {
1082 // t9 already holds target address. 1242 // t9 already holds target address.
1083 // Call and allocate arguments slots. 1243 // Call and allocate arguments slots.
1084 jr(t9); 1244 jr(t9);
1085 // Use the branch delay slot to allocated argument slots. 1245 // Use the branch delay slot to allocated argument slots.
1086 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); 1246 addiu(sp, sp, -StandardFrameConstants::kBArgsSlotsSize);
1087 } 1247 }
1088 1248
1089 1249
1090 // ----------------------------------------------------------------------------- 1250 // -----------------------------------------------------------------------------
1091 // Runtime calls 1251 // Runtime calls
1092 1252
1093 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, 1253 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
1094 Register r1, const Operand& r2) { 1254 Register r1, const Operand& r2) {
1095 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 1255 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1096 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); 1256 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
(...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after
1207 } 1367 }
1208 1368
1209 1369
1210 void MacroAssembler::Abort(const char* msg) { 1370 void MacroAssembler::Abort(const char* msg) {
1211 UNIMPLEMENTED_MIPS(); 1371 UNIMPLEMENTED_MIPS();
1212 } 1372 }
1213 1373
1214 1374
1215 void MacroAssembler::EnterFrame(StackFrame::Type type) { 1375 void MacroAssembler::EnterFrame(StackFrame::Type type) {
1216 addiu(sp, sp, -5 * kPointerSize); 1376 addiu(sp, sp, -5 * kPointerSize);
1217 li(t0, Operand(Smi::FromInt(type))); 1377 li(t8, Operand(Smi::FromInt(type)));
1218 li(t1, Operand(CodeObject())); 1378 li(t9, Operand(CodeObject()));
1219 sw(ra, MemOperand(sp, 4 * kPointerSize)); 1379 sw(ra, MemOperand(sp, 4 * kPointerSize));
1220 sw(fp, MemOperand(sp, 3 * kPointerSize)); 1380 sw(fp, MemOperand(sp, 3 * kPointerSize));
1221 sw(cp, MemOperand(sp, 2 * kPointerSize)); 1381 sw(cp, MemOperand(sp, 2 * kPointerSize));
1222 sw(t0, MemOperand(sp, 1 * kPointerSize)); 1382 sw(t8, MemOperand(sp, 1 * kPointerSize));
1223 sw(t1, MemOperand(sp, 0 * kPointerSize)); 1383 sw(t9, MemOperand(sp, 0 * kPointerSize));
1224 addiu(fp, sp, 3 * kPointerSize); 1384 addiu(fp, sp, 3 * kPointerSize);
1225 } 1385 }
1226 1386
1227 1387
1228 void MacroAssembler::LeaveFrame(StackFrame::Type type) { 1388 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
1229 mov(sp, fp); 1389 mov(sp, fp);
1230 lw(fp, MemOperand(sp, 0 * kPointerSize)); 1390 lw(fp, MemOperand(sp, 0 * kPointerSize));
1231 lw(ra, MemOperand(sp, 1 * kPointerSize)); 1391 lw(ra, MemOperand(sp, 1 * kPointerSize));
1232 addiu(sp, sp, 2 * kPointerSize); 1392 addiu(sp, sp, 2 * kPointerSize);
1233 } 1393 }
1234 1394
1235 1395
1236 void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, 1396 void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
1237 Register hold_argc, 1397 Register hold_argc,
1238 Register hold_argv, 1398 Register hold_argv,
1239 Register hold_function) { 1399 Register hold_function) {
1240 // Compute the argv pointer and keep it in a callee-saved register. 1400 // Compute the argv pointer and keep it in a callee-saved register.
1241 // a0 is argc. 1401 // a0 is argc.
1242 sll(t0, a0, kPointerSizeLog2); 1402 sll(t8, a0, kPointerSizeLog2);
1243 add(hold_argv, sp, t0); 1403 addu(hold_argv, sp, t8);
1244 addi(hold_argv, hold_argv, -kPointerSize); 1404 addiu(hold_argv, hold_argv, -kPointerSize);
1245 1405
1246 // Compute callee's stack pointer before making changes and save it as 1406 // Compute callee's stack pointer before making changes and save it as
1247 // t1 register so that it is restored as sp register on exit, thereby 1407 // t9 register so that it is restored as sp register on exit, thereby
1248 // popping the args. 1408 // popping the args.
1249 // t1 = sp + kPointerSize * #args 1409 // t9 = sp + kPointerSize * #args
1250 add(t1, sp, t0); 1410 addu(t9, sp, t8);
1251 1411
1252 // Align the stack at this point. 1412 // Align the stack at this point.
1253 AlignStack(0); 1413 AlignStack(0);
1254 1414
1255 // Save registers. 1415 // Save registers.
1256 addiu(sp, sp, -12); 1416 addiu(sp, sp, -12);
1257 sw(t1, MemOperand(sp, 8)); 1417 sw(t9, MemOperand(sp, 8));
1258 sw(ra, MemOperand(sp, 4)); 1418 sw(ra, MemOperand(sp, 4));
1259 sw(fp, MemOperand(sp, 0)); 1419 sw(fp, MemOperand(sp, 0));
1260 mov(fp, sp); // Setup new frame pointer. 1420 mov(fp, sp); // Setup new frame pointer.
1261 1421
1262 // Push debug marker. 1422 // Push debug marker.
1263 if (mode == ExitFrame::MODE_DEBUG) { 1423 if (mode == ExitFrame::MODE_DEBUG) {
1264 Push(zero_reg); 1424 Push(zero_reg);
1265 } else { 1425 } else {
1266 li(t0, Operand(CodeObject())); 1426 li(t8, Operand(CodeObject()));
1267 Push(t0); 1427 Push(t8);
1268 } 1428 }
1269 1429
1270 // Save the frame pointer and the context in top. 1430 // Save the frame pointer and the context in top.
1271 LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address)); 1431 LoadExternalReference(t8, ExternalReference(Top::k_c_entry_fp_address));
1272 sw(fp, MemOperand(t0)); 1432 sw(fp, MemOperand(t8));
1273 LoadExternalReference(t0, ExternalReference(Top::k_context_address)); 1433 LoadExternalReference(t8, ExternalReference(Top::k_context_address));
1274 sw(cp, MemOperand(t0)); 1434 sw(cp, MemOperand(t8));
1275 1435
1276 // Setup argc and the builtin function in callee-saved registers. 1436 // Setup argc and the builtin function in callee-saved registers.
1277 mov(hold_argc, a0); 1437 mov(hold_argc, a0);
1278 mov(hold_function, a1); 1438 mov(hold_function, a1);
1279 } 1439 }
1280 1440
1281 1441
1282 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { 1442 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
1283 // Clear top frame. 1443 // Clear top frame.
1284 LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address)); 1444 LoadExternalReference(t8, ExternalReference(Top::k_c_entry_fp_address));
1285 sw(zero_reg, MemOperand(t0)); 1445 sw(zero_reg, MemOperand(t8));
1286 1446
1287 // Restore current context from top and clear it in debug mode. 1447 // Restore current context from top and clear it in debug mode.
1288 LoadExternalReference(t0, ExternalReference(Top::k_context_address)); 1448 LoadExternalReference(t8, ExternalReference(Top::k_context_address));
1289 lw(cp, MemOperand(t0)); 1449 lw(cp, MemOperand(t8));
1290 #ifdef DEBUG 1450 #ifdef DEBUG
1291 sw(a3, MemOperand(t0)); 1451 sw(a3, MemOperand(t8));
1292 #endif 1452 #endif
1293 1453
1294 // Pop the arguments, restore registers, and return. 1454 // Pop the arguments, restore registers, and return.
1295 mov(sp, fp); // Respect ABI stack constraint. 1455 mov(sp, fp); // Respect ABI stack constraint.
1296 lw(fp, MemOperand(sp, 0)); 1456 lw(fp, MemOperand(sp, 0));
1297 lw(ra, MemOperand(sp, 4)); 1457 lw(ra, MemOperand(sp, 4));
1298 lw(sp, MemOperand(sp, 8)); 1458 lw(sp, MemOperand(sp, 8));
1299 jr(ra); 1459 jr(ra);
1300 nop(); // Branch delay slot nop. 1460 nop(); // Branch delay slot nop.
1301 } 1461 }
1302 1462
1303 1463
1304 void MacroAssembler::AlignStack(int offset) { 1464 void MacroAssembler::AlignStack(int offset) {
1305 // On MIPS an offset of 0 aligns to 0 modulo 8 bytes, 1465 // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
1306 // and an offset of 1 aligns to 4 modulo 8 bytes. 1466 // and an offset of 1 aligns to 4 modulo 8 bytes.
1467 #if defined(V8_HOST_ARCH_MIPS)
1468 // Running on the real platform. Use the alignment as mandated by the local
1469 // environment.
1470 // Note: This will break if we ever start generating snapshots on one MIPS
1471 // platform for another MIPS platform with a different alignment.
1307 int activation_frame_alignment = OS::ActivationFrameAlignment(); 1472 int activation_frame_alignment = OS::ActivationFrameAlignment();
1473 #else // defined(V8_HOST_ARCH_MIPS)
1474 // If we are using the simulator then we should always align to the expected
1475 // alignment. As the simulator is used to generate snapshots we do not know
1476 // if the target platform will need alignment, so we will always align at
1477 // this point here.
1478 int activation_frame_alignment = 2 * kPointerSize;
1479 #endif // defined(V8_HOST_ARCH_MIPS)
1308 if (activation_frame_alignment != kPointerSize) { 1480 if (activation_frame_alignment != kPointerSize) {
1309 // This code needs to be made more general if this assert doesn't hold. 1481 // This code needs to be made more general if this assert doesn't hold.
1310 ASSERT(activation_frame_alignment == 2 * kPointerSize); 1482 ASSERT(activation_frame_alignment == 2 * kPointerSize);
1311 if (offset == 0) { 1483 if (offset == 0) {
1312 andi(t0, sp, activation_frame_alignment - 1); 1484 andi(t8, sp, activation_frame_alignment - 1);
1313 Push(zero_reg, eq, t0, zero_reg); 1485 Push(zero_reg, eq, t8, zero_reg);
1314 } else { 1486 } else {
1315 andi(t0, sp, activation_frame_alignment - 1); 1487 andi(t8, sp, activation_frame_alignment - 1);
1316 addiu(t0, t0, -4); 1488 addiu(t8, t8, -4);
1317 Push(zero_reg, eq, t0, zero_reg); 1489 Push(zero_reg, eq, t8, zero_reg);
1318 } 1490 }
1319 } 1491 }
1320 } 1492 }
1321 1493
1322 } } // namespace v8::internal 1494 } } // namespace v8::internal
1323 1495
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698