Chromium Code Reviews

Side by Side Diff: src/mips/macro-assembler-mips.cc

Issue 6759025: Version 3.2.6 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 9 years, 8 months ago
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 28 #include <limits.h> // For LONG_MIN, LONG_MAX
29 29
30 #include "v8.h" 30 #include "v8.h"
31 31
32 #if defined(V8_TARGET_ARCH_MIPS) 32 #if defined(V8_TARGET_ARCH_MIPS)
33 33
34 #include "bootstrapper.h" 34 #include "bootstrapper.h"
35 #include "codegen-inl.h" 35 #include "codegen-inl.h"
36 #include "debug.h" 36 #include "debug.h"
37 #include "runtime.h" 37 #include "runtime.h"
38 38
39 namespace v8 { 39 namespace v8 {
40 namespace internal { 40 namespace internal {
41 41
42 MacroAssembler::MacroAssembler(void* buffer, int size) 42 MacroAssembler::MacroAssembler(void* buffer, int size)
43 : Assembler(buffer, size), 43 : Assembler(buffer, size),
44 unresolved_(0),
45 generating_stub_(false), 44 generating_stub_(false),
46 allow_stub_calls_(true), 45 allow_stub_calls_(true),
47 code_object_(Heap::undefined_value()) { 46 code_object_(HEAP->undefined_value()) {
48 } 47 }
49 48
50 49
50 // Arguments macros
51 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
52 #define COND_ARGS cond, r1, r2
51 53
52 void MacroAssembler::Jump(Register target, Condition cond, 54 #define REGISTER_TARGET_BODY(Name) \
53 Register r1, const Operand& r2) { 55 void MacroAssembler::Name(Register target, \
54 Jump(Operand(target), cond, r1, r2); 56 BranchDelaySlot bd) { \
57 Name(Operand(target), bd); \
58 } \
59 void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
60 BranchDelaySlot bd) { \
61 Name(Operand(target), COND_ARGS, bd); \
55 } 62 }
56 63
57 64
58 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, 65 #define INT_PTR_TARGET_BODY(Name) \
59 Condition cond, Register r1, const Operand& r2) { 66 void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
60 Jump(Operand(target, rmode), cond, r1, r2); 67 BranchDelaySlot bd) { \
68 Name(Operand(target, rmode), bd); \
69 } \
70 void MacroAssembler::Name(intptr_t target, \
71 RelocInfo::Mode rmode, \
72 COND_TYPED_ARGS, \
73 BranchDelaySlot bd) { \
74 Name(Operand(target, rmode), COND_ARGS, bd); \
61 } 75 }
62 76
63 77
64 void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode, 78 #define BYTE_PTR_TARGET_BODY(Name) \
65 Condition cond, Register r1, const Operand& r2) { 79 void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
66 ASSERT(!RelocInfo::IsCodeTarget(rmode)); 80 BranchDelaySlot bd) { \
67 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); 81 Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
82 } \
83 void MacroAssembler::Name(byte* target, \
84 RelocInfo::Mode rmode, \
85 COND_TYPED_ARGS, \
86 BranchDelaySlot bd) { \
87 Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
68 } 88 }
69 89
70 90
71 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, 91 #define CODE_TARGET_BODY(Name) \
72 Condition cond, Register r1, const Operand& r2) { 92 void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
73 ASSERT(RelocInfo::IsCodeTarget(rmode)); 93 BranchDelaySlot bd) { \
74 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); 94 Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
95 } \
96 void MacroAssembler::Name(Handle<Code> target, \
97 RelocInfo::Mode rmode, \
98 COND_TYPED_ARGS, \
99 BranchDelaySlot bd) { \
100 Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
75 } 101 }
76 102
77 103
78 void MacroAssembler::Call(Register target, 104 REGISTER_TARGET_BODY(Jump)
79 Condition cond, Register r1, const Operand& r2) { 105 REGISTER_TARGET_BODY(Call)
80 Call(Operand(target), cond, r1, r2); 106 INT_PTR_TARGET_BODY(Jump)
107 INT_PTR_TARGET_BODY(Call)
108 BYTE_PTR_TARGET_BODY(Jump)
109 BYTE_PTR_TARGET_BODY(Call)
110 CODE_TARGET_BODY(Jump)
111 CODE_TARGET_BODY(Call)
112
113 #undef COND_TYPED_ARGS
114 #undef COND_ARGS
115 #undef REGISTER_TARGET_BODY
116 #undef BYTE_PTR_TARGET_BODY
117 #undef CODE_TARGET_BODY
118
119
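For reference, a hand expansion of one instantiation above: REGISTER_TARGET_BODY(Jump) generates the two overloads below. The expansion is mine, not part of the patch.

// What REGISTER_TARGET_BODY(Jump) expands to:
void MacroAssembler::Jump(Register target, BranchDelaySlot bd) {
  Jump(Operand(target), bd);
}
void MacroAssembler::Jump(Register target, Condition cond, Register r1,
                          const Operand& r2, BranchDelaySlot bd) {
  Jump(Operand(target), cond, r1, r2, bd);
}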
120 void MacroAssembler::Ret(BranchDelaySlot bd) {
121 Jump(Operand(ra), bd);
81 } 122 }
82 123
83 124
84 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, 125 void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
85 Condition cond, Register r1, const Operand& r2) { 126 BranchDelaySlot bd) {
86 Call(Operand(target, rmode), cond, r1, r2); 127 Jump(Operand(ra), cond, r1, r2, bd);
87 } 128 }
88 129
89 130
90 void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
91 Condition cond, Register r1, const Operand& r2) {
92 ASSERT(!RelocInfo::IsCodeTarget(rmode));
93 Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
94 }
95
96
97 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
98 Condition cond, Register r1, const Operand& r2) {
99 ASSERT(RelocInfo::IsCodeTarget(rmode));
100 Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
101 }
102
103
104 void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
105 Jump(Operand(ra), cond, r1, r2);
106 }
107
108
109 void MacroAssembler::LoadRoot(Register destination, 131 void MacroAssembler::LoadRoot(Register destination,
110 Heap::RootListIndex index) { 132 Heap::RootListIndex index) {
111 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); 133 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
112 } 134 }
113 135
136
114 void MacroAssembler::LoadRoot(Register destination, 137 void MacroAssembler::LoadRoot(Register destination,
115 Heap::RootListIndex index, 138 Heap::RootListIndex index,
116 Condition cond, 139 Condition cond,
117 Register src1, const Operand& src2) { 140 Register src1, const Operand& src2) {
118 Branch(NegateCondition(cond), 2, src1, src2); 141 Branch(2, NegateCondition(cond), src1, src2);
119 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); 142 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
120 } 143 }
121 144
122 145
123 void MacroAssembler::RecordWrite(Register object, Register offset, 146 void MacroAssembler::StoreRoot(Register source,
147 Heap::RootListIndex index) {
148 sw(source, MemOperand(s6, index << kPointerSizeLog2));
149 }
150
151
152 void MacroAssembler::StoreRoot(Register source,
153 Heap::RootListIndex index,
154 Condition cond,
155 Register src1, const Operand& src2) {
156 Branch(2, NegateCondition(cond), src1, src2);
157 sw(source, MemOperand(s6, index << kPointerSizeLog2));
158 }
159
160
161 void MacroAssembler::RecordWriteHelper(Register object,
162 Register address,
163 Register scratch) {
164 if (FLAG_debug_code) {
165 // Check that the object is not in new space.
166 Label not_in_new_space;
167 InNewSpace(object, scratch, ne, &not_in_new_space);
168 Abort("new-space object passed to RecordWriteHelper");
169 bind(&not_in_new_space);
170 }
171
172 // Calculate page address: Clear bits from 0 to kPageSizeBits.
173 if (mips32r2) {
174 Ins(object, zero_reg, 0, kPageSizeBits);
175 } else {
176 // The Ins macro emulation is slow on mips32r1, so use shifts instead.
177 srl(object, object, kPageSizeBits);
178 sll(object, object, kPageSizeBits);
179 }
180
181 // Calculate region number.
182 Ext(address, address, Page::kRegionSizeLog2,
183 kPageSizeBits - Page::kRegionSizeLog2);
184
185 // Mark region dirty.
186 lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
187 li(at, Operand(1));
188 sllv(at, at, address);
189 or_(scratch, scratch, at);
190 sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
191 }
192
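The three steps in RecordWriteHelper are plain address arithmetic. A scalar sketch, with page_size_bits and region_size_log2 standing in for the Page constants (their concrete values are not shown in this patch and are assumptions here):

#include <stdint.h>
uintptr_t PageStart(uintptr_t addr, int page_size_bits) {
  return (addr >> page_size_bits) << page_size_bits;  // The srl/sll pair above.
}
uint32_t RegionNumber(uintptr_t addr, int region_size_log2, int page_size_bits) {
  // The Ext(address, kRegionSizeLog2, kPageSizeBits - kRegionSizeLog2) above.
  return (addr >> region_size_log2) &
         ((1u << (page_size_bits - region_size_log2)) - 1u);
}
// Marking the region dirty is then: dirty_flags |= 1u << region_number;
// which is the li/sllv/or_/sw sequence in the code.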
193
194 void MacroAssembler::InNewSpace(Register object,
195 Register scratch,
196 Condition cc,
197 Label* branch) {
198 ASSERT(cc == eq || cc == ne);
199 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
200 Branch(branch, cc, scratch,
201 Operand(ExternalReference::new_space_start(isolate())));
202 }
203
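InNewSpace reduces to a masked range check: with cc == eq it branches when the address's high bits equal the new space's base. A scalar sketch (mask and start are whatever the ExternalReferences resolve to at runtime):

#include <stdint.h>
bool IsInNewSpace(uintptr_t addr, uintptr_t new_space_start,
                  uintptr_t new_space_mask) {
  return (addr & new_space_mask) == new_space_start;  // And + Branch(eq) above.
}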
204
205 // Will clobber 4 registers: object, scratch0, scratch1, at. The
206 // register 'object' contains a heap object pointer. The heap object
207 // tag is shifted away.
208 void MacroAssembler::RecordWrite(Register object,
209 Operand offset,
210 Register scratch0,
211 Register scratch1) {
212 // The compiled code assumes that record write doesn't change the
213 // context register, so we check that none of the clobbered
214 // registers are cp.
215 ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
216
217 Label done;
218
219 // First, test that the object is not in the new space. We cannot set
220 // region marks for new space pages.
221 InNewSpace(object, scratch0, eq, &done);
222
223 // Add offset into the object.
224 Addu(scratch0, object, offset);
225
226 // Record the actual write.
227 RecordWriteHelper(object, scratch0, scratch1);
228
229 bind(&done);
230
231 // Clobber all input registers when running with the debug-code flag
232 // turned on to provoke errors.
233 if (FLAG_debug_code) {
234 li(object, Operand(BitCast<int32_t>(kZapValue)));
235 li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
236 li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
237 }
238 }
239
240
241 // Will clobber 4 registers: object, address, scratch, ip. The
242 // register 'object' contains a heap object pointer. The heap object
243 // tag is shifted away.
244 void MacroAssembler::RecordWrite(Register object,
245 Register address,
124 Register scratch) { 246 Register scratch) {
125 UNIMPLEMENTED_MIPS(); 247 // The compiled code assumes that record write doesn't change the
248 // context register, so we check that none of the clobbered
249 // registers are cp.
250 ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
251
252 Label done;
253
254 // First, test that the object is not in the new space. We cannot set
255 // region marks for new space pages.
256 InNewSpace(object, scratch, eq, &done);
257
258 // Record the actual write.
259 RecordWriteHelper(object, address, scratch);
260
261 bind(&done);
262
263 // Clobber all input registers when running with the debug-code flag
264 // turned on to provoke errors.
265 if (FLAG_debug_code) {
266 li(object, Operand(BitCast<int32_t>(kZapValue)));
267 li(address, Operand(BitCast<int32_t>(kZapValue)));
268 li(scratch, Operand(BitCast<int32_t>(kZapValue)));
269 }
270 }
271
272
273 // -----------------------------------------------------------------------------
274 // Allocation support
275
276
277 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
278 Register scratch,
279 Label* miss) {
280 Label same_contexts;
281
282 ASSERT(!holder_reg.is(scratch));
283 ASSERT(!holder_reg.is(at));
284 ASSERT(!scratch.is(at));
285
286 // Load current lexical context from the stack frame.
287 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
288 // In debug mode, make sure the lexical context is set.
289 #ifdef DEBUG
290 Check(ne, "we should not have an empty lexical context",
291 scratch, Operand(zero_reg));
292 #endif
293
294 // Load the global context of the current context.
295 int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
296 lw(scratch, FieldMemOperand(scratch, offset));
297 lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
298
299 // Check the context is a global context.
300 if (FLAG_debug_code) {
301 // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
302 Push(holder_reg); // Temporarily save holder on the stack.
303 // Read the first word and compare to the global_context_map.
304 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
305 LoadRoot(at, Heap::kGlobalContextMapRootIndex);
306 Check(eq, "JSGlobalObject::global_context should be a global context.",
307 holder_reg, Operand(at));
308 Pop(holder_reg); // Restore holder.
309 }
310
311 // Check if both contexts are the same.
312 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
313 Branch(&same_contexts, eq, scratch, Operand(at));
314
315 // Check the context is a global context.
316 if (FLAG_debug_code) {
317 // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
318 Push(holder_reg); // Temporarily save holder on the stack.
319 mov(holder_reg, at); // Move at to its holding place.
320 LoadRoot(at, Heap::kNullValueRootIndex);
321 Check(ne, "JSGlobalProxy::context() should not be null.",
322 holder_reg, Operand(at));
323
324 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
325 LoadRoot(at, Heap::kGlobalContextMapRootIndex);
326 Check(eq, "JSGlobalObject::global_context should be a global context.",
327 holder_reg, Operand(at));
328 // Restoring at is not needed; at is reloaded below.
329 Pop(holder_reg); // Restore holder.
330 // Restore at to holder's context.
331 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
332 }
333
334 // Check that the security token in the calling global object is
335 // compatible with the security token in the receiving global
336 // object.
337 int token_offset = Context::kHeaderSize +
338 Context::SECURITY_TOKEN_INDEX * kPointerSize;
339
340 lw(scratch, FieldMemOperand(scratch, token_offset));
341 lw(at, FieldMemOperand(at, token_offset));
342 Branch(miss, ne, scratch, Operand(at));
343
344 bind(&same_contexts);
126 } 345 }
127 346
128 347
129 // --------------------------------------------------------------------------- 348 // ---------------------------------------------------------------------------
130 // Instruction macros 349 // Instruction macros
131 350
132 void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
133 if (rt.is_reg()) {
134 add(rd, rs, rt.rm());
135 } else {
136 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
137 addi(rd, rs, rt.imm32_);
138 } else {
139 // li handles the relocation.
140 ASSERT(!rs.is(at));
141 li(at, rt);
142 add(rd, rs, at);
143 }
144 }
145 }
146
147
148 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { 351 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
149 if (rt.is_reg()) { 352 if (rt.is_reg()) {
150 addu(rd, rs, rt.rm()); 353 addu(rd, rs, rt.rm());
151 } else { 354 } else {
152 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 355 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
153 addiu(rd, rs, rt.imm32_); 356 addiu(rd, rs, rt.imm32_);
154 } else { 357 } else {
155 // li handles the relocation. 358 // li handles the relocation.
156 ASSERT(!rs.is(at)); 359 ASSERT(!rs.is(at));
157 li(at, rt); 360 li(at, rt);
158 addu(rd, rs, at); 361 addu(rd, rs, at);
159 } 362 }
160 } 363 }
161 } 364 }
365
366
367 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
368 if (rt.is_reg()) {
369 subu(rd, rs, rt.rm());
370 } else {
371 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
372 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
373 } else {
374 // li handles the relocation.
375 ASSERT(!rs.is(at));
376 li(at, rt);
377 subu(rd, rs, at);
378 }
379 }
380 }
162 381
163 382
164 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { 383 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
165 if (rt.is_reg()) { 384 if (rt.is_reg()) {
166 mul(rd, rs, rt.rm()); 385 mul(rd, rs, rt.rm());
167 } else { 386 } else {
168 // li handles the relocation. 387 // li handles the relocation.
169 ASSERT(!rs.is(at)); 388 ASSERT(!rs.is(at));
170 li(at, rt); 389 li(at, rt);
171 mul(rd, rs, at); 390 mul(rd, rs, at);
(...skipping 46 matching lines...)
218 li(at, rt); 437 li(at, rt);
219 divu(rs, at); 438 divu(rs, at);
220 } 439 }
221 } 440 }
222 441
223 442
224 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { 443 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
225 if (rt.is_reg()) { 444 if (rt.is_reg()) {
226 and_(rd, rs, rt.rm()); 445 and_(rd, rs, rt.rm());
227 } else { 446 } else {
228 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 447 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
229 andi(rd, rs, rt.imm32_); 448 andi(rd, rs, rt.imm32_);
230 } else { 449 } else {
231 // li handles the relocation. 450 // li handles the relocation.
232 ASSERT(!rs.is(at)); 451 ASSERT(!rs.is(at));
233 li(at, rt); 452 li(at, rt);
234 and_(rd, rs, at); 453 and_(rd, rs, at);
235 } 454 }
236 } 455 }
237 } 456 }
238 457
239 458
240 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { 459 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
241 if (rt.is_reg()) { 460 if (rt.is_reg()) {
242 or_(rd, rs, rt.rm()); 461 or_(rd, rs, rt.rm());
243 } else { 462 } else {
244 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 463 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
245 ori(rd, rs, rt.imm32_); 464 ori(rd, rs, rt.imm32_);
246 } else { 465 } else {
247 // li handles the relocation. 466 // li handles the relocation.
248 ASSERT(!rs.is(at)); 467 ASSERT(!rs.is(at));
249 li(at, rt); 468 li(at, rt);
250 or_(rd, rs, at); 469 or_(rd, rs, at);
251 } 470 }
252 } 471 }
253 } 472 }
254 473
255 474
256 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { 475 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
257 if (rt.is_reg()) { 476 if (rt.is_reg()) {
258 xor_(rd, rs, rt.rm()); 477 xor_(rd, rs, rt.rm());
259 } else { 478 } else {
260 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 479 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
261 xori(rd, rs, rt.imm32_); 480 xori(rd, rs, rt.imm32_);
262 } else { 481 } else {
263 // li handles the relocation. 482 // li handles the relocation.
264 ASSERT(!rs.is(at)); 483 ASSERT(!rs.is(at));
265 li(at, rt); 484 li(at, rt);
266 xor_(rd, rs, at); 485 xor_(rd, rs, at);
267 } 486 }
268 } 487 }
269 } 488 }
270 489
271 490
272 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { 491 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
273 if (rt.is_reg()) { 492 if (rt.is_reg()) {
274 nor(rd, rs, rt.rm()); 493 nor(rd, rs, rt.rm());
275 } else { 494 } else {
276 // li handles the relocation. 495 // li handles the relocation.
277 ASSERT(!rs.is(at)); 496 ASSERT(!rs.is(at));
278 li(at, rt); 497 li(at, rt);
279 nor(rd, rs, at); 498 nor(rd, rs, at);
280 } 499 }
281 } 500 }
282 501
283 502
284 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { 503 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
285 if (rt.is_reg()) { 504 if (rt.is_reg()) {
286 slt(rd, rs, rt.rm()); 505 slt(rd, rs, rt.rm());
287 } else { 506 } else {
288 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 507 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
289 slti(rd, rs, rt.imm32_); 508 slti(rd, rs, rt.imm32_);
290 } else { 509 } else {
291 // li handles the relocation. 510 // li handles the relocation.
292 ASSERT(!rs.is(at)); 511 ASSERT(!rs.is(at));
293 li(at, rt); 512 li(at, rt);
294 slt(rd, rs, at); 513 slt(rd, rs, at);
295 } 514 }
296 } 515 }
297 } 516 }
298 517
299 518
300 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { 519 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
301 if (rt.is_reg()) { 520 if (rt.is_reg()) {
302 sltu(rd, rs, rt.rm()); 521 sltu(rd, rs, rt.rm());
303 } else { 522 } else {
304 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 523 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
305 sltiu(rd, rs, rt.imm32_); 524 sltiu(rd, rs, rt.imm32_);
306 } else { 525 } else {
307 // li handles the relocation. 526 // li handles the relocation.
308 ASSERT(!rs.is(at)); 527 ASSERT(!rs.is(at));
309 li(at, rt); 528 li(at, rt);
310 sltu(rd, rs, at); 529 sltu(rd, rs, at);
311 } 530 }
312 } 531 }
313 } 532 }
314 533
315 534
535 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
536 if (mips32r2) {
537 if (rt.is_reg()) {
538 rotrv(rd, rs, rt.rm());
539 } else {
540 rotr(rd, rs, rt.imm32_);
541 }
542 } else {
543 if (rt.is_reg()) {
544 subu(at, zero_reg, rt.rm());
545 sllv(at, rs, at);
546 srlv(rd, rs, rt.rm());
547 or_(rd, rd, at);
548 } else {
549 if (rt.imm32_ == 0) {
550 srl(rd, rs, 0);
551 } else {
552 srl(at, rs, rt.imm32_);
553 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
554 or_(rd, rd, at);
555 }
556 }
557 }
558 }
559
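The pre-R2 fallback above is the standard shift/or rotate. A scalar sketch of the immediate case, with the same & 0x1f masking of the shift amount as the code (a sketch only, not the emitted instructions):

#include <stdint.h>
uint32_t RotateRight(uint32_t x, uint32_t n) {  // n in [0, 31].
  if (n == 0) return x;  // Matches the srl(rd, rs, 0) special case.
  return (x >> n) | (x << ((32 - n) & 0x1f));   // srl, sll, or_.
}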
560
316 //------------Pseudo-instructions------------- 561 //------------Pseudo-instructions-------------
317 562
318 void MacroAssembler::movn(Register rd, Register rt) {
319 addiu(at, zero_reg, -1); // Fill at with ones.
320 xor_(rd, rt, at);
321 }
322
323
324 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) { 563 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
325 ASSERT(!j.is_reg()); 564 ASSERT(!j.is_reg());
326 565 BlockTrampolinePoolScope block_trampoline_pool(this);
327 if (!MustUseAt(j.rmode_) && !gen2instr) { 566 if (!MustUseReg(j.rmode_) && !gen2instr) {
328 // Normal load of an immediate value which does not need Relocation Info. 567 // Normal load of an immediate value which does not need Relocation Info.
329 if (is_int16(j.imm32_)) { 568 if (is_int16(j.imm32_)) {
330 addiu(rd, zero_reg, j.imm32_); 569 addiu(rd, zero_reg, j.imm32_);
331 } else if (!(j.imm32_ & HIMask)) { 570 } else if (!(j.imm32_ & kHiMask)) {
332 ori(rd, zero_reg, j.imm32_); 571 ori(rd, zero_reg, j.imm32_);
333 } else if (!(j.imm32_ & LOMask)) { 572 } else if (!(j.imm32_ & kImm16Mask)) {
334 lui(rd, (HIMask & j.imm32_) >> 16); 573 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
335 } else { 574 } else {
336 lui(rd, (HIMask & j.imm32_) >> 16); 575 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
337 ori(rd, rd, (LOMask & j.imm32_)); 576 ori(rd, rd, (j.imm32_ & kImm16Mask));
338 } 577 }
339 } else if (MustUseAt(j.rmode_) || gen2instr) { 578 } else if (MustUseReg(j.rmode_) || gen2instr) {
340 if (MustUseAt(j.rmode_)) { 579 if (MustUseReg(j.rmode_)) {
341 RecordRelocInfo(j.rmode_, j.imm32_); 580 RecordRelocInfo(j.rmode_, j.imm32_);
342 } 581 }
343 // We always need the same number of instructions, as we may need to patch 582
344 // this code to load another value which may need 2 instructions to load. 583 // this code to load another value which may need 2 instructions to load.
345 if (is_int16(j.imm32_)) { 584 if (is_int16(j.imm32_)) {
346 nop(); 585 nop();
347 addiu(rd, zero_reg, j.imm32_); 586 addiu(rd, zero_reg, j.imm32_);
348 } else if (!(j.imm32_ & HIMask)) { 587 } else if (!(j.imm32_ & kHiMask)) {
349 nop(); 588 nop();
350 ori(rd, zero_reg, j.imm32_); 589 ori(rd, zero_reg, j.imm32_);
351 } else if (!(j.imm32_ & LOMask)) { 590 } else if (!(j.imm32_ & kImm16Mask)) {
352 nop(); 591 nop();
353 lui(rd, (HIMask & j.imm32_) >> 16); 592 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
354 } else { 593 } else {
355 lui(rd, (HIMask & j.imm32_) >> 16); 594 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
356 ori(rd, rd, (LOMask & j.imm32_)); 595 ori(rd, rd, (j.imm32_ & kImm16Mask));
357 } 596 }
358 } 597 }
359 } 598 }
360 599
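li splits a full 32-bit immediate across lui/ori. A sketch of the split, assuming kHiMask == 0xFFFF0000, kImm16Mask == 0xFFFF and kLuiShift == 16, consistent with how they are used above:

#include <stdint.h>
void SplitForLuiOri(uint32_t imm, uint32_t* lui_operand, uint32_t* ori_operand) {
  *lui_operand = (imm & 0xFFFF0000u) >> 16;  // lui writes this to the top half.
  *ori_operand = imm & 0x0000FFFFu;          // ori merges in the bottom half.
}
// After lui(rd, hi): rd == hi << 16.  After ori(rd, rd, lo): rd == imm.
// The gen2instr path pads the 1-instruction cases with a nop so a patchable
// load site always spans exactly two instructions.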
361 600
362 // Exception-generating instructions and debugging support 601 // Exception-generating instructions and debugging support
363 void MacroAssembler::stop(const char* msg) { 602 void MacroAssembler::stop(const char* msg) {
364 // TO_UPGRADE: Just a break for now. Maybe we could upgrade it. 603 // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
365 // We use the 0x54321 value to be able to find it easily when reading memory. 604 // We use the 0x54321 value to be able to find it easily when reading memory.
366 break_(0x54321); 605 break_(0x54321);
(...skipping 43 matching lines...)
410 649
411 for (int16_t i = kNumRegisters; i > 0; i--) { 650 for (int16_t i = kNumRegisters; i > 0; i--) {
412 if ((regs & (1 << i)) != 0) { 651 if ((regs & (1 << i)) != 0) {
413 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); 652 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
414 } 653 }
415 } 654 }
416 addiu(sp, sp, 4 * NumSaved); 655 addiu(sp, sp, 4 * NumSaved);
417 } 656 }
418 657
419 658
659 void MacroAssembler::Ext(Register rt,
660 Register rs,
661 uint16_t pos,
662 uint16_t size) {
663 ASSERT(pos < 32);
664 ASSERT(pos + size < 32);
665
666 if (mips32r2) {
667 ext_(rt, rs, pos, size);
668 } else {
669 // Move rs to rt and shift it left then right to get the
670 // desired bitfield on the right side and zeroes on the left.
671 sll(rt, rs, 32 - (pos + size));
672 srl(rt, rt, 32 - size);
673 }
674 }
675
676
677 void MacroAssembler::Ins(Register rt,
678 Register rs,
679 uint16_t pos,
680 uint16_t size) {
681 ASSERT(pos < 32);
682 ASSERT(pos + size < 32);
683
684 if (mips32r2) {
685 ins_(rt, rs, pos, size);
686 } else {
687 ASSERT(!rt.is(t8) && !rs.is(t8));
688
689 srl(t8, rt, pos + size);
690 // The left chunk from rt that needs to
691 // be saved is on the right side of t8.
692 sll(at, t8, pos + size);
693 // The 'at' register now contains the left chunk on
694 // the left (proper position) and zeroes.
695 sll(t8, rt, 32 - pos);
696 // t8 now contains the right chunk on the left and zeroes.
697 srl(t8, t8, 32 - pos);
698 // t8 now contains the right chunk on
699 // the right (proper position) and zeroes.
700 or_(rt, at, t8);
701 // rt now contains the left and right chunks from the original rt
702 // in their proper position and zeroes in the middle.
703 sll(t8, rs, 32 - size);
704 // t8 now contains the chunk from rs on the left and zeroes.
705 srl(t8, t8, 32 - size - pos);
706 // t8 now contains the original chunk from rs in
707 // the middle (proper position).
708 or_(rt, rt, t8);
709 // rt now contains the result of the ins instruction in R2 mode.
710 }
711 }
712
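Both pre-R2 fallbacks above are ordinary shift arithmetic. A scalar sketch for pos + size < 32; the Ins version is written with a mask rather than the four-shift sequence in the code, but the result is the same:

#include <stdint.h>
uint32_t ExtBits(uint32_t rs, int pos, int size) {
  // The sll/srl pair above: left-align the field, then right-align it.
  return (rs << (32 - (pos + size))) >> (32 - size);
}
uint32_t InsBits(uint32_t rt, uint32_t rs, int pos, int size) {
  uint32_t mask = ((1u << size) - 1u) << pos;  // The field being replaced.
  return (rt & ~mask) | ((rs << pos) & mask);  // Keep rt outside, rs inside.
}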
713
714 void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
715 // Move the data from fs to t4.
716 mfc1(t4, fs);
717 return Cvt_d_uw(fd, t4);
718 }
719
720
721 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
722 // Convert rs to a FP value in fd (and fd + 1).
723 // We do this by converting rs with its MSB cleared, to avoid a negative
724 // signed conversion, then adding 2^31 - 1 and 1 back if the MSB was set.
725
726 ASSERT(!fd.is(f20));
727 ASSERT(!rs.is(t9));
728 ASSERT(!rs.is(t8));
729
730 // Save rs's MSB to t8
731 And(t8, rs, 0x80000000);
732 // Remove rs's MSB.
733 And(t9, rs, 0x7FFFFFFF);
734 // Move t9 to fd
735 mtc1(t9, fd);
736
737 // Convert fd to a real FP value.
738 cvt_d_w(fd, fd);
739
740 Label conversion_done;
741
742 // If rs's MSB was 0, it's done.
743 // Otherwise we need to add that to the FP register.
744 Branch(&conversion_done, eq, t8, Operand(zero_reg));
745
746 // First load 2^31 - 1 into f20.
747 Or(t9, zero_reg, 0x7FFFFFFF);
748 mtc1(t9, f20);
749
750 // Convert it to FP and add it to fd.
751 cvt_d_w(f20, f20);
752 add_d(fd, fd, f20);
753 // Now add 1.
754 Or(t9, zero_reg, 1);
755 mtc1(t9, f20);
756
757 cvt_d_w(f20, f20);
758 add_d(fd, fd, f20);
759 bind(&conversion_done);
760 }
761
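A scalar sketch of the trick above: MIPS only converts signed words, so the MSB is stripped first, and 2^31 is added back as (2^31 - 1) + 1 because each addend is itself representable as a signed word. Illustrative only:

#include <stdint.h>
double CvtDoubleFromUint32(uint32_t u) {
  double d = static_cast<double>(static_cast<int32_t>(u & 0x7FFFFFFF));
  if (u & 0x80000000u) {
    d += 2147483647.0;  // 2^31 - 1: first mtc1/cvt_d_w/add_d round.
    d += 1.0;           // Plus 1: second round; net effect is +2^31.
  }
  return d;
}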
762
763 void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
764 Trunc_uw_d(fs, t4);
765 mtc1(t4, fd);
766 }
767
768
769 void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
770 ASSERT(!fd.is(f22));
771 ASSERT(!rs.is(t6));
772
773 // Load 2^31 into f22.
774 Or(t6, zero_reg, 0x80000000);
775 Cvt_d_uw(f22, t6);
776
777 // Test if f22 > fd.
778 c(OLT, D, fd, f22);
779
780 Label simple_convert;
781 // If fd < 2^31 we can convert it normally.
782 bc1t(&simple_convert);
783
784 // First we subtract 2^31 from fd, then trunc it to rs
785 // and add 2^31 to rs.
786
787 sub_d(f22, fd, f22);
788 trunc_w_d(f22, f22);
789 mfc1(rs, f22);
790 or_(rs, rs, t6);
791
792 Label done;
793 Branch(&done);
794 // Simple conversion.
795 bind(&simple_convert);
796 trunc_w_d(f22, fd);
797 mfc1(rs, f22);
798
799 bind(&done);
800 }
801
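A scalar sketch of the same logic: values below 2^31 convert directly, while larger ones have 2^31 subtracted before the signed truncation and the MSB or'ed back in afterwards:

#include <stdint.h>
uint32_t TruncUint32FromDouble(double d) {
  const double two_31 = 2147483648.0;  // The constant built in t6 / f22.
  if (d < two_31) {                    // The c(OLT, D, ...) / bc1t path.
    return static_cast<uint32_t>(static_cast<int32_t>(d));
  }
  return static_cast<uint32_t>(static_cast<int32_t>(d - two_31)) | 0x80000000u;
}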
802
803 // Tries to get a signed int32 out of a double precision floating point heap
804 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
805 // 32-bit signed integer range.
806 // This method implementation differs from the ARM version for performance
807 // reasons.
808 void MacroAssembler::ConvertToInt32(Register source,
809 Register dest,
810 Register scratch,
811 Register scratch2,
812 FPURegister double_scratch,
813 Label *not_int32) {
814 Label right_exponent, done;
815 // Get exponent word (ENDIAN issues).
816 lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
817 // Get exponent alone in scratch2.
818 And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
819 // Load dest with zero. We use this either for the final shift or
820 // for the answer.
821 mov(dest, zero_reg);
822 // Check whether the exponent matches a 32 bit signed int that is not a Smi.
823 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
824 // the exponent that we are fastest at and also the highest exponent we can
825 // handle here.
826 const uint32_t non_smi_exponent =
827 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
828 // If we have a match of the int32-but-not-Smi exponent then skip some logic.
829 Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
830 // If the exponent is higher than that then go to not_int32 case. This
831 // catches numbers that don't fit in a signed int32, infinities and NaNs.
832 Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
833
834 // We know the exponent is smaller than 30 (biased). If it is less than
835 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
836 // it rounds to zero.
837 const uint32_t zero_exponent =
838 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
839 Subu(scratch2, scratch2, Operand(zero_exponent));
840 // Dest already has a Smi zero.
841 Branch(&done, lt, scratch2, Operand(zero_reg));
842 if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
843 // We have a shifted exponent between 0 and 30 in scratch2.
844 srl(dest, scratch2, HeapNumber::kExponentShift);
845 // We now have the exponent in dest. Subtract from 30 to get
846 // how much to shift down.
847 li(at, Operand(30));
848 subu(dest, at, dest);
849 }
850 bind(&right_exponent);
851 if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
852 CpuFeatures::Scope scope(FPU);
853 // MIPS FPU instructions implementing double precision to integer
854 // conversion using round to zero. Since the FP value was qualified
855 // above, the resulting integer should be a legal int32.
856 // The original 'Exponent' word is still in scratch.
857 lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
858 mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
859 trunc_w_d(double_scratch, double_scratch);
860 mfc1(dest, double_scratch);
861 } else {
862 // On entry, dest has final downshift, scratch has original sign/exp/mant.
863 // Save sign bit in top bit of dest.
864 And(scratch2, scratch, Operand(0x80000000));
865 Or(dest, dest, Operand(scratch2));
866 // Put back the implicit 1, just above mantissa field.
867 Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
868
869 // Shift up the mantissa bits to take up the space the exponent used to
870 // take. We just orred in the implicit bit so that took care of one and
871 // we want to leave the sign bit 0 so we subtract 2 bits from the shift
872 // distance. But we want to clear the sign-bit so shift one more bit
873 // left, then shift right one bit.
874 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
875 sll(scratch, scratch, shift_distance + 1);
876 srl(scratch, scratch, 1);
877
878 // Get the second half of the double. For some exponents we don't
879 // actually need this because the bits get shifted out again, but
880 // it's probably slower to test than just to do it.
881 lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
882 // Extract the top 10 bits of scratch2 and insert them as the bottom 10 bits of scratch.
883 // The width of the field here is the same as the shift amount above.
884 const int field_width = shift_distance;
885 Ext(scratch2, scratch2, 32-shift_distance, field_width);
886 Ins(scratch, scratch2, 0, field_width);
887 // Move down according to the exponent.
888 srlv(scratch, scratch, dest);
889 // Prepare the negative version of our integer.
890 subu(scratch2, zero_reg, scratch);
891 // Trick to check sign bit (msb) held in dest, count leading zero.
892 // 0 indicates negative, save negative version with conditional move.
893 clz(dest, dest);
894 movz(scratch, scratch2, dest);
895 mov(dest, scratch);
896 }
897 bind(&done);
898 }
899
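A short scalar sketch of the exponent classification driving ConvertToInt32, with the standard IEEE-754 constants written out (the real code reads them from HeapNumber; treat the names here as illustrative):

#include <stdint.h>
// Mirrors the three-way exponent test above.
// Returns -1 for "branch to not_int32", -2 for "result is zero", and
// otherwise the downshift 30 - (exponent - bias) applied to the mantissa.
int ClassifyExponent(uint32_t high_word) {
  const int kExponentShift = 20;   // 20 mantissa bits live in the high word.
  const int kExponentBias = 1023;
  int biased = (high_word >> kExponentShift) & 0x7FF;
  if (biased > kExponentBias + 30) return -1;  // Infinities, NaNs, too large.
  if (biased < kExponentBias) return -2;       // |x| < 1.0 rounds to zero.
  return 30 - (biased - kExponentBias);        // 0 in the fastest case.
}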
900
420 // Emulated conditional branches do not emit a nop in the branch delay slot. 901
421 902 //
422 // Trashes the at register if no scratch register is provided. 903 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
423 void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs, 904 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
424 const Operand& rt, Register scratch) { 905 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
906 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
907
908
909 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
910 b(offset);
911
912 // Emit a nop in the branch delay slot if required.
913 if (bdslot == PROTECT)
914 nop();
915 }
916
917
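The conditional overloads that follow synthesize every comparison from slt/sltu plus a branch against zero. A scalar sketch of that mapping (these helpers are illustrative, not the emitted code):

// greater:        slt(scratch, r2, rs);  bne(scratch, zero_reg, offset)
inline bool TakenGreater(int32_t rs, int32_t r2) { return r2 < rs; }
// greater_equal:  slt(scratch, rs, r2);  beq(scratch, zero_reg, offset)
inline bool TakenGreaterEqual(int32_t rs, int32_t r2) { return !(rs < r2); }
// The unsigned cases swap in sltu, i.e. the same tests on uint32_t:
inline bool TakenUgreater(uint32_t rs, uint32_t r2) { return r2 < rs; }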
918 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
919 const Operand& rt,
920 BranchDelaySlot bdslot) {
921 BRANCH_ARGS_CHECK(cond, rs, rt);
922 ASSERT(!rs.is(zero_reg));
425 Register r2 = no_reg; 923 Register r2 = no_reg;
924 Register scratch = at;
925
426 if (rt.is_reg()) { 926 if (rt.is_reg()) {
427 // We don't want any other register but scratch clobbered. 927 // We don't want any other register but scratch clobbered.
428 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_)); 928 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
429 r2 = rt.rm_; 929 r2 = rt.rm_;
430 } else if (cond != cc_always) { 930 switch (cond) {
431 // We don't want any other register but scratch clobbered. 931 case cc_always:
432 ASSERT(!scratch.is(rs)); 932 b(offset);
433 r2 = scratch; 933 break;
434 li(r2, rt); 934 case eq:
935 beq(rs, r2, offset);
936 break;
937 case ne:
938 bne(rs, r2, offset);
939 break;
940 // Signed comparison
941 case greater:
942 if (r2.is(zero_reg)) {
943 bgtz(rs, offset);
944 } else {
945 slt(scratch, r2, rs);
946 bne(scratch, zero_reg, offset);
947 }
948 break;
949 case greater_equal:
950 if (r2.is(zero_reg)) {
951 bgez(rs, offset);
952 } else {
953 slt(scratch, rs, r2);
954 beq(scratch, zero_reg, offset);
955 }
956 break;
957 case less:
958 if (r2.is(zero_reg)) {
959 bltz(rs, offset);
960 } else {
961 slt(scratch, rs, r2);
962 bne(scratch, zero_reg, offset);
963 }
964 break;
965 case less_equal:
966 if (r2.is(zero_reg)) {
967 blez(rs, offset);
968 } else {
969 slt(scratch, r2, rs);
970 beq(scratch, zero_reg, offset);
971 }
972 break;
973 // Unsigned comparison.
974 case Ugreater:
975 if (r2.is(zero_reg)) {
976 bgtz(rs, offset);
977 } else {
978 sltu(scratch, r2, rs);
979 bne(scratch, zero_reg, offset);
980 }
981 break;
982 case Ugreater_equal:
983 if (r2.is(zero_reg)) {
984 bgez(rs, offset);
985 } else {
986 sltu(scratch, rs, r2);
987 beq(scratch, zero_reg, offset);
988 }
989 break;
990 case Uless:
991 if (r2.is(zero_reg)) {
992 b(offset);
993 } else {
994 sltu(scratch, rs, r2);
995 bne(scratch, zero_reg, offset);
996 }
997 break;
998 case Uless_equal:
999 if (r2.is(zero_reg)) {
1000 b(offset);
1001 } else {
1002 sltu(scratch, r2, rs);
1003 beq(scratch, zero_reg, offset);
1004 }
1005 break;
1006 default:
1007 UNREACHABLE();
1008 }
1009 } else {
1010 // Be careful to always use shifted_branch_offset only just before the
1011 // branch instruction, as the location will be remembered for patching the
1012 // target.
1013 switch (cond) {
1014 case cc_always:
1015 b(offset);
1016 break;
1017 case eq:
1018 // We don't want any other register but scratch clobbered.
1019 ASSERT(!scratch.is(rs));
1020 r2 = scratch;
1021 li(r2, rt);
1022 beq(rs, r2, offset);
1023 break;
1024 case ne:
1025 // We don't want any other register but scratch clobbered.
1026 ASSERT(!scratch.is(rs));
1027 r2 = scratch;
1028 li(r2, rt);
1029 bne(rs, r2, offset);
1030 break;
1031 // Signed comparison
1032 case greater:
1033 if (rt.imm32_ == 0) {
1034 bgtz(rs, offset);
1035 } else {
1036 r2 = scratch;
1037 li(r2, rt);
1038 slt(scratch, r2, rs);
1039 bne(scratch, zero_reg, offset);
1040 }
1041 break;
1042 case greater_equal:
1043 if (rt.imm32_ == 0) {
1044 bgez(rs, offset);
1045 } else if (is_int16(rt.imm32_)) {
1046 slti(scratch, rs, rt.imm32_);
1047 beq(scratch, zero_reg, offset);
1048 } else {
1049 r2 = scratch;
1050 li(r2, rt);
1051 sltu(scratch, rs, r2);
1052 beq(scratch, zero_reg, offset);
1053 }
1054 break;
1055 case less:
1056 if (rt.imm32_ == 0) {
1057 bltz(rs, offset);
1058 } else if (is_int16(rt.imm32_)) {
1059 slti(scratch, rs, rt.imm32_);
1060 bne(scratch, zero_reg, offset);
1061 } else {
1062 r2 = scratch;
1063 li(r2, rt);
1064 slt(scratch, rs, r2);
1065 bne(scratch, zero_reg, offset);
1066 }
1067 break;
1068 case less_equal:
1069 if (rt.imm32_ == 0) {
1070 blez(rs, offset);
1071 } else {
1072 r2 = scratch;
1073 li(r2, rt);
1074 slt(scratch, r2, rs);
1075 beq(scratch, zero_reg, offset);
1076 }
1077 break;
1078 // Unsigned comparison.
1079 case Ugreater:
1080 if (rt.imm32_ == 0) {
1081 bgtz(rs, offset);
1082 } else {
1083 r2 = scratch;
1084 li(r2, rt);
1085 sltu(scratch, r2, rs);
1086 bne(scratch, zero_reg, offset);
1087 }
1088 break;
1089 case Ugreater_equal:
1090 if (rt.imm32_ == 0) {
1091 bgez(rs, offset);
1092 } else if (is_int16(rt.imm32_)) {
1093 sltiu(scratch, rs, rt.imm32_);
1094 beq(scratch, zero_reg, offset);
1095 } else {
1096 r2 = scratch;
1097 li(r2, rt);
1098 sltu(scratch, rs, r2);
1099 beq(scratch, zero_reg, offset);
1100 }
1101 break;
1102 case Uless:
1103 if (rt.imm32_ == 0) {
1104 b(offset);
1105 } else if (is_int16(rt.imm32_)) {
1106 sltiu(scratch, rs, rt.imm32_);
1107 bne(scratch, zero_reg, offset);
1108 } else {
1109 r2 = scratch;
1110 li(r2, rt);
1111 sltu(scratch, rs, r2);
1112 bne(scratch, zero_reg, offset);
1113 }
1114 break;
1115 case Uless_equal:
1116 if (rt.imm32_ == 0) {
1117 b(offset);
1118 } else {
1119 r2 = scratch;
1120 li(r2, rt);
1121 sltu(scratch, r2, rs);
1122 beq(scratch, zero_reg, offset);
1123 }
1124 break;
1125 default:
1126 UNREACHABLE();
1127 }
435 } 1128 }
436 1129 // Emit a nop in the branch delay slot if required.
437 switch (cond) { 1130 if (bdslot == PROTECT)
438 case cc_always: 1131 nop();
439 b(offset); 1132 }
440 break; 1133
441 case eq: 1134
442 beq(rs, r2, offset); 1135 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
443 break; 1136 // We use branch_offset as an argument for the branch instructions to be sure
444 case ne: 1137 // it is called just before generating the branch instruction, as needed.
445 bne(rs, r2, offset); 1138
446 break; 1139 b(shifted_branch_offset(L, false));
447 1140
1141 // Emit a nop in the branch delay slot if required.
1142 if (bdslot == PROTECT)
1143 nop();
1144 }
1145
1146
1147 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1148 const Operand& rt,
1149 BranchDelaySlot bdslot) {
1150 BRANCH_ARGS_CHECK(cond, rs, rt);
1151
1152 int32_t offset;
1153 Register r2 = no_reg;
1154 Register scratch = at;
1155 if (rt.is_reg()) {
1156 r2 = rt.rm_;
1157 // Be careful to always use shifted_branch_offset only just before the
1158 // branch instruction, as the location will be remembered for patching the
1159 // target.
1160 switch (cond) {
1161 case cc_always:
1162 offset = shifted_branch_offset(L, false);
1163 b(offset);
1164 break;
1165 case eq:
1166 offset = shifted_branch_offset(L, false);
1167 beq(rs, r2, offset);
1168 break;
1169 case ne:
1170 offset = shifted_branch_offset(L, false);
1171 bne(rs, r2, offset);
1172 break;
448 // Signed comparison 1173 // Signed comparison
449 case greater: 1174 case greater:
450 slt(scratch, r2, rs); 1175 if (r2.is(zero_reg)) {
451 bne(scratch, zero_reg, offset); 1176 offset = shifted_branch_offset(L, false);
452 break; 1177 bgtz(rs, offset);
453 case greater_equal: 1178 } else {
454 slt(scratch, rs, r2); 1179 slt(scratch, r2, rs);
455 beq(scratch, zero_reg, offset); 1180 offset = shifted_branch_offset(L, false);
456 break; 1181 bne(scratch, zero_reg, offset);
457 case less: 1182 }
458 slt(scratch, rs, r2); 1183 break;
459 bne(scratch, zero_reg, offset); 1184 case greater_equal:
460 break; 1185 if (r2.is(zero_reg)) {
461 case less_equal: 1186 offset = shifted_branch_offset(L, false);
462 slt(scratch, r2, rs); 1187 bgez(rs, offset);
463 beq(scratch, zero_reg, offset); 1188 } else {
464 break; 1189 slt(scratch, rs, r2);
465 1190 offset = shifted_branch_offset(L, false);
1191 beq(scratch, zero_reg, offset);
1192 }
1193 break;
1194 case less:
1195 if (r2.is(zero_reg)) {
1196 offset = shifted_branch_offset(L, false);
1197 bltz(rs, offset);
1198 } else {
1199 slt(scratch, rs, r2);
1200 offset = shifted_branch_offset(L, false);
1201 bne(scratch, zero_reg, offset);
1202 }
1203 break;
1204 case less_equal:
1205 if (r2.is(zero_reg)) {
1206 offset = shifted_branch_offset(L, false);
1207 blez(rs, offset);
1208 } else {
1209 slt(scratch, r2, rs);
1210 offset = shifted_branch_offset(L, false);
1211 beq(scratch, zero_reg, offset);
1212 }
1213 break;
466 // Unsigned comparison. 1214 // Unsigned comparison.
467 case Ugreater: 1215 case Ugreater:
468 sltu(scratch, r2, rs); 1216 if (r2.is(zero_reg)) {
469 bne(scratch, zero_reg, offset); 1217 offset = shifted_branch_offset(L, false);
470 break; 1218 bgtz(rs, offset);
471 case Ugreater_equal: 1219 } else {
472 sltu(scratch, rs, r2); 1220 sltu(scratch, r2, rs);
473 beq(scratch, zero_reg, offset); 1221 offset = shifted_branch_offset(L, false);
474 break; 1222 bne(scratch, zero_reg, offset);
475 case Uless: 1223 }
476 sltu(scratch, rs, r2); 1224 break;
477 bne(scratch, zero_reg, offset); 1225 case Ugreater_equal:
478 break; 1226 if (r2.is(zero_reg)) {
479 case Uless_equal: 1227 offset = shifted_branch_offset(L, false);
480 sltu(scratch, r2, rs); 1228 bgez(rs, offset);
481 beq(scratch, zero_reg, offset); 1229 } else {
482 break; 1230 sltu(scratch, rs, r2);
483 1231 offset = shifted_branch_offset(L, false);
484 default: 1232 beq(scratch, zero_reg, offset);
485 UNREACHABLE(); 1233 }
1234 break;
1235 case Uless:
1236 if (r2.is(zero_reg)) {
1237 offset = shifted_branch_offset(L, false);
1238 b(offset);
1239 } else {
1240 sltu(scratch, rs, r2);
1241 offset = shifted_branch_offset(L, false);
1242 bne(scratch, zero_reg, offset);
1243 }
1244 break;
1245 case Uless_equal:
1246 if (r2.is(zero_reg)) {
1247 offset = shifted_branch_offset(L, false);
1248 b(offset);
1249 } else {
1250 sltu(scratch, r2, rs);
1251 offset = shifted_branch_offset(L, false);
1252 beq(scratch, zero_reg, offset);
1253 }
1254 break;
1255 default:
1256 UNREACHABLE();
1257 }
1258 } else {
1259 // Be careful to always use shifted_branch_offset only just before the
1260 // branch instruction, as the location will be remembered for patching the
1261 // target.
1262 switch (cond) {
1263 case cc_always:
1264 offset = shifted_branch_offset(L, false);
1265 b(offset);
1266 break;
1267 case eq:
1268 r2 = scratch;
1269 li(r2, rt);
1270 offset = shifted_branch_offset(L, false);
1271 beq(rs, r2, offset);
1272 break;
1273 case ne:
1274 r2 = scratch;
1275 li(r2, rt);
1276 offset = shifted_branch_offset(L, false);
1277 bne(rs, r2, offset);
1278 break;
1279 // Signed comparison
1280 case greater:
1281 if (rt.imm32_ == 0) {
1282 offset = shifted_branch_offset(L, false);
1283 bgtz(rs, offset);
1284 } else {
1285 r2 = scratch;
1286 li(r2, rt);
1287 slt(scratch, r2, rs);
1288 offset = shifted_branch_offset(L, false);
1289 bne(scratch, zero_reg, offset);
1290 }
1291 break;
1292 case greater_equal:
1293 if (rt.imm32_ == 0) {
1294 offset = shifted_branch_offset(L, false);
1295 bgez(rs, offset);
1296 } else if (is_int16(rt.imm32_)) {
1297 slti(scratch, rs, rt.imm32_);
1298 offset = shifted_branch_offset(L, false);
1299 beq(scratch, zero_reg, offset);
1300 } else {
1301 r2 = scratch;
1302 li(r2, rt);
1303 sltu(scratch, rs, r2);
1304 offset = shifted_branch_offset(L, false);
1305 beq(scratch, zero_reg, offset);
1306 }
1307 break;
1308 case less:
1309 if (rt.imm32_ == 0) {
1310 offset = shifted_branch_offset(L, false);
1311 bltz(rs, offset);
1312 } else if (is_int16(rt.imm32_)) {
1313 slti(scratch, rs, rt.imm32_);
1314 offset = shifted_branch_offset(L, false);
1315 bne(scratch, zero_reg, offset);
1316 } else {
1317 r2 = scratch;
1318 li(r2, rt);
1319 slt(scratch, rs, r2);
1320 offset = shifted_branch_offset(L, false);
1321 bne(scratch, zero_reg, offset);
1322 }
1323 break;
1324 case less_equal:
1325 if (rt.imm32_ == 0) {
1326 offset = shifted_branch_offset(L, false);
1327 blez(rs, offset);
1328 } else {
1329 r2 = scratch;
1330 li(r2, rt);
1331 slt(scratch, r2, rs);
1332 offset = shifted_branch_offset(L, false);
1333 beq(scratch, zero_reg, offset);
1334 }
1335 break;
1336 // Unsigned comparison.
1337 case Ugreater:
1338 if (rt.imm32_ == 0) {
1339 offset = shifted_branch_offset(L, false);
1340 bgtz(rs, offset);
1341 } else {
1342 r2 = scratch;
1343 li(r2, rt);
1344 sltu(scratch, r2, rs);
1345 offset = shifted_branch_offset(L, false);
1346 bne(scratch, zero_reg, offset);
1347 }
1348 break;
1349 case Ugreater_equal:
1350 if (rt.imm32_ == 0) {
1351 offset = shifted_branch_offset(L, false);
1352 bgez(rs, offset);
1353 } else if (is_int16(rt.imm32_)) {
1354 sltiu(scratch, rs, rt.imm32_);
1355 offset = shifted_branch_offset(L, false);
1356 beq(scratch, zero_reg, offset);
1357 } else {
1358 r2 = scratch;
1359 li(r2, rt);
1360 sltu(scratch, rs, r2);
1361 offset = shifted_branch_offset(L, false);
1362 beq(scratch, zero_reg, offset);
1363 }
1364 break;
1365 case Uless:
1366 if (rt.imm32_ == 0) {
1367 offset = shifted_branch_offset(L, false);
1368 b(offset);
1369 } else if (is_int16(rt.imm32_)) {
1370 sltiu(scratch, rs, rt.imm32_);
1371 offset = shifted_branch_offset(L, false);
1372 bne(scratch, zero_reg, offset);
1373 } else {
1374 r2 = scratch;
1375 li(r2, rt);
1376 sltu(scratch, rs, r2);
1377 offset = shifted_branch_offset(L, false);
1378 bne(scratch, zero_reg, offset);
1379 }
1380 break;
1381 case Uless_equal:
1382 if (rt.imm32_ == 0) {
1383 offset = shifted_branch_offset(L, false);
1384 b(offset);
1385 } else {
1386 r2 = scratch;
1387 li(r2, rt);
1388 sltu(scratch, r2, rs);
1389 offset = shifted_branch_offset(L, false);
1390 beq(scratch, zero_reg, offset);
1391 }
1392 break;
1393 default:
1394 UNREACHABLE();
1395 }
486 } 1396 }
487 // Emit a nop in the branch delay slot. 1397 // Check that offset could actually hold on an int16_t.
488 nop(); 1398 ASSERT(is_int16(offset));
489 } 1399 // Emit a nop in the branch delay slot if required.
490 1400 if (bdslot == PROTECT)
491 1401 nop();
492 void MacroAssembler::Branch(Condition cond, Label* L, Register rs, 1402 }
493 const Operand& rt, Register scratch) { 1403
1404
1405 // We need to use a bgezal or bltzal, but they can't be used directly with the
1406 // slt instructions. We could use sub or add instead but we would miss overflow
1407 // cases, so we keep slt and add an intermediate third instruction.
1408 void MacroAssembler::BranchAndLink(int16_t offset,
1409 BranchDelaySlot bdslot) {
1410 bal(offset);
1411
1412 // Emit a nop in the branch delay slot if required.
1413 if (bdslot == PROTECT)
1414 nop();
1415 }
1416
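A scalar sketch of the slt/addiu/bgezal idiom the comment above describes: slt yields 0 or 1, the addiu of -1 maps that to -1 or 0, and bgezal (branch-and-link if >= 0) then fires exactly when the comparison held. Illustrative only:

#include <stdint.h>
inline bool BgezalTakenForGreater(int32_t rs, int32_t r2) {
  int32_t scratch = (r2 < rs) ? 1 : 0;  // slt(scratch, r2, rs)
  scratch -= 1;                         // addiu(scratch, scratch, -1)
  return scratch >= 0;                  // bgezal tests the sign bit.
}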
1417
1418 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
1419 const Operand& rt,
1420 BranchDelaySlot bdslot) {
1421 BRANCH_ARGS_CHECK(cond, rs, rt);
494 Register r2 = no_reg; 1422 Register r2 = no_reg;
1423 Register scratch = at;
1424
495 if (rt.is_reg()) { 1425 if (rt.is_reg()) {
496 r2 = rt.rm_; 1426 r2 = rt.rm_;
497 } else if (cond != cc_always) { 1427 } else if (cond != cc_always) {
498 r2 = scratch;
499 li(r2, rt);
500 }
501
502 // We use branch_offset as an argument for the branch instructions to be sure
503 // it is called just before generating the branch instruction, as needed.
504
505 switch (cond) {
506 case cc_always:
507 b(shifted_branch_offset(L, false));
508 break;
509 case eq:
510 beq(rs, r2, shifted_branch_offset(L, false));
511 break;
512 case ne:
513 bne(rs, r2, shifted_branch_offset(L, false));
514 break;
515
516 // Signed comparison
517 case greater:
518 slt(scratch, r2, rs);
519 bne(scratch, zero_reg, shifted_branch_offset(L, false));
520 break;
521 case greater_equal:
522 slt(scratch, rs, r2);
523 beq(scratch, zero_reg, shifted_branch_offset(L, false));
524 break;
525 case less:
526 slt(scratch, rs, r2);
527 bne(scratch, zero_reg, shifted_branch_offset(L, false));
528 break;
529 case less_equal:
530 slt(scratch, r2, rs);
531 beq(scratch, zero_reg, shifted_branch_offset(L, false));
532 break;
533
534 // Unsigned comparison.
535 case Ugreater:
536 sltu(scratch, r2, rs);
537 bne(scratch, zero_reg, shifted_branch_offset(L, false));
538 break;
539 case Ugreater_equal:
540 sltu(scratch, rs, r2);
541 beq(scratch, zero_reg, shifted_branch_offset(L, false));
542 break;
543 case Uless:
544 sltu(scratch, rs, r2);
545 bne(scratch, zero_reg, shifted_branch_offset(L, false));
546 break;
547 case Uless_equal:
548 sltu(scratch, r2, rs);
549 beq(scratch, zero_reg, shifted_branch_offset(L, false));
550 break;
551
552 default:
553 UNREACHABLE();
554 }
555 // Emit a nop in the branch delay slot.
556 nop();
557 }
558
559
560 // Trashes the at register if no scratch register is provided.
561 // We need to use a bgezal or bltzal, but they can't be used directly with the
562 // slt instructions. We could use sub or add instead but we would miss overflow
563 // cases, so we keep slt and add an intermediate third instruction.
564 void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
565 const Operand& rt, Register scratch) {
566 Register r2 = no_reg;
567 if (rt.is_reg()) {
568 r2 = rt.rm_;
569 } else if (cond != cc_always) {
570 r2 = scratch; 1428 r2 = scratch;
571 li(r2, rt); 1429 li(r2, rt);
572 } 1430 }
573 1431
574 switch (cond) { 1432 switch (cond) {
575 case cc_always: 1433 case cc_always:
576 bal(offset); 1434 bal(offset);
577 break; 1435 break;
578 case eq: 1436 case eq:
579 bne(rs, r2, 2); 1437 bne(rs, r2, 2);
(...skipping 46 matching lines...)
626 break; 1484 break;
627 case Uless_equal: 1485 case Uless_equal:
628 sltu(scratch, r2, rs); 1486 sltu(scratch, r2, rs);
629 addiu(scratch, scratch, -1); 1487 addiu(scratch, scratch, -1);
630 bltzal(scratch, offset); 1488 bltzal(scratch, offset);
631 break; 1489 break;
632 1490
633 default: 1491 default:
634 UNREACHABLE(); 1492 UNREACHABLE();
635 } 1493 }
636 // Emit a nop in the branch delay slot. 1494 // Emit a nop in the branch delay slot if required.
637 nop(); 1495 if (bdslot == PROTECT)
1496 nop();
638 } 1497 }
639 1498
640 1499
641 void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs, 1500 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
642 const Operand& rt, Register scratch) { 1501 bal(shifted_branch_offset(L, false));
1502
1503 // Emit a nop in the branch delay slot if required.
1504 if (bdslot == PROTECT)
1505 nop();
1506 }
1507
1508
1509 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
1510 const Operand& rt,
1511 BranchDelaySlot bdslot) {
1512 BRANCH_ARGS_CHECK(cond, rs, rt);
1513
1514 int32_t offset;
643 Register r2 = no_reg; 1515 Register r2 = no_reg;
1516 Register scratch = at;
644 if (rt.is_reg()) { 1517 if (rt.is_reg()) {
645 r2 = rt.rm_; 1518 r2 = rt.rm_;
646 } else if (cond != cc_always) { 1519 } else if (cond != cc_always) {
647 r2 = scratch; 1520 r2 = scratch;
648 li(r2, rt); 1521 li(r2, rt);
649 } 1522 }
650 1523
651 switch (cond) { 1524 switch (cond) {
652 case cc_always: 1525 case cc_always:
653 bal(shifted_branch_offset(L, false)); 1526 offset = shifted_branch_offset(L, false);
1527 bal(offset);
654 break; 1528 break;
655 case eq: 1529 case eq:
656 bne(rs, r2, 2); 1530 bne(rs, r2, 2);
657 nop(); 1531 nop();
658 bal(shifted_branch_offset(L, false)); 1532 offset = shifted_branch_offset(L, false);
1533 bal(offset);
659 break; 1534 break;
660 case ne: 1535 case ne:
661 beq(rs, r2, 2); 1536 beq(rs, r2, 2);
662 nop(); 1537 nop();
663 bal(shifted_branch_offset(L, false)); 1538 offset = shifted_branch_offset(L, false);
1539 bal(offset);
664 break; 1540 break;
665 1541
666 // Signed comparison 1542 // Signed comparison
667 case greater: 1543 case greater:
668 slt(scratch, r2, rs); 1544 slt(scratch, r2, rs);
669 addiu(scratch, scratch, -1); 1545 addiu(scratch, scratch, -1);
670 bgezal(scratch, shifted_branch_offset(L, false)); 1546 offset = shifted_branch_offset(L, false);
1547 bgezal(scratch, offset);
671 break; 1548 break;
672 case greater_equal: 1549 case greater_equal:
673 slt(scratch, rs, r2); 1550 slt(scratch, rs, r2);
674 addiu(scratch, scratch, -1); 1551 addiu(scratch, scratch, -1);
675 bltzal(scratch, shifted_branch_offset(L, false)); 1552 offset = shifted_branch_offset(L, false);
1553 bltzal(scratch, offset);
676 break; 1554 break;
677 case less: 1555 case less:
678 slt(scratch, rs, r2); 1556 slt(scratch, rs, r2);
679 addiu(scratch, scratch, -1); 1557 addiu(scratch, scratch, -1);
680 bgezal(scratch, shifted_branch_offset(L, false)); 1558 offset = shifted_branch_offset(L, false);
1559 bgezal(scratch, offset);
681 break; 1560 break;
682 case less_equal: 1561 case less_equal:
683 slt(scratch, r2, rs); 1562 slt(scratch, r2, rs);
684 addiu(scratch, scratch, -1); 1563 addiu(scratch, scratch, -1);
685 bltzal(scratch, shifted_branch_offset(L, false)); 1564 offset = shifted_branch_offset(L, false);
1565 bltzal(scratch, offset);
686 break; 1566 break;
687 1567
688 // Unsigned comparison. 1568 // Unsigned comparison.
689 case Ugreater: 1569 case Ugreater:
690 sltu(scratch, r2, rs); 1570 sltu(scratch, r2, rs);
691 addiu(scratch, scratch, -1); 1571 addiu(scratch, scratch, -1);
692 bgezal(scratch, shifted_branch_offset(L, false)); 1572 offset = shifted_branch_offset(L, false);
1573 bgezal(scratch, offset);
693 break; 1574 break;
694 case Ugreater_equal: 1575 case Ugreater_equal:
695 sltu(scratch, rs, r2); 1576 sltu(scratch, rs, r2);
696 addiu(scratch, scratch, -1); 1577 addiu(scratch, scratch, -1);
697 bltzal(scratch, shifted_branch_offset(L, false)); 1578 offset = shifted_branch_offset(L, false);
1579 bltzal(scratch, offset);
698 break; 1580 break;
699 case Uless: 1581 case Uless:
700 sltu(scratch, rs, r2); 1582 sltu(scratch, rs, r2);
701 addiu(scratch, scratch, -1); 1583 addiu(scratch, scratch, -1);
702 bgezal(scratch, shifted_branch_offset(L, false)); 1584 offset = shifted_branch_offset(L, false);
1585 bgezal(scratch, offset);
703 break; 1586 break;
704 case Uless_equal: 1587 case Uless_equal:
705 sltu(scratch, r2, rs); 1588 sltu(scratch, r2, rs);
706 addiu(scratch, scratch, -1); 1589 addiu(scratch, scratch, -1);
707 bltzal(scratch, shifted_branch_offset(L, false)); 1590 offset = shifted_branch_offset(L, false);
1591 bltzal(scratch, offset);
708 break; 1592 break;
709 1593
710 default: 1594 default:
711 UNREACHABLE(); 1595 UNREACHABLE();
712 } 1596 }
713 // Emit a nop in the branch delay slot. 1597
714 nop(); 1598 // Check that the offset actually fits in an int16_t.
1599 ASSERT(is_int16(offset));
1600
1601 // Emit a nop in the branch delay slot if required.
1602 if (bdslot == PROTECT)
1603 nop();
1604 }
1605
1606
1607 void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
1608 BlockTrampolinePoolScope block_trampoline_pool(this);
1609 if (target.is_reg()) {
1610 jr(target.rm());
1611 } else {
1612 if (!MustUseReg(target.rmode_)) {
1613 j(target.imm32_);
1614 } else {
1615 li(t9, target);
1616 jr(t9);
1617 }
1618 }
1619 // Emit a nop in the branch delay slot if required.
1620 if (bdslot == PROTECT)
1621 nop();
715 } 1622 }
716 1623
717 1624
718 void MacroAssembler::Jump(const Operand& target, 1625 void MacroAssembler::Jump(const Operand& target,
719 Condition cond, Register rs, const Operand& rt) { 1626 Condition cond, Register rs, const Operand& rt,
1627 BranchDelaySlot bdslot) {
1628 BlockTrampolinePoolScope block_trampoline_pool(this);
1629 BRANCH_ARGS_CHECK(cond, rs, rt);
720 if (target.is_reg()) { 1630 if (target.is_reg()) {
721 if (cond == cc_always) { 1631 if (cond == cc_always) {
722 jr(target.rm()); 1632 jr(target.rm());
723 } else { 1633 } else {
724 Branch(NegateCondition(cond), 2, rs, rt); 1634 Branch(2, NegateCondition(cond), rs, rt);
725 jr(target.rm()); 1635 jr(target.rm());
726 } 1636 }
1637 } else { // Not register target.
1638 if (!MustUseReg(target.rmode_)) {
1639 if (cond == cc_always) {
1640 j(target.imm32_);
1641 } else {
1642 Branch(2, NegateCondition(cond), rs, rt);
1643 j(target.imm32_); // Will generate only one instruction.
1644 }
1645 } else { // MustUseReg(target)
1646 li(t9, target);
1647 if (cond == cc_always) {
1648 jr(t9);
1649 } else {
1650 Branch(2, NegateCondition(cond), rs, rt);
1651 jr(t9); // Will generate only one instruction.
1652 }
1653 }
1654 }
1655 // Emit a nop in the branch delay slot if required.
1656 if (bdslot == PROTECT)
1657 nop();
1658 }
1659
1660
1661 // Note: To call gcc-compiled C code on mips, you must call through t9.
1662 void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
1663 BlockTrampolinePoolScope block_trampoline_pool(this);
1664 if (target.is_reg()) {
1665 jalr(target.rm());
727 } else { // !target.is_reg() 1666 } else { // !target.is_reg()
728 if (!MustUseAt(target.rmode_)) { 1667 if (!MustUseReg(target.rmode_)) {
729 if (cond == cc_always) { 1668 jal(target.imm32_);
730 j(target.imm32_); 1669 } else { // MustUseReg(target)
731 } else { 1670 li(t9, target);
732 Branch(NegateCondition(cond), 2, rs, rt); 1671 jalr(t9);
733 j(target.imm32_); // Will generate only one instruction. 1672 }
734 } 1673 }
735 } else { // MustUseAt(target) 1674 // Emit a nop in the branch delay slot if required.
736 li(at, target); 1675 if (bdslot == PROTECT)
737 if (cond == cc_always) { 1676 nop();
738 jr(at); 1677 }
739 } else { 1678
740 Branch(NegateCondition(cond), 2, rs, rt); 1679
741 jr(at); // Will generate only one instruction. 1680 // Note: To call gcc-compiled C code on mips, you must call through t9.
742 }
743 }
744 }
745 // Emit a nop in the branch delay slot.
746 nop();
747 }
748
749
750 void MacroAssembler::Call(const Operand& target, 1681 void MacroAssembler::Call(const Operand& target,
751 Condition cond, Register rs, const Operand& rt) { 1682 Condition cond, Register rs, const Operand& rt,
1683 BranchDelaySlot bdslot) {
1684 BlockTrampolinePoolScope block_trampoline_pool(this);
1685 BRANCH_ARGS_CHECK(cond, rs, rt);
752 if (target.is_reg()) { 1686 if (target.is_reg()) {
753 if (cond == cc_always) { 1687 if (cond == cc_always) {
754 jalr(target.rm()); 1688 jalr(target.rm());
755 } else { 1689 } else {
756 Branch(NegateCondition(cond), 2, rs, rt); 1690 Branch(2, NegateCondition(cond), rs, rt);
757 jalr(target.rm()); 1691 jalr(target.rm());
758 } 1692 }
759 } else { // !target.is_reg() 1693 } else { // !target.is_reg()
760 if (!MustUseAt(target.rmode_)) { 1694 if (!MustUseReg(target.rmode_)) {
761 if (cond == cc_always) { 1695 if (cond == cc_always) {
762 jal(target.imm32_); 1696 jal(target.imm32_);
763 } else { 1697 } else {
764 Branch(NegateCondition(cond), 2, rs, rt); 1698 Branch(2, NegateCondition(cond), rs, rt);
765 jal(target.imm32_); // Will generate only one instruction. 1699 jal(target.imm32_); // Will generate only one instruction.
766 } 1700 }
767 } else { // MustUseAt(target) 1701 } else { // MustUseReg(target)
768 li(at, target); 1702 li(t9, target);
769 if (cond == cc_always) { 1703 if (cond == cc_always) {
770 jalr(at); 1704 jalr(t9);
771 } else { 1705 } else {
772 Branch(NegateCondition(cond), 2, rs, rt); 1706 Branch(2, NegateCondition(cond), rs, rt);
773 jalr(at); // Will generate only one instruction. 1707 jalr(t9); // Will generate only one instruction.
774 } 1708 }
775 } 1709 }
776 } 1710 }
777 // Emit a nop in the branch delay slot. 1711 // Emit a nop in the branch delay slot if required.
778 nop(); 1712 if (bdslot == PROTECT)
779 } 1713 nop();
780 1714 }
781 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) { 1715
782 UNIMPLEMENTED_MIPS(); 1716
783 } 1717 void MacroAssembler::Drop(int count,
784 1718 Condition cond,
785 1719 Register reg,
786 void MacroAssembler::Drop(int count, Condition cond) { 1720 const Operand& op) {
787 UNIMPLEMENTED_MIPS(); 1721 if (count <= 0) {
1722 return;
1723 }
1724
1725 Label skip;
1726
1727 if (cond != al) {
1728 Branch(&skip, NegateCondition(cond), reg, op);
1729 }
1730
1731 if (count > 0) {
1732 addiu(sp, sp, count * kPointerSize);
1733 }
1734
1735 if (cond != al) {
1736 bind(&skip);
1737 }
1738 }
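Drop frees stack slots purely by bumping sp; since the MIPS stack grows downward, adding to sp discards the topmost count words. A hedged model (illustrative names, not V8 code):

#include <cstdint>

const int kPointerSize = 4;  // 32-bit target, as in this file

uintptr_t DropWords(uintptr_t sp, int count) {
  if (count <= 0) return sp;         // Drop emits nothing in this case
  return sp + count * kPointerSize;  // addiu(sp, sp, count * kPointerSize)
}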
1739
1740
1741 void MacroAssembler::DropAndRet(int drop,
1742 Condition cond,
1743 Register r1,
1744 const Operand& r2) {
1745 // This is a workaround to make sure only one branch instruction is
1746 // generated. It relies on Drop and Ret not creating branches if
1747 // cond == cc_always.
1748 Label skip;
1749 if (cond != cc_always) {
1750 Branch(&skip, NegateCondition(cond), r1, r2);
1751 }
1752
1753 Drop(drop);
1754 Ret();
1755
1756 if (cond != cc_always) {
1757 bind(&skip);
1758 }
1759 }
1760
1761
1762 void MacroAssembler::Swap(Register reg1,
1763 Register reg2,
1764 Register scratch) {
1765 if (scratch.is(no_reg)) {
1766 Xor(reg1, reg1, Operand(reg2));
1767 Xor(reg2, reg2, Operand(reg1));
1768 Xor(reg1, reg1, Operand(reg2));
1769 } else {
1770 mov(scratch, reg1);
1771 mov(reg1, reg2);
1772 mov(reg2, scratch);
1773 }
788 } 1774 }
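The scratch-free path of Swap is the classic three-XOR swap. A minimal sketch (illustrative, not V8 code); note that XOR-swapping a location with itself zeroes it, so the two registers must be distinct:

#include <cstdint>

void XorSwap(uint32_t& a, uint32_t& b) {
  a ^= b;  // Xor(reg1, reg1, Operand(reg2))
  b ^= a;  // Xor(reg2, reg2, Operand(reg1))
  a ^= b;  // Xor(reg1, reg1, Operand(reg2))
}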
789 1775
790 1776
791 void MacroAssembler::Call(Label* target) { 1777 void MacroAssembler::Call(Label* target) {
792 UNIMPLEMENTED_MIPS(); 1778 BranchAndLink(target);
1779 }
1780
1781
1782 void MacroAssembler::Move(Register dst, Register src) {
1783 if (!dst.is(src)) {
1784 mov(dst, src);
1785 }
793 } 1786 }
794 1787
795 1788
796 #ifdef ENABLE_DEBUGGER_SUPPORT 1789 #ifdef ENABLE_DEBUGGER_SUPPORT
797 // --------------------------------------------------------------------------- 1790
798 // Debugger Support 1791 void MacroAssembler::DebugBreak() {
799 1792 ASSERT(allow_stub_calls());
800 void MacroAssembler::DebugBreak() { 1793 mov(a0, zero_reg);
801 UNIMPLEMENTED_MIPS(); 1794 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
802 } 1795 CEntryStub ces(1);
803 #endif 1796 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1797 }
1798
1799 #endif // ENABLE_DEBUGGER_SUPPORT
804 1800
805 1801
806 // --------------------------------------------------------------------------- 1802 // ---------------------------------------------------------------------------
807 // Exception handling 1803 // Exception handling
808 1804
809 void MacroAssembler::PushTryHandler(CodeLocation try_location, 1805 void MacroAssembler::PushTryHandler(CodeLocation try_location,
810 HandlerType type) { 1806 HandlerType type) {
811 // Adjust this code if not the case. 1807 // Adjust this code if not the case.
812 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); 1808 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
813 // The return address is passed in register ra. 1809 // The return address is passed in register ra.
814 if (try_location == IN_JAVASCRIPT) { 1810 if (try_location == IN_JAVASCRIPT) {
815 if (type == TRY_CATCH_HANDLER) { 1811 if (type == TRY_CATCH_HANDLER) {
816 li(t0, Operand(StackHandler::TRY_CATCH)); 1812 li(t0, Operand(StackHandler::TRY_CATCH));
817 } else { 1813 } else {
818 li(t0, Operand(StackHandler::TRY_FINALLY)); 1814 li(t0, Operand(StackHandler::TRY_FINALLY));
819 } 1815 }
820 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize 1816 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
821 && StackHandlerConstants::kFPOffset == 2 * kPointerSize 1817 && StackHandlerConstants::kFPOffset == 2 * kPointerSize
822 && StackHandlerConstants::kPCOffset == 3 * kPointerSize 1818 && StackHandlerConstants::kPCOffset == 3 * kPointerSize
823 && StackHandlerConstants::kNextOffset == 0 * kPointerSize); 1819 && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
824 // Save the current handler as the next handler. 1820 // Save the current handler as the next handler.
825 LoadExternalReference(t2, ExternalReference(Isolate::k_handler_address)); 1821 li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
826 lw(t1, MemOperand(t2)); 1822 lw(t1, MemOperand(t2));
827 1823
828 addiu(sp, sp, -StackHandlerConstants::kSize); 1824 addiu(sp, sp, -StackHandlerConstants::kSize);
829 sw(ra, MemOperand(sp, 12)); 1825 sw(ra, MemOperand(sp, 12));
830 sw(fp, MemOperand(sp, 8)); 1826 sw(fp, MemOperand(sp, 8));
831 sw(t0, MemOperand(sp, 4)); 1827 sw(t0, MemOperand(sp, 4));
832 sw(t1, MemOperand(sp, 0)); 1828 sw(t1, MemOperand(sp, 0));
833 1829
834 // Link this handler as the new current one. 1830 // Link this handler as the new current one.
835 sw(sp, MemOperand(t2)); 1831 sw(sp, MemOperand(t2));
836 1832
837 } else { 1833 } else {
838 // Must preserve a0-a3, and s0 (argv). 1834 // Must preserve a0-a3, and s0 (argv).
839 ASSERT(try_location == IN_JS_ENTRY); 1835 ASSERT(try_location == IN_JS_ENTRY);
840 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize 1836 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
841 && StackHandlerConstants::kFPOffset == 2 * kPointerSize 1837 && StackHandlerConstants::kFPOffset == 2 * kPointerSize
842 && StackHandlerConstants::kPCOffset == 3 * kPointerSize 1838 && StackHandlerConstants::kPCOffset == 3 * kPointerSize
843 && StackHandlerConstants::kNextOffset == 0 * kPointerSize); 1839 && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
844 1840
845 // The frame pointer does not point to a JS frame so we save NULL 1841 // The frame pointer does not point to a JS frame so we save NULL
846 // for fp. We expect the code throwing an exception to check fp 1842 // for fp. We expect the code throwing an exception to check fp
847 // before dereferencing it to restore the context. 1843 // before dereferencing it to restore the context.
848 li(t0, Operand(StackHandler::ENTRY)); 1844 li(t0, Operand(StackHandler::ENTRY));
849 1845
850 // Save the current handler as the next handler. 1846 // Save the current handler as the next handler.
851 LoadExternalReference(t2, ExternalReference(Isolate::k_handler_address)); 1847 li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
852 lw(t1, MemOperand(t2)); 1848 lw(t1, MemOperand(t2));
853 1849
854 addiu(sp, sp, -StackHandlerConstants::kSize); 1850 addiu(sp, sp, -StackHandlerConstants::kSize);
855 sw(ra, MemOperand(sp, 12)); 1851 sw(ra, MemOperand(sp, 12));
856 sw(zero_reg, MemOperand(sp, 8)); 1852 sw(zero_reg, MemOperand(sp, 8));
857 sw(t0, MemOperand(sp, 4)); 1853 sw(t0, MemOperand(sp, 4));
858 sw(t1, MemOperand(sp, 0)); 1854 sw(t1, MemOperand(sp, 0));
859 1855
860 // Link this handler as the new current one. 1856 // Link this handler as the new current one.
861 sw(sp, MemOperand(t2)); 1857 sw(sp, MemOperand(t2));
862 } 1858 }
863 } 1859 }
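For reference, a sketch of the four-word handler frame pushed above, matching the StackHandlerConstants asserts and the sw offsets; the struct is illustrative only, not a V8 type:

#include <cstdint>

struct TryHandlerFrame {  // layout after addiu(sp, sp, -StackHandlerConstants::kSize)
  uint32_t next;   // sp + 0:  previous handler, loaded from k_handler_address
  uint32_t state;  // sp + 4:  TRY_CATCH, TRY_FINALLY, or ENTRY
  uint32_t fp;     // sp + 8:  frame pointer (zero for the JS entry frame)
  uint32_t pc;     // sp + 12: return address (ra)
};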
864 1860
865 1861
866 void MacroAssembler::PopTryHandler() { 1862 void MacroAssembler::PopTryHandler() {
867 UNIMPLEMENTED_MIPS(); 1863 ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
868 } 1864 pop(a1);
869 1865 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
870 1866 li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
871 1867 sw(a1, MemOperand(at));
872 // ----------------------------------------------------------------------------- 1868 }
873 // Activation frames 1869
874 1870
875 void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) { 1871 void MacroAssembler::AllocateInNewSpace(int object_size,
876 Label extra_push, end; 1872 Register result,
877 1873 Register scratch1,
878 andi(scratch, sp, 7); 1874 Register scratch2,
879 1875 Label* gc_required,
880 // We check the combined size of the args and receiver on the stack; all are word sized.
881 // We add one for sp, which we also want to store on the stack.
882 if (((arg_count + 1) % kPointerSizeLog2) == 0) { 1878 if (FLAG_debug_code) {
883 Branch(ne, &extra_push, at, Operand(zero_reg)); 1879 // Trash the registers to simulate an allocation failure.
884 } else { // ((arg_count + 1) % 2) == 1 1880 li(result, 0x7091);
885 Branch(eq, &extra_push, at, Operand(zero_reg)); 1881 li(scratch1, 0x7191);
886 } 1882 li(scratch2, 0x7291);
887 1883 }
888 // Save sp on the stack. 1884 jmp(gc_required);
889 mov(scratch, sp); 1885 return;
890 Push(scratch); 1886 }
891 b(&end); 1887
892 1888 ASSERT(!result.is(scratch1));
893 // Align before saving sp on the stack. 1889 ASSERT(!result.is(scratch2));
894 bind(&extra_push); 1890 ASSERT(!scratch1.is(scratch2));
895 mov(scratch, sp); 1891 ASSERT(!scratch1.is(t9));
896 addiu(sp, sp, -8); 1892 ASSERT(!scratch2.is(t9));
897 sw(scratch, MemOperand(sp)); 1893 ASSERT(!result.is(t9));
898 1894
899 // The stack is aligned and sp is stored on the top. 1895 // Make object size into bytes.
900 bind(&end); 1896 if ((flags & SIZE_IN_WORDS) != 0) {
901 } 1897 object_size *= kPointerSize;
902 1898 }
903 1899 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
904 void MacroAssembler::ReturnFromAlignedCall() { 1900
905 lw(sp, MemOperand(sp)); 1901 // Check relative positions of allocation top and limit addresses.
906 } 1902 // ARM adds additional checks to make sure the ldm instruction can be
907 1903 // used. On MIPS we don't have ldm, so no additional checks are needed.
908 1904 ExternalReference new_space_allocation_top =
1905 ExternalReference::new_space_allocation_top_address(isolate());
1906 ExternalReference new_space_allocation_limit =
1907 ExternalReference::new_space_allocation_limit_address(isolate());
1908 intptr_t top =
1909 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1910 intptr_t limit =
1911 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1912 ASSERT((limit - top) == kPointerSize);
1913
1914 // Set up allocation top address and object size registers.
1915 Register topaddr = scratch1;
1916 Register obj_size_reg = scratch2;
1917 li(topaddr, Operand(new_space_allocation_top));
1918 li(obj_size_reg, Operand(object_size));
1919
1920 // This code stores a temporary value in t9.
1921 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1922 // Load allocation top into result and allocation limit into t9.
1923 lw(result, MemOperand(topaddr));
1924 lw(t9, MemOperand(topaddr, kPointerSize));
1925 } else {
1926 if (FLAG_debug_code) {
1927 // Assert that result actually contains top on entry. t9 is used
1928 // immediately below, so this use of t9 does not cause a difference in
1929 // register content between debug and release mode.
1930 lw(t9, MemOperand(topaddr));
1931 Check(eq, "Unexpected allocation top", result, Operand(t9));
1932 }
1933 // Load allocation limit into t9. Result already contains allocation top.
1934 lw(t9, MemOperand(topaddr, limit - top));
1935 }
1936
1937 // Calculate new top and bail out if new space is exhausted. Use result
1938 // to calculate the new top.
1939 Addu(scratch2, result, Operand(obj_size_reg));
1940 Branch(gc_required, Ugreater, scratch2, Operand(t9));
1941 sw(scratch2, MemOperand(topaddr));
1942
1943 // Tag object if requested.
1944 if ((flags & TAG_OBJECT) != 0) {
1945 Addu(result, result, Operand(kHeapObjectTag));
1946 }
1947 }
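The sequence above is a bump-pointer allocator over the new-space top/limit pair. A hedged C++ model of the fast path (illustrative names, not V8 code):

#include <cstdint>

bool TryBumpAllocate(uintptr_t* top, uintptr_t limit, uintptr_t size,
                     uintptr_t* result) {
  uintptr_t new_top = *top + size;    // Addu(scratch2, result, obj_size_reg)
  if (new_top > limit) return false;  // Branch(gc_required, Ugreater, ...)
  *result = *top;                     // the object starts at the old top
  *top = new_top;                     // sw(scratch2, MemOperand(topaddr))
  return true;
}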
1948
1949
1950 void MacroAssembler::AllocateInNewSpace(Register object_size,
1951 Register result,
1952 Register scratch1,
1953 Register scratch2,
1954 Label* gc_required,
1955 AllocationFlags flags) {
1956 if (!FLAG_inline_new) {
1957 if (FLAG_debug_code) {
1958 // Trash the registers to simulate an allocation failure.
1959 li(result, 0x7091);
1960 li(scratch1, 0x7191);
1961 li(scratch2, 0x7291);
1962 }
1963 jmp(gc_required);
1964 return;
1965 }
1966
1967 ASSERT(!result.is(scratch1));
1968 ASSERT(!result.is(scratch2));
1969 ASSERT(!scratch1.is(scratch2));
1970 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
1971
1972 // Check relative positions of allocation top and limit addresses.
1973 // ARM adds additional checks to make sure the ldm instruction can be
1974 // used. On MIPS we don't have ldm, so no additional checks are needed.
1975 ExternalReference new_space_allocation_top =
1976 ExternalReference::new_space_allocation_top_address(isolate());
1977 ExternalReference new_space_allocation_limit =
1978 ExternalReference::new_space_allocation_limit_address(isolate());
1979 intptr_t top =
1980 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1981 intptr_t limit =
1982 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1983 ASSERT((limit - top) == kPointerSize);
1984
1985 // Set up allocation top address and object size registers.
1986 Register topaddr = scratch1;
1987 li(topaddr, Operand(new_space_allocation_top));
1988
1989 // This code stores a temporary value in t9.
1990 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1991 // Load allocation top into result and allocation limit into t9.
1992 lw(result, MemOperand(topaddr));
1993 lw(t9, MemOperand(topaddr, kPointerSize));
1994 } else {
1995 if (FLAG_debug_code) {
1996 // Assert that result actually contains top on entry. t9 is used
1997 // immediately below, so this use of t9 does not cause a difference in
1998 // register content between debug and release mode.
1999 lw(t9, MemOperand(topaddr));
2000 Check(eq, "Unexpected allocation top", result, Operand(t9));
2001 }
2002 // Load allocation limit into t9. Result already contains allocation top.
2003 lw(t9, MemOperand(topaddr, limit - top));
2004 }
2005
2006 // Calculate new top and bail out if new space is exhausted. Use result
2007 // to calculate the new top. Object size may be in words so a shift is
2008 // required to get the number of bytes.
2009 if ((flags & SIZE_IN_WORDS) != 0) {
2010 sll(scratch2, object_size, kPointerSizeLog2);
2011 Addu(scratch2, result, scratch2);
2012 } else {
2013 Addu(scratch2, result, Operand(object_size));
2014 }
2015 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2016
2017 // Update allocation top. result temporarily holds the new top.
2018 if (FLAG_debug_code) {
2019 And(t9, scratch2, Operand(kObjectAlignmentMask));
2020 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
2021 }
2022 sw(scratch2, MemOperand(topaddr));
2023
2024 // Tag object if requested.
2025 if ((flags & TAG_OBJECT) != 0) {
2026 Addu(result, result, Operand(kHeapObjectTag));
2027 }
2028 }
2029
2030
2031 void MacroAssembler::UndoAllocationInNewSpace(Register object,
2032 Register scratch) {
2033 ExternalReference new_space_allocation_top =
2034 ExternalReference::new_space_allocation_top_address(isolate());
2035
2036 // Make sure the object has no tag before resetting top.
2037 And(object, object, Operand(~kHeapObjectTagMask));
2038 #ifdef DEBUG
2039 // Check that the object being un-allocated is below the current top.
2040 li(scratch, Operand(new_space_allocation_top));
2041 lw(scratch, MemOperand(scratch));
2042 Check(less, "Undo allocation of non allocated memory",
2043 object, Operand(scratch));
2044 #endif
2045 // Write the address of the object to un-allocate as the current top.
2046 li(scratch, Operand(new_space_allocation_top));
2047 sw(object, MemOperand(scratch));
2048 }
2049
2050
2051 void MacroAssembler::AllocateTwoByteString(Register result,
2052 Register length,
2053 Register scratch1,
2054 Register scratch2,
2055 Register scratch3,
2056 Label* gc_required) {
2057 // Calculate the number of bytes needed for the characters in the string while
2058 // observing object alignment.
2059 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2060 sll(scratch1, length, 1); // Length in bytes, not chars.
2061 addiu(scratch1, scratch1,
2062 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
2063 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2064
2065 // Allocate two-byte string in new space.
2066 AllocateInNewSpace(scratch1,
2067 result,
2068 scratch2,
2069 scratch3,
2070 gc_required,
2071 TAG_OBJECT);
2072
2073 // Set the map, length and hash field.
2074 InitializeNewString(result,
2075 length,
2076 Heap::kStringMapRootIndex,
2077 scratch1,
2078 scratch2);
2079 }
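The sll/addiu/And sequence above computes align(2 * length + header) with the usual add-mask-then-clear rounding. A one-function sketch (illustrative, not V8 code):

#include <cstdint>

uint32_t TwoByteStringSizeInBytes(uint32_t length, uint32_t header_size,
                                  uint32_t alignment_mask) {
  uint32_t size = (length << 1)                    // sll: two bytes per char
                  + alignment_mask + header_size;  // addiu: add header and mask
  return size & ~alignment_mask;                   // And: clear the low bits
}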
2080
2081
2082 void MacroAssembler::AllocateAsciiString(Register result,
2083 Register length,
2084 Register scratch1,
2085 Register scratch2,
2086 Register scratch3,
2087 Label* gc_required) {
2088 // Calculate the number of bytes needed for the characters in the string
2089 // while observing object alignment.
2090 ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
2091 ASSERT(kCharSize == 1);
2092 addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
2093 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2094
2095 // Allocate ASCII string in new space.
2096 AllocateInNewSpace(scratch1,
2097 result,
2098 scratch2,
2099 scratch3,
2100 gc_required,
2101 TAG_OBJECT);
2102
2103 // Set the map, length and hash field.
2104 InitializeNewString(result,
2105 length,
2106 Heap::kAsciiStringMapRootIndex,
2107 scratch1,
2108 scratch2);
2109 }
2110
2111
2112 void MacroAssembler::AllocateTwoByteConsString(Register result,
2113 Register length,
2114 Register scratch1,
2115 Register scratch2,
2116 Label* gc_required) {
2117 AllocateInNewSpace(ConsString::kSize,
2118 result,
2119 scratch1,
2120 scratch2,
2121 gc_required,
2122 TAG_OBJECT);
2123 InitializeNewString(result,
2124 length,
2125 Heap::kConsStringMapRootIndex,
2126 scratch1,
2127 scratch2);
2128 }
2129
2130
2131 void MacroAssembler::AllocateAsciiConsString(Register result,
2132 Register length,
2133 Register scratch1,
2134 Register scratch2,
2135 Label* gc_required) {
2136 AllocateInNewSpace(ConsString::kSize,
2137 result,
2138 scratch1,
2139 scratch2,
2140 gc_required,
2141 TAG_OBJECT);
2142 InitializeNewString(result,
2143 length,
2144 Heap::kConsAsciiStringMapRootIndex,
2145 scratch1,
2146 scratch2);
2147 }
2148
2149
2150 // Allocates a heap number or jumps to the label if the young space is full and
2151 // a scavenge is needed.
2152 void MacroAssembler::AllocateHeapNumber(Register result,
2153 Register scratch1,
2154 Register scratch2,
2155 Register heap_number_map,
2156 Label* need_gc) {
2157 // Allocate an object in the heap for the heap number and tag it as a heap
2158 // object.
2159 AllocateInNewSpace(HeapNumber::kSize,
2160 result,
2161 scratch1,
2162 scratch2,
2163 need_gc,
2164 TAG_OBJECT);
2165
2166 // Store heap number map in the allocated object.
2167 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2168 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
2169 }
2170
2171
2172 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
2173 FPURegister value,
2174 Register scratch1,
2175 Register scratch2,
2176 Label* gc_required) {
2177 LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
2178 AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
2179 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2180 }
2181
2182
2183 // Copies a fixed number of fields of heap objects from src to dst.
2184 void MacroAssembler::CopyFields(Register dst,
2185 Register src,
2186 RegList temps,
2187 int field_count) {
2188 ASSERT((temps & dst.bit()) == 0);
2189 ASSERT((temps & src.bit()) == 0);
2190 // Primitive implementation using only one temporary register.
2191
2192 Register tmp = no_reg;
2193 // Find a temp register in temps list.
2194 for (int i = 0; i < kNumRegisters; i++) {
2195 if ((temps & (1 << i)) != 0) {
2196 tmp.code_ = i;
2197 break;
2198 }
2199 }
2200 ASSERT(!tmp.is(no_reg));
2201
2202 for (int i = 0; i < field_count; i++) {
2203 lw(tmp, FieldMemOperand(src, i * kPointerSize));
2204 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
2205 }
2206 }
2207
2208
2209 void MacroAssembler::CheckMap(Register obj,
2210 Register scratch,
2211 Handle<Map> map,
2212 Label* fail,
2213 bool is_heap_object) {
2214 if (!is_heap_object) {
2215 JumpIfSmi(obj, fail);
2216 }
2217 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2218 li(at, Operand(map));
2219 Branch(fail, ne, scratch, Operand(at));
2220 }
2221
2222
2223 void MacroAssembler::CheckMap(Register obj,
2224 Register scratch,
2225 Heap::RootListIndex index,
2226 Label* fail,
2227 bool is_heap_object) {
2228 if (!is_heap_object) {
2229 JumpIfSmi(obj, fail);
2230 }
2231 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2232 LoadRoot(at, index);
2233 Branch(fail, ne, scratch, Operand(at));
2234 }
2235
2236
909 // ----------------------------------------------------------------------------- 2237 // -----------------------------------------------------------------------------
910 // JavaScript invokes 2238 // JavaScript invokes
911 2239
912 void MacroAssembler::InvokePrologue(const ParameterCount& expected, 2240 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
913 const ParameterCount& actual, 2241 const ParameterCount& actual,
914 Handle<Code> code_constant, 2242 Handle<Code> code_constant,
915 Register code_reg, 2243 Register code_reg,
916 Label* done, 2244 Label* done,
917 InvokeFlag flag) { 2245 InvokeFlag flag,
2246 PostCallGenerator* post_call_generator) {
918 bool definitely_matches = false; 2247 bool definitely_matches = false;
919 Label regular_invoke; 2248 Label regular_invoke;
920 2249
921 // Check whether the expected and actual arguments count match. If not, 2250 // Check whether the expected and actual arguments count match. If not,
922 // set up registers according to contract with ArgumentsAdaptorTrampoline: 2251 // set up registers according to contract with ArgumentsAdaptorTrampoline:
923 // a0: actual arguments count 2252 // a0: actual arguments count
924 // a1: function (passed through to callee) 2253 // a1: function (passed through to callee)
925 // a2: expected arguments count 2254 // a2: expected arguments count
926 // a3: callee code entry 2255 // a3: callee code entry
927 2256
(...skipping 14 matching lines...)
942 if (expected.immediate() == sentinel) { 2271 if (expected.immediate() == sentinel) {
943 // Don't worry about adapting arguments for builtins that 2272 // Don't worry about adapting arguments for builtins that
944 // don't want that done. Skip adaption code by making it look 2273 // don't want that done. Skip adaption code by making it look
945 // like we have a match between expected and actual number of 2274 // like we have a match between expected and actual number of
946 // arguments. 2275 // arguments.
947 definitely_matches = true; 2276 definitely_matches = true;
948 } else { 2277 } else {
949 li(a2, Operand(expected.immediate())); 2278 li(a2, Operand(expected.immediate()));
950 } 2279 }
951 } 2280 }
952 } else if (actual.is_immediate()) {
953 Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
954 li(a0, Operand(actual.immediate()));
955 } else { 2281 } else {
956 Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg())); 2282 if (actual.is_immediate()) {
2283 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
2284 li(a0, Operand(actual.immediate()));
2285 } else {
2286 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
2287 }
957 } 2288 }
958 2289
959 if (!definitely_matches) { 2290 if (!definitely_matches) {
960 if (!code_constant.is_null()) { 2291 if (!code_constant.is_null()) {
961 li(a3, Operand(code_constant)); 2292 li(a3, Operand(code_constant));
962 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); 2293 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
963 } 2294 }
964 2295
965 ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline); 2296 Handle<Code> adaptor =
2297 isolate()->builtins()->ArgumentsAdaptorTrampoline();
966 if (flag == CALL_FUNCTION) { 2298 if (flag == CALL_FUNCTION) {
967 CallBuiltin(adaptor); 2299 Call(adaptor, RelocInfo::CODE_TARGET);
968 b(done); 2300 if (post_call_generator != NULL) post_call_generator->Generate();
969 nop(); 2301 jmp(done);
970 } else { 2302 } else {
971 JumpToBuiltin(adaptor); 2303 Jump(adaptor, RelocInfo::CODE_TARGET);
972 } 2304 }
973 bind(&regular_invoke); 2305 bind(&regular_invoke);
974 } 2306 }
975 } 2307 }
976 2308
2309
977 void MacroAssembler::InvokeCode(Register code, 2310 void MacroAssembler::InvokeCode(Register code,
978 const ParameterCount& expected, 2311 const ParameterCount& expected,
979 const ParameterCount& actual, 2312 const ParameterCount& actual,
980 InvokeFlag flag) { 2313 InvokeFlag flag,
2314 PostCallGenerator* post_call_generator) {
981 Label done; 2315 Label done;
982 2316
983 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); 2317 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2318 post_call_generator);
984 if (flag == CALL_FUNCTION) { 2319 if (flag == CALL_FUNCTION) {
985 Call(code); 2320 Call(code);
986 } else { 2321 } else {
987 ASSERT(flag == JUMP_FUNCTION); 2322 ASSERT(flag == JUMP_FUNCTION);
988 Jump(code); 2323 Jump(code);
989 } 2324 }
990 // Continue here if InvokePrologue does handle the invocation due to 2325 // Continue here if InvokePrologue does handle the invocation due to
991 // mismatched parameter counts. 2326 // mismatched parameter counts.
992 bind(&done); 2327 bind(&done);
993 } 2328 }
(...skipping 13 matching lines...)
1007 Jump(code, rmode); 2342 Jump(code, rmode);
1008 } 2343 }
1009 // Continue here if InvokePrologue does handle the invocation due to 2344 // Continue here if InvokePrologue does handle the invocation due to
1010 // mismatched parameter counts. 2345 // mismatched parameter counts.
1011 bind(&done); 2346 bind(&done);
1012 } 2347 }
1013 2348
1014 2349
1015 void MacroAssembler::InvokeFunction(Register function, 2350 void MacroAssembler::InvokeFunction(Register function,
1016 const ParameterCount& actual, 2351 const ParameterCount& actual,
1017 InvokeFlag flag) { 2352 InvokeFlag flag,
2353 PostCallGenerator* post_call_generator) {
1018 // Contract with called JS functions requires that function is passed in a1. 2354 // Contract with called JS functions requires that function is passed in a1.
1019 ASSERT(function.is(a1)); 2355 ASSERT(function.is(a1));
1020 Register expected_reg = a2; 2356 Register expected_reg = a2;
1021 Register code_reg = a3; 2357 Register code_reg = a3;
1022 2358
1023 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); 2359 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1024 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 2360 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
1025 lw(expected_reg, 2361 lw(expected_reg,
1026 FieldMemOperand(code_reg, 2362 FieldMemOperand(code_reg,
1027 SharedFunctionInfo::kFormalParameterCountOffset)); 2363 SharedFunctionInfo::kFormalParameterCountOffset));
1028 lw(code_reg, 2364 sra(expected_reg, expected_reg, kSmiTagSize);
1029 MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag)); 2365 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
1030 addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
1031 2366
1032 ParameterCount expected(expected_reg); 2367 ParameterCount expected(expected_reg);
1033 InvokeCode(code_reg, expected, actual, flag); 2368 InvokeCode(code_reg, expected, actual, flag, post_call_generator);
2369 }
2370
2371
2372 void MacroAssembler::InvokeFunction(JSFunction* function,
2373 const ParameterCount& actual,
2374 InvokeFlag flag) {
2375 ASSERT(function->is_compiled());
2376
2377 // Get the function and setup the context.
2378 li(a1, Operand(Handle<JSFunction>(function)));
2379 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2380
2381 // Invoke the cached code.
2382 Handle<Code> code(function->code());
2383 ParameterCount expected(function->shared()->formal_parameter_count());
2384 if (V8::UseCrankshaft()) {
2385 UNIMPLEMENTED_MIPS();
2386 } else {
2387 InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
2388 }
2389 }
2390
2391
2392 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
2393 Register map,
2394 Register scratch,
2395 Label* fail) {
2396 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
2397 IsInstanceJSObjectType(map, scratch, fail);
2398 }
2399
2400
2401 void MacroAssembler::IsInstanceJSObjectType(Register map,
2402 Register scratch,
2403 Label* fail) {
2404 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2405 Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
2406 Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
2407 }
2408
2409
2410 void MacroAssembler::IsObjectJSStringType(Register object,
2411 Register scratch,
2412 Label* fail) {
2413 ASSERT(kNotStringTag != 0);
2414
2415 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2416 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2417 And(scratch, scratch, Operand(kIsNotStringMask));
2418 Branch(fail, ne, scratch, Operand(zero_reg));
1034 } 2419 }
1035 2420
1036 2421
1037 // --------------------------------------------------------------------------- 2422 // ---------------------------------------------------------------------------
1038 // Support functions. 2423 // Support functions.
1039 2424
1040 void MacroAssembler::GetObjectType(Register function, 2425
1041 Register map, 2426 void MacroAssembler::TryGetFunctionPrototype(Register function,
1042 Register type_reg) { 2427 Register result,
1043 lw(map, FieldMemOperand(function, HeapObject::kMapOffset)); 2428 Register scratch,
1044 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); 2429 Label* miss) {
1045 } 2430 // Check that the receiver isn't a smi.
2431 JumpIfSmi(function, miss);
2432
2433 // Check that the function really is a function. Load map into result reg.
2434 GetObjectType(function, result, scratch);
2435 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
2436
2437 // Make sure that the function has an instance prototype.
2438 Label non_instance;
2439 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2440 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2441 Branch(&non_instance, ne, scratch, Operand(zero_reg));
2442
2443 // Get the prototype or initial map from the function.
2444 lw(result,
2445 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2446
2447 // If the prototype or initial map is the hole, don't return it and
2448 // simply miss the cache instead. This will allow us to allocate a
2449 // prototype object on-demand in the runtime system.
2450 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
2451 Branch(miss, eq, result, Operand(t8));
2452
2453 // If the function does not have an initial map, we're done.
2454 Label done;
2455 GetObjectType(result, scratch, scratch);
2456 Branch(&done, ne, scratch, Operand(MAP_TYPE));
2457
2458 // Get the prototype from the initial map.
2459 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2460 jmp(&done);
2461
2462 // Non-instance prototype: Fetch prototype from constructor field
2463 // in initial map.
2464 bind(&non_instance);
2465 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
2466
2467 // All done.
2468 bind(&done);
2469 }
1046 2470
1047 2471
1048 void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) { 2472 void MacroAssembler::GetObjectType(Register object,
1049 // Load builtin address. 2473 Register map,
1050 LoadExternalReference(t9, builtin_entry); 2474 Register type_reg) {
1051 lw(t9, MemOperand(t9)); // Deref address. 2475 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
1052 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); 2476 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1053 // Call and allocate argument slots. 2477 }
1054 jalr(t9);
1055 // Use the branch delay slot to allocate argument slots.
1056 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
1057 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
1058 }
1059
1060
1061 void MacroAssembler::CallBuiltin(Register target) {
1062 // Target already holds target address.
1063 // Call and allocate argument slots.
1064 jalr(target);
1065 // Use the branch delay slot to allocate argument slots.
1066 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
1067 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
1068 }
1069
1070
1071 void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
1072 // Load builtin address.
1073 LoadExternalReference(t9, builtin_entry);
1074 lw(t9, MemOperand(t9)); // Deref address.
1075 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1076 // Call and allocate argument slots.
1077 jr(t9);
1078 // Use the branch delay slot to allocate argument slots.
1079 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
1080 }
1081
1082
1083 void MacroAssembler::JumpToBuiltin(Register target) {
1084 // t9 already holds target address.
1085 // Call and allocate argument slots.
1086 jr(t9);
1087 // Use the branch delay slot to allocate argument slots.
1088 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
1089 }
1090 2478
1091 2479
1092 // ----------------------------------------------------------------------------- 2480 // -----------------------------------------------------------------------------
1093 // Runtime calls 2481 // Runtime calls
1094 2482
1095 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, 2483 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
1096 Register r1, const Operand& r2) { 2484 Register r1, const Operand& r2) {
1097 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 2485 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1098 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); 2486 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
1099 } 2487 }
1100 2488
1101 2489
1102 void MacroAssembler::StubReturn(int argc) { 2490 void MacroAssembler::TailCallStub(CodeStub* stub) {
1103 UNIMPLEMENTED_MIPS(); 2491 ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
2492 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1104 } 2493 }
1105 2494
1106 2495
1107 void MacroAssembler::IllegalOperation(int num_arguments) { 2496 void MacroAssembler::IllegalOperation(int num_arguments) {
1108 if (num_arguments > 0) { 2497 if (num_arguments > 0) {
1109 addiu(sp, sp, num_arguments * kPointerSize); 2498 addiu(sp, sp, num_arguments * kPointerSize);
1110 } 2499 }
1111 LoadRoot(v0, Heap::kUndefinedValueRootIndex); 2500 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
1112 } 2501 }
1113 2502
1114 2503
1115 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { 2504 void MacroAssembler::IndexFromHash(Register hash,
2505 Register index) {
2506 // If the hash field contains an array index, pick it out. The assert checks
2507 // that the constants for the maximum number of digits for an array index
2508 // cached in the hash field and the number of bits reserved for it do not
2509 // conflict.
2510 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2511 (1 << String::kArrayIndexValueBits));
2512 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
2513 // the low kHashShift bits.
2514 STATIC_ASSERT(kSmiTag == 0);
2515 Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2516 sll(index, hash, kSmiTagSize);
2517 }
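Ext is a MIPS bit-field extract; the model below uses an explicit shift-and-mask and then smi-tags with a left shift. Parameter names mirror the constants mentioned in the comments and are assumptions, not V8 API:

#include <cstdint>

uint32_t IndexFromHashModel(uint32_t hash, int hash_shift,
                            int index_value_bits, int smi_tag_size) {
  uint32_t index = (hash >> hash_shift) &
                   ((1u << index_value_bits) - 1);  // Ext(hash, hash, shift, bits)
  return index << smi_tag_size;                     // sll(index, hash, kSmiTagSize)
}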
2518
2519
2520 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
2521 FPURegister result,
2522 Register scratch1,
2523 Register scratch2,
2524 Register heap_number_map,
2525 Label* not_number,
2526 ObjectToDoubleFlags flags) {
2527 Label done;
2528 if ((flags & OBJECT_NOT_SMI) == 0) {
2529 Label not_smi;
2530 JumpIfNotSmi(object, &not_smi);
2531 // Remove smi tag and convert to double.
2532 sra(scratch1, object, kSmiTagSize);
2533 mtc1(scratch1, result);
2534 cvt_d_w(result, result);
2535 Branch(&done);
2536 bind(&not_smi);
2537 }
2538 // Check for heap number and load double value from it.
2539 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
2540 Branch(not_number, ne, scratch1, Operand(heap_number_map));
2541
2542 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
2543 // If exponent is all ones the number is either a NaN or +/-Infinity.
2544 Register exponent = scratch1;
2545 Register mask_reg = scratch2;
2546 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
2547 li(mask_reg, HeapNumber::kExponentMask);
2548
2549 And(exponent, exponent, mask_reg);
2550 Branch(not_number, eq, exponent, Operand(mask_reg));
2551 }
2552 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
2553 bind(&done);
2554 }
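The AVOID_NANS_AND_INFINITIES branch relies on IEEE-754 doubles having an all-ones exponent field exactly for NaN and +/-Infinity. A sketch on the high word of a double (the 0x7FF00000 mask value is an assumption about HeapNumber::kExponentMask):

#include <cstdint>

bool IsNaNOrInfinityHighWord(uint32_t exponent_word) {
  const uint32_t kExponentMask = 0x7FF00000u;  // assumed double high-word layout
  return (exponent_word & kExponentMask) == kExponentMask;
}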
2555
2556
2557
2558 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
2559 FPURegister value,
2560 Register scratch1) {
2561 sra(scratch1, smi, kSmiTagSize);
2562 mtc1(scratch1, value);
2563 cvt_d_w(value, value);
2564 }
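On 32-bit V8 a smi stores the integer in the upper 31 bits over a zero tag bit, so untagging is a single arithmetic shift. A one-line model (assuming kSmiTagSize == 1):

#include <cstdint>

int32_t UntagSmi(int32_t smi) {
  return smi >> 1;  // sra(scratch1, smi, kSmiTagSize): sign-preserving untag
}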
2565
2566
2567 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2568 int num_arguments) {
1116 // All parameters are on the stack. v0 has the return value after call. 2569 // All parameters are on the stack. v0 has the return value after call.
1117 2570
1118 // If the expected number of arguments of the runtime function is 2571 // If the expected number of arguments of the runtime function is
1119 // constant, we check that the actual number of arguments match the 2572 // constant, we check that the actual number of arguments match the
1120 // expectation. 2573 // expectation.
1121 if (f->nargs >= 0 && f->nargs != num_arguments) { 2574 if (f->nargs >= 0 && f->nargs != num_arguments) {
1122 IllegalOperation(num_arguments); 2575 IllegalOperation(num_arguments);
1123 return; 2576 return;
1124 } 2577 }
1125 2578
1126 // TODO(1236192): Most runtime routines don't need the number of 2579 // TODO(1236192): Most runtime routines don't need the number of
1127 // arguments passed in because it is constant. At some point we 2580 // arguments passed in because it is constant. At some point we
1128 // should remove this need and make the runtime routine entry code 2581 // should remove this need and make the runtime routine entry code
1129 // smarter. 2582 // smarter.
1130 li(a0, num_arguments); 2583 li(a0, num_arguments);
1131 LoadExternalReference(a1, ExternalReference(f)); 2584 li(a1, Operand(ExternalReference(f, isolate())));
1132 CEntryStub stub(1); 2585 CEntryStub stub(1);
1133 CallStub(&stub); 2586 CallStub(&stub);
1134 } 2587 }
1135 2588
1136 2589
2590 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
2591 const Runtime::Function* function = Runtime::FunctionForId(id);
2592 li(a0, Operand(function->nargs));
2593 li(a1, Operand(ExternalReference(function, isolate())));
2594 CEntryStub stub(1);
2595 stub.SaveDoubles();
2596 CallStub(&stub);
2597 }
2598
2599
1137 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { 2600 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
1138 CallRuntime(Runtime::FunctionForId(fid), num_arguments); 2601 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
1139 } 2602 }
1140 2603
1141 2604
2605 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2606 int num_arguments) {
2607 li(a0, Operand(num_arguments));
2608 li(a1, Operand(ext));
2609
2610 CEntryStub stub(1);
2611 CallStub(&stub);
2612 }
2613
2614
1142 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, 2615 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1143 int num_arguments, 2616 int num_arguments,
1144 int result_size) { 2617 int result_size) {
1145 UNIMPLEMENTED_MIPS(); 2618 // TODO(1236192): Most runtime routines don't need the number of
2619 // arguments passed in because it is constant. At some point we
2620 // should remove this need and make the runtime routine entry code
2621 // smarter.
2622 li(a0, Operand(num_arguments));
2623 JumpToExternalReference(ext);
1146 } 2624 }
1147 2625
1148 2626
1149 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, 2627 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1150 int num_arguments, 2628 int num_arguments,
1151 int result_size) { 2629 int result_size) {
1152 TailCallExternalReference(ExternalReference(fid), num_arguments, result_size); 2630 TailCallExternalReference(ExternalReference(fid, isolate()),
2631 num_arguments,
2632 result_size);
1153 } 2633 }
1154 2634
1155 2635
1156 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { 2636 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1157 UNIMPLEMENTED_MIPS(); 2637 li(a1, Operand(builtin));
1158 } 2638 CEntryStub stub(1);
1159 2639 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1160
1161 Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
1162 bool* resolved) {
1163 UNIMPLEMENTED_MIPS();
1164 return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN
1165 } 2640 }
1166 2641
1167 2642
1168 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 2643 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
1169 InvokeJSFlags flags) { 2644 InvokeJSFlags flags,
1170 UNIMPLEMENTED_MIPS(); 2645 PostCallGenerator* post_call_generator) {
2646 GetBuiltinEntry(t9, id);
2647 if (flags == CALL_JS) {
2648 Call(t9);
2649 if (post_call_generator != NULL) post_call_generator->Generate();
2650 } else {
2651 ASSERT(flags == JUMP_JS);
2652 Jump(t9);
2653 }
2654 }
2655
2656
2657 void MacroAssembler::GetBuiltinFunction(Register target,
2658 Builtins::JavaScript id) {
2659 // Load the builtins object into target register.
2660 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2661 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2662 // Load the JavaScript builtin function from the builtins object.
2663 lw(target, FieldMemOperand(target,
2664 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
1171 } 2665 }
1172 2666
1173 2667
1174 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { 2668 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
1175 UNIMPLEMENTED_MIPS(); 2669 ASSERT(!target.is(a1));
2670 GetBuiltinFunction(a1, id);
2671 // Load the code entry point from the builtins object.
2672 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
1176 } 2673 }
1177 2674
1178 2675
1179 void MacroAssembler::SetCounter(StatsCounter* counter, int value, 2676 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
1180 Register scratch1, Register scratch2) { 2677 Register scratch1, Register scratch2) {
1181 UNIMPLEMENTED_MIPS(); 2678 if (FLAG_native_code_counters && counter->Enabled()) {
2679 li(scratch1, Operand(value));
2680 li(scratch2, Operand(ExternalReference(counter)));
2681 sw(scratch1, MemOperand(scratch2));
2682 }
1182 } 2683 }
1183 2684
1184 2685
1185 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, 2686 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1186 Register scratch1, Register scratch2) { 2687 Register scratch1, Register scratch2) {
1187 UNIMPLEMENTED_MIPS(); 2688 ASSERT(value > 0);
2689 if (FLAG_native_code_counters && counter->Enabled()) {
2690 li(scratch2, Operand(ExternalReference(counter)));
2691 lw(scratch1, MemOperand(scratch2));
2692 Addu(scratch1, scratch1, Operand(value));
2693 sw(scratch1, MemOperand(scratch2));
2694 }
1188 } 2695 }
1189 2696
1190 2697
1191 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, 2698 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1192 Register scratch1, Register scratch2) { 2699 Register scratch1, Register scratch2) {
1193 UNIMPLEMENTED_MIPS(); 2700 ASSERT(value > 0);
2701 if (FLAG_native_code_counters && counter->Enabled()) {
2702 li(scratch2, Operand(ExternalReference(counter)));
2703 lw(scratch1, MemOperand(scratch2));
2704 Subu(scratch1, scratch1, Operand(value));
2705 sw(scratch1, MemOperand(scratch2));
2706 }
1194 } 2707 }
1195 2708
1196 2709
1197 // ----------------------------------------------------------------------------- 2710 // -----------------------------------------------------------------------------
1198 // Debugging 2711 // Debugging
1199 2712
1200 void MacroAssembler::Assert(Condition cc, const char* msg, 2713 void MacroAssembler::Assert(Condition cc, const char* msg,
1201 Register rs, Operand rt) { 2714 Register rs, Operand rt) {
1202 UNIMPLEMENTED_MIPS(); 2715 if (FLAG_debug_code)
2716 Check(cc, msg, rs, rt);
2717 }
2718
2719
2720 void MacroAssembler::AssertRegisterIsRoot(Register reg,
2721 Heap::RootListIndex index) {
2722 if (FLAG_debug_code) {
2723 LoadRoot(at, index);
2724 Check(eq, "Register did not match expected root", reg, Operand(at));
2725 }
2726 }
2727
2728
2729 void MacroAssembler::AssertFastElements(Register elements) {
2730 if (FLAG_debug_code) {
2731 ASSERT(!elements.is(at));
2732 Label ok;
2733 Push(elements);
2734 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2735 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2736 Branch(&ok, eq, elements, Operand(at));
2737 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
2738 Branch(&ok, eq, elements, Operand(at));
2739 Abort("JSObject with fast elements map has slow elements");
2740 bind(&ok);
2741 Pop(elements);
2742 }
1203 } 2743 }
1204 2744
1205 2745
1206 void MacroAssembler::Check(Condition cc, const char* msg, 2746 void MacroAssembler::Check(Condition cc, const char* msg,
1207 Register rs, Operand rt) { 2747 Register rs, Operand rt) {
1208 UNIMPLEMENTED_MIPS(); 2748 Label L;
2749 Branch(&L, cc, rs, rt);
2750 Abort(msg);
2751 // Will not return here.
2752 bind(&L);
1209 } 2753 }
1210 2754
1211 2755
1212 void MacroAssembler::Abort(const char* msg) { 2756 void MacroAssembler::Abort(const char* msg) {
1213 UNIMPLEMENTED_MIPS(); 2757 Label abort_start;
2758 bind(&abort_start);
2759 // We want to pass the msg string like a smi to avoid GC
2760 // problems, however msg is not guaranteed to be aligned
2761 // properly. Instead, we pass an aligned pointer that is
2762 // a proper v8 smi, but also pass the alignment difference
2763 // from the real pointer as a smi.
2764 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2765 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2766 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2767 #ifdef DEBUG
2768 if (msg != NULL) {
2769 RecordComment("Abort message: ");
2770 RecordComment(msg);
2771 }
2772 #endif
2773 // Disable stub call restrictions to always allow calls to abort.
2774 AllowStubCallsScope allow_scope(this, true);
2775
2776 li(a0, Operand(p0));
2777 Push(a0);
2778 li(a0, Operand(Smi::FromInt(p1 - p0)));
2779 Push(a0);
2780 CallRuntime(Runtime::kAbort, 2);
2781 // Will not return here.
2782 if (is_trampoline_pool_blocked()) {
2783 // If the calling code cares about the exact number of
2784 // instructions generated, we insert padding here to keep the size
2785 // of the Abort macro constant.
2786 // Currently in debug mode with debug_code enabled the number of
2787 // generated instructions is 14, so we use this as a maximum value.
2788 static const int kExpectedAbortInstructions = 14;
2789 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2790 ASSERT(abort_instructions <= kExpectedAbortInstructions);
2791 while (abort_instructions++ < kExpectedAbortInstructions) {
2792 nop();
2793 }
2794 }
2795 }
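A model of the pointer-smuggling arithmetic in Abort (illustrative, not V8 code): p0 is msg with its low tag bit cleared so it can be pushed as a value that passes IsSmi(), and the lost difference p1 - p0 travels separately as Smi::FromInt:

#include <cstdint>

void SplitMessagePointer(const char* msg, intptr_t* p0, intptr_t* delta) {
  const intptr_t kSmiTagMask = 1;  // assumed 32-bit smi tagging
  const intptr_t kSmiTag = 0;
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  *p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // aligned value that looks like a smi
  *delta = p1 - *p0;                    // 0 or 1; reassembled by Runtime::kAbort
}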
2796
2797
2798 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2799 if (context_chain_length > 0) {
2800 // Move up the chain of contexts to the context containing the slot.
2801 lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
2802 // Load the function context (which is the incoming, outer context).
2803 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2804 for (int i = 1; i < context_chain_length; i++) {
2805 lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2806 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2807 }
2808 // The context may be an intermediate context, not a function context.
2809 lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2810 } else { // Slot is in the current function context.
2811 // The context may be an intermediate context, not a function context.
2812 lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2813 }
2814 }
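A hedged model of the chain walk in LoadContext (the types are illustrative stand-ins, not V8 classes): each hop loads the CLOSURE_INDEX slot of the current context and then that closure's context field:

struct ContextModel;

struct JSFunctionModel {
  ContextModel* context;     // JSFunction::kContextOffset
};

struct ContextModel {
  JSFunctionModel* closure;  // Context::CLOSURE_INDEX slot
};

ContextModel* WalkContextChain(ContextModel* cp, int chain_length) {
  ContextModel* dst = cp;
  for (int i = 0; i < chain_length; i++) {
    dst = dst->closure->context;  // lw the closure slot, then its context field
  }
  return dst;  // the FCONTEXT slot of dst then gives the function context
}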
2815
2816
2817 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2818 // Load the global or builtins object from the current context.
2819 lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2820 // Load the global context from the global or builtins object.
2821 lw(function, FieldMemOperand(function,
2822 GlobalObject::kGlobalContextOffset));
2823 // Load the function from the global context.
2824 lw(function, MemOperand(function, Context::SlotOffset(index)));
2825 }
2826
2827
2828 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2829 Register map,
2830 Register scratch) {
2831 // Load the initial map. The global functions all have initial maps.
2832 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2833 if (FLAG_debug_code) {
2834 Label ok, fail;
2835 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
2836 Branch(&ok);
2837 bind(&fail);
2838 Abort("Global functions must have initial map");
2839 bind(&ok);
2840 }
1214 } 2841 }
1215 2842
1216 2843
1217 void MacroAssembler::EnterFrame(StackFrame::Type type) { 2844 void MacroAssembler::EnterFrame(StackFrame::Type type) {
1218 addiu(sp, sp, -5 * kPointerSize); 2845 addiu(sp, sp, -5 * kPointerSize);
1219 li(t0, Operand(Smi::FromInt(type))); 2846 li(t8, Operand(Smi::FromInt(type)));
1220 li(t1, Operand(CodeObject())); 2847 li(t9, Operand(CodeObject()));
1221 sw(ra, MemOperand(sp, 4 * kPointerSize)); 2848 sw(ra, MemOperand(sp, 4 * kPointerSize));
1222 sw(fp, MemOperand(sp, 3 * kPointerSize)); 2849 sw(fp, MemOperand(sp, 3 * kPointerSize));
1223 sw(cp, MemOperand(sp, 2 * kPointerSize)); 2850 sw(cp, MemOperand(sp, 2 * kPointerSize));
1224 sw(t0, MemOperand(sp, 1 * kPointerSize)); 2851 sw(t8, MemOperand(sp, 1 * kPointerSize));
1225 sw(t1, MemOperand(sp, 0 * kPointerSize)); 2852 sw(t9, MemOperand(sp, 0 * kPointerSize));
1226 addiu(fp, sp, 3 * kPointerSize); 2853 addiu(fp, sp, 3 * kPointerSize);
1227 } 2854 }
1228 2855
1229 2856
1230 void MacroAssembler::LeaveFrame(StackFrame::Type type) { 2857 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
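// Restores sp from fp, reloads the caller's fp and ra from the two
// slots at and above fp, and pops them; cp, the type word, and the
// code slot below fp are simply discarded.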
1231 mov(sp, fp); 2858 mov(sp, fp);
1232 lw(fp, MemOperand(sp, 0 * kPointerSize)); 2859 lw(fp, MemOperand(sp, 0 * kPointerSize));
1233 lw(ra, MemOperand(sp, 1 * kPointerSize)); 2860 lw(ra, MemOperand(sp, 1 * kPointerSize));
1234 addiu(sp, sp, 2 * kPointerSize); 2861 addiu(sp, sp, 2 * kPointerSize);
1235 } 2862 }
1236 2863
1237 2864
1238 void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, 2865 void MacroAssembler::EnterExitFrame(Register hold_argc,
1239 Register hold_argc,
1240 Register hold_argv, 2866 Register hold_argv,
1241 Register hold_function) { 2867 Register hold_function,
1242 // Compute the argv pointer and keep it in a callee-saved register. 2868 bool save_doubles) {
1243 // a0 is argc. 2869 // a0 is argc.
1244 sll(t0, a0, kPointerSizeLog2); 2870 sll(t8, a0, kPointerSizeLog2);
1245 add(hold_argv, sp, t0); 2871 addu(hold_argv, sp, t8);
1246 addi(hold_argv, hold_argv, -kPointerSize); 2872 addiu(hold_argv, hold_argv, -kPointerSize);
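// hold_argv is now sp + (argc - 1) * kPointerSize, i.e. the
// highest-addressed argument slot.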
1247 2873
1248 // Compute callee's stack pointer before making changes and save it as 2874 // Compute callee's stack pointer before making changes and save it as
1249 // t1 register so that it is restored as sp register on exit, thereby 2875 // t9 register so that it is restored as sp register on exit, thereby
1250 // popping the args. 2876 // popping the args.
1251 // t1 = sp + kPointerSize * #args 2877 // t9 = sp + kPointerSize * #args
1252 add(t1, sp, t0); 2878 addu(t9, sp, t8);
2879
2880 // Compute the argv pointer and keep it in a callee-saved register.
2881 // This only seems to be needed for crankshaft and may cause problems
2882 // so it's disabled for now.
2883 // Subu(s6, t9, Operand(kPointerSize));
1253 2884
1254 // Align the stack at this point. 2885 // Align the stack at this point.
1255 AlignStack(0); 2886 AlignStack(0);
1256 2887
1257 // Save registers. 2888 // Save registers.
1258 addiu(sp, sp, -12); 2889 addiu(sp, sp, -12);
1259 sw(t1, MemOperand(sp, 8)); 2890 sw(t9, MemOperand(sp, 8));
1260 sw(ra, MemOperand(sp, 4)); 2891 sw(ra, MemOperand(sp, 4));
1261 sw(fp, MemOperand(sp, 0)); 2892 sw(fp, MemOperand(sp, 0));
1262 mov(fp, sp); // Set up new frame pointer. 2893 mov(fp, sp); // Set up new frame pointer.
1263 2894
1264 // Push debug marker. 2895 li(t8, Operand(CodeObject()));
1265 if (mode == ExitFrame::MODE_DEBUG) { 2896 Push(t8); // Accessed from ExitFrame::code_slot.
1266 Push(zero_reg);
1267 } else {
1268 li(t0, Operand(CodeObject()));
1269 Push(t0);
1270 }
1271 2897
1272 // Save the frame pointer and the context in top. 2898 // Save the frame pointer and the context in top.
1273 LoadExternalReference(t0, ExternalReference(Isolate::k_c_entry_fp_address)); 2899 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
1274 sw(fp, MemOperand(t0)); 2900 sw(fp, MemOperand(t8));
1275 LoadExternalReference(t0, ExternalReference(Isolate::k_context_address)); 2901 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
1276 sw(cp, MemOperand(t0)); 2902 sw(cp, MemOperand(t8));
1277 2903
1278 // Set up argc and the builtin function in callee-saved registers. 2904 // Set up argc and the builtin function in callee-saved registers.
1279 mov(hold_argc, a0); 2905 mov(hold_argc, a0);
1280 mov(hold_function, a1); 2906 mov(hold_function, a1);
2907
2908 // Optionally save all double registers.
2909 if (save_doubles) {
2910 #ifdef DEBUG
2911 int frame_alignment = ActivationFrameAlignment();
2912 #endif
2913 // The stack alignment code above made sp unaligned, so add space for one
2914 // more double register and use aligned addresses.
2915 ASSERT(kDoubleSize == frame_alignment);
2916 // Mark the frame as containing doubles by pushing a non-valid return
2917 // address, i.e. 0.
2918 ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
2919 push(zero_reg); // Marker and alignment word.
2920 int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
2921 Subu(sp, sp, Operand(space));
2922 // Remember: we only need to save every 2nd double FPU value.
2923 for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
2924 FPURegister reg = FPURegister::from_code(i);
2925 sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
2926 }
2927 // Note that f0 will be accessible at fp - 2*kPointerSize -
2928 // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
2929 // alignment word were pushed after the fp.
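// (Worked example, assuming kDoubleSize == 8 and kPointerSize == 4:
// f0 is stored at sp + 4, f2 at sp + 20, ..., f30 at sp + 244.)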
2930 }
1281 } 2931 }
1282 2932
1283 2933
1284 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { 2934 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
2935 // Optionally restore all double registers.
2936 if (save_doubles) {
2937 // TODO(regis): Use vldrm instruction.
2938 // Remember: we only need to restore every 2nd double FPU value.
2939 for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
2940 FPURegister reg = FPURegister::from_code(i);
2941 // Register f30-f31 is just below the marker.
2942 const int offset = ExitFrameConstants::kMarkerOffset;
2943 ldc1(reg, MemOperand(fp,
2944 (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
2945 }
2946 }
2947
1285 // Clear top frame. 2948 // Clear top frame.
1286 LoadExternalReference(t0, ExternalReference(Isolate::k_c_entry_fp_address)); 2949 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
1287 sw(zero_reg, MemOperand(t0)); 2950 sw(zero_reg, MemOperand(t8));
1288 2951
1289 // Restore current context from top and clear it in debug mode. 2952 // Restore current context from top and clear it in debug mode.
1290 LoadExternalReference(t0, ExternalReference(Isolate::k_context_address)); 2953 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
1291 lw(cp, MemOperand(t0)); 2954 lw(cp, MemOperand(t8));
1292 #ifdef DEBUG 2955 #ifdef DEBUG
1293 sw(a3, MemOperand(t0)); 2956 sw(a3, MemOperand(t8));
1294 #endif 2957 #endif
1295 2958
1296 // Pop the arguments, restore registers, and return. 2959 // Pop the arguments, restore registers, and return.
1297 mov(sp, fp); // Respect ABI stack constraint. 2960 mov(sp, fp); // Respect ABI stack constraint.
1298 lw(fp, MemOperand(sp, 0)); 2961 lw(fp, MemOperand(sp, 0));
1299 lw(ra, MemOperand(sp, 4)); 2962 lw(ra, MemOperand(sp, 4));
1300 lw(sp, MemOperand(sp, 8)); 2963 lw(sp, MemOperand(sp, 8));
1301 jr(ra); 2964 jr(ra);
1302 nop(); // Branch delay slot nop. 2965 nop(); // Branch delay slot nop.
1303 } 2966 }
1304 2967
1305 2968
2969 void MacroAssembler::InitializeNewString(Register string,
2970 Register length,
2971 Heap::RootListIndex map_index,
2972 Register scratch1,
2973 Register scratch2) {
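// Fills in the header of a freshly allocated string: the smi-tagged
// length, the map selected by map_index, and an empty hash field.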
2974 sll(scratch1, length, kSmiTagSize);
2975 LoadRoot(scratch2, map_index);
2976 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
2977 li(scratch1, Operand(String::kEmptyHashField));
2978 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
2979 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
2980 }
2981
2982
2983 int MacroAssembler::ActivationFrameAlignment() {
2984 #if defined(V8_HOST_ARCH_MIPS)
2985 // Running on the real platform. Use the alignment as mandated by the local
2986 // environment.
2987 // Note: This will break if we ever start generating snapshots on one MIPS
2988 // platform for another MIPS platform with a different alignment.
2989 return OS::ActivationFrameAlignment();
2990 #else // defined(V8_HOST_ARCH_MIPS)
2991 // If we are using the simulator then we should always align to the expected
2992 // alignment. As the simulator is used to generate snapshots we do not know
2993 // if the target platform will need alignment, so this is controlled from a
2994 // flag.
2995 return FLAG_sim_stack_alignment;
2996 #endif // defined(V8_HOST_ARCH_MIPS)
2997 }
2998
2999
1306 void MacroAssembler::AlignStack(int offset) { 3000 void MacroAssembler::AlignStack(int offset) {
1307 // On MIPS an offset of 0 aligns to 0 modulo 8 bytes, 3001 // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
1308 // and an offset of 1 aligns to 4 modulo 8 bytes. 3002 // and an offset of 1 aligns to 4 modulo 8 bytes.
3003 #if defined(V8_HOST_ARCH_MIPS)
3004 // Running on the real platform. Use the alignment as mandated by the local
3005 // environment.
3006 // Note: This will break if we ever start generating snapshots on one MIPS
3007 // platform for another MIPS platform with a different alignment.
1309 int activation_frame_alignment = OS::ActivationFrameAlignment(); 3008 int activation_frame_alignment = OS::ActivationFrameAlignment();
3009 #else // defined(V8_HOST_ARCH_MIPS)
3010 // If we are using the simulator then we should always align to the expected
3011 // alignment. As the simulator is used to generate snapshots we do not know
3012 // if the target platform will need alignment, so we will always align at
3013 // this point.
3014 int activation_frame_alignment = 2 * kPointerSize;
3015 #endif // defined(V8_HOST_ARCH_MIPS)
1310 if (activation_frame_alignment != kPointerSize) { 3016 if (activation_frame_alignment != kPointerSize) {
1311 // This code needs to be made more general if this assert doesn't hold. 3017 // This code needs to be made more general if this assert doesn't hold.
1312 ASSERT(activation_frame_alignment == 2 * kPointerSize); 3018 ASSERT(activation_frame_alignment == 2 * kPointerSize);
1313 if (offset == 0) { 3019 if (offset == 0) {
1314 andi(t0, sp, activation_frame_alignment - 1); 3020 andi(t8, sp, activation_frame_alignment - 1);
1315 Push(zero_reg, eq, t0, zero_reg); 3021 Push(zero_reg, eq, t8, zero_reg);
1316 } else { 3022 } else {
1317 andi(t0, sp, activation_frame_alignment - 1); 3023 andi(t8, sp, activation_frame_alignment - 1);
1318 addiu(t0, t0, -4); 3024 addiu(t8, t8, -4);
1319 Push(zero_reg, eq, t0, zero_reg); 3025 Push(zero_reg, eq, t8, zero_reg);
1320 } 3026 }
1321 } 3027 }
1322 } 3028 }
1323 3029
3030
3031
3032 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
3033 Register reg,
3034 Register scratch,
3035 Label* not_power_of_two_or_zero) {
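// A power of two has exactly one bit set, so reg & (reg - 1) == 0;
// the first branch catches zero (and negative values), for which
// reg - 1 is negative.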
3036 Subu(scratch, reg, Operand(1));
3037 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
3038 scratch, Operand(zero_reg));
3039 and_(at, scratch, reg); // In the delay slot.
3040 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
3041 }
3042
3043
3044 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
3045 Register reg2,
3046 Label* on_not_both_smi) {
3047 STATIC_ASSERT(kSmiTag == 0);
3048 ASSERT_EQ(1, kSmiTagMask);
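// Smis have a clear tag bit, so the OR of the tag bits is nonzero
// iff at least one operand is not a smi.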
3049 or_(at, reg1, reg2);
3050 andi(at, at, kSmiTagMask);
3051 Branch(on_not_both_smi, ne, at, Operand(zero_reg));
3052 }
3053
3054
3055 void MacroAssembler::JumpIfEitherSmi(Register reg1,
3056 Register reg2,
3057 Label* on_either_smi) {
3058 STATIC_ASSERT(kSmiTag == 0);
3059 ASSERT_EQ(1, kSmiTagMask);
3060 // The AND of the tag bits is zero iff at least one operand is a smi.
3061 and_(at, reg1, reg2);
3062 andi(at, at, kSmiTagMask);
3063 Branch(on_either_smi, eq, at, Operand(zero_reg));
3064 }
3065
3066
3067 void MacroAssembler::AbortIfSmi(Register object) {
3068 STATIC_ASSERT(kSmiTag == 0);
3069 andi(at, object, kSmiTagMask);
3070 Assert(ne, "Operand is a smi", at, Operand(zero_reg));
3071 }
3072
3073
3074 void MacroAssembler::AbortIfNotSmi(Register object) {
3075 STATIC_ASSERT(kSmiTag == 0);
3076 andi(at, object, kSmiTagMask);
3077 Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
3078 }
3079
3080
3081 void MacroAssembler::AbortIfNotRootValue(Register src,
3082 Heap::RootListIndex root_value_index,
3083 const char* message) {
3084 ASSERT(!src.is(at));
3085 LoadRoot(at, root_value_index);
3086 Assert(eq, message, src, Operand(at));
3087 }
3088
3089
3090 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3091 Register heap_number_map,
3092 Register scratch,
3093 Label* on_not_heap_number) {
3094 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3095 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3096 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
3097 }
3098
3099
3100 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3101 Register first,
3102 Register second,
3103 Register scratch1,
3104 Register scratch2,
3105 Label* failure) {
3106 // Test that both first and second are sequential ASCII strings.
3107 // Assume that they are non-smis.
3108 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3109 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3110 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3111 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3112
3113 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3114 scratch2,
3115 scratch1,
3116 scratch2,
3117 failure);
3118 }
3119
3120
3121 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3122 Register second,
3123 Register scratch1,
3124 Register scratch2,
3125 Label* failure) {
3126 // Check that neither is a smi.
3127 STATIC_ASSERT(kSmiTag == 0);
3128 And(scratch1, first, Operand(second));
3129 And(scratch1, scratch1, Operand(kSmiTagMask));
3130 Branch(failure, eq, scratch1, Operand(zero_reg));
3131 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3132 second,
3133 scratch1,
3134 scratch2,
3135 failure);
3136 }
3137
3138
3139 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3140 Register first,
3141 Register second,
3142 Register scratch1,
3143 Register scratch2,
3144 Label* failure) {
3145 int kFlatAsciiStringMask =
3146 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3147 int kFlatAsciiStringTag = ASCII_STRING_TYPE;
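// A sequential ASCII string must match ASCII_STRING_TYPE exactly in
// the string, encoding, and representation fields selected above.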
3148 ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits a 16-bit immediate.
3149 andi(scratch1, first, kFlatAsciiStringMask);
3150 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
3151 andi(scratch2, second, kFlatAsciiStringMask);
3152 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
3153 }
3154
3155
3156 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3157 Register scratch,
3158 Label* failure) {
3159 int kFlatAsciiStringMask =
3160 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3161 int kFlatAsciiStringTag = ASCII_STRING_TYPE;
3162 And(scratch, type, Operand(kFlatAsciiStringMask));
3163 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
3164 }
3165
3166
3167 static const int kRegisterPassedArguments = 4;
3168
3169 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
3170 int frame_alignment = ActivationFrameAlignment();
3171
3172 // Reserve space for the Isolate address, which is always passed as the last parameter.
3173 num_arguments += 1;
3174
3175 // Up to four simple arguments are passed in registers a0..a3.
3176 // Those four arguments must have reserved argument slots on the stack for
3177 // mips, even though those argument slots are not normally used.
3178 // Remaining arguments are pushed on the stack, above (higher address than)
3179 // the argument slots.
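// (Worked example, assuming kCArgsSlotsSize == 4 * kPointerSize: with
// 6 arguments including the Isolate, two words are passed on the stack
// above the four reserved slots, so stack_passed_arguments == 2 + 4.)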
3180 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
3181 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
3182 0 : num_arguments - kRegisterPassedArguments) +
3183 (StandardFrameConstants::kCArgsSlotsSize /
3184 kPointerSize);
3185 if (frame_alignment > kPointerSize) {
3186 // Make stack end at alignment and make room for num_arguments - 4 words
3187 // and the original value of sp.
3188 mov(scratch, sp);
3189 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3190 ASSERT(IsPowerOf2(frame_alignment));
3191 And(sp, sp, Operand(-frame_alignment));
3192 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3193 } else {
3194 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3195 }
3196 }
3197
3198
3199 void MacroAssembler::CallCFunction(ExternalReference function,
3200 int num_arguments) {
3201 CallCFunctionHelper(no_reg, function, at, num_arguments);
3202 }
3203
3204
3205 void MacroAssembler::CallCFunction(Register function,
3206 Register scratch,
3207 int num_arguments) {
3208 CallCFunctionHelper(function,
3209 ExternalReference::the_hole_value_location(isolate()),
3210 scratch,
3211 num_arguments);
3212 }
3213
3214
3215 void MacroAssembler::CallCFunctionHelper(Register function,
3216 ExternalReference function_reference,
3217 Register scratch,
3218 int num_arguments) {
3219 // Pass the Isolate address as the last argument.
3220 if (num_arguments < kRegisterPassedArguments) {
3221 Register arg_to_reg[] = {a0, a1, a2, a3};
3222 Register r = arg_to_reg[num_arguments];
3223 li(r, Operand(ExternalReference::isolate_address()));
3224 } else {
3225 int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
3226 (StandardFrameConstants::kCArgsSlotsSize /
3227 kPointerSize);
3228 // Push Isolate address on the stack after the arguments.
3229 li(scratch, Operand(ExternalReference::isolate_address()));
3230 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3231 }
3232 num_arguments += 1;
3233
3234 // Make sure that the stack is aligned before calling a C function unless
3235 // running in the simulator. The simulator has its own alignment check which
3236 // provides more information.
3237 // The argument slots are presumed to have been set up by
3238 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
3239
3240 #if defined(V8_HOST_ARCH_MIPS)
3241 if (emit_debug_code()) {
3242 int frame_alignment = OS::ActivationFrameAlignment();
3243 int frame_alignment_mask = frame_alignment - 1;
3244 if (frame_alignment > kPointerSize) {
3245 ASSERT(IsPowerOf2(frame_alignment));
3246 Label alignment_as_expected;
3247 And(at, sp, Operand(frame_alignment_mask));
3248 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
3249 // Don't use Check here, as it will call Runtime_Abort possibly
3250 // re-entering here.
3251 stop("Unexpected alignment in CallCFunction");
3252 bind(&alignment_as_expected);
3253 }
3254 }
3255 #endif // V8_HOST_ARCH_MIPS
3256
3257 // Just call directly. The function called cannot cause a GC, or
3258 // allow preemption, so the return address in the ra register
3259 // stays correct.
3260 if (function.is(no_reg)) {
3261 // Called through an ExternalReference rather than a register.
3262 li(t9, Operand(function_reference));
3263 function = t9;
3264 } else if (!function.is(t9)) {
3265 // Per the MIPS ABI, indirect C calls must go through t9.
3266 mov(t9, function);
3267 function = t9;
3268 }
3269
3270 Call(function);
3271
3272 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
3273 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
3274 0 : num_arguments - kRegisterPassedArguments) +
3275 (StandardFrameConstants::kCArgsSlotsSize /
3276 kPointerSize);
3277
3278 if (OS::ActivationFrameAlignment() > kPointerSize) {
3279 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3280 } else {
3281 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3282 }
3283 }
3284
3285
3286 #undef BRANCH_ARGS_CHECK
3287
3288
3289 #ifdef ENABLE_DEBUGGER_SUPPORT
3290 CodePatcher::CodePatcher(byte* address, int instructions)
3291 : address_(address),
3292 instructions_(instructions),
3293 size_(instructions * Assembler::kInstrSize),
3294 masm_(address, size_ + Assembler::kGap) {
3295 // Create a new macro assembler pointing to the address of the code to patch.
3296 // The size is adjusted by kGap in order for the assembler to generate size
3297 // bytes of instructions without failing with buffer size constraints.
3298 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3299 }
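// Illustrative usage (assumed, not part of this change): overwrite two
// instructions at 'address' and let the destructor flush the i-cache.
//   CodePatcher patcher(address, 2);
//   patcher.masm()->nop();
//   patcher.masm()->nop();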
3300
3301
3302 CodePatcher::~CodePatcher() {
3303 // Indicate that code has changed.
3304 CPU::FlushICache(address_, size_);
3305
3306 // Check that the code was patched as expected.
3307 ASSERT(masm_.pc_ == address_ + size_);
3308 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3309 }
3310
3311
3312 void CodePatcher::Emit(Instr x) {
3313 masm()->emit(x);
3314 }
3315
3316
3317 void CodePatcher::Emit(Address addr) {
3318 masm()->emit(reinterpret_cast<Instr>(addr));
3319 }
3320
3321
3322 #endif // ENABLE_DEBUGGER_SUPPORT
3323
3324
1324 } } // namespace v8::internal 3325 } } // namespace v8::internal
1325 3326
1326 #endif // V8_TARGET_ARCH_MIPS 3327 #endif // V8_TARGET_ARCH_MIPS