Chromium Code Reviews

Side by Side Diff: src/mips/macro-assembler-mips.cc

Issue 6965006: Update mips infrastructure files. (Closed) Base URL: http://github.com/v8/v8.git@bleeding_edge
Patch Set: Fix additional style issues. Created 9 years, 7 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include <limits.h> // For LONG_MIN, LONG_MAX 28 #include <limits.h> // For LONG_MIN, LONG_MAX.
29 29
30 #include "v8.h" 30 #include "v8.h"
31 31
32 #if defined(V8_TARGET_ARCH_MIPS) 32 #if defined(V8_TARGET_ARCH_MIPS)
33 33
34 #include "bootstrapper.h" 34 #include "bootstrapper.h"
35 #include "codegen-inl.h" 35 #include "codegen.h"
36 #include "debug.h" 36 #include "debug.h"
37 #include "runtime.h" 37 #include "runtime.h"
38 38
39 namespace v8 { 39 namespace v8 {
40 namespace internal { 40 namespace internal {
41 41
42 MacroAssembler::MacroAssembler(void* buffer, int size) 42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
43 : Assembler(buffer, size), 43 : Assembler(arg_isolate, buffer, size),
44 generating_stub_(false), 44 generating_stub_(false),
45 allow_stub_calls_(true), 45 allow_stub_calls_(true) {
46 code_object_(HEAP->undefined_value()) { 46 if (isolate() != NULL) {
47 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
48 isolate());
49 }
47 } 50 }
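Note: the constructor now threads the Isolate through explicitly instead of reaching for the HEAP accessor, and only initializes code_object_ when an isolate is actually available (the NULL case presumably covers assemblers created before isolate setup). A minimal call-site sketch, with the buffer name hypothetical:

    // Hypothetical call site: the isolate is passed in explicitly rather
    // than looked up through a global accessor.
    static byte buffer[256];
    MacroAssembler masm(Isolate::Current(), buffer, sizeof(buffer));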
48 51
49 52
50 // Arguments macros 53 // Arguments macros.
51 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 54 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
52 #define COND_ARGS cond, r1, r2 55 #define COND_ARGS cond, r1, r2
53 56
54 #define REGISTER_TARGET_BODY(Name) \ 57 #define REGISTER_TARGET_BODY(Name) \
55 void MacroAssembler::Name(Register target, \ 58 void MacroAssembler::Name(Register target, \
56 BranchDelaySlot bd) { \ 59 BranchDelaySlot bd) { \
57 Name(Operand(target), bd); \ 60 Name(Operand(target), bd); \
58 } \ 61 } \
59 void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \ 62 void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
60 BranchDelaySlot bd) { \ 63 BranchDelaySlot bd) { \
(...skipping 93 matching lines...)
154 Condition cond, 157 Condition cond,
155 Register src1, const Operand& src2) { 158 Register src1, const Operand& src2) {
156 Branch(2, NegateCondition(cond), src1, src2); 159 Branch(2, NegateCondition(cond), src1, src2);
157 sw(source, MemOperand(s6, index << kPointerSizeLog2)); 160 sw(source, MemOperand(s6, index << kPointerSizeLog2));
158 } 161 }
159 162
160 163
161 void MacroAssembler::RecordWriteHelper(Register object, 164 void MacroAssembler::RecordWriteHelper(Register object,
162 Register address, 165 Register address,
163 Register scratch) { 166 Register scratch) {
164 if (FLAG_debug_code) { 167 if (emit_debug_code()) {
165 // Check that the object is not in new space. 168 // Check that the object is not in new space.
166 Label not_in_new_space; 169 Label not_in_new_space;
167 InNewSpace(object, scratch, ne, &not_in_new_space); 170 InNewSpace(object, scratch, ne, &not_in_new_space);
168 Abort("new-space object passed to RecordWriteHelper"); 171 Abort("new-space object passed to RecordWriteHelper");
169 bind(&not_in_new_space); 172 bind(&not_in_new_space);
170 } 173 }
171 174
172 // Calculate page address: Clear bits from 0 to kPageSizeBits. 175 // Calculate page address: Clear bits from 0 to kPageSizeBits.
173 if (mips32r2) { 176 if (mips32r2) {
174 Ins(object, zero_reg, 0, kPageSizeBits); 177 Ins(object, zero_reg, 0, kPageSizeBits);
(...skipping 48 matching lines...)
223 // Add offset into the object. 226 // Add offset into the object.
224 Addu(scratch0, object, offset); 227 Addu(scratch0, object, offset);
225 228
226 // Record the actual write. 229 // Record the actual write.
227 RecordWriteHelper(object, scratch0, scratch1); 230 RecordWriteHelper(object, scratch0, scratch1);
228 231
229 bind(&done); 232 bind(&done);
230 233
231 // Clobber all input registers when running with the debug-code flag 234 // Clobber all input registers when running with the debug-code flag
232 // turned on to provoke errors. 235 // turned on to provoke errors.
233 if (FLAG_debug_code) { 236 if (emit_debug_code()) {
234 li(object, Operand(BitCast<int32_t>(kZapValue))); 237 li(object, Operand(BitCast<int32_t>(kZapValue)));
235 li(scratch0, Operand(BitCast<int32_t>(kZapValue))); 238 li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
236 li(scratch1, Operand(BitCast<int32_t>(kZapValue))); 239 li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
237 } 240 }
238 } 241 }
239 242
240 243
241 // Will clobber 4 registers: object, address, scratch, ip. The 244 // Will clobber 4 registers: object, address, scratch, ip. The
242 // register 'object' contains a heap object pointer. The heap object 245 // register 'object' contains a heap object pointer. The heap object
243 // tag is shifted away. 246 // tag is shifted away.
(...skipping 11 matching lines...)
255 // region marks for new space pages. 258 // region marks for new space pages.
256 InNewSpace(object, scratch, eq, &done); 259 InNewSpace(object, scratch, eq, &done);
257 260
258 // Record the actual write. 261 // Record the actual write.
259 RecordWriteHelper(object, address, scratch); 262 RecordWriteHelper(object, address, scratch);
260 263
261 bind(&done); 264 bind(&done);
262 265
263 // Clobber all input registers when running with the debug-code flag 266 // Clobber all input registers when running with the debug-code flag
264 // turned on to provoke errors. 267 // turned on to provoke errors.
265 if (FLAG_debug_code) { 268 if (emit_debug_code()) {
266 li(object, Operand(BitCast<int32_t>(kZapValue))); 269 li(object, Operand(BitCast<int32_t>(kZapValue)));
267 li(address, Operand(BitCast<int32_t>(kZapValue))); 270 li(address, Operand(BitCast<int32_t>(kZapValue)));
268 li(scratch, Operand(BitCast<int32_t>(kZapValue))); 271 li(scratch, Operand(BitCast<int32_t>(kZapValue)));
269 } 272 }
270 } 273 }
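Note: the kZapValue clobbering above is deliberate; overwriting the inputs with a recognizable bad pointer in debug-code builds makes any later use of a register the write barrier is allowed to clobber fail fast instead of silently reading stale data.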
271 274
272 275
273 // ----------------------------------------------------------------------------- 276 // -----------------------------------------------------------------------------
274 // Allocation support 277 // Allocation support.
275 278
276 279
277 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, 280 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
278 Register scratch, 281 Register scratch,
279 Label* miss) { 282 Label* miss) {
280 Label same_contexts; 283 Label same_contexts;
281 284
282 ASSERT(!holder_reg.is(scratch)); 285 ASSERT(!holder_reg.is(scratch));
283 ASSERT(!holder_reg.is(at)); 286 ASSERT(!holder_reg.is(at));
284 ASSERT(!scratch.is(at)); 287 ASSERT(!scratch.is(at));
285 288
286 // Load current lexical context from the stack frame. 289 // Load current lexical context from the stack frame.
287 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); 290 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
288 // In debug mode, make sure the lexical context is set. 291 // In debug mode, make sure the lexical context is set.
289 #ifdef DEBUG 292 #ifdef DEBUG
290 Check(ne, "we should not have an empty lexical context", 293 Check(ne, "we should not have an empty lexical context",
291 scratch, Operand(zero_reg)); 294 scratch, Operand(zero_reg));
292 #endif 295 #endif
293 296
294 // Load the global context of the current context. 297 // Load the global context of the current context.
295 int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; 298 int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
296 lw(scratch, FieldMemOperand(scratch, offset)); 299 lw(scratch, FieldMemOperand(scratch, offset));
297 lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); 300 lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
298 301
299 // Check the context is a global context. 302 // Check the context is a global context.
300 if (FLAG_debug_code) { 303 if (emit_debug_code()) {
301 // TODO(119): Avoid push(holder_reg)/pop(holder_reg). 304 // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
302 Push(holder_reg); // Temporarily save holder on the stack. 305 push(holder_reg); // Temporarily save holder on the stack.
303 // Read the first word and compare to the global_context_map. 306 // Read the first word and compare to the global_context_map.
304 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); 307 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
305 LoadRoot(at, Heap::kGlobalContextMapRootIndex); 308 LoadRoot(at, Heap::kGlobalContextMapRootIndex);
306 Check(eq, "JSGlobalObject::global_context should be a global context.", 309 Check(eq, "JSGlobalObject::global_context should be a global context.",
307 holder_reg, Operand(at)); 310 holder_reg, Operand(at));
308 Pop(holder_reg); // Restore holder. 311 pop(holder_reg); // Restore holder.
309 } 312 }
310 313
311 // Check if both contexts are the same. 314 // Check if both contexts are the same.
312 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); 315 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
313 Branch(&same_contexts, eq, scratch, Operand(at)); 316 Branch(&same_contexts, eq, scratch, Operand(at));
314 317
315 // Check the context is a global context. 318 // Check the context is a global context.
316 if (FLAG_debug_code) { 319 if (emit_debug_code()) {
317 // TODO(119): Avoid push(holder_reg)/pop(holder_reg). 320 // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
318 Push(holder_reg); // Temporarily save holder on the stack. 321 push(holder_reg); // Temporarily save holder on the stack.
319 mov(holder_reg, at); // Move at to its holding place. 322 mov(holder_reg, at); // Move at to its holding place.
320 LoadRoot(at, Heap::kNullValueRootIndex); 323 LoadRoot(at, Heap::kNullValueRootIndex);
321 Check(ne, "JSGlobalProxy::context() should not be null.", 324 Check(ne, "JSGlobalProxy::context() should not be null.",
322 holder_reg, Operand(at)); 325 holder_reg, Operand(at));
323 326
324 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); 327 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
325 LoadRoot(at, Heap::kGlobalContextMapRootIndex); 328 LoadRoot(at, Heap::kGlobalContextMapRootIndex);
326 Check(eq, "JSGlobalObject::global_context should be a global context.", 329 Check(eq, "JSGlobalObject::global_context should be a global context.",
327 holder_reg, Operand(at)); 330 holder_reg, Operand(at));
328 // Restore at is not needed. at is reloaded below. 331 // Restore at is not needed. at is reloaded below.
329 Pop(holder_reg); // Restore holder. 332 pop(holder_reg); // Restore holder.
330 // Restore at to holder's context. 333 // Restore at to holder's context.
331 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); 334 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
332 } 335 }
333 336
334 // Check that the security token in the calling global object is 337 // Check that the security token in the calling global object is
335 // compatible with the security token in the receiving global 338 // compatible with the security token in the receiving global
336 // object. 339 // object.
337 int token_offset = Context::kHeaderSize + 340 int token_offset = Context::kHeaderSize +
338 Context::SECURITY_TOKEN_INDEX * kPointerSize; 341 Context::SECURITY_TOKEN_INDEX * kPointerSize;
339 342
340 lw(scratch, FieldMemOperand(scratch, token_offset)); 343 lw(scratch, FieldMemOperand(scratch, token_offset));
341 lw(at, FieldMemOperand(at, token_offset)); 344 lw(at, FieldMemOperand(at, token_offset));
342 Branch(miss, ne, scratch, Operand(at)); 345 Branch(miss, ne, scratch, Operand(at));
343 346
344 bind(&same_contexts); 347 bind(&same_contexts);
345 } 348 }
346 349
347 350
348 // --------------------------------------------------------------------------- 351 // ---------------------------------------------------------------------------
349 // Instruction macros 352 // Instruction macros.
350 353
351 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { 354 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
352 if (rt.is_reg()) { 355 if (rt.is_reg()) {
353 addu(rd, rs, rt.rm()); 356 addu(rd, rs, rt.rm());
354 } else { 357 } else {
355 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 358 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
356 addiu(rd, rs, rt.imm32_); 359 addiu(rd, rs, rt.imm32_);
357 } else { 360 } else {
358 // li handles the relocation. 361 // li handles the relocation.
359 ASSERT(!rs.is(at)); 362 ASSERT(!rs.is(at));
(...skipping 133 matching lines...)
493 nor(rd, rs, rt.rm()); 496 nor(rd, rs, rt.rm());
494 } else { 497 } else {
495 // li handles the relocation. 498 // li handles the relocation.
496 ASSERT(!rs.is(at)); 499 ASSERT(!rs.is(at));
497 li(at, rt); 500 li(at, rt);
498 nor(rd, rs, at); 501 nor(rd, rs, at);
499 } 502 }
500 } 503 }
501 504
502 505
506 void MacroAssembler::Neg(Register rs, const Operand& rt) {
507 ASSERT(rt.is_reg());
508 ASSERT(!at.is(rs));
509 ASSERT(!at.is(rt.rm()));
510 li(at, -1);
511 xor_(rs, rt.rm(), at);
512 }
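Note: xor with an all-ones register computes the bitwise complement (~x, which is -x - 1), not the arithmetic negation. If a two's-complement negate is what is intended here, a one-instruction sketch (an assumption about intent, not part of this patch) would be:

    // Hypothetical alternative: subtracting from zero_reg is a true
    // two's-complement negate and needs no scratch register.
    void MacroAssembler::Neg(Register rs, const Operand& rt) {
      ASSERT(rt.is_reg());
      subu(rs, zero_reg, rt.rm());
    }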
513
514
503 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { 515 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
504 if (rt.is_reg()) { 516 if (rt.is_reg()) {
505 slt(rd, rs, rt.rm()); 517 slt(rd, rs, rt.rm());
506 } else { 518 } else {
507 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 519 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
508 slti(rd, rs, rt.imm32_); 520 slti(rd, rs, rt.imm32_);
509 } else { 521 } else {
510 // li handles the relocation. 522 // li handles the relocation.
511 ASSERT(!rs.is(at)); 523 ASSERT(!rs.is(at));
512 li(at, rt); 524 li(at, rt);
(...skipping 61 matching lines...)
574 } else { 586 } else {
575 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); 587 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
576 ori(rd, rd, (j.imm32_ & kImm16Mask)); 588 ori(rd, rd, (j.imm32_ & kImm16Mask));
577 } 589 }
578 } else if (MustUseReg(j.rmode_) || gen2instr) { 590 } else if (MustUseReg(j.rmode_) || gen2instr) {
579 if (MustUseReg(j.rmode_)) { 591 if (MustUseReg(j.rmode_)) {
580 RecordRelocInfo(j.rmode_, j.imm32_); 592 RecordRelocInfo(j.rmode_, j.imm32_);
581 } 593 }
582 // We always need the same number of instructions, as we may need to patch 594 // We always need the same number of instructions, as we may need to patch
583 // this code to load another value which may need 2 instructions to load. 595 // this code to load another value which may need 2 instructions to load.
584 if (is_int16(j.imm32_)) { 596 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
585 nop(); 597 ori(rd, rd, (j.imm32_ & kImm16Mask));
586 addiu(rd, zero_reg, j.imm32_);
587 } else if (!(j.imm32_ & kHiMask)) {
588 nop();
589 ori(rd, zero_reg, j.imm32_);
590 } else if (!(j.imm32_ & kImm16Mask)) {
591 nop();
592 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
593 } else {
594 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
595 ori(rd, rd, (j.imm32_ & kImm16Mask));
596 }
597 } 598 }
598 } 599 }
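The simplification above always emits the lui/ori pair on the patchable path, even when one instruction would suffice, so later patching can rewrite the immediate without changing the code size. A small standalone model of the invariant the patcher relies on:

    #include <assert.h>
    #include <stdint.h>

    // Model of the fixed pair: lui sets the high half-word, ori merges in
    // the low half-word, and the two halves reassemble any 32-bit value.
    uint32_t ModelLuiOri(uint32_t imm) {
      uint32_t rd = imm & 0xFFFF0000u;  // lui rd, (imm & kHiMask) >> kLuiShift
      rd |= imm & 0xFFFFu;              // ori rd, rd, imm & kImm16Mask
      assert(rd == imm);
      return rd;
    }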
599 600
600 601
601 // Exception-generating instructions and debugging support 602 // Exception-generating instructions and debugging support.
602 void MacroAssembler::stop(const char* msg) { 603 void MacroAssembler::stop(const char* msg) {
603 // TO_UPGRADE: Just a break for now. Maybe we could upgrade it. 604 // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
604 // We use the 0x54321 value to be able to find it easily when reading memory. 605 // We use the 0x54321 value to be able to find it easily when reading memory.
605 break_(0x54321); 606 break_(0x54321);
606 } 607 }
607 608
608 609
609 void MacroAssembler::MultiPush(RegList regs) { 610 void MacroAssembler::MultiPush(RegList regs) {
610 int16_t NumSaved = 0; 611 int16_t NumSaved = 0;
611 int16_t NumToPush = NumberOfBitsSet(regs); 612 int16_t NumToPush = NumberOfBitsSet(regs);
(...skipping 108 matching lines...)
720 721
721 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) { 722 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
722 // Convert rs to a FP value in fd (and fd + 1). 723 // Convert rs to a FP value in fd (and fd + 1).
723 // We do this by converting rs minus the MSB to avoid sign conversion, 724 // We do this by converting rs minus the MSB to avoid sign conversion,
724 // then adding 2^31-1 and 1 to the result. 725 // then adding 2^31-1 and 1 to the result.
725 726
726 ASSERT(!fd.is(f20)); 727 ASSERT(!fd.is(f20));
727 ASSERT(!rs.is(t9)); 728 ASSERT(!rs.is(t9));
728 ASSERT(!rs.is(t8)); 729 ASSERT(!rs.is(t8));
729 730
730 // Save rs's MSB to t8 731 // Save rs's MSB to t8.
731 And(t8, rs, 0x80000000); 732 And(t8, rs, 0x80000000);
732 // Remove rs's MSB. 733 // Remove rs's MSB.
733 And(t9, rs, 0x7FFFFFFF); 734 And(t9, rs, 0x7FFFFFFF);
734 // Move t9 to fd 735 // Move t9 to fd.
735 mtc1(t9, fd); 736 mtc1(t9, fd);
736 737
737 // Convert fd to a real FP value. 738 // Convert fd to a real FP value.
738 cvt_d_w(fd, fd); 739 cvt_d_w(fd, fd);
739 740
740 Label conversion_done; 741 Label conversion_done;
741 742
742 // If rs's MSB was 0, it's done. 743 // If rs's MSB was 0, it's done.
743 // Otherwise we need to add that to the FP register. 744 // Otherwise we need to add that to the FP register.
744 Branch(&conversion_done, eq, t8, Operand(zero_reg)); 745 Branch(&conversion_done, eq, t8, Operand(zero_reg));
(...skipping 87 matching lines...)
832 Branch(not_int32, gt, scratch2, Operand(non_smi_exponent)); 833 Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
833 834
834 // We know the exponent is smaller than 30 (biased). If it is less than 835 // We know the exponent is smaller than 30 (biased). If it is less than
835 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie 836 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
836 // it rounds to zero. 837 // it rounds to zero.
837 const uint32_t zero_exponent = 838 const uint32_t zero_exponent =
838 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; 839 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
839 Subu(scratch2, scratch2, Operand(zero_exponent)); 840 Subu(scratch2, scratch2, Operand(zero_exponent));
840 // Dest already has a Smi zero. 841 // Dest already has a Smi zero.
841 Branch(&done, lt, scratch2, Operand(zero_reg)); 842 Branch(&done, lt, scratch2, Operand(zero_reg));
842 if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) { 843 if (!CpuFeatures::IsSupported(FPU)) {
843 // We have a shifted exponent between 0 and 30 in scratch2. 844 // We have a shifted exponent between 0 and 30 in scratch2.
844 srl(dest, scratch2, HeapNumber::kExponentShift); 845 srl(dest, scratch2, HeapNumber::kExponentShift);
845 // We now have the exponent in dest. Subtract from 30 to get 846 // We now have the exponent in dest. Subtract from 30 to get
846 // how much to shift down. 847 // how much to shift down.
847 li(at, Operand(30)); 848 li(at, Operand(30));
848 subu(dest, at, dest); 849 subu(dest, at, dest);
849 } 850 }
850 bind(&right_exponent); 851 bind(&right_exponent);
851 if (Isolate::Current()->cpu_features()->IsSupported(FPU)) { 852 if (CpuFeatures::IsSupported(FPU)) {
852 CpuFeatures::Scope scope(FPU); 853 CpuFeatures::Scope scope(FPU);
853 // MIPS FPU instructions implementing double precision to integer 854 // MIPS FPU instructions implementing double precision to integer
854 // conversion using round to zero. Since the FP value was qualified 855 // conversion using round to zero. Since the FP value was qualified
855 // above, the resulting integer should be a legal int32. 856 // above, the resulting integer should be a legal int32.
856 // The original 'Exponent' word is still in scratch. 857 // The original 'Exponent' word is still in scratch.
857 lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); 858 lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
858 mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1)); 859 mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
859 trunc_w_d(double_scratch, double_scratch); 860 trunc_w_d(double_scratch, double_scratch);
860 mfc1(dest, double_scratch); 861 mfc1(dest, double_scratch);
861 } else { 862 } else {
(...skipping 29 matching lines...)
891 // Trick to check sign bit (msb) held in dest, count leading zero. 892 // Trick to check sign bit (msb) held in dest, count leading zero.
892 // 0 indicates negative, save negative version with conditional move. 893 // 0 indicates negative, save negative version with conditional move.
893 clz(dest, dest); 894 clz(dest, dest);
894 movz(scratch, scratch2, dest); 895 movz(scratch, scratch2, dest);
895 mov(dest, scratch); 896 mov(dest, scratch);
896 } 897 }
897 bind(&done); 898 bind(&done);
898 } 899 }
899 900
900 901
902 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
903 Register input_high,
904 Register input_low,
905 Register scratch) {
906 Label done, normal_exponent, restore_sign;
907 // Extract the biased exponent in result.
908 Ext(result,
909 input_high,
910 HeapNumber::kExponentShift,
911 HeapNumber::kExponentBits);
912
913 // Check for Infinity and NaNs, which should return 0.
914 Subu(scratch, result, HeapNumber::kExponentMask);
915 movz(result, zero_reg, scratch);
916 Branch(&done, eq, scratch, Operand(zero_reg));
917
918 // Express exponent as delta to (number of mantissa bits + 31).
919 Subu(result,
920 result,
921 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
922
923 // If the delta is strictly positive, all bits would be shifted away,
924 // which means that we can return 0.
925 Branch(&normal_exponent, le, result, Operand(zero_reg));
926 mov(result, zero_reg);
927 Branch(&done);
928
929 bind(&normal_exponent);
930 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
931 // Calculate shift.
932 Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
933
934 // Save the sign.
935 Register sign = result;
936 result = no_reg;
937 And(sign, input_high, Operand(HeapNumber::kSignMask));
938
939 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
940 // to check for this specific case.
941 Label high_shift_needed, high_shift_done;
942 Branch(&high_shift_needed, lt, scratch, Operand(32));
943 mov(input_high, zero_reg);
944 Branch(&high_shift_done);
945 bind(&high_shift_needed);
946
947 // Set the implicit 1 before the mantissa part in input_high.
948 Or(input_high,
949 input_high,
950 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
951 // Shift the mantissa bits to the correct position.
952 // We don't need to clear non-mantissa bits as they will be shifted away.
953 // If they weren't, it would mean that the answer is in the 32bit range.
954 sllv(input_high, input_high, scratch);
955
956 bind(&high_shift_done);
957
958 // Replace the shifted bits with bits from the lower mantissa word.
959 Label pos_shift, shift_done;
960 li(at, 32);
961 subu(scratch, at, scratch);
962 Branch(&pos_shift, ge, scratch, Operand(zero_reg));
963
964 // Negate scratch.
965 Subu(scratch, zero_reg, scratch);
966 sllv(input_low, input_low, scratch);
967 Branch(&shift_done);
968
969 bind(&pos_shift);
970 srlv(input_low, input_low, scratch);
971
972 bind(&shift_done);
973 Or(input_high, input_high, Operand(input_low));
974 // Restore sign if necessary.
975 mov(scratch, sign);
976 result = sign;
977 sign = no_reg;
978 Subu(result, zero_reg, input_high);
979 movz(result, input_high, scratch);
980 bind(&done);
981 }
982
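EmitOutOfInt32RangeTruncate implements the modulo-2^32 truncation for doubles whose integer part does not fit in 32 bits: extract the biased exponent, shift the 53-bit mantissa so the low 32 bits of the integer part land in a word, then apply the sign. A compact C++ model of the same bit manipulation, assuming IEEE-754 doubles (a reference sketch, not the emitted code):

    #include <stdint.h>
    #include <string.h>

    int32_t ModelOutOfRangeTruncate(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF);
      if (exponent == 0x7FF) return 0;  // Infinity and NaN truncate to 0.
      uint64_t mantissa = (bits & 0xFFFFFFFFFFFFFULL) | (1ULL << 52);
      int shift = exponent - 1075;      // Exponent bias 1023 + 52 mantissa bits.
      uint32_t magnitude;
      if (shift > 31) {
        magnitude = 0;                  // All low 32 bits shifted away.
      } else if (shift >= 0) {
        magnitude = static_cast<uint32_t>(mantissa << shift);
      } else if (shift > -53) {
        magnitude = static_cast<uint32_t>(mantissa >> -shift);
      } else {
        magnitude = 0;
      }
      uint32_t result = (bits >> 63) ? 0u - magnitude : magnitude;
      return static_cast<int32_t>(result);
    }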
983
984 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
985 Register src,
986 int num_least_bits) {
987 Ext(dst, src, kSmiTagSize, num_least_bits);
988 }
989
990
991 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
992 Register src,
993 int num_least_bits) {
994 And(dst, src, Operand((1 << num_least_bits) - 1));
995 }
996
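Both helpers read the low bits of a value; the Smi variant skips the tag bit first. One-line models of each, assuming kSmiTagSize == 1:

    // Model of GetLeastBitsFromSmi: Ext(dst, src, kSmiTagSize, n).
    uint32_t LeastBitsFromSmi(uint32_t smi, int n) {
      return (smi >> 1) & ((1u << n) - 1);
    }
    // Model of GetLeastBitsFromInt32: mask with (1 << n) - 1.
    uint32_t LeastBitsFromInt32(uint32_t x, int n) {
      return x & ((1u << n) - 1);
    }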
997
901 // Emulated conditional branches do not emit a nop in the branch delay slot. 998 // Emulated conditional branches do not emit a nop in the branch delay slot.
902 // 999 //
903 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. 1000 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
904 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ 1001 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
905 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ 1002 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
906 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) 1003 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
907 1004
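MIPS has native compare-and-branch only for equality and zero comparisons, so the Branch overloads below synthesize the remaining conditions by materializing the comparison with slt/sltu and branching on the resulting boolean, for example for 'greater':

    slt(scratch, r2, rs);            // scratch = (rs > r2) ? 1 : 0.
    bne(scratch, zero_reg, offset);  // Taken exactly when the condition held.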
908 1005
909 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { 1006 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
910 b(offset); 1007 b(offset);
(...skipping 19 matching lines...)
930 switch (cond) { 1027 switch (cond) {
931 case cc_always: 1028 case cc_always:
932 b(offset); 1029 b(offset);
933 break; 1030 break;
934 case eq: 1031 case eq:
935 beq(rs, r2, offset); 1032 beq(rs, r2, offset);
936 break; 1033 break;
937 case ne: 1034 case ne:
938 bne(rs, r2, offset); 1035 bne(rs, r2, offset);
939 break; 1036 break;
940 // Signed comparison 1037 // Signed comparison.
941 case greater: 1038 case greater:
942 if (r2.is(zero_reg)) { 1039 if (r2.is(zero_reg)) {
943 bgtz(rs, offset); 1040 bgtz(rs, offset);
944 } else { 1041 } else {
945 slt(scratch, r2, rs); 1042 slt(scratch, r2, rs);
946 bne(scratch, zero_reg, offset); 1043 bne(scratch, zero_reg, offset);
947 } 1044 }
948 break; 1045 break;
949 case greater_equal: 1046 case greater_equal:
950 if (r2.is(zero_reg)) { 1047 if (r2.is(zero_reg)) {
(...skipping 70 matching lines...)
1021 li(r2, rt); 1118 li(r2, rt);
1022 beq(rs, r2, offset); 1119 beq(rs, r2, offset);
1023 break; 1120 break;
1024 case ne: 1121 case ne:
1025 // We don't want any other register but scratch clobbered. 1122 // We don't want any other register but scratch clobbered.
1026 ASSERT(!scratch.is(rs)); 1123 ASSERT(!scratch.is(rs));
1027 r2 = scratch; 1124 r2 = scratch;
1028 li(r2, rt); 1125 li(r2, rt);
1029 bne(rs, r2, offset); 1126 bne(rs, r2, offset);
1030 break; 1127 break;
1031 // Signed comparison 1128 // Signed comparison.
1032 case greater: 1129 case greater:
1033 if (rt.imm32_ == 0) { 1130 if (rt.imm32_ == 0) {
1034 bgtz(rs, offset); 1131 bgtz(rs, offset);
1035 } else { 1132 } else {
1036 r2 = scratch; 1133 r2 = scratch;
1037 li(r2, rt); 1134 li(r2, rt);
1038 slt(scratch, r2, rs); 1135 slt(scratch, r2, rs);
1039 bne(scratch, zero_reg, offset); 1136 bne(scratch, zero_reg, offset);
1040 } 1137 }
1041 break; 1138 break;
(...skipping 121 matching lines...)
1163 b(offset); 1260 b(offset);
1164 break; 1261 break;
1165 case eq: 1262 case eq:
1166 offset = shifted_branch_offset(L, false); 1263 offset = shifted_branch_offset(L, false);
1167 beq(rs, r2, offset); 1264 beq(rs, r2, offset);
1168 break; 1265 break;
1169 case ne: 1266 case ne:
1170 offset = shifted_branch_offset(L, false); 1267 offset = shifted_branch_offset(L, false);
1171 bne(rs, r2, offset); 1268 bne(rs, r2, offset);
1172 break; 1269 break;
1173 // Signed comparison 1270 // Signed comparison.
1174 case greater: 1271 case greater:
1175 if (r2.is(zero_reg)) { 1272 if (r2.is(zero_reg)) {
1176 offset = shifted_branch_offset(L, false); 1273 offset = shifted_branch_offset(L, false);
1177 bgtz(rs, offset); 1274 bgtz(rs, offset);
1178 } else { 1275 } else {
1179 slt(scratch, r2, rs); 1276 slt(scratch, r2, rs);
1180 offset = shifted_branch_offset(L, false); 1277 offset = shifted_branch_offset(L, false);
1181 bne(scratch, zero_reg, offset); 1278 bne(scratch, zero_reg, offset);
1182 } 1279 }
1183 break; 1280 break;
(...skipping 85 matching lines...)
1269 li(r2, rt); 1366 li(r2, rt);
1270 offset = shifted_branch_offset(L, false); 1367 offset = shifted_branch_offset(L, false);
1271 beq(rs, r2, offset); 1368 beq(rs, r2, offset);
1272 break; 1369 break;
1273 case ne: 1370 case ne:
1274 r2 = scratch; 1371 r2 = scratch;
1275 li(r2, rt); 1372 li(r2, rt);
1276 offset = shifted_branch_offset(L, false); 1373 offset = shifted_branch_offset(L, false);
1277 bne(rs, r2, offset); 1374 bne(rs, r2, offset);
1278 break; 1375 break;
1279 // Signed comparison 1376 // Signed comparison.
1280 case greater: 1377 case greater:
1281 if (rt.imm32_ == 0) { 1378 if (rt.imm32_ == 0) {
1282 offset = shifted_branch_offset(L, false); 1379 offset = shifted_branch_offset(L, false);
1283 bgtz(rs, offset); 1380 bgtz(rs, offset);
1284 } else { 1381 } else {
1285 r2 = scratch; 1382 r2 = scratch;
1286 li(r2, rt); 1383 li(r2, rt);
1287 slt(scratch, r2, rs); 1384 slt(scratch, r2, rs);
1288 offset = shifted_branch_offset(L, false); 1385 offset = shifted_branch_offset(L, false);
1289 bne(scratch, zero_reg, offset); 1386 bne(scratch, zero_reg, offset);
(...skipping 147 matching lines...)
1437 bne(rs, r2, 2); 1534 bne(rs, r2, 2);
1438 nop(); 1535 nop();
1439 bal(offset); 1536 bal(offset);
1440 break; 1537 break;
1441 case ne: 1538 case ne:
1442 beq(rs, r2, 2); 1539 beq(rs, r2, 2);
1443 nop(); 1540 nop();
1444 bal(offset); 1541 bal(offset);
1445 break; 1542 break;
1446 1543
1447 // Signed comparison 1544 // Signed comparison.
1448 case greater: 1545 case greater:
1449 slt(scratch, r2, rs); 1546 slt(scratch, r2, rs);
1450 addiu(scratch, scratch, -1); 1547 addiu(scratch, scratch, -1);
1451 bgezal(scratch, offset); 1548 bgezal(scratch, offset);
1452 break; 1549 break;
1453 case greater_equal: 1550 case greater_equal:
1454 slt(scratch, rs, r2); 1551 slt(scratch, rs, r2);
1455 addiu(scratch, scratch, -1); 1552 addiu(scratch, scratch, -1);
1456 bltzal(scratch, offset); 1553 bltzal(scratch, offset);
1457 break; 1554 break;
(...skipping 74 matching lines...)
1532 offset = shifted_branch_offset(L, false); 1629 offset = shifted_branch_offset(L, false);
1533 bal(offset); 1630 bal(offset);
1534 break; 1631 break;
1535 case ne: 1632 case ne:
1536 beq(rs, r2, 2); 1633 beq(rs, r2, 2);
1537 nop(); 1634 nop();
1538 offset = shifted_branch_offset(L, false); 1635 offset = shifted_branch_offset(L, false);
1539 bal(offset); 1636 bal(offset);
1540 break; 1637 break;
1541 1638
1542 // Signed comparison 1639 // Signed comparison.
1543 case greater: 1640 case greater:
1544 slt(scratch, r2, rs); 1641 slt(scratch, r2, rs);
1545 addiu(scratch, scratch, -1); 1642 addiu(scratch, scratch, -1);
1546 offset = shifted_branch_offset(L, false); 1643 offset = shifted_branch_offset(L, false);
1547 bgezal(scratch, offset); 1644 bgezal(scratch, offset);
1548 break; 1645 break;
1549 case greater_equal: 1646 case greater_equal:
1550 slt(scratch, rs, r2); 1647 slt(scratch, rs, r2);
1551 addiu(scratch, scratch, -1); 1648 addiu(scratch, scratch, -1);
1552 offset = shifted_branch_offset(L, false); 1649 offset = shifted_branch_offset(L, false);
(...skipping 82 matching lines...)
1635 jr(target.rm()); 1732 jr(target.rm());
1636 } 1733 }
1637 } else { // Not register target. 1734 } else { // Not register target.
1638 if (!MustUseReg(target.rmode_)) { 1735 if (!MustUseReg(target.rmode_)) {
1639 if (cond == cc_always) { 1736 if (cond == cc_always) {
1640 j(target.imm32_); 1737 j(target.imm32_);
1641 } else { 1738 } else {
1642 Branch(2, NegateCondition(cond), rs, rt); 1739 Branch(2, NegateCondition(cond), rs, rt);
1643 j(target.imm32_); // Will generate only one instruction. 1740 j(target.imm32_); // Will generate only one instruction.
1644 } 1741 }
1645 } else { // MustUseReg(target) 1742 } else { // MustUseReg(target).
1646 li(t9, target); 1743 li(t9, target);
1647 if (cond == cc_always) { 1744 if (cond == cc_always) {
1648 jr(t9); 1745 jr(t9);
1649 } else { 1746 } else {
1650 Branch(2, NegateCondition(cond), rs, rt); 1747 Branch(2, NegateCondition(cond), rs, rt);
1651 jr(t9); // Will generate only one instruction. 1748 jr(t9); // Will generate only one instruction.
1652 } 1749 }
1653 } 1750 }
1654 } 1751 }
1655 // Emit a nop in the branch delay slot if required. 1752 // Emit a nop in the branch delay slot if required.
1656 if (bdslot == PROTECT) 1753 if (bdslot == PROTECT)
1657 nop(); 1754 nop();
1658 } 1755 }
1659 1756
1660 1757
1661 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) { 1758 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
1662 UNIMPLEMENTED_MIPS(); 1759 return 4 * kInstrSize;
1663 return 0;
1664 } 1760 }
1665 1761
1666 1762
1667 int MacroAssembler::CallSize(Register reg) { 1763 int MacroAssembler::CallSize(Register reg) {
1668 UNIMPLEMENTED_MIPS(); 1764 return 2 * kInstrSize;
1669 return 0;
1670 } 1765 }
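These sizes match the call sequences emitted below: a code-target call goes through li(t9, target) (a fixed lui/ori pair) plus jalr and its delay-slot nop, hence 4 * kInstrSize, while a register call is just jalr plus the delay-slot nop, hence 2 * kInstrSize.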
1671 1766
1672 1767
1673 // Note: To call gcc-compiled C code on mips, you must call through t9. 1768 // Note: To call gcc-compiled C code on mips, you must call through t9.
1674 void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) { 1769 void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
1675 BlockTrampolinePoolScope block_trampoline_pool(this); 1770 BlockTrampolinePoolScope block_trampoline_pool(this);
1676 if (target.is_reg()) { 1771 if (target.is_reg()) {
1677 jalr(target.rm()); 1772 jalr(target.rm());
1678 } else { // !target.is_reg() 1773 } else { // !target.is_reg().
1679 if (!MustUseReg(target.rmode_)) { 1774 if (!MustUseReg(target.rmode_)) {
1680 jal(target.imm32_); 1775 jal(target.imm32_);
1681 } else { // MustUseReg(target) 1776 } else { // MustUseReg(target).
1777 // Must record previous source positions before the
1778 // li() generates a new code target.
1779 positions_recorder()->WriteRecordedPositions();
1682 li(t9, target); 1780 li(t9, target);
1683 jalr(t9); 1781 jalr(t9);
1684 } 1782 }
1685 } 1783 }
1686 // Emit a nop in the branch delay slot if required. 1784 // Emit a nop in the branch delay slot if required.
1687 if (bdslot == PROTECT) 1785 if (bdslot == PROTECT)
1688 nop(); 1786 nop();
1689 } 1787 }
1690 1788
1691 1789
1692 // Note: To call gcc-compiled C code on mips, you must call through t9. 1790 // Note: To call gcc-compiled C code on mips, you must call through t9.
1693 void MacroAssembler::Call(const Operand& target, 1791 void MacroAssembler::Call(const Operand& target,
1694 Condition cond, Register rs, const Operand& rt, 1792 Condition cond, Register rs, const Operand& rt,
1695 BranchDelaySlot bdslot) { 1793 BranchDelaySlot bdslot) {
1696 BlockTrampolinePoolScope block_trampoline_pool(this); 1794 BlockTrampolinePoolScope block_trampoline_pool(this);
1697 BRANCH_ARGS_CHECK(cond, rs, rt); 1795 BRANCH_ARGS_CHECK(cond, rs, rt);
1698 if (target.is_reg()) { 1796 if (target.is_reg()) {
1699 if (cond == cc_always) { 1797 if (cond == cc_always) {
1700 jalr(target.rm()); 1798 jalr(target.rm());
1701 } else { 1799 } else {
1702 Branch(2, NegateCondition(cond), rs, rt); 1800 Branch(2, NegateCondition(cond), rs, rt);
1703 jalr(target.rm()); 1801 jalr(target.rm());
1704 } 1802 }
1705 } else { // !target.is_reg() 1803 } else { // !target.is_reg().
1706 if (!MustUseReg(target.rmode_)) { 1804 if (!MustUseReg(target.rmode_)) {
1707 if (cond == cc_always) { 1805 if (cond == cc_always) {
1708 jal(target.imm32_); 1806 jal(target.imm32_);
1709 } else { 1807 } else {
1710 Branch(2, NegateCondition(cond), rs, rt); 1808 Branch(2, NegateCondition(cond), rs, rt);
1711 jal(target.imm32_); // Will generate only one instruction. 1809 jal(target.imm32_); // Will generate only one instruction.
1712 } 1810 }
1713 } else { // MustUseReg(target) 1811 } else { // MustUseReg(target)
1714 li(t9, target); 1812 li(t9, target);
1715 if (cond == cc_always) { 1813 if (cond == cc_always) {
1716 jalr(t9); 1814 jalr(t9);
1717 } else { 1815 } else {
1718 Branch(2, NegateCondition(cond), rs, rt); 1816 Branch(2, NegateCondition(cond), rs, rt);
1719 jalr(t9); // Will generate only one instruction. 1817 jalr(t9); // Will generate only one instruction.
1720 } 1818 }
1721 } 1819 }
1722 } 1820 }
1723 // Emit a nop in the branch delay slot if required. 1821 // Emit a nop in the branch delay slot if required.
1724 if (bdslot == PROTECT) 1822 if (bdslot == PROTECT)
1725 nop(); 1823 nop();
1726 } 1824 }
1727 1825
1728 1826
1827 void MacroAssembler::CallWithAstId(Handle<Code> code,
1828 RelocInfo::Mode rmode,
1829 unsigned ast_id,
1830 Condition cond,
1831 Register r1,
1832 const Operand& r2) {
1833 ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
1834 ASSERT(ast_id != kNoASTId);
1835 ASSERT(ast_id_for_reloc_info_ == kNoASTId);
1836 ast_id_for_reloc_info_ = ast_id;
1837 Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
1838 }
1839
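CallWithAstId stashes the id in ast_id_for_reloc_info_, which is presumably picked up when the reloc info for the following call is written, pairing the AST id with the CODE_TARGET_WITH_ID entry; the ASSERTs keep at most one id pending at a time.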
1840
1729 void MacroAssembler::Drop(int count, 1841 void MacroAssembler::Drop(int count,
1730 Condition cond, 1842 Condition cond,
1731 Register reg, 1843 Register reg,
1732 const Operand& op) { 1844 const Operand& op) {
1733 if (count <= 0) { 1845 if (count <= 0) {
1734 return; 1846 return;
1735 } 1847 }
1736 1848
1737 Label skip; 1849 Label skip;
1738 1850
(...skipping 66 matching lines...)
1805 mov(a0, zero_reg); 1917 mov(a0, zero_reg);
1806 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); 1918 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1807 CEntryStub ces(1); 1919 CEntryStub ces(1);
1808 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 1920 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1809 } 1921 }
1810 1922
1811 #endif // ENABLE_DEBUGGER_SUPPORT 1923 #endif // ENABLE_DEBUGGER_SUPPORT
1812 1924
1813 1925
1814 // --------------------------------------------------------------------------- 1926 // ---------------------------------------------------------------------------
1815 // Exception handling 1927 // Exception handling.
1816 1928
1817 void MacroAssembler::PushTryHandler(CodeLocation try_location, 1929 void MacroAssembler::PushTryHandler(CodeLocation try_location,
1818 HandlerType type) { 1930 HandlerType type) {
1819 // Adjust this code if not the case. 1931 // Adjust this code if not the case.
1820 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); 1932 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1821 // The return address is passed in register ra. 1933 // The return address is passed in register ra.
1822 if (try_location == IN_JAVASCRIPT) { 1934 if (try_location == IN_JAVASCRIPT) {
1823 if (type == TRY_CATCH_HANDLER) { 1935 if (type == TRY_CATCH_HANDLER) {
1824 li(t0, Operand(StackHandler::TRY_CATCH)); 1936 li(t0, Operand(StackHandler::TRY_CATCH));
1825 } else { 1937 } else {
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
1880 } 1992 }
1881 1993
1882 1994
1883 void MacroAssembler::AllocateInNewSpace(int object_size, 1995 void MacroAssembler::AllocateInNewSpace(int object_size,
1884 Register result, 1996 Register result,
1885 Register scratch1, 1997 Register scratch1,
1886 Register scratch2, 1998 Register scratch2,
1887 Label* gc_required, 1999 Label* gc_required,
1888 AllocationFlags flags) { 2000 AllocationFlags flags) {
1889 if (!FLAG_inline_new) { 2001 if (!FLAG_inline_new) {
1890 if (FLAG_debug_code) { 2002 if (emit_debug_code()) {
1891 // Trash the registers to simulate an allocation failure. 2003 // Trash the registers to simulate an allocation failure.
1892 li(result, 0x7091); 2004 li(result, 0x7091);
1893 li(scratch1, 0x7191); 2005 li(scratch1, 0x7191);
1894 li(scratch2, 0x7291); 2006 li(scratch2, 0x7291);
1895 } 2007 }
1896 jmp(gc_required); 2008 jmp(gc_required);
1897 return; 2009 return;
1898 } 2010 }
1899 2011
1900 ASSERT(!result.is(scratch1)); 2012 ASSERT(!result.is(scratch1));
(...skipping 27 matching lines...)
1928 Register obj_size_reg = scratch2; 2040 Register obj_size_reg = scratch2;
1929 li(topaddr, Operand(new_space_allocation_top)); 2041 li(topaddr, Operand(new_space_allocation_top));
1930 li(obj_size_reg, Operand(object_size)); 2042 li(obj_size_reg, Operand(object_size));
1931 2043
1932 // This code stores a temporary value in t9. 2044 // This code stores a temporary value in t9.
1933 if ((flags & RESULT_CONTAINS_TOP) == 0) { 2045 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1934 // Load allocation top into result and allocation limit into t9. 2046 // Load allocation top into result and allocation limit into t9.
1935 lw(result, MemOperand(topaddr)); 2047 lw(result, MemOperand(topaddr));
1936 lw(t9, MemOperand(topaddr, kPointerSize)); 2048 lw(t9, MemOperand(topaddr, kPointerSize));
1937 } else { 2049 } else {
1938 if (FLAG_debug_code) { 2050 if (emit_debug_code()) {
1939 // Assert that result actually contains top on entry. t9 is used 2051 // Assert that result actually contains top on entry. t9 is used
1940 // immediately below so this use of t9 does not cause difference with 2052 // immediately below so this use of t9 does not cause difference with
1941 // respect to register content between debug and release mode. 2053 // respect to register content between debug and release mode.
1942 lw(t9, MemOperand(topaddr)); 2054 lw(t9, MemOperand(topaddr));
1943 Check(eq, "Unexpected allocation top", result, Operand(t9)); 2055 Check(eq, "Unexpected allocation top", result, Operand(t9));
1944 } 2056 }
1945 // Load allocation limit into t9. Result already contains allocation top. 2057 // Load allocation limit into t9. Result already contains allocation top.
1946 lw(t9, MemOperand(topaddr, limit - top)); 2058 lw(t9, MemOperand(topaddr, limit - top));
1947 } 2059 }
1948 2060
(...skipping 10 matching lines...)
1959 } 2071 }
1960 2072
1961 2073
1962 void MacroAssembler::AllocateInNewSpace(Register object_size, 2074 void MacroAssembler::AllocateInNewSpace(Register object_size,
1963 Register result, 2075 Register result,
1964 Register scratch1, 2076 Register scratch1,
1965 Register scratch2, 2077 Register scratch2,
1966 Label* gc_required, 2078 Label* gc_required,
1967 AllocationFlags flags) { 2079 AllocationFlags flags) {
1968 if (!FLAG_inline_new) { 2080 if (!FLAG_inline_new) {
1969 if (FLAG_debug_code) { 2081 if (emit_debug_code()) {
1970 // Trash the registers to simulate an allocation failure. 2082 // Trash the registers to simulate an allocation failure.
1971 li(result, 0x7091); 2083 li(result, 0x7091);
1972 li(scratch1, 0x7191); 2084 li(scratch1, 0x7191);
1973 li(scratch2, 0x7291); 2085 li(scratch2, 0x7291);
1974 } 2086 }
1975 jmp(gc_required); 2087 jmp(gc_required);
1976 return; 2088 return;
1977 } 2089 }
1978 2090
1979 ASSERT(!result.is(scratch1)); 2091 ASSERT(!result.is(scratch1));
(...skipping 17 matching lines...)
1997 // Set up allocation top address and object size registers. 2109 // Set up allocation top address and object size registers.
1998 Register topaddr = scratch1; 2110 Register topaddr = scratch1;
1999 li(topaddr, Operand(new_space_allocation_top)); 2111 li(topaddr, Operand(new_space_allocation_top));
2000 2112
2001 // This code stores a temporary value in t9. 2113 // This code stores a temporary value in t9.
2002 if ((flags & RESULT_CONTAINS_TOP) == 0) { 2114 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2003 // Load allocation top into result and allocation limit into t9. 2115 // Load allocation top into result and allocation limit into t9.
2004 lw(result, MemOperand(topaddr)); 2116 lw(result, MemOperand(topaddr));
2005 lw(t9, MemOperand(topaddr, kPointerSize)); 2117 lw(t9, MemOperand(topaddr, kPointerSize));
2006 } else { 2118 } else {
2007 if (FLAG_debug_code) { 2119 if (emit_debug_code()) {
2008 // Assert that result actually contains top on entry. t9 is used 2120 // Assert that result actually contains top on entry. t9 is used
2009 // immediately below so this use of t9 does not cause difference with 2121 // immediately below so this use of t9 does not cause difference with
2010 // respect to register content between debug and release mode. 2122 // respect to register content between debug and release mode.
2011 lw(t9, MemOperand(topaddr)); 2123 lw(t9, MemOperand(topaddr));
2012 Check(eq, "Unexpected allocation top", result, Operand(t9)); 2124 Check(eq, "Unexpected allocation top", result, Operand(t9));
2013 } 2125 }
2014 // Load allocation limit into t9. Result already contains allocation top. 2126 // Load allocation limit into t9. Result already contains allocation top.
2015 lw(t9, MemOperand(topaddr, limit - top)); 2127 lw(t9, MemOperand(topaddr, limit - top));
2016 } 2128 }
2017 2129
2018 // Calculate new top and bail out if new space is exhausted. Use result 2130 // Calculate new top and bail out if new space is exhausted. Use result
2019 // to calculate the new top. Object size may be in words so a shift is 2131 // to calculate the new top. Object size may be in words so a shift is
2020 // required to get the number of bytes. 2132 // required to get the number of bytes.
2021 if ((flags & SIZE_IN_WORDS) != 0) { 2133 if ((flags & SIZE_IN_WORDS) != 0) {
2022 sll(scratch2, object_size, kPointerSizeLog2); 2134 sll(scratch2, object_size, kPointerSizeLog2);
2023 Addu(scratch2, result, scratch2); 2135 Addu(scratch2, result, scratch2);
2024 } else { 2136 } else {
2025 Addu(scratch2, result, Operand(object_size)); 2137 Addu(scratch2, result, Operand(object_size));
2026 } 2138 }
2027 Branch(gc_required, Ugreater, scratch2, Operand(t9)); 2139 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2028 2140
2029 // Update allocation top. result temporarily holds the new top. 2141 // Update allocation top. result temporarily holds the new top.
2030 if (FLAG_debug_code) { 2142 if (emit_debug_code()) {
2031 And(t9, scratch2, Operand(kObjectAlignmentMask)); 2143 And(t9, scratch2, Operand(kObjectAlignmentMask));
2032 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg)); 2144 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
2033 } 2145 }
2034 sw(scratch2, MemOperand(topaddr)); 2146 sw(scratch2, MemOperand(topaddr));
2035 2147
2036 // Tag object if requested. 2148 // Tag object if requested.
2037 if ((flags & TAG_OBJECT) != 0) { 2149 if ((flags & TAG_OBJECT) != 0) {
2038 Addu(result, result, Operand(kHeapObjectTag)); 2150 Addu(result, result, Operand(kHeapObjectTag));
2039 } 2151 }
2040 } 2152 }
(...skipping 170 matching lines...)
2211 } 2323 }
2212 ASSERT(!tmp.is(no_reg)); 2324 ASSERT(!tmp.is(no_reg));
2213 2325
2214 for (int i = 0; i < field_count; i++) { 2326 for (int i = 0; i < field_count; i++) {
2215 lw(tmp, FieldMemOperand(src, i * kPointerSize)); 2327 lw(tmp, FieldMemOperand(src, i * kPointerSize));
2216 sw(tmp, FieldMemOperand(dst, i * kPointerSize)); 2328 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
2217 } 2329 }
2218 } 2330 }
2219 2331
2220 2332
2333 void MacroAssembler::CopyBytes(Register src,
2334 Register dst,
2335 Register length,
2336 Register scratch) {
2337 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
2338
2339 // Align src before copying in word size chunks.
2340 bind(&align_loop);
2341 Branch(&done, eq, length, Operand(zero_reg));
2342 bind(&align_loop_1);
2343 And(scratch, src, kPointerSize - 1);
2344 Branch(&word_loop, eq, scratch, Operand(zero_reg));
2345 lbu(scratch, MemOperand(src));
2346 Addu(src, src, 1);
2347 sb(scratch, MemOperand(dst));
2348 Addu(dst, dst, 1);
2349 Subu(length, length, Operand(1));
2350 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
2351
2352 // Copy bytes in word size chunks.
2353 bind(&word_loop);
2354 if (FLAG_debug_code) {
2355 And(scratch, src, kPointerSize - 1);
2356 Assert(eq, "Expecting alignment for CopyBytes",
2357 scratch, Operand(zero_reg));
2358 }
2359 Branch(&byte_loop, lt, length, Operand(kPointerSize));
2360 lw(scratch, MemOperand(src));
2361 Addu(src, src, kPointerSize);
2362
2363 // TODO(kalmard) check if this can be optimized to use sw in most cases.
2364 // Can't use unaligned access - copy byte by byte.
2365 sb(scratch, MemOperand(dst, 0));
2366 srl(scratch, scratch, 8);
2367 sb(scratch, MemOperand(dst, 1));
2368 srl(scratch, scratch, 8);
2369 sb(scratch, MemOperand(dst, 2));
2370 srl(scratch, scratch, 8);
2371 sb(scratch, MemOperand(dst, 3));
2372 Addu(dst, dst, 4);
2373
2374 Subu(length, length, Operand(kPointerSize));
2375 Branch(&word_loop);
2376
2377 // Copy the last bytes if any left.
2378 bind(&byte_loop);
2379 Branch(&done, eq, length, Operand(zero_reg));
2380 bind(&byte_loop_1);
2381 lbu(scratch, MemOperand(src));
2382 Addu(src, src, 1);
2383 sb(scratch, MemOperand(dst));
2384 Addu(dst, dst, 1);
2385 Subu(length, length, Operand(1));
2386 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
2387 bind(&done);
2388 }
2389
2390
2221 void MacroAssembler::CheckMap(Register obj, 2391 void MacroAssembler::CheckMap(Register obj,
2222 Register scratch, 2392 Register scratch,
2223 Handle<Map> map, 2393 Handle<Map> map,
2224 Label* fail, 2394 Label* fail,
2225 bool is_heap_object) { 2395 bool is_heap_object) {
2226 if (!is_heap_object) { 2396 if (!is_heap_object) {
2227 JumpIfSmi(obj, fail); 2397 JumpIfSmi(obj, fail);
2228 } 2398 }
2229 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 2399 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2230 li(at, Operand(map)); 2400 li(at, Operand(map));
2231 Branch(fail, ne, scratch, Operand(at)); 2401 Branch(fail, ne, scratch, Operand(at));
2232 } 2402 }
2233 2403
2234 2404
2235 void MacroAssembler::CheckMap(Register obj, 2405 void MacroAssembler::CheckMap(Register obj,
2236 Register scratch, 2406 Register scratch,
2237 Heap::RootListIndex index, 2407 Heap::RootListIndex index,
2238 Label* fail, 2408 Label* fail,
2239 bool is_heap_object) { 2409 bool is_heap_object) {
2240 if (!is_heap_object) { 2410 if (!is_heap_object) {
2241 JumpIfSmi(obj, fail); 2411 JumpIfSmi(obj, fail);
2242 } 2412 }
2243 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 2413 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2244 LoadRoot(at, index); 2414 LoadRoot(at, index);
2245 Branch(fail, ne, scratch, Operand(at)); 2415 Branch(fail, ne, scratch, Operand(at));
2246 } 2416 }
2247 2417
2248 2418
2419 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
2420 if (IsMipsSoftFloatABI) {
2421 mtc1(v0, dst);
2422 mtc1(v1, FPURegister::from_code(dst.code() + 1));
2423 } else {
2424 if (!dst.is(f0)) {
2425 mov_d(dst, f0); // Reg f0 is o32 ABI FP return value.
2426 }
2427 }
2428 }
2429
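GetCFunctionDoubleResult encodes the o32 return convention: under the soft-float ABI a double comes back in the v0/v1 GPR pair and is moved into the even/odd FPU register pair with two mtc1 instructions, while under the hard-float ABI it is already in f0.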
2430
2249 // ----------------------------------------------------------------------------- 2431 // -----------------------------------------------------------------------------
2250 // JavaScript invokes 2432 // JavaScript invokes.
2251 2433
2252 void MacroAssembler::InvokePrologue(const ParameterCount& expected, 2434 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2253 const ParameterCount& actual, 2435 const ParameterCount& actual,
2254 Handle<Code> code_constant, 2436 Handle<Code> code_constant,
2255 Register code_reg, 2437 Register code_reg,
2256 Label* done, 2438 Label* done,
2257 InvokeFlag flag, 2439 InvokeFlag flag,
2258 const CallWrapper& call_wrapper) { 2440 const CallWrapper& call_wrapper) {
2259 bool definitely_matches = false; 2441 bool definitely_matches = false;
2260 Label regular_invoke; 2442 Label regular_invoke;
(...skipping 22 matching lines...)
2283 if (expected.immediate() == sentinel) { 2465 if (expected.immediate() == sentinel) {
2284 // Don't worry about adapting arguments for builtins that 2466 // Don't worry about adapting arguments for builtins that
2285 // don't want that done. Skip adaptation code by making it look 2467 // don't want that done. Skip adaptation code by making it look
2286 // like we have a match between expected and actual number of 2468 // like we have a match between expected and actual number of
2287 // arguments. 2469 // arguments.
2288 definitely_matches = true; 2470 definitely_matches = true;
2289 } else { 2471 } else {
2290 li(a2, Operand(expected.immediate())); 2472 li(a2, Operand(expected.immediate()));
2291 } 2473 }
2292 } 2474 }
2475 } else if (actual.is_immediate()) {
2476 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
2477 li(a0, Operand(actual.immediate()));
2293 } else { 2478 } else {
2294 if (actual.is_immediate()) { 2479 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
2295 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
2296 li(a0, Operand(actual.immediate()));
2297 } else {
2298 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
2299 }
2300 } 2480 }
2301 2481
2302 if (!definitely_matches) { 2482 if (!definitely_matches) {
2303 if (!code_constant.is_null()) { 2483 if (!code_constant.is_null()) {
2304 li(a3, Operand(code_constant)); 2484 li(a3, Operand(code_constant));
2305 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); 2485 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
2306 } 2486 }
2307 2487
2308 Handle<Code> adaptor = 2488 Handle<Code> adaptor =
2309 isolate()->builtins()->ArgumentsAdaptorTrampoline(); 2489 isolate()->builtins()->ArgumentsAdaptorTrampoline();
(...skipping 174 matching lines...)
2484 2664
2485 void MacroAssembler::GetObjectType(Register object, 2665 void MacroAssembler::GetObjectType(Register object,
2486 Register map, 2666 Register map,
2487 Register type_reg) { 2667 Register type_reg) {
2488 lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); 2668 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
2489 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); 2669 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2490 } 2670 }
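Note on GetObjectType: it leaves the map in 'map' and the instance-type byte in 'type_reg', ready for range checks. A typical follow-up (sketch; the label and register choices are hypothetical):

    masm->GetObjectType(a0, t0, t1);
    masm->Branch(&not_string, hs, t1, Operand(FIRST_NONSTRING_TYPE));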
2491 2671
2492 2672
2493 // ----------------------------------------------------------------------------- 2673 // -----------------------------------------------------------------------------
2494 // Runtime calls 2674 // Runtime calls.
2495 2675
2496 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, 2676 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
2497 Register r1, const Operand& r2) { 2677 Register r1, const Operand& r2) {
2498 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 2678 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
2499 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); 2679 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
2500 } 2680 }
2501 2681
2502 2682
2503 void MacroAssembler::TailCallStub(CodeStub* stub) { 2683 void MacroAssembler::TailCallStub(CodeStub* stub) {
2504 ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs 2684 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
2505 Jump(stub->GetCode(), RelocInfo::CODE_TARGET); 2685 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
2506 } 2686 }
2507 2687
2508 2688
2509 void MacroAssembler::IllegalOperation(int num_arguments) { 2689 void MacroAssembler::IllegalOperation(int num_arguments) {
2510 if (num_arguments > 0) { 2690 if (num_arguments > 0) {
2511 addiu(sp, sp, num_arguments * kPointerSize); 2691 addiu(sp, sp, num_arguments * kPointerSize);
2512 } 2692 }
2513 LoadRoot(v0, Heap::kUndefinedValueRootIndex); 2693 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
2514 } 2694 }
(...skipping 45 matching lines...)
2560 li(mask_reg, HeapNumber::kExponentMask); 2740 li(mask_reg, HeapNumber::kExponentMask);
2561 2741
2562 And(exponent, exponent, mask_reg); 2742 And(exponent, exponent, mask_reg);
2563 Branch(not_number, eq, exponent, Operand(mask_reg)); 2743 Branch(not_number, eq, exponent, Operand(mask_reg));
2564 } 2744 }
2565 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); 2745 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
2566 bind(&done); 2746 bind(&done);
2567 } 2747 }
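Note on the exponent test above: an IEEE-754 double whose exponent field is all ones encodes NaN or an infinity, so masking the high word with kExponentMask and comparing against the mask filters those values out. Equivalent C++ (a sketch over the high word of the double):

    bool IsNaNOrInfinity(uint32_t high_word) {
      // All exponent bits set <=> NaN or +/-Infinity.
      return (high_word & HeapNumber::kExponentMask) == HeapNumber::kExponentMask;
    }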
2568 2748
2569 2749
2570
2571 void MacroAssembler::SmiToDoubleFPURegister(Register smi, 2750 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
2572 FPURegister value, 2751 FPURegister value,
2573 Register scratch1) { 2752 Register scratch1) {
2574 sra(scratch1, smi, kSmiTagSize); 2753 sra(scratch1, smi, kSmiTagSize);
2575 mtc1(scratch1, value); 2754 mtc1(scratch1, value);
2576 cvt_d_w(value, value); 2755 cvt_d_w(value, value);
2577 } 2756 }
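Note on SmiToDoubleFPURegister: on 32-bit targets the smi tag occupies the low bit, so an arithmetic shift right recovers the integer before the int-to-double conversion. The same computation in C++ terms (sketch; 'smi_bits' is a hypothetical int32_t holding the tagged value):

    int32_t untagged = smi_bits >> kSmiTagSize;     // sra: drop the tag bit
    double result = static_cast<double>(untagged);  // mtc1 + cvt_d_w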
2578 2757
2579 2758
2759 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
2760 Register left,
2761 Register right,
2762 Register overflow_dst,
2763 Register scratch) {
2764 ASSERT(!dst.is(overflow_dst));
2765 ASSERT(!dst.is(scratch));
2766 ASSERT(!overflow_dst.is(scratch));
2767 ASSERT(!overflow_dst.is(left));
2768 ASSERT(!overflow_dst.is(right));
2769 ASSERT(!left.is(right));
2770
2771 // TODO(kalmard): There must be a way to optimize dst == left and dst == right
2772 // cases.
2773
2774 if (dst.is(left)) {
2775 addu(overflow_dst, left, right);
2776 xor_(dst, overflow_dst, left);
2777 xor_(scratch, overflow_dst, right);
2778 and_(scratch, scratch, dst);
2779 mov(dst, overflow_dst);
2780 mov(overflow_dst, scratch);
2781 } else if (dst.is(right)) {
2782 addu(overflow_dst, left, right);
2783 xor_(dst, overflow_dst, right);
2784 xor_(scratch, overflow_dst, left);
2785 and_(scratch, scratch, dst);
2786 mov(dst, overflow_dst);
2787 mov(overflow_dst, scratch);
2788 } else {
2789 addu(dst, left, right);
2790 xor_(overflow_dst, dst, left);
2791 xor_(scratch, dst, right);
2792 and_(overflow_dst, scratch, overflow_dst);
2793 }
2794 }
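Note on the overflow test: signed addition overflows exactly when both operands have the same sign and the sum's sign differs, which is what the two xors plus the and compute; overflow_dst ends up negative iff overflow occurred. The same check in plain C++ (a sketch, not part of the patch):

    bool AddOverflows(int32_t left, int32_t right) {
      int32_t sum = static_cast<int32_t>(
          static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
      // Sign bit set in both (sum ^ left) and (sum ^ right) <=> overflow.
      return ((sum ^ left) & (sum ^ right)) < 0;
    }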
2795
2796
2797 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
2798 Register left,
2799 Register right,
2800 Register overflow_dst,
2801 Register scratch) {
2802 ASSERT(!dst.is(overflow_dst));
2803 ASSERT(!dst.is(scratch));
2804 ASSERT(!overflow_dst.is(scratch));
2805 ASSERT(!overflow_dst.is(left));
2806 ASSERT(!overflow_dst.is(right));
2807 ASSERT(!left.is(right));
2808 ASSERT(!scratch.is(left));
2809 ASSERT(!scratch.is(right));
2810
2811 // TODO(kalmard): There must be a way to optimize dst == left and dst == right
2812 // cases.
2813
2814 if (dst.is(left)) {
2815 subu(overflow_dst, left, right);
2816 xor_(scratch, overflow_dst, left);
2817 xor_(dst, left, right);
2818 and_(scratch, scratch, dst);
2819 mov(dst, overflow_dst);
2820 mov(overflow_dst, scratch);
2821 } else if (dst.is(right)) {
2822 subu(overflow_dst, left, right);
2823 xor_(dst, left, right);
2824 xor_(scratch, overflow_dst, left);
2825 and_(scratch, scratch, dst);
2826 mov(dst, overflow_dst);
2827 mov(overflow_dst, scratch);
2828 } else {
2829 subu(dst, left, right);
2830 xor_(overflow_dst, dst, left);
2831 xor_(scratch, left, right);
2832 and_(overflow_dst, scratch, overflow_dst);
2833 }
2834 }
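Note: subtraction overflows only when the operands differ in sign and the result's sign differs from the minuend, matching the (dst ^ left) & (left ^ right) computed above. In C++ (sketch):

    bool SubOverflows(int32_t left, int32_t right) {
      int32_t diff = static_cast<int32_t>(
          static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
      return ((diff ^ left) & (left ^ right)) < 0;
    }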
2835
2836
2580 void MacroAssembler::CallRuntime(const Runtime::Function* f, 2837 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2581 int num_arguments) { 2838 int num_arguments) {
2582 // All parameters are on the stack. v0 has the return value after call. 2839 // All parameters are on the stack. v0 has the return value after call.
2583 2840
2584 // If the expected number of arguments of the runtime function is 2841 // If the expected number of arguments of the runtime function is
2585 // constant, we check that the actual number of arguments match the 2842 // constant, we check that the actual number of arguments match the
2586 // expectation. 2843 // expectation.
2587 if (f->nargs >= 0 && f->nargs != num_arguments) { 2844 if (f->nargs >= 0 && f->nargs != num_arguments) {
2588 IllegalOperation(num_arguments); 2845 IllegalOperation(num_arguments);
2589 return; 2846 return;
(...skipping 125 matching lines...)
2715 if (FLAG_native_code_counters && counter->Enabled()) { 2972 if (FLAG_native_code_counters && counter->Enabled()) {
2716 li(scratch2, Operand(ExternalReference(counter))); 2973 li(scratch2, Operand(ExternalReference(counter)));
2717 lw(scratch1, MemOperand(scratch2)); 2974 lw(scratch1, MemOperand(scratch2));
2718 Subu(scratch1, scratch1, Operand(value)); 2975 Subu(scratch1, scratch1, Operand(value));
2719 sw(scratch1, MemOperand(scratch2)); 2976 sw(scratch1, MemOperand(scratch2));
2720 } 2977 }
2721 } 2978 }
2722 2979
2723 2980
2724 // ----------------------------------------------------------------------------- 2981 // -----------------------------------------------------------------------------
2725 // Debugging 2982 // Debugging.
2726 2983
2727 void MacroAssembler::Assert(Condition cc, const char* msg, 2984 void MacroAssembler::Assert(Condition cc, const char* msg,
2728 Register rs, Operand rt) { 2985 Register rs, Operand rt) {
2729 if (FLAG_debug_code) 2986 if (emit_debug_code())
2730 Check(cc, msg, rs, rt); 2987 Check(cc, msg, rs, rt);
2731 } 2988 }
2732 2989
2733 2990
2734 void MacroAssembler::AssertRegisterIsRoot(Register reg, 2991 void MacroAssembler::AssertRegisterIsRoot(Register reg,
2735 Heap::RootListIndex index) { 2992 Heap::RootListIndex index) {
2736 if (FLAG_debug_code) { 2993 if (emit_debug_code()) {
2737 LoadRoot(at, index); 2994 LoadRoot(at, index);
2738 Check(eq, "Register did not match expected root", reg, Operand(at)); 2995 Check(eq, "Register did not match expected root", reg, Operand(at));
2739 } 2996 }
2740 } 2997 }
2741 2998
2742 2999
2743 void MacroAssembler::AssertFastElements(Register elements) { 3000 void MacroAssembler::AssertFastElements(Register elements) {
2744 if (FLAG_debug_code) { 3001 if (emit_debug_code()) {
2745 ASSERT(!elements.is(at)); 3002 ASSERT(!elements.is(at));
2746 Label ok; 3003 Label ok;
2747 Push(elements); 3004 push(elements);
2748 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); 3005 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2749 LoadRoot(at, Heap::kFixedArrayMapRootIndex); 3006 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2750 Branch(&ok, eq, elements, Operand(at)); 3007 Branch(&ok, eq, elements, Operand(at));
2751 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); 3008 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
2752 Branch(&ok, eq, elements, Operand(at)); 3009 Branch(&ok, eq, elements, Operand(at));
2753 Abort("JSObject with fast elements map has slow elements"); 3010 Abort("JSObject with fast elements map has slow elements");
2754 bind(&ok); 3011 bind(&ok);
2755 Pop(elements); 3012 pop(elements);
2756 } 3013 }
2757 } 3014 }
2758 3015
2759 3016
2760 void MacroAssembler::Check(Condition cc, const char* msg, 3017 void MacroAssembler::Check(Condition cc, const char* msg,
2761 Register rs, Operand rt) { 3018 Register rs, Operand rt) {
2762 Label L; 3019 Label L;
2763 Branch(&L, cc, rs, rt); 3020 Branch(&L, cc, rs, rt);
2764 Abort(msg); 3021 Abort(msg);
2765 // will not return here 3022 // Will not return here.
2766 bind(&L); 3023 bind(&L);
2767 } 3024 }
2768 3025
2769 3026
2770 void MacroAssembler::Abort(const char* msg) { 3027 void MacroAssembler::Abort(const char* msg) {
2771 Label abort_start; 3028 Label abort_start;
2772 bind(&abort_start); 3029 bind(&abort_start);
2773 // We want to pass the msg string like a smi to avoid GC 3030 // We want to pass the msg string like a smi to avoid GC
2774 // problems; however, msg is not guaranteed to be aligned 3031 // problems; however, msg is not guaranteed to be aligned
2775 // properly. Instead, we pass an aligned pointer that is 3032 // properly. Instead, we pass an aligned pointer that is
2776 // a proper v8 smi, but also pass the alignment difference 3033 // a proper v8 smi, but also pass the alignment difference
2777 // from the real pointer as a smi. 3034 // from the real pointer as a smi.
2778 intptr_t p1 = reinterpret_cast<intptr_t>(msg); 3035 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2779 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; 3036 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2780 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); 3037 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2781 #ifdef DEBUG 3038 #ifdef DEBUG
2782 if (msg != NULL) { 3039 if (msg != NULL) {
2783 RecordComment("Abort message: "); 3040 RecordComment("Abort message: ");
2784 RecordComment(msg); 3041 RecordComment(msg);
2785 } 3042 }
2786 #endif 3043 #endif
2787 // Disable stub call restrictions to always allow calls to abort. 3044 // Disable stub call restrictions to always allow calls to abort.
2788 AllowStubCallsScope allow_scope(this, true); 3045 AllowStubCallsScope allow_scope(this, true);
2789 3046
2790 li(a0, Operand(p0)); 3047 li(a0, Operand(p0));
2791 Push(a0); 3048 push(a0);
2792 li(a0, Operand(Smi::FromInt(p1 - p0))); 3049 li(a0, Operand(Smi::FromInt(p1 - p0)));
2793 Push(a0); 3050 push(a0);
2794 CallRuntime(Runtime::kAbort, 2); 3051 CallRuntime(Runtime::kAbort, 2);
2795 // will not return here 3052 // Will not return here.
2796 if (is_trampoline_pool_blocked()) { 3053 if (is_trampoline_pool_blocked()) {
2797 // If the calling code cares about the exact number of 3054 // If the calling code cares about the exact number of
2798 // instructions generated, we insert padding here to keep the size 3055 // instructions generated, we insert padding here to keep the size
2799 // of the Abort macro constant. 3056 // of the Abort macro constant.
2800 // Currently in debug mode with debug_code enabled the number of 3057 // Currently in debug mode with debug_code enabled the number of
2801 // generated instructions is 14, so we use this as a maximum value. 3058 // generated instructions is 14, so we use this as a maximum value.
2802 static const int kExpectedAbortInstructions = 14; 3059 static const int kExpectedAbortInstructions = 14;
2803 int abort_instructions = InstructionsGeneratedSince(&abort_start); 3060 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2804 ASSERT(abort_instructions <= kExpectedAbortInstructions); 3061 ASSERT(abort_instructions <= kExpectedAbortInstructions);
2805 while (abort_instructions++ < kExpectedAbortInstructions) { 3062 while (abort_instructions++ < kExpectedAbortInstructions) {
2806 nop(); 3063 nop();
2807 } 3064 }
2808 } 3065 }
2809 } 3066 }
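Note on the smi trick in Abort: p0 is p1 rounded down to a valid smi-tagged pointer, and the small difference rides along as a genuine smi, so the GC never sees an unaligned "pointer". Recovery on the runtime side then looks like (sketch; the names are hypothetical):

    // p0_value arrives as the aligned, smi-shaped pointer and delta_value as
    // the untagged value of Smi::FromInt(p1 - p0).
    const char* message =
        reinterpret_cast<const char*>(p0_value + delta_value);  // == original p1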
2810 3067
2811 3068
2812 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { 3069 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2813 if (context_chain_length > 0) { 3070 if (context_chain_length > 0) {
2814 // Move up the chain of contexts to the context containing the slot. 3071 // Move up the chain of contexts to the context containing the slot.
2815 lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX))); 3072 lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
2816 // Load the function context (which is the incoming, outer context). 3073 // Load the function context (which is the incoming, outer context).
2817 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); 3074 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2818 for (int i = 1; i < context_chain_length; i++) { 3075 for (int i = 1; i < context_chain_length; i++) {
2819 lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX))); 3076 lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2820 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); 3077 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2821 } 3078 }
2822 // The context may be an intermediate context, not a function context. 3079 } else {
2823 lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); 3080 // Slot is in the current function context. Move it into the
2824 } else { // Slot is in the current function context. 3081 // destination register in case we store into it (the write barrier
2825 // The context may be an intermediate context, not a function context. 3082 // cannot be allowed to destroy the context in esi).
2826 lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX))); 3083 Move(dst, cp);
3084 }
3085
3086 // We should not have found a 'with' context by walking the context chain
3087 // (i.e., the static scope chain and runtime context chain do not agree).
3088 // A variable occurring in such a scope should have slot type LOOKUP and
3089 // not CONTEXT.
3090 if (emit_debug_code()) {
3091 lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
3092 Check(eq, "Yo dawg, I heard you liked function contexts "
3093 "so I put function contexts in all your contexts",
3094 dst, Operand(t9));
2827 } 3095 }
2828 } 3096 }
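Note on the chain walk in LoadContext: each context stores its closure in the CLOSURE_INDEX slot, and that closure's kContextOffset field points at the next outer context. Roughly, in C++ terms (sketch; accessor names mirror the slot layout and are illustrative only):

    Context* ctx = current_context;          // what cp holds
    for (int i = 0; i < chain_length; i++) {
      JSFunction* closure = ctx->closure();  // CLOSURE_INDEX slot
      ctx = closure->context();              // JSFunction::kContextOffset
    }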
2829 3097
2830 3098
2831 void MacroAssembler::LoadGlobalFunction(int index, Register function) { 3099 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2832 // Load the global or builtins object from the current context. 3100 // Load the global or builtins object from the current context.
2833 lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); 3101 lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2834 // Load the global context from the global or builtins object. 3102 // Load the global context from the global or builtins object.
2835 lw(function, FieldMemOperand(function, 3103 lw(function, FieldMemOperand(function,
2836 GlobalObject::kGlobalContextOffset)); 3104 GlobalObject::kGlobalContextOffset));
2837 // Load the function from the global context. 3105 // Load the function from the global context.
2838 lw(function, MemOperand(function, Context::SlotOffset(index))); 3106 lw(function, MemOperand(function, Context::SlotOffset(index)));
2839 } 3107 }
2840 3108
2841 3109
2842 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, 3110 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2843 Register map, 3111 Register map,
2844 Register scratch) { 3112 Register scratch) {
2845 // Load the initial map. The global functions all have initial maps. 3113 // Load the initial map. The global functions all have initial maps.
2846 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3114 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2847 if (FLAG_debug_code) { 3115 if (emit_debug_code()) {
2848 Label ok, fail; 3116 Label ok, fail;
2849 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false); 3117 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
2850 Branch(&ok); 3118 Branch(&ok);
2851 bind(&fail); 3119 bind(&fail);
2852 Abort("Global functions must have initial map"); 3120 Abort("Global functions must have initial map");
2853 bind(&ok); 3121 bind(&ok);
2854 } 3122 }
2855 } 3123 }
2856 3124
2857 3125
(...skipping 26 matching lines...)
2884 sll(t8, a0, kPointerSizeLog2); 3152 sll(t8, a0, kPointerSizeLog2);
2885 addu(hold_argv, sp, t8); 3153 addu(hold_argv, sp, t8);
2886 addiu(hold_argv, hold_argv, -kPointerSize); 3154 addiu(hold_argv, hold_argv, -kPointerSize);
2887 3155
2888 // Compute callee's stack pointer before making changes and save it as 3156 // Compute callee's stack pointer before making changes and save it as
2889 // t9 register so that it is restored as sp register on exit, thereby 3157 // t9 register so that it is restored as sp register on exit, thereby
2890 // popping the args. 3158 // popping the args.
2891 // t9 = sp + kPointerSize * #args 3159 // t9 = sp + kPointerSize * #args
2892 addu(t9, sp, t8); 3160 addu(t9, sp, t8);
2893 3161
2894 // Compute the argv pointer and keep it in a callee-saved register.
2895 // This only seems to be needed for crankshaft and may cause problems
2896 // so it's disabled for now.
2897 // Subu(s6, t9, Operand(kPointerSize));
2898
2899 // Align the stack at this point. 3162 // Align the stack at this point.
2900 AlignStack(0); 3163 AlignStack(0);
2901 3164
2902 // Save registers. 3165 // Save registers.
2903 addiu(sp, sp, -12); 3166 addiu(sp, sp, -12);
2904 sw(t9, MemOperand(sp, 8)); 3167 sw(t9, MemOperand(sp, 8));
2905 sw(ra, MemOperand(sp, 4)); 3168 sw(ra, MemOperand(sp, 4));
2906 sw(fp, MemOperand(sp, 0)); 3169 sw(fp, MemOperand(sp, 0));
2907 mov(fp, sp); // Set up new frame pointer. 3170 mov(fp, sp); // Set up new frame pointer.
2908 3171
2909 li(t8, Operand(CodeObject())); 3172 li(t8, Operand(CodeObject()));
2910 Push(t8); // Accessed from ExitFrame::code_slot. 3173 push(t8); // Accessed from ExitFrame::code_slot.
2911 3174
2912 // Save the frame pointer and the context in top. 3175 // Save the frame pointer and the context in top.
2913 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); 3176 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
2914 sw(fp, MemOperand(t8)); 3177 sw(fp, MemOperand(t8));
2915 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate()))); 3178 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
2916 sw(cp, MemOperand(t8)); 3179 sw(cp, MemOperand(t8));
2917 3180
2918 // Set up argc and the builtin function in callee-saved registers. 3181 // Set up argc and the builtin function in callee-saved registers.
2919 mov(hold_argc, a0); 3182 mov(hold_argc, a0);
2920 mov(hold_function, a1); 3183 mov(hold_function, a1);
(...skipping 164 matching lines...)
3085 } 3348 }
3086 3349
3087 3350
3088 void MacroAssembler::AbortIfNotSmi(Register object) { 3351 void MacroAssembler::AbortIfNotSmi(Register object) {
3089 STATIC_ASSERT(kSmiTag == 0); 3352 STATIC_ASSERT(kSmiTag == 0);
3090 andi(at, object, kSmiTagMask); 3353 andi(at, object, kSmiTagMask);
3091 Assert(eq, "Operand is not a smi", at, Operand(zero_reg)); 3354 Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
3092 } 3355 }
3093 3356
3094 3357
3358 void MacroAssembler::AbortIfNotString(Register object) {
3359 STATIC_ASSERT(kSmiTag == 0);
3360 And(t0, object, Operand(kSmiTagMask));
3361 Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
3362 push(object);
3363 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
3364 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
3365 Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
3366 pop(object);
3367 }
3368
3369
3095 void MacroAssembler::AbortIfNotRootValue(Register src, 3370 void MacroAssembler::AbortIfNotRootValue(Register src,
3096 Heap::RootListIndex root_value_index, 3371 Heap::RootListIndex root_value_index,
3097 const char* message) { 3372 const char* message) {
3098 ASSERT(!src.is(at)); 3373 ASSERT(!src.is(at));
3099 LoadRoot(at, root_value_index); 3374 LoadRoot(at, root_value_index);
3100 Assert(eq, message, src, Operand(at)); 3375 Assert(eq, message, src, Operand(at));
3101 } 3376 }
3102 3377
3103 3378
3104 void MacroAssembler::JumpIfNotHeapNumber(Register object, 3379 void MacroAssembler::JumpIfNotHeapNumber(Register object,
(...skipping 71 matching lines...)
3176 And(scratch, type, Operand(kFlatAsciiStringMask)); 3451 And(scratch, type, Operand(kFlatAsciiStringMask));
3177 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag)); 3452 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
3178 } 3453 }
3179 3454
3180 3455
3181 static const int kRegisterPassedArguments = 4; 3456 static const int kRegisterPassedArguments = 4;
3182 3457
3183 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { 3458 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
3184 int frame_alignment = ActivationFrameAlignment(); 3459 int frame_alignment = ActivationFrameAlignment();
3185 3460
3186 // Reserve space for Isolate address which is always passed as last parameter
3187 num_arguments += 1;
3188
3189 // Up to four simple arguments are passed in registers a0..a3. 3461 // Up to four simple arguments are passed in registers a0..a3.
3190 // Those four arguments must have reserved argument slots on the stack for 3462 // Those four arguments must have reserved argument slots on the stack for
3191 // mips, even though those argument slots are not normally used. 3463 // mips, even though those argument slots are not normally used.
3192 // Remaining arguments are pushed on the stack, above (higher address than) 3464 // Remaining arguments are pushed on the stack, above (higher address than)
3193 // the argument slots. 3465 // the argument slots.
3194 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); 3466 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
3195 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? 3467 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
3196 0 : num_arguments - kRegisterPassedArguments) + 3468 0 : num_arguments - kRegisterPassedArguments) +
3197 (StandardFrameConstants::kCArgsSlotsSize / 3469 (StandardFrameConstants::kCArgsSlotsSize /
3198 kPointerSize); 3470 kPointerSize);
3199 if (frame_alignment > kPointerSize) { 3471 if (frame_alignment > kPointerSize) {
3200 // Make stack end at alignment and make room for num_arguments - 4 words 3472 // Make stack end at alignment and make room for num_arguments - 4 words
3201 // and the original value of sp. 3473 // and the original value of sp.
3202 mov(scratch, sp); 3474 mov(scratch, sp);
3203 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); 3475 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3204 ASSERT(IsPowerOf2(frame_alignment)); 3476 ASSERT(IsPowerOf2(frame_alignment));
3205 And(sp, sp, Operand(-frame_alignment)); 3477 And(sp, sp, Operand(-frame_alignment));
3206 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); 3478 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3207 } else { 3479 } else {
3208 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); 3480 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3209 } 3481 }
3210 } 3482 }
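Note on the slot arithmetic: o32 always reserves four argument slots on the stack even when the arguments travel in a0..a3, so only arguments past the fourth add extra words. A worked example (sketch; assumes kCArgsSlotsSize == 4 * kPointerSize as on o32):

    constexpr int kArgSlots = 4;                       // kCArgsSlotsSize / kPointerSize
    constexpr int kStackPassed = (6 - 4) + kArgSlots;  // num_arguments == 6 -> 6 words
    // Plus one extra word to remember the unaligned sp when realigning.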
3211 3483
3212 3484
3213 void MacroAssembler::CallCFunction(ExternalReference function, 3485 void MacroAssembler::CallCFunction(ExternalReference function,
3214 int num_arguments) { 3486 int num_arguments) {
3215 CallCFunctionHelper(no_reg, function, at, num_arguments); 3487 CallCFunctionHelper(no_reg, function, t8, num_arguments);
3216 } 3488 }
3217 3489
3218 3490
3219 void MacroAssembler::CallCFunction(Register function, 3491 void MacroAssembler::CallCFunction(Register function,
3220 Register scratch, 3492 Register scratch,
3221 int num_arguments) { 3493 int num_arguments) {
3222 CallCFunctionHelper(function, 3494 CallCFunctionHelper(function,
3223 ExternalReference::the_hole_value_location(isolate()), 3495 ExternalReference::the_hole_value_location(isolate()),
3224 scratch, 3496 scratch,
3225 num_arguments); 3497 num_arguments);
3226 } 3498 }
3227 3499
3228 3500
3229 void MacroAssembler::CallCFunctionHelper(Register function, 3501 void MacroAssembler::CallCFunctionHelper(Register function,
3230 ExternalReference function_reference, 3502 ExternalReference function_reference,
3231 Register scratch, 3503 Register scratch,
3232 int num_arguments) { 3504 int num_arguments) {
3233 // Push Isolate address as the last argument.
3234 if (num_arguments < kRegisterPassedArguments) {
3235 Register arg_to_reg[] = {a0, a1, a2, a3};
3236 Register r = arg_to_reg[num_arguments];
3237 li(r, Operand(ExternalReference::isolate_address()));
3238 } else {
3239 int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
3240 (StandardFrameConstants::kCArgsSlotsSize /
3241 kPointerSize);
3242 // Push Isolate address on the stack after the arguments.
3243 li(scratch, Operand(ExternalReference::isolate_address()));
3244 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3245 }
3246 num_arguments += 1;
3247
3248 // Make sure that the stack is aligned before calling a C function unless 3505 // Make sure that the stack is aligned before calling a C function unless
3249 // running in the simulator. The simulator has its own alignment check which 3506 // running in the simulator. The simulator has its own alignment check which
3250 // provides more information. 3507 // provides more information.
3251 // The argument slots are presumed to have been set up by 3508 // The argument slots are presumed to have been set up by
3252 // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI. 3509 // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
3253 3510
3254 #if defined(V8_HOST_ARCH_MIPS) 3511 #if defined(V8_HOST_ARCH_MIPS)
3255 if (emit_debug_code()) { 3512 if (emit_debug_code()) {
3256 int frame_alignment = OS::ActivationFrameAlignment(); 3513 int frame_alignment = OS::ActivationFrameAlignment();
3257 int frame_alignment_mask = frame_alignment - 1; 3514 int frame_alignment_mask = frame_alignment - 1;
3258 if (frame_alignment > kPointerSize) { 3515 if (frame_alignment > kPointerSize) {
3259 ASSERT(IsPowerOf2(frame_alignment)); 3516 ASSERT(IsPowerOf2(frame_alignment));
3260 Label alignment_as_expected; 3517 Label alignment_as_expected;
3261 And(at, sp, Operand(frame_alignment_mask)); 3518 And(at, sp, Operand(frame_alignment_mask));
3262 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); 3519 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
3263 // Don't use Check here, as it will call Runtime_Abort possibly 3520 // Don't use Check here, as it will call Runtime_Abort possibly
3264 // re-entering here. 3521 // re-entering here.
3265 stop("Unexpected alignment in CallCFunction"); 3522 stop("Unexpected alignment in CallCFunction");
3266 bind(&alignment_as_expected); 3523 bind(&alignment_as_expected);
3267 } 3524 }
3268 } 3525 }
3269 #endif // V8_HOST_ARCH_MIPS 3526 #endif // V8_HOST_ARCH_MIPS
3270 3527
3271 // Just call directly. The function called cannot cause a GC, or 3528 // Just call directly. The function called cannot cause a GC, or
3272 // allow preemption, so the return address in the link register 3529 // allow preemption, so the return address in the link register
3273 // stays correct. 3530 // stays correct.
3274 if (!function.is(t9)) { 3531
3532 if (function.is(no_reg)) {
3533 function = t9;
3534 li(function, Operand(function_reference));
3535 } else if (!function.is(t9)) {
3275 mov(t9, function); 3536 mov(t9, function);
3276 function = t9; 3537 function = t9;
3277 } 3538 }
3278 3539
3279 if (function.is(no_reg)) {
3280 li(t9, Operand(function_reference));
3281 function = t9;
3282 }
3283
3284 Call(function); 3540 Call(function);
3285 3541
3286 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); 3542 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
3287 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? 3543 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
3288 0 : num_arguments - kRegisterPassedArguments) + 3544 0 : num_arguments - kRegisterPassedArguments) +
3289 (StandardFrameConstants::kCArgsSlotsSize / 3545 (StandardFrameConstants::kCArgsSlotsSize /
3290 kPointerSize); 3546 kPointerSize);
3291 3547
3292 if (OS::ActivationFrameAlignment() > kPointerSize) { 3548 if (OS::ActivationFrameAlignment() > kPointerSize) {
3293 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); 3549 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3294 } else { 3550 } else {
3295 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); 3551 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
3296 } 3552 }
3297 } 3553 }
3298 3554
3299 3555
3300 #undef BRANCH_ARGS_CHECK 3556 #undef BRANCH_ARGS_CHECK
3301 3557
3302 3558
3303 #ifdef ENABLE_DEBUGGER_SUPPORT
3304 CodePatcher::CodePatcher(byte* address, int instructions) 3559 CodePatcher::CodePatcher(byte* address, int instructions)
3305 : address_(address), 3560 : address_(address),
3306 instructions_(instructions), 3561 instructions_(instructions),
3307 size_(instructions * Assembler::kInstrSize), 3562 size_(instructions * Assembler::kInstrSize),
3308 masm_(address, size_ + Assembler::kGap) { 3563 masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
3309 // Create a new macro assembler pointing to the address of the code to patch. 3564 // Create a new macro assembler pointing to the address of the code to patch.
3310 // The size is adjusted with kGap in order for the assembler to generate size 3565 // The size is adjusted with kGap in order for the assembler to generate size
3311 // bytes of instructions without failing with buffer size constraints. 3566 // bytes of instructions without failing with buffer size constraints.
3312 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 3567 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3313 } 3568 }
3314 3569
3315 3570
3316 CodePatcher::~CodePatcher() { 3571 CodePatcher::~CodePatcher() {
3317 // Indicate that code has changed. 3572 // Indicate that code has changed.
3318 CPU::FlushICache(address_, size_); 3573 CPU::FlushICache(address_, size_);
3319 3574
3320 // Check that the code was patched as expected. 3575 // Check that the code was patched as expected.
3321 ASSERT(masm_.pc_ == address_ + size_); 3576 ASSERT(masm_.pc_ == address_ + size_);
3322 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 3577 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3323 } 3578 }
3324 3579
3325 3580
3326 void CodePatcher::Emit(Instr x) { 3581 void CodePatcher::Emit(Instr instr) {
3327 masm()->emit(x); 3582 masm()->emit(instr);
3328 } 3583 }
3329 3584
3330 3585
3331 void CodePatcher::Emit(Address addr) { 3586 void CodePatcher::Emit(Address addr) {
3332 masm()->emit(reinterpret_cast<Instr>(addr)); 3587 masm()->emit(reinterpret_cast<Instr>(addr));
3333 } 3588 }
3334 3589
3335 3590
3336 #endif // ENABLE_DEBUGGER_SUPPORT 3591 void CodePatcher::ChangeBranchCondition(Condition cond) {
3592 Instr instr = Assembler::instr_at(masm_.pc_);
3593 ASSERT(Assembler::IsBranch(instr));
3594 uint32_t opcode = Assembler::GetOpcodeField(instr);
3595 // Currently only the 'eq' and 'ne' cond values are supported and the simple
3596 // branch instructions (with opcode being the branch type).
3597 // There are some special cases (see Assembler::IsBranch()) so extending this
3598 // would be tricky.
3599 ASSERT(opcode == BEQ ||
3600 opcode == BNE ||
3601 opcode == BLEZ ||
3602 opcode == BGTZ ||
3603 opcode == BEQL ||
3604 opcode == BNEL ||
3605 opcode == BLEZL ||
3606 opcode == BGTZL);
3607 opcode = (cond == eq) ? BEQ : BNE;
3608 instr = (instr & ~kOpcodeMask) | opcode;
3609 masm_.emit(instr);
3610 }
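Note on the patching: MIPS I-type branches keep the opcode in the top six bits, and V8's mips Opcode enum values are already shifted into that position, so rewriting the condition is a single mask-and-or on the instruction word (sketch of the step above):

    Instr patched = (instr & ~kOpcodeMask) | BEQ;  // force the branch to 'eq'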
3337 3611
3338 3612
3339 } } // namespace v8::internal 3613 } } // namespace v8::internal
3340 3614
3341 #endif // V8_TARGET_ARCH_MIPS 3615 #endif // V8_TARGET_ARCH_MIPS