| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include <limits.h> // For LONG_MIN, LONG_MAX | 28 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 29 | 29 |
| 30 #include "v8.h" | 30 #include "v8.h" |
| 31 | 31 |
| 32 #if defined(V8_TARGET_ARCH_MIPS) | 32 #if defined(V8_TARGET_ARCH_MIPS) |
| 33 | 33 |
| 34 #include "bootstrapper.h" | 34 #include "bootstrapper.h" |
| 35 #include "codegen-inl.h" | 35 #include "codegen.h" |
| 36 #include "debug.h" | 36 #include "debug.h" |
| 37 #include "runtime.h" | 37 #include "runtime.h" |
| 38 | 38 |
| 39 namespace v8 { | 39 namespace v8 { |
| 40 namespace internal { | 40 namespace internal { |
| 41 | 41 |
| 42 MacroAssembler::MacroAssembler(void* buffer, int size) | 42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) |
| 43 : Assembler(buffer, size), | 43 : Assembler(arg_isolate, buffer, size), |
| 44 generating_stub_(false), | 44 generating_stub_(false), |
| 45 allow_stub_calls_(true), | 45 allow_stub_calls_(true) { |
| 46 code_object_(HEAP->undefined_value()) { | 46 if (isolate() != NULL) { |
| 47 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
| 48 isolate()); |
| 49 } |
| 47 } | 50 } |
| 48 | 51 |
| 49 | 52 |
| 50 // Arguments macros | 53 // Arguments macros. |
| 51 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 | 54 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 |
| 52 #define COND_ARGS cond, r1, r2 | 55 #define COND_ARGS cond, r1, r2 |
| 53 | 56 |
| 54 #define REGISTER_TARGET_BODY(Name) \ | 57 #define REGISTER_TARGET_BODY(Name) \ |
| 55 void MacroAssembler::Name(Register target, \ | 58 void MacroAssembler::Name(Register target, \ |
| 56 BranchDelaySlot bd) { \ | 59 BranchDelaySlot bd) { \ |
| 57 Name(Operand(target), bd); \ | 60 Name(Operand(target), bd); \ |
| 58 } \ | 61 } \ |
| 59 void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \ | 62 void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \ |
| 60 BranchDelaySlot bd) { \ | 63 BranchDelaySlot bd) { \ |
| (...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 154 Condition cond, | 157 Condition cond, |
| 155 Register src1, const Operand& src2) { | 158 Register src1, const Operand& src2) { |
| 156 Branch(2, NegateCondition(cond), src1, src2); | 159 Branch(2, NegateCondition(cond), src1, src2); |
| 157 sw(source, MemOperand(s6, index << kPointerSizeLog2)); | 160 sw(source, MemOperand(s6, index << kPointerSizeLog2)); |
| 158 } | 161 } |
| 159 | 162 |
| 160 | 163 |
| 161 void MacroAssembler::RecordWriteHelper(Register object, | 164 void MacroAssembler::RecordWriteHelper(Register object, |
| 162 Register address, | 165 Register address, |
| 163 Register scratch) { | 166 Register scratch) { |
| 164 if (FLAG_debug_code) { | 167 if (emit_debug_code()) { |
| 165 // Check that the object is not in new space. | 168 // Check that the object is not in new space. |
| 166 Label not_in_new_space; | 169 Label not_in_new_space; |
| 167 InNewSpace(object, scratch, ne, ¬_in_new_space); | 170 InNewSpace(object, scratch, ne, ¬_in_new_space); |
| 168 Abort("new-space object passed to RecordWriteHelper"); | 171 Abort("new-space object passed to RecordWriteHelper"); |
| 169 bind(¬_in_new_space); | 172 bind(¬_in_new_space); |
| 170 } | 173 } |
| 171 | 174 |
| 172 // Calculate page address: Clear bits from 0 to kPageSizeBits. | 175 // Calculate page address: Clear bits from 0 to kPageSizeBits. |
| 173 if (mips32r2) { | 176 if (mips32r2) { |
| 174 Ins(object, zero_reg, 0, kPageSizeBits); | 177 Ins(object, zero_reg, 0, kPageSizeBits); |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 223 // Add offset into the object. | 226 // Add offset into the object. |
| 224 Addu(scratch0, object, offset); | 227 Addu(scratch0, object, offset); |
| 225 | 228 |
| 226 // Record the actual write. | 229 // Record the actual write. |
| 227 RecordWriteHelper(object, scratch0, scratch1); | 230 RecordWriteHelper(object, scratch0, scratch1); |
| 228 | 231 |
| 229 bind(&done); | 232 bind(&done); |
| 230 | 233 |
| 231 // Clobber all input registers when running with the debug-code flag | 234 // Clobber all input registers when running with the debug-code flag |
| 232 // turned on to provoke errors. | 235 // turned on to provoke errors. |
| 233 if (FLAG_debug_code) { | 236 if (emit_debug_code()) { |
| 234 li(object, Operand(BitCast<int32_t>(kZapValue))); | 237 li(object, Operand(BitCast<int32_t>(kZapValue))); |
| 235 li(scratch0, Operand(BitCast<int32_t>(kZapValue))); | 238 li(scratch0, Operand(BitCast<int32_t>(kZapValue))); |
| 236 li(scratch1, Operand(BitCast<int32_t>(kZapValue))); | 239 li(scratch1, Operand(BitCast<int32_t>(kZapValue))); |
| 237 } | 240 } |
| 238 } | 241 } |
| 239 | 242 |
| 240 | 243 |
| 241 // Will clobber 4 registers: object, address, scratch, ip. The | 244 // Will clobber 4 registers: object, address, scratch, ip. The |
| 242 // register 'object' contains a heap object pointer. The heap object | 245 // register 'object' contains a heap object pointer. The heap object |
| 243 // tag is shifted away. | 246 // tag is shifted away. |
| (...skipping 11 matching lines...) Expand all Loading... |
| 255 // region marks for new space pages. | 258 // region marks for new space pages. |
| 256 InNewSpace(object, scratch, eq, &done); | 259 InNewSpace(object, scratch, eq, &done); |
| 257 | 260 |
| 258 // Record the actual write. | 261 // Record the actual write. |
| 259 RecordWriteHelper(object, address, scratch); | 262 RecordWriteHelper(object, address, scratch); |
| 260 | 263 |
| 261 bind(&done); | 264 bind(&done); |
| 262 | 265 |
| 263 // Clobber all input registers when running with the debug-code flag | 266 // Clobber all input registers when running with the debug-code flag |
| 264 // turned on to provoke errors. | 267 // turned on to provoke errors. |
| 265 if (FLAG_debug_code) { | 268 if (emit_debug_code()) { |
| 266 li(object, Operand(BitCast<int32_t>(kZapValue))); | 269 li(object, Operand(BitCast<int32_t>(kZapValue))); |
| 267 li(address, Operand(BitCast<int32_t>(kZapValue))); | 270 li(address, Operand(BitCast<int32_t>(kZapValue))); |
| 268 li(scratch, Operand(BitCast<int32_t>(kZapValue))); | 271 li(scratch, Operand(BitCast<int32_t>(kZapValue))); |
| 269 } | 272 } |
| 270 } | 273 } |
| 271 | 274 |
| 272 | 275 |
| 273 // ----------------------------------------------------------------------------- | 276 // ----------------------------------------------------------------------------- |
| 274 // Allocation support | 277 // Allocation support. |
| 275 | 278 |
| 276 | 279 |
| 277 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 280 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 278 Register scratch, | 281 Register scratch, |
| 279 Label* miss) { | 282 Label* miss) { |
| 280 Label same_contexts; | 283 Label same_contexts; |
| 281 | 284 |
| 282 ASSERT(!holder_reg.is(scratch)); | 285 ASSERT(!holder_reg.is(scratch)); |
| 283 ASSERT(!holder_reg.is(at)); | 286 ASSERT(!holder_reg.is(at)); |
| 284 ASSERT(!scratch.is(at)); | 287 ASSERT(!scratch.is(at)); |
| 285 | 288 |
| 286 // Load current lexical context from the stack frame. | 289 // Load current lexical context from the stack frame. |
| 287 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 290 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 288 // In debug mode, make sure the lexical context is set. | 291 // In debug mode, make sure the lexical context is set. |
| 289 #ifdef DEBUG | 292 #ifdef DEBUG |
| 290 Check(ne, "we should not have an empty lexical context", | 293 Check(ne, "we should not have an empty lexical context", |
| 291 scratch, Operand(zero_reg)); | 294 scratch, Operand(zero_reg)); |
| 292 #endif | 295 #endif |
| 293 | 296 |
| 294 // Load the global context of the current context. | 297 // Load the global context of the current context. |
| 295 int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; | 298 int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; |
| 296 lw(scratch, FieldMemOperand(scratch, offset)); | 299 lw(scratch, FieldMemOperand(scratch, offset)); |
| 297 lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); | 300 lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); |
| 298 | 301 |
| 299 // Check the context is a global context. | 302 // Check the context is a global context. |
| 300 if (FLAG_debug_code) { | 303 if (emit_debug_code()) { |
| 301 // TODO(119): Avoid push(holder_reg)/pop(holder_reg). | 304 // TODO(119): Avoid push(holder_reg)/pop(holder_reg). |
| 302 Push(holder_reg); // Temporarily save holder on the stack. | 305 push(holder_reg); // Temporarily save holder on the stack. |
| 303 // Read the first word and compare to the global_context_map. | 306 // Read the first word and compare to the global_context_map. |
| 304 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 307 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 305 LoadRoot(at, Heap::kGlobalContextMapRootIndex); | 308 LoadRoot(at, Heap::kGlobalContextMapRootIndex); |
| 306 Check(eq, "JSGlobalObject::global_context should be a global context.", | 309 Check(eq, "JSGlobalObject::global_context should be a global context.", |
| 307 holder_reg, Operand(at)); | 310 holder_reg, Operand(at)); |
| 308 Pop(holder_reg); // Restore holder. | 311 pop(holder_reg); // Restore holder. |
| 309 } | 312 } |
| 310 | 313 |
| 311 // Check if both contexts are the same. | 314 // Check if both contexts are the same. |
| 312 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); | 315 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
| 313 Branch(&same_contexts, eq, scratch, Operand(at)); | 316 Branch(&same_contexts, eq, scratch, Operand(at)); |
| 314 | 317 |
| 315 // Check the context is a global context. | 318 // Check the context is a global context. |
| 316 if (FLAG_debug_code) { | 319 if (emit_debug_code()) { |
| 317 // TODO(119): Avoid push(holder_reg)/pop(holder_reg). | 320 // TODO(119): Avoid push(holder_reg)/pop(holder_reg). |
| 318 Push(holder_reg); // Temporarily save holder on the stack. | 321 push(holder_reg); // Temporarily save holder on the stack. |
| 319 mov(holder_reg, at); // Move at to its holding place. | 322 mov(holder_reg, at); // Move at to its holding place. |
| 320 LoadRoot(at, Heap::kNullValueRootIndex); | 323 LoadRoot(at, Heap::kNullValueRootIndex); |
| 321 Check(ne, "JSGlobalProxy::context() should not be null.", | 324 Check(ne, "JSGlobalProxy::context() should not be null.", |
| 322 holder_reg, Operand(at)); | 325 holder_reg, Operand(at)); |
| 323 | 326 |
| 324 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); | 327 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); |
| 325 LoadRoot(at, Heap::kGlobalContextMapRootIndex); | 328 LoadRoot(at, Heap::kGlobalContextMapRootIndex); |
| 326 Check(eq, "JSGlobalObject::global_context should be a global context.", | 329 Check(eq, "JSGlobalObject::global_context should be a global context.", |
| 327 holder_reg, Operand(at)); | 330 holder_reg, Operand(at)); |
| 328 // Restore at is not needed. at is reloaded below. | 331 // Restore at is not needed. at is reloaded below. |
| 329 Pop(holder_reg); // Restore holder. | 332 pop(holder_reg); // Restore holder. |
| 330 // Restore at to holder's context. | 333 // Restore at to holder's context. |
| 331 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); | 334 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
| 332 } | 335 } |
| 333 | 336 |
| 334 // Check that the security token in the calling global object is | 337 // Check that the security token in the calling global object is |
| 335 // compatible with the security token in the receiving global | 338 // compatible with the security token in the receiving global |
| 336 // object. | 339 // object. |
| 337 int token_offset = Context::kHeaderSize + | 340 int token_offset = Context::kHeaderSize + |
| 338 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 341 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
| 339 | 342 |
| 340 lw(scratch, FieldMemOperand(scratch, token_offset)); | 343 lw(scratch, FieldMemOperand(scratch, token_offset)); |
| 341 lw(at, FieldMemOperand(at, token_offset)); | 344 lw(at, FieldMemOperand(at, token_offset)); |
| 342 Branch(miss, ne, scratch, Operand(at)); | 345 Branch(miss, ne, scratch, Operand(at)); |
| 343 | 346 |
| 344 bind(&same_contexts); | 347 bind(&same_contexts); |
| 345 } | 348 } |
| 346 | 349 |
| 347 | 350 |
| 348 // --------------------------------------------------------------------------- | 351 // --------------------------------------------------------------------------- |
| 349 // Instruction macros | 352 // Instruction macros. |
| 350 | 353 |
| 351 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { | 354 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { |
| 352 if (rt.is_reg()) { | 355 if (rt.is_reg()) { |
| 353 addu(rd, rs, rt.rm()); | 356 addu(rd, rs, rt.rm()); |
| 354 } else { | 357 } else { |
| 355 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { | 358 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
| 356 addiu(rd, rs, rt.imm32_); | 359 addiu(rd, rs, rt.imm32_); |
| 357 } else { | 360 } else { |
| 358 // li handles the relocation. | 361 // li handles the relocation. |
| 359 ASSERT(!rs.is(at)); | 362 ASSERT(!rs.is(at)); |
| (...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 493 nor(rd, rs, rt.rm()); | 496 nor(rd, rs, rt.rm()); |
| 494 } else { | 497 } else { |
| 495 // li handles the relocation. | 498 // li handles the relocation. |
| 496 ASSERT(!rs.is(at)); | 499 ASSERT(!rs.is(at)); |
| 497 li(at, rt); | 500 li(at, rt); |
| 498 nor(rd, rs, at); | 501 nor(rd, rs, at); |
| 499 } | 502 } |
| 500 } | 503 } |
| 501 | 504 |
| 502 | 505 |
| 506 void MacroAssembler::Neg(Register rs, const Operand& rt) { |
| 507 ASSERT(rt.is_reg()); |
| 508 ASSERT(!at.is(rs)); |
| 509 ASSERT(!at.is(rt.rm())); |
| 510 li(at, -1); |
| 511 xor_(rs, rt.rm(), at); |
| 512 } |
| 513 |
| 514 |
| 503 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { | 515 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { |
| 504 if (rt.is_reg()) { | 516 if (rt.is_reg()) { |
| 505 slt(rd, rs, rt.rm()); | 517 slt(rd, rs, rt.rm()); |
| 506 } else { | 518 } else { |
| 507 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { | 519 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
| 508 slti(rd, rs, rt.imm32_); | 520 slti(rd, rs, rt.imm32_); |
| 509 } else { | 521 } else { |
| 510 // li handles the relocation. | 522 // li handles the relocation. |
| 511 ASSERT(!rs.is(at)); | 523 ASSERT(!rs.is(at)); |
| 512 li(at, rt); | 524 li(at, rt); |
| (...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 574 } else { | 586 } else { |
| 575 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); | 587 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); |
| 576 ori(rd, rd, (j.imm32_ & kImm16Mask)); | 588 ori(rd, rd, (j.imm32_ & kImm16Mask)); |
| 577 } | 589 } |
| 578 } else if (MustUseReg(j.rmode_) || gen2instr) { | 590 } else if (MustUseReg(j.rmode_) || gen2instr) { |
| 579 if (MustUseReg(j.rmode_)) { | 591 if (MustUseReg(j.rmode_)) { |
| 580 RecordRelocInfo(j.rmode_, j.imm32_); | 592 RecordRelocInfo(j.rmode_, j.imm32_); |
| 581 } | 593 } |
| 582 // We need always the same number of instructions as we may need to patch | 594 // We need always the same number of instructions as we may need to patch |
| 583 // this code to load another value which may need 2 instructions to load. | 595 // this code to load another value which may need 2 instructions to load. |
| 584 if (is_int16(j.imm32_)) { | 596 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); |
| 585 nop(); | 597 ori(rd, rd, (j.imm32_ & kImm16Mask)); |
| 586 addiu(rd, zero_reg, j.imm32_); | |
| 587 } else if (!(j.imm32_ & kHiMask)) { | |
| 588 nop(); | |
| 589 ori(rd, zero_reg, j.imm32_); | |
| 590 } else if (!(j.imm32_ & kImm16Mask)) { | |
| 591 nop(); | |
| 592 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); | |
| 593 } else { | |
| 594 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); | |
| 595 ori(rd, rd, (j.imm32_ & kImm16Mask)); | |
| 596 } | |
| 597 } | 598 } |
| 598 } | 599 } |
| 599 | 600 |
| 600 | 601 |
| 601 // Exception-generating instructions and debugging support | 602 // Exception-generating instructions and debugging support. |
| 602 void MacroAssembler::stop(const char* msg) { | 603 void MacroAssembler::stop(const char* msg) { |
| 603 // TO_UPGRADE: Just a break for now. Maybe we could upgrade it. | 604 // TO_UPGRADE: Just a break for now. Maybe we could upgrade it. |
| 604 // We use the 0x54321 value to be able to find it easily when reading memory. | 605 // We use the 0x54321 value to be able to find it easily when reading memory. |
| 605 break_(0x54321); | 606 break_(0x54321); |
| 606 } | 607 } |
| 607 | 608 |
| 608 | 609 |
| 609 void MacroAssembler::MultiPush(RegList regs) { | 610 void MacroAssembler::MultiPush(RegList regs) { |
| 610 int16_t NumSaved = 0; | 611 int16_t NumSaved = 0; |
| 611 int16_t NumToPush = NumberOfBitsSet(regs); | 612 int16_t NumToPush = NumberOfBitsSet(regs); |
| (...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 720 | 721 |
| 721 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) { | 722 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) { |
| 722 // Convert rs to a FP value in fd (and fd + 1). | 723 // Convert rs to a FP value in fd (and fd + 1). |
| 723 // We do this by converting rs minus the MSB to avoid sign conversion, | 724 // We do this by converting rs minus the MSB to avoid sign conversion, |
| 724 // then adding 2^31-1 and 1 to the result. | 725 // then adding 2^31-1 and 1 to the result. |
| 725 | 726 |
| 726 ASSERT(!fd.is(f20)); | 727 ASSERT(!fd.is(f20)); |
| 727 ASSERT(!rs.is(t9)); | 728 ASSERT(!rs.is(t9)); |
| 728 ASSERT(!rs.is(t8)); | 729 ASSERT(!rs.is(t8)); |
| 729 | 730 |
| 730 // Save rs's MSB to t8 | 731 // Save rs's MSB to t8. |
| 731 And(t8, rs, 0x80000000); | 732 And(t8, rs, 0x80000000); |
| 732 // Remove rs's MSB. | 733 // Remove rs's MSB. |
| 733 And(t9, rs, 0x7FFFFFFF); | 734 And(t9, rs, 0x7FFFFFFF); |
| 734 // Move t9 to fd | 735 // Move t9 to fd. |
| 735 mtc1(t9, fd); | 736 mtc1(t9, fd); |
| 736 | 737 |
| 737 // Convert fd to a real FP value. | 738 // Convert fd to a real FP value. |
| 738 cvt_d_w(fd, fd); | 739 cvt_d_w(fd, fd); |
| 739 | 740 |
| 740 Label conversion_done; | 741 Label conversion_done; |
| 741 | 742 |
| 742 // If rs's MSB was 0, it's done. | 743 // If rs's MSB was 0, it's done. |
| 743 // Otherwise we need to add that to the FP register. | 744 // Otherwise we need to add that to the FP register. |
| 744 Branch(&conversion_done, eq, t8, Operand(zero_reg)); | 745 Branch(&conversion_done, eq, t8, Operand(zero_reg)); |
| (...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 832 Branch(not_int32, gt, scratch2, Operand(non_smi_exponent)); | 833 Branch(not_int32, gt, scratch2, Operand(non_smi_exponent)); |
| 833 | 834 |
| 834 // We know the exponent is smaller than 30 (biased). If it is less than | 835 // We know the exponent is smaller than 30 (biased). If it is less than |
| 835 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie | 836 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie |
| 836 // it rounds to zero. | 837 // it rounds to zero. |
| 837 const uint32_t zero_exponent = | 838 const uint32_t zero_exponent = |
| 838 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | 839 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
| 839 Subu(scratch2, scratch2, Operand(zero_exponent)); | 840 Subu(scratch2, scratch2, Operand(zero_exponent)); |
| 840 // Dest already has a Smi zero. | 841 // Dest already has a Smi zero. |
| 841 Branch(&done, lt, scratch2, Operand(zero_reg)); | 842 Branch(&done, lt, scratch2, Operand(zero_reg)); |
| 842 if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) { | 843 if (!CpuFeatures::IsSupported(FPU)) { |
| 843 // We have a shifted exponent between 0 and 30 in scratch2. | 844 // We have a shifted exponent between 0 and 30 in scratch2. |
| 844 srl(dest, scratch2, HeapNumber::kExponentShift); | 845 srl(dest, scratch2, HeapNumber::kExponentShift); |
| 845 // We now have the exponent in dest. Subtract from 30 to get | 846 // We now have the exponent in dest. Subtract from 30 to get |
| 846 // how much to shift down. | 847 // how much to shift down. |
| 847 li(at, Operand(30)); | 848 li(at, Operand(30)); |
| 848 subu(dest, at, dest); | 849 subu(dest, at, dest); |
| 849 } | 850 } |
| 850 bind(&right_exponent); | 851 bind(&right_exponent); |
| 851 if (Isolate::Current()->cpu_features()->IsSupported(FPU)) { | 852 if (CpuFeatures::IsSupported(FPU)) { |
| 852 CpuFeatures::Scope scope(FPU); | 853 CpuFeatures::Scope scope(FPU); |
| 853 // MIPS FPU instructions implementing double precision to integer | 854 // MIPS FPU instructions implementing double precision to integer |
| 854 // conversion using round to zero. Since the FP value was qualified | 855 // conversion using round to zero. Since the FP value was qualified |
| 855 // above, the resulting integer should be a legal int32. | 856 // above, the resulting integer should be a legal int32. |
| 856 // The original 'Exponent' word is still in scratch. | 857 // The original 'Exponent' word is still in scratch. |
| 857 lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 858 lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
| 858 mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1)); | 859 mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1)); |
| 859 trunc_w_d(double_scratch, double_scratch); | 860 trunc_w_d(double_scratch, double_scratch); |
| 860 mfc1(dest, double_scratch); | 861 mfc1(dest, double_scratch); |
| 861 } else { | 862 } else { |
| (...skipping 29 matching lines...) Expand all Loading... |
| 891 // Trick to check sign bit (msb) held in dest, count leading zero. | 892 // Trick to check sign bit (msb) held in dest, count leading zero. |
| 892 // 0 indicates negative, save negative version with conditional move. | 893 // 0 indicates negative, save negative version with conditional move. |
| 893 clz(dest, dest); | 894 clz(dest, dest); |
| 894 movz(scratch, scratch2, dest); | 895 movz(scratch, scratch2, dest); |
| 895 mov(dest, scratch); | 896 mov(dest, scratch); |
| 896 } | 897 } |
| 897 bind(&done); | 898 bind(&done); |
| 898 } | 899 } |
| 899 | 900 |
| 900 | 901 |
| 902 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, |
| 903 Register input_high, |
| 904 Register input_low, |
| 905 Register scratch) { |
| 906 Label done, normal_exponent, restore_sign; |
| 907 // Extract the biased exponent in result. |
| 908 Ext(result, |
| 909 input_high, |
| 910 HeapNumber::kExponentShift, |
| 911 HeapNumber::kExponentBits); |
| 912 |
| 913 // Check for Infinity and NaNs, which should return 0. |
| 914 Subu(scratch, result, HeapNumber::kExponentMask); |
| 915 movz(result, zero_reg, scratch); |
| 916 Branch(&done, eq, scratch, Operand(zero_reg)); |
| 917 |
| 918 // Express exponent as delta to (number of mantissa bits + 31). |
| 919 Subu(result, |
| 920 result, |
| 921 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); |
| 922 |
| 923 // If the delta is strictly positive, all bits would be shifted away, |
| 924 // which means that we can return 0. |
| 925 Branch(&normal_exponent, le, result, Operand(zero_reg)); |
| 926 mov(result, zero_reg); |
| 927 Branch(&done); |
| 928 |
| 929 bind(&normal_exponent); |
| 930 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; |
| 931 // Calculate shift. |
| 932 Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits)); |
| 933 |
| 934 // Save the sign. |
| 935 Register sign = result; |
| 936 result = no_reg; |
| 937 And(sign, input_high, Operand(HeapNumber::kSignMask)); |
| 938 |
| 939 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need |
| 940 // to check for this specific case. |
| 941 Label high_shift_needed, high_shift_done; |
| 942 Branch(&high_shift_needed, lt, scratch, Operand(32)); |
| 943 mov(input_high, zero_reg); |
| 944 Branch(&high_shift_done); |
| 945 bind(&high_shift_needed); |
| 946 |
| 947 // Set the implicit 1 before the mantissa part in input_high. |
| 948 Or(input_high, |
| 949 input_high, |
| 950 Operand(1 << HeapNumber::kMantissaBitsInTopWord)); |
| 951 // Shift the mantissa bits to the correct position. |
| 952 // We don't need to clear non-mantissa bits as they will be shifted away. |
| 953 // If they weren't, it would mean that the answer is in the 32bit range. |
| 954 sllv(input_high, input_high, scratch); |
| 955 |
| 956 bind(&high_shift_done); |
| 957 |
| 958 // Replace the shifted bits with bits from the lower mantissa word. |
| 959 Label pos_shift, shift_done; |
| 960 li(at, 32); |
| 961 subu(scratch, at, scratch); |
| 962 Branch(&pos_shift, ge, scratch, Operand(zero_reg)); |
| 963 |
| 964 // Negate scratch. |
| 965 Subu(scratch, zero_reg, scratch); |
| 966 sllv(input_low, input_low, scratch); |
| 967 Branch(&shift_done); |
| 968 |
| 969 bind(&pos_shift); |
| 970 srlv(input_low, input_low, scratch); |
| 971 |
| 972 bind(&shift_done); |
| 973 Or(input_high, input_high, Operand(input_low)); |
| 974 // Restore sign if necessary. |
| 975 mov(scratch, sign); |
| 976 result = sign; |
| 977 sign = no_reg; |
| 978 Subu(result, zero_reg, input_high); |
| 979 movz(result, input_high, scratch); |
| 980 bind(&done); |
| 981 } |
| 982 |
| 983 |
| 984 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
| 985 Register src, |
| 986 int num_least_bits) { |
| 987 Ext(dst, src, kSmiTagSize, num_least_bits); |
| 988 } |
| 989 |
| 990 |
| 991 void MacroAssembler::GetLeastBitsFromInt32(Register dst, |
| 992 Register src, |
| 993 int num_least_bits) { |
| 994 And(dst, src, Operand((1 << num_least_bits) - 1)); |
| 995 } |
| 996 |
| 997 |
| 901 // Emulated condtional branches do not emit a nop in the branch delay slot. | 998 // Emulated condtional branches do not emit a nop in the branch delay slot. |
| 902 // | 999 // |
| 903 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. | 1000 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. |
| 904 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ | 1001 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ |
| 905 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ | 1002 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ |
| 906 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) | 1003 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) |
| 907 | 1004 |
| 908 | 1005 |
| 909 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { | 1006 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { |
| 910 b(offset); | 1007 b(offset); |
| (...skipping 19 matching lines...) Expand all Loading... |
| 930 switch (cond) { | 1027 switch (cond) { |
| 931 case cc_always: | 1028 case cc_always: |
| 932 b(offset); | 1029 b(offset); |
| 933 break; | 1030 break; |
| 934 case eq: | 1031 case eq: |
| 935 beq(rs, r2, offset); | 1032 beq(rs, r2, offset); |
| 936 break; | 1033 break; |
| 937 case ne: | 1034 case ne: |
| 938 bne(rs, r2, offset); | 1035 bne(rs, r2, offset); |
| 939 break; | 1036 break; |
| 940 // Signed comparison | 1037 // Signed comparison. |
| 941 case greater: | 1038 case greater: |
| 942 if (r2.is(zero_reg)) { | 1039 if (r2.is(zero_reg)) { |
| 943 bgtz(rs, offset); | 1040 bgtz(rs, offset); |
| 944 } else { | 1041 } else { |
| 945 slt(scratch, r2, rs); | 1042 slt(scratch, r2, rs); |
| 946 bne(scratch, zero_reg, offset); | 1043 bne(scratch, zero_reg, offset); |
| 947 } | 1044 } |
| 948 break; | 1045 break; |
| 949 case greater_equal: | 1046 case greater_equal: |
| 950 if (r2.is(zero_reg)) { | 1047 if (r2.is(zero_reg)) { |
| (...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1021 li(r2, rt); | 1118 li(r2, rt); |
| 1022 beq(rs, r2, offset); | 1119 beq(rs, r2, offset); |
| 1023 break; | 1120 break; |
| 1024 case ne: | 1121 case ne: |
| 1025 // We don't want any other register but scratch clobbered. | 1122 // We don't want any other register but scratch clobbered. |
| 1026 ASSERT(!scratch.is(rs)); | 1123 ASSERT(!scratch.is(rs)); |
| 1027 r2 = scratch; | 1124 r2 = scratch; |
| 1028 li(r2, rt); | 1125 li(r2, rt); |
| 1029 bne(rs, r2, offset); | 1126 bne(rs, r2, offset); |
| 1030 break; | 1127 break; |
| 1031 // Signed comparison | 1128 // Signed comparison. |
| 1032 case greater: | 1129 case greater: |
| 1033 if (rt.imm32_ == 0) { | 1130 if (rt.imm32_ == 0) { |
| 1034 bgtz(rs, offset); | 1131 bgtz(rs, offset); |
| 1035 } else { | 1132 } else { |
| 1036 r2 = scratch; | 1133 r2 = scratch; |
| 1037 li(r2, rt); | 1134 li(r2, rt); |
| 1038 slt(scratch, r2, rs); | 1135 slt(scratch, r2, rs); |
| 1039 bne(scratch, zero_reg, offset); | 1136 bne(scratch, zero_reg, offset); |
| 1040 } | 1137 } |
| 1041 break; | 1138 break; |
| (...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1163 b(offset); | 1260 b(offset); |
| 1164 break; | 1261 break; |
| 1165 case eq: | 1262 case eq: |
| 1166 offset = shifted_branch_offset(L, false); | 1263 offset = shifted_branch_offset(L, false); |
| 1167 beq(rs, r2, offset); | 1264 beq(rs, r2, offset); |
| 1168 break; | 1265 break; |
| 1169 case ne: | 1266 case ne: |
| 1170 offset = shifted_branch_offset(L, false); | 1267 offset = shifted_branch_offset(L, false); |
| 1171 bne(rs, r2, offset); | 1268 bne(rs, r2, offset); |
| 1172 break; | 1269 break; |
| 1173 // Signed comparison | 1270 // Signed comparison. |
| 1174 case greater: | 1271 case greater: |
| 1175 if (r2.is(zero_reg)) { | 1272 if (r2.is(zero_reg)) { |
| 1176 offset = shifted_branch_offset(L, false); | 1273 offset = shifted_branch_offset(L, false); |
| 1177 bgtz(rs, offset); | 1274 bgtz(rs, offset); |
| 1178 } else { | 1275 } else { |
| 1179 slt(scratch, r2, rs); | 1276 slt(scratch, r2, rs); |
| 1180 offset = shifted_branch_offset(L, false); | 1277 offset = shifted_branch_offset(L, false); |
| 1181 bne(scratch, zero_reg, offset); | 1278 bne(scratch, zero_reg, offset); |
| 1182 } | 1279 } |
| 1183 break; | 1280 break; |
| (...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1269 li(r2, rt); | 1366 li(r2, rt); |
| 1270 offset = shifted_branch_offset(L, false); | 1367 offset = shifted_branch_offset(L, false); |
| 1271 beq(rs, r2, offset); | 1368 beq(rs, r2, offset); |
| 1272 break; | 1369 break; |
| 1273 case ne: | 1370 case ne: |
| 1274 r2 = scratch; | 1371 r2 = scratch; |
| 1275 li(r2, rt); | 1372 li(r2, rt); |
| 1276 offset = shifted_branch_offset(L, false); | 1373 offset = shifted_branch_offset(L, false); |
| 1277 bne(rs, r2, offset); | 1374 bne(rs, r2, offset); |
| 1278 break; | 1375 break; |
| 1279 // Signed comparison | 1376 // Signed comparison. |
| 1280 case greater: | 1377 case greater: |
| 1281 if (rt.imm32_ == 0) { | 1378 if (rt.imm32_ == 0) { |
| 1282 offset = shifted_branch_offset(L, false); | 1379 offset = shifted_branch_offset(L, false); |
| 1283 bgtz(rs, offset); | 1380 bgtz(rs, offset); |
| 1284 } else { | 1381 } else { |
| 1285 r2 = scratch; | 1382 r2 = scratch; |
| 1286 li(r2, rt); | 1383 li(r2, rt); |
| 1287 slt(scratch, r2, rs); | 1384 slt(scratch, r2, rs); |
| 1288 offset = shifted_branch_offset(L, false); | 1385 offset = shifted_branch_offset(L, false); |
| 1289 bne(scratch, zero_reg, offset); | 1386 bne(scratch, zero_reg, offset); |
| (...skipping 147 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1437 bne(rs, r2, 2); | 1534 bne(rs, r2, 2); |
| 1438 nop(); | 1535 nop(); |
| 1439 bal(offset); | 1536 bal(offset); |
| 1440 break; | 1537 break; |
| 1441 case ne: | 1538 case ne: |
| 1442 beq(rs, r2, 2); | 1539 beq(rs, r2, 2); |
| 1443 nop(); | 1540 nop(); |
| 1444 bal(offset); | 1541 bal(offset); |
| 1445 break; | 1542 break; |
| 1446 | 1543 |
| 1447 // Signed comparison | 1544 // Signed comparison. |
| 1448 case greater: | 1545 case greater: |
| 1449 slt(scratch, r2, rs); | 1546 slt(scratch, r2, rs); |
| 1450 addiu(scratch, scratch, -1); | 1547 addiu(scratch, scratch, -1); |
| 1451 bgezal(scratch, offset); | 1548 bgezal(scratch, offset); |
| 1452 break; | 1549 break; |
| 1453 case greater_equal: | 1550 case greater_equal: |
| 1454 slt(scratch, rs, r2); | 1551 slt(scratch, rs, r2); |
| 1455 addiu(scratch, scratch, -1); | 1552 addiu(scratch, scratch, -1); |
| 1456 bltzal(scratch, offset); | 1553 bltzal(scratch, offset); |
| 1457 break; | 1554 break; |
| (...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1532 offset = shifted_branch_offset(L, false); | 1629 offset = shifted_branch_offset(L, false); |
| 1533 bal(offset); | 1630 bal(offset); |
| 1534 break; | 1631 break; |
| 1535 case ne: | 1632 case ne: |
| 1536 beq(rs, r2, 2); | 1633 beq(rs, r2, 2); |
| 1537 nop(); | 1634 nop(); |
| 1538 offset = shifted_branch_offset(L, false); | 1635 offset = shifted_branch_offset(L, false); |
| 1539 bal(offset); | 1636 bal(offset); |
| 1540 break; | 1637 break; |
| 1541 | 1638 |
| 1542 // Signed comparison | 1639 // Signed comparison. |
| 1543 case greater: | 1640 case greater: |
| 1544 slt(scratch, r2, rs); | 1641 slt(scratch, r2, rs); |
| 1545 addiu(scratch, scratch, -1); | 1642 addiu(scratch, scratch, -1); |
| 1546 offset = shifted_branch_offset(L, false); | 1643 offset = shifted_branch_offset(L, false); |
| 1547 bgezal(scratch, offset); | 1644 bgezal(scratch, offset); |
| 1548 break; | 1645 break; |
| 1549 case greater_equal: | 1646 case greater_equal: |
| 1550 slt(scratch, rs, r2); | 1647 slt(scratch, rs, r2); |
| 1551 addiu(scratch, scratch, -1); | 1648 addiu(scratch, scratch, -1); |
| 1552 offset = shifted_branch_offset(L, false); | 1649 offset = shifted_branch_offset(L, false); |
| (...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1635 jr(target.rm()); | 1732 jr(target.rm()); |
| 1636 } | 1733 } |
| 1637 } else { // Not register target. | 1734 } else { // Not register target. |
| 1638 if (!MustUseReg(target.rmode_)) { | 1735 if (!MustUseReg(target.rmode_)) { |
| 1639 if (cond == cc_always) { | 1736 if (cond == cc_always) { |
| 1640 j(target.imm32_); | 1737 j(target.imm32_); |
| 1641 } else { | 1738 } else { |
| 1642 Branch(2, NegateCondition(cond), rs, rt); | 1739 Branch(2, NegateCondition(cond), rs, rt); |
| 1643 j(target.imm32_); // Will generate only one instruction. | 1740 j(target.imm32_); // Will generate only one instruction. |
| 1644 } | 1741 } |
| 1645 } else { // MustUseReg(target) | 1742 } else { // MustUseReg(target). |
| 1646 li(t9, target); | 1743 li(t9, target); |
| 1647 if (cond == cc_always) { | 1744 if (cond == cc_always) { |
| 1648 jr(t9); | 1745 jr(t9); |
| 1649 } else { | 1746 } else { |
| 1650 Branch(2, NegateCondition(cond), rs, rt); | 1747 Branch(2, NegateCondition(cond), rs, rt); |
| 1651 jr(t9); // Will generate only one instruction. | 1748 jr(t9); // Will generate only one instruction. |
| 1652 } | 1749 } |
| 1653 } | 1750 } |
| 1654 } | 1751 } |
| 1655 // Emit a nop in the branch delay slot if required. | 1752 // Emit a nop in the branch delay slot if required. |
| 1656 if (bdslot == PROTECT) | 1753 if (bdslot == PROTECT) |
| 1657 nop(); | 1754 nop(); |
| 1658 } | 1755 } |
| 1659 | 1756 |
| 1660 | 1757 |
// Returns the size in bytes of the code sequence emitted for a call to a
// code object: li(t9, target) — lui + ori for a full 32-bit address, i.e.
// 2 instructions — followed by jalr(t9) and the branch-delay-slot nop.
// NOTE(review): assumes the protected (nop-filled) delay slot variant is
// used for code-object calls — confirm against Call()'s default bdslot.
int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
  return 4 * kInstrSize;
}
| 1665 | 1761 |
| 1666 | 1762 |
// Returns the size in bytes of the code sequence emitted for a call
// through a register: jalr plus the branch-delay-slot nop.
int MacroAssembler::CallSize(Register reg) {
  return 2 * kInstrSize;
}
| 1671 | 1766 |
| 1672 | 1767 |
| 1673 // Note: To call gcc-compiled C code on mips, you must call thru t9. | 1768 // Note: To call gcc-compiled C code on mips, you must call thru t9. |
| 1674 void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) { | 1769 void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) { |
| 1675 BlockTrampolinePoolScope block_trampoline_pool(this); | 1770 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1676 if (target.is_reg()) { | 1771 if (target.is_reg()) { |
| 1677 jalr(target.rm()); | 1772 jalr(target.rm()); |
| 1678 } else { // !target.is_reg() | 1773 } else { // !target.is_reg(). |
| 1679 if (!MustUseReg(target.rmode_)) { | 1774 if (!MustUseReg(target.rmode_)) { |
| 1680 jal(target.imm32_); | 1775 jal(target.imm32_); |
| 1681 } else { // MustUseReg(target) | 1776 } else { // MustUseReg(target). |
| 1777 // Must record previous source positions before the |
| 1778 // li() generates a new code target. |
| 1779 positions_recorder()->WriteRecordedPositions(); |
| 1682 li(t9, target); | 1780 li(t9, target); |
| 1683 jalr(t9); | 1781 jalr(t9); |
| 1684 } | 1782 } |
| 1685 } | 1783 } |
| 1686 // Emit a nop in the branch delay slot if required. | 1784 // Emit a nop in the branch delay slot if required. |
| 1687 if (bdslot == PROTECT) | 1785 if (bdslot == PROTECT) |
| 1688 nop(); | 1786 nop(); |
| 1689 } | 1787 } |
| 1690 | 1788 |
| 1691 | 1789 |
| 1692 // Note: To call gcc-compiled C code on mips, you must call thru t9. | 1790 // Note: To call gcc-compiled C code on mips, you must call thru t9. |
| 1693 void MacroAssembler::Call(const Operand& target, | 1791 void MacroAssembler::Call(const Operand& target, |
| 1694 Condition cond, Register rs, const Operand& rt, | 1792 Condition cond, Register rs, const Operand& rt, |
| 1695 BranchDelaySlot bdslot) { | 1793 BranchDelaySlot bdslot) { |
| 1696 BlockTrampolinePoolScope block_trampoline_pool(this); | 1794 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1697 BRANCH_ARGS_CHECK(cond, rs, rt); | 1795 BRANCH_ARGS_CHECK(cond, rs, rt); |
| 1698 if (target.is_reg()) { | 1796 if (target.is_reg()) { |
| 1699 if (cond == cc_always) { | 1797 if (cond == cc_always) { |
| 1700 jalr(target.rm()); | 1798 jalr(target.rm()); |
| 1701 } else { | 1799 } else { |
| 1702 Branch(2, NegateCondition(cond), rs, rt); | 1800 Branch(2, NegateCondition(cond), rs, rt); |
| 1703 jalr(target.rm()); | 1801 jalr(target.rm()); |
| 1704 } | 1802 } |
| 1705 } else { // !target.is_reg() | 1803 } else { // !target.is_reg(). |
| 1706 if (!MustUseReg(target.rmode_)) { | 1804 if (!MustUseReg(target.rmode_)) { |
| 1707 if (cond == cc_always) { | 1805 if (cond == cc_always) { |
| 1708 jal(target.imm32_); | 1806 jal(target.imm32_); |
| 1709 } else { | 1807 } else { |
| 1710 Branch(2, NegateCondition(cond), rs, rt); | 1808 Branch(2, NegateCondition(cond), rs, rt); |
| 1711 jal(target.imm32_); // Will generate only one instruction. | 1809 jal(target.imm32_); // Will generate only one instruction. |
| 1712 } | 1810 } |
| 1713 } else { // MustUseReg(target) | 1811 } else { // MustUseReg(target) |
| 1714 li(t9, target); | 1812 li(t9, target); |
| 1715 if (cond == cc_always) { | 1813 if (cond == cc_always) { |
| 1716 jalr(t9); | 1814 jalr(t9); |
| 1717 } else { | 1815 } else { |
| 1718 Branch(2, NegateCondition(cond), rs, rt); | 1816 Branch(2, NegateCondition(cond), rs, rt); |
| 1719 jalr(t9); // Will generate only one instruction. | 1817 jalr(t9); // Will generate only one instruction. |
| 1720 } | 1818 } |
| 1721 } | 1819 } |
| 1722 } | 1820 } |
| 1723 // Emit a nop in the branch delay slot if required. | 1821 // Emit a nop in the branch delay slot if required. |
| 1724 if (bdslot == PROTECT) | 1822 if (bdslot == PROTECT) |
| 1725 nop(); | 1823 nop(); |
| 1726 } | 1824 } |
| 1727 | 1825 |
| 1728 | 1826 |
// Emits a call to |code| while stashing |ast_id| so the reloc-info
// writer can attach it to the CODE_TARGET_WITH_ID entry generated for
// this call.
void MacroAssembler::CallWithAstId(Handle<Code> code,
                                   RelocInfo::Mode rmode,
                                   unsigned ast_id,
                                   Condition cond,
                                   Register r1,
                                   const Operand& r2) {
  ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
  ASSERT(ast_id != kNoASTId);
  // Only one AST id may be pending at a time; it is consumed when the
  // reloc info for the call below is written.
  ASSERT(ast_id_for_reloc_info_ == kNoASTId);
  ast_id_for_reloc_info_ = ast_id;
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
| 1839 |
| 1840 |
| 1729 void MacroAssembler::Drop(int count, | 1841 void MacroAssembler::Drop(int count, |
| 1730 Condition cond, | 1842 Condition cond, |
| 1731 Register reg, | 1843 Register reg, |
| 1732 const Operand& op) { | 1844 const Operand& op) { |
| 1733 if (count <= 0) { | 1845 if (count <= 0) { |
| 1734 return; | 1846 return; |
| 1735 } | 1847 } |
| 1736 | 1848 |
| 1737 Label skip; | 1849 Label skip; |
| 1738 | 1850 |
| (...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1805 mov(a0, zero_reg); | 1917 mov(a0, zero_reg); |
| 1806 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); | 1918 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); |
| 1807 CEntryStub ces(1); | 1919 CEntryStub ces(1); |
| 1808 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); | 1920 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); |
| 1809 } | 1921 } |
| 1810 | 1922 |
| 1811 #endif // ENABLE_DEBUGGER_SUPPORT | 1923 #endif // ENABLE_DEBUGGER_SUPPORT |
| 1812 | 1924 |
| 1813 | 1925 |
| 1814 // --------------------------------------------------------------------------- | 1926 // --------------------------------------------------------------------------- |
| 1815 // Exception handling | 1927 // Exception handling. |
| 1816 | 1928 |
| 1817 void MacroAssembler::PushTryHandler(CodeLocation try_location, | 1929 void MacroAssembler::PushTryHandler(CodeLocation try_location, |
| 1818 HandlerType type) { | 1930 HandlerType type) { |
| 1819 // Adjust this code if not the case. | 1931 // Adjust this code if not the case. |
| 1820 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | 1932 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); |
| 1821 // The return address is passed in register ra. | 1933 // The return address is passed in register ra. |
| 1822 if (try_location == IN_JAVASCRIPT) { | 1934 if (try_location == IN_JAVASCRIPT) { |
| 1823 if (type == TRY_CATCH_HANDLER) { | 1935 if (type == TRY_CATCH_HANDLER) { |
| 1824 li(t0, Operand(StackHandler::TRY_CATCH)); | 1936 li(t0, Operand(StackHandler::TRY_CATCH)); |
| 1825 } else { | 1937 } else { |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1873 | 1985 |
// Unlinks the topmost try-handler from the handler chain and drops its
// state from the stack.
void MacroAssembler::PopTryHandler() {
  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
  pop(a1);  // a1 <- address of the next handler in the chain.
  // Discard the remaining handler state words.
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
  sw(a1, MemOperand(at));  // Next handler becomes the new chain head.
}
| 1881 | 1993 |
| 1882 | 1994 |
// Throws |value| by unwinding to the topmost stack handler: restores the
// handler chain, fp, cp, and jumps to the handler's saved pc. Does not
// return. Instruction ordering here is load-bearing (delay slots and a
// DEBUG-mode instruction count), so do not reorder emits.
void MacroAssembler::Throw(Register value) {
  // v0 is expected to hold the exception.
  Move(v0, value);

  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // Drop the sp to the top of the handler.
  li(a3, Operand(ExternalReference(Isolate::k_handler_address,
                                   isolate())));
  lw(sp, MemOperand(a3));

  // Restore the next handler and frame pointer, discard handler state.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(a2);
  sw(a2, MemOperand(a3));  // Unlink this handler from the chain.
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  MultiPop(a3.bit() | fp.bit());

  // Before returning we restore the context from the frame pointer if
  // not NULL. The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  // Set cp to NULL if fp is NULL.
  Label done;
  Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
  mov(cp, zero_reg);  // In branch delay slot.
  lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&done);

#ifdef DEBUG
  // When emitting debug_code, set ra as return address for the jump.
  // 5 instructions: add: 1, pop: 2, jump: 2.
  const int kOffsetRaInstructions = 5;
  Label find_ra;

  if (emit_debug_code()) {
    // Compute ra for the Jump(t9).
    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;

    // This branch-and-link sequence is needed to get the current PC on mips,
    // saved to the ra register. Then adjusted for instruction count.
    bal(&find_ra);  // bal exposes branch-delay.
    nop();  // Branch delay slot nop.
    bind(&find_ra);
    addiu(ra, ra, kOffsetRaBytes);
  }
#endif

  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  pop(t9);  // 2 instructions: lw, add sp.
  Jump(t9);  // 2 instructions: jr, nop (in delay slot).

  // NOTE(review): kOffsetRaInstructions and find_ra are declared only
  // under #ifdef DEBUG above; this compiles in release only because
  // ASSERT_EQ discards its arguments there — confirm, or guard this
  // block with #ifdef DEBUG as well.
  if (emit_debug_code()) {
    // Make sure that the expected number of instructions were generated.
    ASSERT_EQ(kOffsetRaInstructions,
              InstructionsGeneratedSince(&find_ra));
  }
}
| 2053 |
| 2054 |
// Throws an uncatchable exception: unwinds the handler chain down to the
// nearest ENTRY (JS entry frame) handler and jumps to it. For
// OUT_OF_MEMORY, also records the OOM failure as the pending exception.
// Does not return. Instruction ordering is load-bearing (delay slots and
// a DEBUG-mode instruction count); do not reorder emits.
void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // v0 is expected to hold the exception.
  Move(v0, value);

  // Drop sp to the top stack handler.
  li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
  lw(sp, MemOperand(a3));

  // Unwind the handlers until the ENTRY handler is found.
  Label loop, done;
  bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kStateOffset;
  lw(a2, MemOperand(sp, kStateOffset));
  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kNextOffset;
  lw(sp, MemOperand(sp, kNextOffset));
  jmp(&loop);
  bind(&done);

  // Set the top handler address to next handler past the current ENTRY handler.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(a2);
  sw(a2, MemOperand(a3));

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(
        Isolate::k_external_caught_exception_address, isolate());
    li(a0, Operand(false, RelocInfo::NONE));
    li(a2, Operand(external_caught));
    sw(a0, MemOperand(a2));

    // Set pending exception and v0 to out of memory exception.
    Failure* out_of_memory = Failure::OutOfMemoryException();
    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
    li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
                                     isolate())));
    sw(v0, MemOperand(a2));
  }

  // Stack layout at this point. See also StackHandlerConstants.
  // sp ->   state (ENTRY)
  //         fp
  //         ra

  // Discard handler state (a2 is not used) and restore frame pointer.
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  MultiPop(a3.bit() | fp.bit());  // a2: discarded state.
  // Before returning we restore the context from the frame pointer if
  // not NULL. The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  // Set cp to NULL if fp is NULL.
  Label cp_null;
  Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
  mov(cp, zero_reg);  // In the branch delay slot.
  lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&cp_null);

#ifdef DEBUG
  // When emitting debug_code, set ra as return address for the jump.
  // 5 instructions: add: 1, pop: 2, jump: 2.
  const int kOffsetRaInstructions = 5;
  Label find_ra;

  if (emit_debug_code()) {
    // Compute ra for the Jump(t9).
    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;

    // This branch-and-link sequence is needed to get the current PC on mips,
    // saved to the ra register. Then adjusted for instruction count.
    bal(&find_ra);  // bal exposes branch-delay slot.
    nop();  // Branch delay slot nop.
    bind(&find_ra);
    addiu(ra, ra, kOffsetRaBytes);
  }
#endif
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  pop(t9);  // 2 instructions: lw, add sp.
  Jump(t9);  // 2 instructions: jr, nop (in delay slot).

  // NOTE(review): kOffsetRaInstructions and find_ra exist only under
  // #ifdef DEBUG above; this compiles in release only because ASSERT_EQ
  // discards its arguments there — confirm, or guard with #ifdef DEBUG.
  if (emit_debug_code()) {
    // Make sure that the expected number of instructions were generated.
    ASSERT_EQ(kOffsetRaInstructions,
              InstructionsGeneratedSince(&find_ra));
  }
}
| 2146 |
| 2147 |
| 1883 void MacroAssembler::AllocateInNewSpace(int object_size, | 2148 void MacroAssembler::AllocateInNewSpace(int object_size, |
| 1884 Register result, | 2149 Register result, |
| 1885 Register scratch1, | 2150 Register scratch1, |
| 1886 Register scratch2, | 2151 Register scratch2, |
| 1887 Label* gc_required, | 2152 Label* gc_required, |
| 1888 AllocationFlags flags) { | 2153 AllocationFlags flags) { |
| 1889 if (!FLAG_inline_new) { | 2154 if (!FLAG_inline_new) { |
| 1890 if (FLAG_debug_code) { | 2155 if (emit_debug_code()) { |
| 1891 // Trash the registers to simulate an allocation failure. | 2156 // Trash the registers to simulate an allocation failure. |
| 1892 li(result, 0x7091); | 2157 li(result, 0x7091); |
| 1893 li(scratch1, 0x7191); | 2158 li(scratch1, 0x7191); |
| 1894 li(scratch2, 0x7291); | 2159 li(scratch2, 0x7291); |
| 1895 } | 2160 } |
| 1896 jmp(gc_required); | 2161 jmp(gc_required); |
| 1897 return; | 2162 return; |
| 1898 } | 2163 } |
| 1899 | 2164 |
| 1900 ASSERT(!result.is(scratch1)); | 2165 ASSERT(!result.is(scratch1)); |
| (...skipping 27 matching lines...) Expand all Loading... |
| 1928 Register obj_size_reg = scratch2; | 2193 Register obj_size_reg = scratch2; |
| 1929 li(topaddr, Operand(new_space_allocation_top)); | 2194 li(topaddr, Operand(new_space_allocation_top)); |
| 1930 li(obj_size_reg, Operand(object_size)); | 2195 li(obj_size_reg, Operand(object_size)); |
| 1931 | 2196 |
| 1932 // This code stores a temporary value in t9. | 2197 // This code stores a temporary value in t9. |
| 1933 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 2198 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 1934 // Load allocation top into result and allocation limit into t9. | 2199 // Load allocation top into result and allocation limit into t9. |
| 1935 lw(result, MemOperand(topaddr)); | 2200 lw(result, MemOperand(topaddr)); |
| 1936 lw(t9, MemOperand(topaddr, kPointerSize)); | 2201 lw(t9, MemOperand(topaddr, kPointerSize)); |
| 1937 } else { | 2202 } else { |
| 1938 if (FLAG_debug_code) { | 2203 if (emit_debug_code()) { |
| 1939 // Assert that result actually contains top on entry. t9 is used | 2204 // Assert that result actually contains top on entry. t9 is used |
| 1940 // immediately below so this use of t9 does not cause difference with | 2205 // immediately below so this use of t9 does not cause difference with |
| 1941 // respect to register content between debug and release mode. | 2206 // respect to register content between debug and release mode. |
| 1942 lw(t9, MemOperand(topaddr)); | 2207 lw(t9, MemOperand(topaddr)); |
| 1943 Check(eq, "Unexpected allocation top", result, Operand(t9)); | 2208 Check(eq, "Unexpected allocation top", result, Operand(t9)); |
| 1944 } | 2209 } |
| 1945 // Load allocation limit into t9. Result already contains allocation top. | 2210 // Load allocation limit into t9. Result already contains allocation top. |
| 1946 lw(t9, MemOperand(topaddr, limit - top)); | 2211 lw(t9, MemOperand(topaddr, limit - top)); |
| 1947 } | 2212 } |
| 1948 | 2213 |
| (...skipping 10 matching lines...) Expand all Loading... |
| 1959 } | 2224 } |
| 1960 | 2225 |
| 1961 | 2226 |
| 1962 void MacroAssembler::AllocateInNewSpace(Register object_size, | 2227 void MacroAssembler::AllocateInNewSpace(Register object_size, |
| 1963 Register result, | 2228 Register result, |
| 1964 Register scratch1, | 2229 Register scratch1, |
| 1965 Register scratch2, | 2230 Register scratch2, |
| 1966 Label* gc_required, | 2231 Label* gc_required, |
| 1967 AllocationFlags flags) { | 2232 AllocationFlags flags) { |
| 1968 if (!FLAG_inline_new) { | 2233 if (!FLAG_inline_new) { |
| 1969 if (FLAG_debug_code) { | 2234 if (emit_debug_code()) { |
| 1970 // Trash the registers to simulate an allocation failure. | 2235 // Trash the registers to simulate an allocation failure. |
| 1971 li(result, 0x7091); | 2236 li(result, 0x7091); |
| 1972 li(scratch1, 0x7191); | 2237 li(scratch1, 0x7191); |
| 1973 li(scratch2, 0x7291); | 2238 li(scratch2, 0x7291); |
| 1974 } | 2239 } |
| 1975 jmp(gc_required); | 2240 jmp(gc_required); |
| 1976 return; | 2241 return; |
| 1977 } | 2242 } |
| 1978 | 2243 |
| 1979 ASSERT(!result.is(scratch1)); | 2244 ASSERT(!result.is(scratch1)); |
| (...skipping 17 matching lines...) Expand all Loading... |
| 1997 // Set up allocation top address and object size registers. | 2262 // Set up allocation top address and object size registers. |
| 1998 Register topaddr = scratch1; | 2263 Register topaddr = scratch1; |
| 1999 li(topaddr, Operand(new_space_allocation_top)); | 2264 li(topaddr, Operand(new_space_allocation_top)); |
| 2000 | 2265 |
| 2001 // This code stores a temporary value in t9. | 2266 // This code stores a temporary value in t9. |
| 2002 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 2267 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 2003 // Load allocation top into result and allocation limit into t9. | 2268 // Load allocation top into result and allocation limit into t9. |
| 2004 lw(result, MemOperand(topaddr)); | 2269 lw(result, MemOperand(topaddr)); |
| 2005 lw(t9, MemOperand(topaddr, kPointerSize)); | 2270 lw(t9, MemOperand(topaddr, kPointerSize)); |
| 2006 } else { | 2271 } else { |
| 2007 if (FLAG_debug_code) { | 2272 if (emit_debug_code()) { |
| 2008 // Assert that result actually contains top on entry. t9 is used | 2273 // Assert that result actually contains top on entry. t9 is used |
| 2009 // immediately below so this use of t9 does not cause difference with | 2274 // immediately below so this use of t9 does not cause difference with |
| 2010 // respect to register content between debug and release mode. | 2275 // respect to register content between debug and release mode. |
| 2011 lw(t9, MemOperand(topaddr)); | 2276 lw(t9, MemOperand(topaddr)); |
| 2012 Check(eq, "Unexpected allocation top", result, Operand(t9)); | 2277 Check(eq, "Unexpected allocation top", result, Operand(t9)); |
| 2013 } | 2278 } |
| 2014 // Load allocation limit into t9. Result already contains allocation top. | 2279 // Load allocation limit into t9. Result already contains allocation top. |
| 2015 lw(t9, MemOperand(topaddr, limit - top)); | 2280 lw(t9, MemOperand(topaddr, limit - top)); |
| 2016 } | 2281 } |
| 2017 | 2282 |
| 2018 // Calculate new top and bail out if new space is exhausted. Use result | 2283 // Calculate new top and bail out if new space is exhausted. Use result |
| 2019 // to calculate the new top. Object size may be in words so a shift is | 2284 // to calculate the new top. Object size may be in words so a shift is |
| 2020 // required to get the number of bytes. | 2285 // required to get the number of bytes. |
| 2021 if ((flags & SIZE_IN_WORDS) != 0) { | 2286 if ((flags & SIZE_IN_WORDS) != 0) { |
| 2022 sll(scratch2, object_size, kPointerSizeLog2); | 2287 sll(scratch2, object_size, kPointerSizeLog2); |
| 2023 Addu(scratch2, result, scratch2); | 2288 Addu(scratch2, result, scratch2); |
| 2024 } else { | 2289 } else { |
| 2025 Addu(scratch2, result, Operand(object_size)); | 2290 Addu(scratch2, result, Operand(object_size)); |
| 2026 } | 2291 } |
| 2027 Branch(gc_required, Ugreater, scratch2, Operand(t9)); | 2292 Branch(gc_required, Ugreater, scratch2, Operand(t9)); |
| 2028 | 2293 |
| 2029 // Update allocation top. result temporarily holds the new top. | 2294 // Update allocation top. result temporarily holds the new top. |
| 2030 if (FLAG_debug_code) { | 2295 if (emit_debug_code()) { |
| 2031 And(t9, scratch2, Operand(kObjectAlignmentMask)); | 2296 And(t9, scratch2, Operand(kObjectAlignmentMask)); |
| 2032 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg)); | 2297 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg)); |
| 2033 } | 2298 } |
| 2034 sw(scratch2, MemOperand(topaddr)); | 2299 sw(scratch2, MemOperand(topaddr)); |
| 2035 | 2300 |
| 2036 // Tag object if requested. | 2301 // Tag object if requested. |
| 2037 if ((flags & TAG_OBJECT) != 0) { | 2302 if ((flags & TAG_OBJECT) != 0) { |
| 2038 Addu(result, result, Operand(kHeapObjectTag)); | 2303 Addu(result, result, Operand(kHeapObjectTag)); |
| 2039 } | 2304 } |
| 2040 } | 2305 } |
| (...skipping 170 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2211 } | 2476 } |
| 2212 ASSERT(!tmp.is(no_reg)); | 2477 ASSERT(!tmp.is(no_reg)); |
| 2213 | 2478 |
| 2214 for (int i = 0; i < field_count; i++) { | 2479 for (int i = 0; i < field_count; i++) { |
| 2215 lw(tmp, FieldMemOperand(src, i * kPointerSize)); | 2480 lw(tmp, FieldMemOperand(src, i * kPointerSize)); |
| 2216 sw(tmp, FieldMemOperand(dst, i * kPointerSize)); | 2481 sw(tmp, FieldMemOperand(dst, i * kPointerSize)); |
| 2217 } | 2482 } |
| 2218 } | 2483 } |
| 2219 | 2484 |
| 2220 | 2485 |
| 2486 void MacroAssembler::CopyBytes(Register src, |
| 2487 Register dst, |
| 2488 Register length, |
| 2489 Register scratch) { |
| 2490 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; |
| 2491 |
| 2492 // Align src before copying in word size chunks. |
| 2493 bind(&align_loop); |
| 2494 Branch(&done, eq, length, Operand(zero_reg)); |
| 2495 bind(&align_loop_1); |
| 2496 And(scratch, src, kPointerSize - 1); |
| 2497 Branch(&word_loop, eq, scratch, Operand(zero_reg)); |
| 2498 lbu(scratch, MemOperand(src)); |
| 2499 Addu(src, src, 1); |
| 2500 sb(scratch, MemOperand(dst)); |
| 2501 Addu(dst, dst, 1); |
| 2502 Subu(length, length, Operand(1)); |
| 2503 Branch(&byte_loop_1, ne, length, Operand(zero_reg)); |
| 2504 |
| 2505 // Copy bytes in word size chunks. |
| 2506 bind(&word_loop); |
| 2507 if (emit_debug_code()) { |
| 2508 And(scratch, src, kPointerSize - 1); |
| 2509 Assert(eq, "Expecting alignment for CopyBytes", |
| 2510 scratch, Operand(zero_reg)); |
| 2511 } |
| 2512 Branch(&byte_loop, lt, length, Operand(kPointerSize)); |
| 2513 lw(scratch, MemOperand(src)); |
| 2514 Addu(src, src, kPointerSize); |
| 2515 |
| 2516 // TODO(kalmard) check if this can be optimized to use sw in most cases. |
| 2517 // Can't use unaligned access - copy byte by byte. |
| 2518 sb(scratch, MemOperand(dst, 0)); |
| 2519 srl(scratch, scratch, 8); |
| 2520 sb(scratch, MemOperand(dst, 1)); |
| 2521 srl(scratch, scratch, 8); |
| 2522 sb(scratch, MemOperand(dst, 2)); |
| 2523 srl(scratch, scratch, 8); |
| 2524 sb(scratch, MemOperand(dst, 3)); |
| 2525 Addu(dst, dst, 4); |
| 2526 |
| 2527 Subu(length, length, Operand(kPointerSize)); |
| 2528 Branch(&word_loop); |
| 2529 |
| 2530 // Copy the last bytes if any left. |
| 2531 bind(&byte_loop); |
| 2532 Branch(&done, eq, length, Operand(zero_reg)); |
| 2533 bind(&byte_loop_1); |
| 2534 lbu(scratch, MemOperand(src)); |
| 2535 Addu(src, src, 1); |
| 2536 sb(scratch, MemOperand(dst)); |
| 2537 Addu(dst, dst, 1); |
| 2538 Subu(length, length, Operand(1)); |
| 2539 Branch(&byte_loop_1, ne, length, Operand(zero_reg)); |
| 2540 bind(&done); |
| 2541 } |
| 2542 |
| 2543 |
| 2221 void MacroAssembler::CheckMap(Register obj, | 2544 void MacroAssembler::CheckMap(Register obj, |
| 2222 Register scratch, | 2545 Register scratch, |
| 2223 Handle<Map> map, | 2546 Handle<Map> map, |
| 2224 Label* fail, | 2547 Label* fail, |
| 2225 bool is_heap_object) { | 2548 bool is_heap_object) { |
| 2226 if (!is_heap_object) { | 2549 if (!is_heap_object) { |
| 2227 JumpIfSmi(obj, fail); | 2550 JumpIfSmi(obj, fail); |
| 2228 } | 2551 } |
| 2229 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2552 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 2230 li(at, Operand(map)); | 2553 li(at, Operand(map)); |
| 2231 Branch(fail, ne, scratch, Operand(at)); | 2554 Branch(fail, ne, scratch, Operand(at)); |
| 2232 } | 2555 } |
| 2233 | 2556 |
| 2234 | 2557 |
| 2235 void MacroAssembler::CheckMap(Register obj, | 2558 void MacroAssembler::CheckMap(Register obj, |
| 2236 Register scratch, | 2559 Register scratch, |
| 2237 Heap::RootListIndex index, | 2560 Heap::RootListIndex index, |
| 2238 Label* fail, | 2561 Label* fail, |
| 2239 bool is_heap_object) { | 2562 bool is_heap_object) { |
| 2240 if (!is_heap_object) { | 2563 if (!is_heap_object) { |
| 2241 JumpIfSmi(obj, fail); | 2564 JumpIfSmi(obj, fail); |
| 2242 } | 2565 } |
| 2243 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2566 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 2244 LoadRoot(at, index); | 2567 LoadRoot(at, index); |
| 2245 Branch(fail, ne, scratch, Operand(at)); | 2568 Branch(fail, ne, scratch, Operand(at)); |
| 2246 } | 2569 } |
| 2247 | 2570 |
| 2248 | 2571 |
| 2572 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { |
| 2573 if (IsMipsSoftFloatABI) { |
| 2574 mtc1(v0, dst); |
| 2575 mtc1(v1, FPURegister::from_code(dst.code() + 1)); |
| 2576 } else { |
| 2577 if (!dst.is(f0)) { |
| 2578 mov_d(dst, f0); // Reg f0 is o32 ABI FP return value. |
| 2579 } |
| 2580 } |
| 2581 } |
| 2582 |
| 2583 |
| 2249 // ----------------------------------------------------------------------------- | 2584 // ----------------------------------------------------------------------------- |
| 2250 // JavaScript invokes | 2585 // JavaScript invokes. |
| 2251 | 2586 |
| 2252 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | 2587 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
| 2253 const ParameterCount& actual, | 2588 const ParameterCount& actual, |
| 2254 Handle<Code> code_constant, | 2589 Handle<Code> code_constant, |
| 2255 Register code_reg, | 2590 Register code_reg, |
| 2256 Label* done, | 2591 Label* done, |
| 2257 InvokeFlag flag, | 2592 InvokeFlag flag, |
| 2258 const CallWrapper& call_wrapper) { | 2593 const CallWrapper& call_wrapper) { |
| 2259 bool definitely_matches = false; | 2594 bool definitely_matches = false; |
| 2260 Label regular_invoke; | 2595 Label regular_invoke; |
| (...skipping 22 matching lines...) Expand all Loading... |
| 2283 if (expected.immediate() == sentinel) { | 2618 if (expected.immediate() == sentinel) { |
| 2284 // Don't worry about adapting arguments for builtins that | 2619 // Don't worry about adapting arguments for builtins that |
| 2285 // don't want that done. Skip adaption code by making it look | 2620 // don't want that done. Skip adaption code by making it look |
| 2286 // like we have a match between expected and actual number of | 2621 // like we have a match between expected and actual number of |
| 2287 // arguments. | 2622 // arguments. |
| 2288 definitely_matches = true; | 2623 definitely_matches = true; |
| 2289 } else { | 2624 } else { |
| 2290 li(a2, Operand(expected.immediate())); | 2625 li(a2, Operand(expected.immediate())); |
| 2291 } | 2626 } |
| 2292 } | 2627 } |
| 2628 } else if (actual.is_immediate()) { |
| 2629 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate())); |
| 2630 li(a0, Operand(actual.immediate())); |
| 2293 } else { | 2631 } else { |
| 2294 if (actual.is_immediate()) { | 2632 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg())); |
| 2295 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate())); | |
| 2296 li(a0, Operand(actual.immediate())); | |
| 2297 } else { | |
| 2298 Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg())); | |
| 2299 } | |
| 2300 } | 2633 } |
| 2301 | 2634 |
| 2302 if (!definitely_matches) { | 2635 if (!definitely_matches) { |
| 2303 if (!code_constant.is_null()) { | 2636 if (!code_constant.is_null()) { |
| 2304 li(a3, Operand(code_constant)); | 2637 li(a3, Operand(code_constant)); |
| 2305 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); | 2638 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); |
| 2306 } | 2639 } |
| 2307 | 2640 |
| 2308 Handle<Code> adaptor = | 2641 Handle<Code> adaptor = |
| 2309 isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 2642 isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| (...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2484 | 2817 |
| 2485 void MacroAssembler::GetObjectType(Register object, | 2818 void MacroAssembler::GetObjectType(Register object, |
| 2486 Register map, | 2819 Register map, |
| 2487 Register type_reg) { | 2820 Register type_reg) { |
| 2488 lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 2821 lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2489 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 2822 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 2490 } | 2823 } |
| 2491 | 2824 |
| 2492 | 2825 |
| 2493 // ----------------------------------------------------------------------------- | 2826 // ----------------------------------------------------------------------------- |
| 2494 // Runtime calls | 2827 // Runtime calls. |
| 2495 | 2828 |
| 2496 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, | 2829 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, |
| 2497 Register r1, const Operand& r2) { | 2830 Register r1, const Operand& r2) { |
| 2498 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. | 2831 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. |
| 2499 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); | 2832 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); |
| 2500 } | 2833 } |
| 2501 | 2834 |
| 2502 | 2835 |
| 2836 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond, |
| 2837 Register r1, const Operand& r2) { |
| 2838 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. |
| 2839 Object* result; |
| 2840 { MaybeObject* maybe_result = stub->TryGetCode(); |
| 2841 if (!maybe_result->ToObject(&result)) return maybe_result; |
| 2842 } |
| 2843 Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2); |
| 2844 return result; |
| 2845 } |
| 2846 |
| 2847 |
| 2848 |
| 2503 void MacroAssembler::TailCallStub(CodeStub* stub) { | 2849 void MacroAssembler::TailCallStub(CodeStub* stub) { |
| 2504 ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs | 2850 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. |
| 2505 Jump(stub->GetCode(), RelocInfo::CODE_TARGET); | 2851 Jump(stub->GetCode(), RelocInfo::CODE_TARGET); |
| 2506 } | 2852 } |
| 2507 | 2853 |
| 2854 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, |
| 2855 Condition cond, |
| 2856 Register r1, |
| 2857 const Operand& r2) { |
| 2858 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. |
| 2859 Object* result; |
| 2860 { MaybeObject* maybe_result = stub->TryGetCode(); |
| 2861 if (!maybe_result->ToObject(&result)) return maybe_result; |
| 2862 } |
| 2863 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); |
| 2864 return result; |
| 2865 } |
| 2866 |
| 2867 |
| 2868 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { |
| 2869 return ref0.address() - ref1.address(); |
| 2870 } |
| 2871 |
| 2872 |
| 2873 MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( |
| 2874 ExternalReference function, int stack_space) { |
| 2875 ExternalReference next_address = |
| 2876 ExternalReference::handle_scope_next_address(); |
| 2877 const int kNextOffset = 0; |
| 2878 const int kLimitOffset = AddressOffset( |
| 2879 ExternalReference::handle_scope_limit_address(), |
| 2880 next_address); |
| 2881 const int kLevelOffset = AddressOffset( |
| 2882 ExternalReference::handle_scope_level_address(), |
| 2883 next_address); |
| 2884 |
| 2885 // Allocate HandleScope in callee-save registers. |
| 2886 li(s3, Operand(next_address)); |
| 2887 lw(s0, MemOperand(s3, kNextOffset)); |
| 2888 lw(s1, MemOperand(s3, kLimitOffset)); |
| 2889 lw(s2, MemOperand(s3, kLevelOffset)); |
| 2890 Addu(s2, s2, Operand(1)); |
| 2891 sw(s2, MemOperand(s3, kLevelOffset)); |
| 2892 |
| 2893 // The O32 ABI requires us to pass a pointer in a0 where the returned struct |
| 2894 // (4 bytes) will be placed. This is also built into the Simulator. |
| 2895 // Set up the pointer to the returned value (a0). It was allocated in |
| 2896 // EnterExitFrame. |
| 2897 addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset); |
| 2898 |
| 2899 // Native call returns to the DirectCEntry stub which redirects to the |
| 2900 // return address pushed on stack (could have moved after GC). |
| 2901 // DirectCEntry stub itself is generated early and never moves. |
| 2902 DirectCEntryStub stub; |
| 2903 stub.GenerateCall(this, function); |
| 2904 |
| 2905 // As mentioned above, on MIPS a pointer is returned - we need to dereference |
| 2906 // it to get the actual return value (which is also a pointer). |
| 2907 lw(v0, MemOperand(v0)); |
| 2908 |
| 2909 Label promote_scheduled_exception; |
| 2910 Label delete_allocated_handles; |
| 2911 Label leave_exit_frame; |
| 2912 |
| 2913 // If result is non-zero, dereference to get the result value |
| 2914 // otherwise set it to undefined. |
| 2915 Label skip; |
| 2916 LoadRoot(a0, Heap::kUndefinedValueRootIndex); |
| 2917 Branch(&skip, eq, v0, Operand(zero_reg)); |
| 2918 lw(a0, MemOperand(v0)); |
| 2919 bind(&skip); |
| 2920 mov(v0, a0); |
| 2921 |
| 2922 // No more valid handles (the result handle was the last one). Restore |
| 2923 // previous handle scope. |
| 2924 sw(s0, MemOperand(s3, kNextOffset)); |
| 2925 if (emit_debug_code()) { |
| 2926 lw(a1, MemOperand(s3, kLevelOffset)); |
| 2927 Check(eq, "Unexpected level after return from api call", a1, Operand(s2)); |
| 2928 } |
| 2929 Subu(s2, s2, Operand(1)); |
| 2930 sw(s2, MemOperand(s3, kLevelOffset)); |
| 2931 lw(at, MemOperand(s3, kLimitOffset)); |
| 2932 Branch(&delete_allocated_handles, ne, s1, Operand(at)); |
| 2933 |
| 2934 // Check if the function scheduled an exception. |
| 2935 bind(&leave_exit_frame); |
| 2936 LoadRoot(t0, Heap::kTheHoleValueRootIndex); |
| 2937 li(at, Operand(ExternalReference::scheduled_exception_address(isolate()))); |
| 2938 lw(t1, MemOperand(at)); |
| 2939 Branch(&promote_scheduled_exception, ne, t0, Operand(t1)); |
| 2940 li(s0, Operand(stack_space)); |
| 2941 LeaveExitFrame(false, s0); |
| 2942 Ret(); |
| 2943 |
| 2944 bind(&promote_scheduled_exception); |
| 2945 MaybeObject* result = TryTailCallExternalReference( |
| 2946 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1); |
| 2947 if (result->IsFailure()) { |
| 2948 return result; |
| 2949 } |
| 2950 |
| 2951 // HandleScope limit has changed. Delete allocated extensions. |
| 2952 bind(&delete_allocated_handles); |
| 2953 sw(s1, MemOperand(s3, kLimitOffset)); |
| 2954 mov(s0, v0); |
| 2955 mov(a0, v0); |
| 2956 PrepareCallCFunction(1, s1); |
| 2957 li(a0, Operand(ExternalReference::isolate_address())); |
| 2958 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()), |
| 2959 1); |
| 2960 mov(v0, s0); |
| 2961 jmp(&leave_exit_frame); |
| 2962 |
| 2963 return result; |
| 2964 } |
| 2965 |
| 2508 | 2966 |
| 2509 void MacroAssembler::IllegalOperation(int num_arguments) { | 2967 void MacroAssembler::IllegalOperation(int num_arguments) { |
| 2510 if (num_arguments > 0) { | 2968 if (num_arguments > 0) { |
| 2511 addiu(sp, sp, num_arguments * kPointerSize); | 2969 addiu(sp, sp, num_arguments * kPointerSize); |
| 2512 } | 2970 } |
| 2513 LoadRoot(v0, Heap::kUndefinedValueRootIndex); | 2971 LoadRoot(v0, Heap::kUndefinedValueRootIndex); |
| 2514 } | 2972 } |
| 2515 | 2973 |
| 2516 | 2974 |
| 2517 void MacroAssembler::IndexFromHash(Register hash, | 2975 void MacroAssembler::IndexFromHash(Register hash, |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2560 li(mask_reg, HeapNumber::kExponentMask); | 3018 li(mask_reg, HeapNumber::kExponentMask); |
| 2561 | 3019 |
| 2562 And(exponent, exponent, mask_reg); | 3020 And(exponent, exponent, mask_reg); |
| 2563 Branch(not_number, eq, exponent, Operand(mask_reg)); | 3021 Branch(not_number, eq, exponent, Operand(mask_reg)); |
| 2564 } | 3022 } |
| 2565 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); | 3023 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 2566 bind(&done); | 3024 bind(&done); |
| 2567 } | 3025 } |
| 2568 | 3026 |
| 2569 | 3027 |
| 2570 | |
| 2571 void MacroAssembler::SmiToDoubleFPURegister(Register smi, | 3028 void MacroAssembler::SmiToDoubleFPURegister(Register smi, |
| 2572 FPURegister value, | 3029 FPURegister value, |
| 2573 Register scratch1) { | 3030 Register scratch1) { |
| 2574 sra(scratch1, smi, kSmiTagSize); | 3031 sra(scratch1, smi, kSmiTagSize); |
| 2575 mtc1(scratch1, value); | 3032 mtc1(scratch1, value); |
| 2576 cvt_d_w(value, value); | 3033 cvt_d_w(value, value); |
| 2577 } | 3034 } |
| 2578 | 3035 |
| 2579 | 3036 |
| 3037 void MacroAssembler::AdduAndCheckForOverflow(Register dst, |
| 3038 Register left, |
| 3039 Register right, |
| 3040 Register overflow_dst, |
| 3041 Register scratch) { |
| 3042 ASSERT(!dst.is(overflow_dst)); |
| 3043 ASSERT(!dst.is(scratch)); |
| 3044 ASSERT(!overflow_dst.is(scratch)); |
| 3045 ASSERT(!overflow_dst.is(left)); |
| 3046 ASSERT(!overflow_dst.is(right)); |
| 3047 ASSERT(!left.is(right)); |
| 3048 |
| 3049 // TODO(kalmard) There must be a way to optimize dst == left and dst == right |
| 3050 // cases. |
| 3051 |
| 3052 if (dst.is(left)) { |
| 3053 addu(overflow_dst, left, right); |
| 3054 xor_(dst, overflow_dst, left); |
| 3055 xor_(scratch, overflow_dst, right); |
| 3056 and_(scratch, scratch, dst); |
| 3057 mov(dst, overflow_dst); |
| 3058 mov(overflow_dst, scratch); |
| 3059 } else if (dst.is(right)) { |
| 3060 addu(overflow_dst, left, right); |
| 3061 xor_(dst, overflow_dst, right); |
| 3062 xor_(scratch, overflow_dst, left); |
| 3063 and_(scratch, scratch, dst); |
| 3064 mov(dst, overflow_dst); |
| 3065 mov(overflow_dst, scratch); |
| 3066 } else { |
| 3067 addu(dst, left, right); |
| 3068 xor_(overflow_dst, dst, left); |
| 3069 xor_(scratch, dst, right); |
| 3070 and_(overflow_dst, scratch, overflow_dst); |
| 3071 } |
| 3072 } |
| 3073 |
| 3074 |
| 3075 void MacroAssembler::SubuAndCheckForOverflow(Register dst, |
| 3076 Register left, |
| 3077 Register right, |
| 3078 Register overflow_dst, |
| 3079 Register scratch) { |
| 3080 ASSERT(!dst.is(overflow_dst)); |
| 3081 ASSERT(!dst.is(scratch)); |
| 3082 ASSERT(!overflow_dst.is(scratch)); |
| 3083 ASSERT(!overflow_dst.is(left)); |
| 3084 ASSERT(!overflow_dst.is(right)); |
| 3085 ASSERT(!left.is(right)); |
| 3086 ASSERT(!scratch.is(left)); |
| 3087 ASSERT(!scratch.is(right)); |
| 3088 |
| 3089 // TODO(kalmard) There must be a way to optimize dst == left and dst == right |
| 3090 // cases. |
| 3091 |
| 3092 if (dst.is(left)) { |
| 3093 subu(overflow_dst, left, right); |
| 3094 xor_(scratch, overflow_dst, left); |
| 3095 xor_(dst, left, right); |
| 3096 and_(scratch, scratch, dst); |
| 3097 mov(dst, overflow_dst); |
| 3098 mov(overflow_dst, scratch); |
| 3099 } else if (dst.is(right)) { |
| 3100 subu(overflow_dst, left, right); |
| 3101 xor_(dst, left, right); |
| 3102 xor_(scratch, overflow_dst, left); |
| 3103 and_(scratch, scratch, dst); |
| 3104 mov(dst, overflow_dst); |
| 3105 mov(overflow_dst, scratch); |
| 3106 } else { |
| 3107 subu(dst, left, right); |
| 3108 xor_(overflow_dst, dst, left); |
| 3109 xor_(scratch, left, right); |
| 3110 and_(overflow_dst, scratch, overflow_dst); |
| 3111 } |
| 3112 } |
| 3113 |
| 3114 |
| 2580 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 3115 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
| 2581 int num_arguments) { | 3116 int num_arguments) { |
| 2582 // All parameters are on the stack. v0 has the return value after call. | 3117 // All parameters are on the stack. v0 has the return value after call. |
| 2583 | 3118 |
| 2584 // If the expected number of arguments of the runtime function is | 3119 // If the expected number of arguments of the runtime function is |
| 2585 // constant, we check that the actual number of arguments match the | 3120 // constant, we check that the actual number of arguments match the |
| 2586 // expectation. | 3121 // expectation. |
| 2587 if (f->nargs >= 0 && f->nargs != num_arguments) { | 3122 if (f->nargs >= 0 && f->nargs != num_arguments) { |
| 2588 IllegalOperation(num_arguments); | 3123 IllegalOperation(num_arguments); |
| 2589 return; | 3124 return; |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2629 int num_arguments, | 3164 int num_arguments, |
| 2630 int result_size) { | 3165 int result_size) { |
| 2631 // TODO(1236192): Most runtime routines don't need the number of | 3166 // TODO(1236192): Most runtime routines don't need the number of |
| 2632 // arguments passed in because it is constant. At some point we | 3167 // arguments passed in because it is constant. At some point we |
| 2633 // should remove this need and make the runtime routine entry code | 3168 // should remove this need and make the runtime routine entry code |
| 2634 // smarter. | 3169 // smarter. |
| 2635 li(a0, Operand(num_arguments)); | 3170 li(a0, Operand(num_arguments)); |
| 2636 JumpToExternalReference(ext); | 3171 JumpToExternalReference(ext); |
| 2637 } | 3172 } |
| 2638 | 3173 |
| 3174 MaybeObject* MacroAssembler::TryTailCallExternalReference( |
| 3175 const ExternalReference& ext, int num_arguments, int result_size) { |
| 3176 // TODO(1236192): Most runtime routines don't need the number of |
| 3177 // arguments passed in because it is constant. At some point we |
| 3178 // should remove this need and make the runtime routine entry code |
| 3179 // smarter. |
| 3180 li(a0, num_arguments); |
| 3181 return TryJumpToExternalReference(ext); |
| 3182 } |
| 3183 |
| 2639 | 3184 |
| 2640 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, | 3185 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, |
| 2641 int num_arguments, | 3186 int num_arguments, |
| 2642 int result_size) { | 3187 int result_size) { |
| 2643 TailCallExternalReference(ExternalReference(fid, isolate()), | 3188 TailCallExternalReference(ExternalReference(fid, isolate()), |
| 2644 num_arguments, | 3189 num_arguments, |
| 2645 result_size); | 3190 result_size); |
| 2646 } | 3191 } |
| 2647 | 3192 |
| 2648 | 3193 |
| 2649 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { | 3194 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { |
| 2650 li(a1, Operand(builtin)); | 3195 li(a1, Operand(builtin)); |
| 2651 CEntryStub stub(1); | 3196 CEntryStub stub(1); |
| 2652 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 3197 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 2653 } | 3198 } |
| 2654 | 3199 |
| 2655 | 3200 |
| 3201 MaybeObject* MacroAssembler::TryJumpToExternalReference( |
| 3202 const ExternalReference& builtin) { |
| 3203 li(a1, Operand(builtin)); |
| 3204 CEntryStub stub(1); |
| 3205 return TryTailCallStub(&stub); |
| 3206 } |
| 3207 |
| 3208 |
| 2656 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | 3209 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
| 2657 InvokeFlag flag, | 3210 InvokeFlag flag, |
| 2658 const CallWrapper& call_wrapper) { | 3211 const CallWrapper& call_wrapper) { |
| 2659 GetBuiltinEntry(t9, id); | 3212 GetBuiltinEntry(t9, id); |
| 2660 if (flag == CALL_FUNCTION) { | 3213 if (flag == CALL_FUNCTION) { |
| 2661 call_wrapper.BeforeCall(CallSize(t9)); | 3214 call_wrapper.BeforeCall(CallSize(t9)); |
| 2662 Call(t9); | 3215 Call(t9); |
| 2663 call_wrapper.AfterCall(); | 3216 call_wrapper.AfterCall(); |
| 2664 } else { | 3217 } else { |
| 2665 ASSERT(flag == JUMP_FUNCTION); | 3218 ASSERT(flag == JUMP_FUNCTION); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2715 if (FLAG_native_code_counters && counter->Enabled()) { | 3268 if (FLAG_native_code_counters && counter->Enabled()) { |
| 2716 li(scratch2, Operand(ExternalReference(counter))); | 3269 li(scratch2, Operand(ExternalReference(counter))); |
| 2717 lw(scratch1, MemOperand(scratch2)); | 3270 lw(scratch1, MemOperand(scratch2)); |
| 2718 Subu(scratch1, scratch1, Operand(value)); | 3271 Subu(scratch1, scratch1, Operand(value)); |
| 2719 sw(scratch1, MemOperand(scratch2)); | 3272 sw(scratch1, MemOperand(scratch2)); |
| 2720 } | 3273 } |
| 2721 } | 3274 } |
| 2722 | 3275 |
| 2723 | 3276 |
| 2724 // ----------------------------------------------------------------------------- | 3277 // ----------------------------------------------------------------------------- |
| 2725 // Debugging | 3278 // Debugging. |
| 2726 | 3279 |
| 2727 void MacroAssembler::Assert(Condition cc, const char* msg, | 3280 void MacroAssembler::Assert(Condition cc, const char* msg, |
| 2728 Register rs, Operand rt) { | 3281 Register rs, Operand rt) { |
| 2729 if (FLAG_debug_code) | 3282 if (emit_debug_code()) |
| 2730 Check(cc, msg, rs, rt); | 3283 Check(cc, msg, rs, rt); |
| 2731 } | 3284 } |
| 2732 | 3285 |
| 2733 | 3286 |
| 2734 void MacroAssembler::AssertRegisterIsRoot(Register reg, | 3287 void MacroAssembler::AssertRegisterIsRoot(Register reg, |
| 2735 Heap::RootListIndex index) { | 3288 Heap::RootListIndex index) { |
| 2736 if (FLAG_debug_code) { | 3289 if (emit_debug_code()) { |
| 2737 LoadRoot(at, index); | 3290 LoadRoot(at, index); |
| 2738 Check(eq, "Register did not match expected root", reg, Operand(at)); | 3291 Check(eq, "Register did not match expected root", reg, Operand(at)); |
| 2739 } | 3292 } |
| 2740 } | 3293 } |
| 2741 | 3294 |
| 2742 | 3295 |
| 2743 void MacroAssembler::AssertFastElements(Register elements) { | 3296 void MacroAssembler::AssertFastElements(Register elements) { |
| 2744 if (FLAG_debug_code) { | 3297 if (emit_debug_code()) { |
| 2745 ASSERT(!elements.is(at)); | 3298 ASSERT(!elements.is(at)); |
| 2746 Label ok; | 3299 Label ok; |
| 2747 Push(elements); | 3300 push(elements); |
| 2748 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); | 3301 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 2749 LoadRoot(at, Heap::kFixedArrayMapRootIndex); | 3302 LoadRoot(at, Heap::kFixedArrayMapRootIndex); |
| 2750 Branch(&ok, eq, elements, Operand(at)); | 3303 Branch(&ok, eq, elements, Operand(at)); |
| 2751 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); | 3304 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); |
| 2752 Branch(&ok, eq, elements, Operand(at)); | 3305 Branch(&ok, eq, elements, Operand(at)); |
| 2753 Abort("JSObject with fast elements map has slow elements"); | 3306 Abort("JSObject with fast elements map has slow elements"); |
| 2754 bind(&ok); | 3307 bind(&ok); |
| 2755 Pop(elements); | 3308 pop(elements); |
| 2756 } | 3309 } |
| 2757 } | 3310 } |
| 2758 | 3311 |
| 2759 | 3312 |
| 2760 void MacroAssembler::Check(Condition cc, const char* msg, | 3313 void MacroAssembler::Check(Condition cc, const char* msg, |
| 2761 Register rs, Operand rt) { | 3314 Register rs, Operand rt) { |
| 2762 Label L; | 3315 Label L; |
| 2763 Branch(&L, cc, rs, rt); | 3316 Branch(&L, cc, rs, rt); |
| 2764 Abort(msg); | 3317 Abort(msg); |
| 2765 // will not return here | 3318 // Will not return here. |
| 2766 bind(&L); | 3319 bind(&L); |
| 2767 } | 3320 } |
| 2768 | 3321 |
| 2769 | 3322 |
| 2770 void MacroAssembler::Abort(const char* msg) { | 3323 void MacroAssembler::Abort(const char* msg) { |
| 2771 Label abort_start; | 3324 Label abort_start; |
| 2772 bind(&abort_start); | 3325 bind(&abort_start); |
| 2773 // We want to pass the msg string like a smi to avoid GC | 3326 // We want to pass the msg string like a smi to avoid GC |
| 2774 // problems, however msg is not guaranteed to be aligned | 3327 // problems, however msg is not guaranteed to be aligned |
| 2775 // properly. Instead, we pass an aligned pointer that is | 3328 // properly. Instead, we pass an aligned pointer that is |
| 2776 // a proper v8 smi, but also pass the alignment difference | 3329 // a proper v8 smi, but also pass the alignment difference |
| 2777 // from the real pointer as a smi. | 3330 // from the real pointer as a smi. |
| 2778 intptr_t p1 = reinterpret_cast<intptr_t>(msg); | 3331 intptr_t p1 = reinterpret_cast<intptr_t>(msg); |
| 2779 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; | 3332 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; |
| 2780 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); | 3333 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); |
| 2781 #ifdef DEBUG | 3334 #ifdef DEBUG |
| 2782 if (msg != NULL) { | 3335 if (msg != NULL) { |
| 2783 RecordComment("Abort message: "); | 3336 RecordComment("Abort message: "); |
| 2784 RecordComment(msg); | 3337 RecordComment(msg); |
| 2785 } | 3338 } |
| 2786 #endif | 3339 #endif |
| 2787 // Disable stub call restrictions to always allow calls to abort. | 3340 // Disable stub call restrictions to always allow calls to abort. |
| 2788 AllowStubCallsScope allow_scope(this, true); | 3341 AllowStubCallsScope allow_scope(this, true); |
| 2789 | 3342 |
| 2790 li(a0, Operand(p0)); | 3343 li(a0, Operand(p0)); |
| 2791 Push(a0); | 3344 push(a0); |
| 2792 li(a0, Operand(Smi::FromInt(p1 - p0))); | 3345 li(a0, Operand(Smi::FromInt(p1 - p0))); |
| 2793 Push(a0); | 3346 push(a0); |
| 2794 CallRuntime(Runtime::kAbort, 2); | 3347 CallRuntime(Runtime::kAbort, 2); |
| 2795 // will not return here | 3348 // Will not return here. |
| 2796 if (is_trampoline_pool_blocked()) { | 3349 if (is_trampoline_pool_blocked()) { |
| 2797 // If the calling code cares about the exact number of | 3350 // If the calling code cares about the exact number of |
| 2798 // instructions generated, we insert padding here to keep the size | 3351 // instructions generated, we insert padding here to keep the size |
| 2799 // of the Abort macro constant. | 3352 // of the Abort macro constant. |
| 2800 // Currently in debug mode with debug_code enabled the number of | 3353 // Currently in debug mode with debug_code enabled the number of |
| 2801 // generated instructions is 14, so we use this as a maximum value. | 3354 // generated instructions is 14, so we use this as a maximum value. |
| 2802 static const int kExpectedAbortInstructions = 14; | 3355 static const int kExpectedAbortInstructions = 14; |
| 2803 int abort_instructions = InstructionsGeneratedSince(&abort_start); | 3356 int abort_instructions = InstructionsGeneratedSince(&abort_start); |
| 2804 ASSERT(abort_instructions <= kExpectedAbortInstructions); | 3357 ASSERT(abort_instructions <= kExpectedAbortInstructions); |
| 2805 while (abort_instructions++ < kExpectedAbortInstructions) { | 3358 while (abort_instructions++ < kExpectedAbortInstructions) { |
| 2806 nop(); | 3359 nop(); |
| 2807 } | 3360 } |
| 2808 } | 3361 } |
| 2809 } | 3362 } |
| 2810 | 3363 |
| 2811 | 3364 |
| 2812 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 3365 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
| 2813 if (context_chain_length > 0) { | 3366 if (context_chain_length > 0) { |
| 2814 // Move up the chain of contexts to the context containing the slot. | 3367 // Move up the chain of contexts to the context containing the slot. |
| 2815 lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX))); | 3368 lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX))); |
| 2816 // Load the function context (which is the incoming, outer context). | 3369 // Load the function context (which is the incoming, outer context). |
| 2817 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); | 3370 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); |
| 2818 for (int i = 1; i < context_chain_length; i++) { | 3371 for (int i = 1; i < context_chain_length; i++) { |
| 2819 lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX))); | 3372 lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX))); |
| 2820 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); | 3373 lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); |
| 2821 } | 3374 } |
| 2822 // The context may be an intermediate context, not a function context. | 3375 } else { |
| 2823 lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); | 3376 // Slot is in the current function context. Move it into the |
| 2824 } else { // Slot is in the current function context. | 3377 // destination register in case we store into it (the write barrier |
| 2825 // The context may be an intermediate context, not a function context. | 3378 // cannot be allowed to destroy the context in esi). |
| 2826 lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX))); | 3379 Move(dst, cp); |
| 3380 } |
| 3381 |
| 3382 // We should not have found a 'with' context by walking the context chain |
| 3383 // (i.e., the static scope chain and runtime context chain do not agree). |
| 3384 // A variable occurring in such a scope should have slot type LOOKUP and |
| 3385 // not CONTEXT. |
| 3386 if (emit_debug_code()) { |
| 3387 lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); |
| 3388 Check(eq, "Yo dawg, I heard you liked function contexts " |
| 3389 "so I put function contexts in all your contexts", |
| 3390 dst, Operand(t9)); |
| 2827 } | 3391 } |
| 2828 } | 3392 } |
| 2829 | 3393 |
| 2830 | 3394 |
| 2831 void MacroAssembler::LoadGlobalFunction(int index, Register function) { | 3395 void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
| 2832 // Load the global or builtins object from the current context. | 3396 // Load the global or builtins object from the current context. |
| 2833 lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | 3397 lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 2834 // Load the global context from the global or builtins object. | 3398 // Load the global context from the global or builtins object. |
| 2835 lw(function, FieldMemOperand(function, | 3399 lw(function, FieldMemOperand(function, |
| 2836 GlobalObject::kGlobalContextOffset)); | 3400 GlobalObject::kGlobalContextOffset)); |
| 2837 // Load the function from the global context. | 3401 // Load the function from the global context. |
| 2838 lw(function, MemOperand(function, Context::SlotOffset(index))); | 3402 lw(function, MemOperand(function, Context::SlotOffset(index))); |
| 2839 } | 3403 } |
| 2840 | 3404 |
| 2841 | 3405 |
| 2842 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, | 3406 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, |
| 2843 Register map, | 3407 Register map, |
| 2844 Register scratch) { | 3408 Register scratch) { |
| 2845 // Load the initial map. The global functions all have initial maps. | 3409 // Load the initial map. The global functions all have initial maps. |
| 2846 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3410 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2847 if (FLAG_debug_code) { | 3411 if (emit_debug_code()) { |
| 2848 Label ok, fail; | 3412 Label ok, fail; |
| 2849 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false); | 3413 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false); |
| 2850 Branch(&ok); | 3414 Branch(&ok); |
| 2851 bind(&fail); | 3415 bind(&fail); |
| 2852 Abort("Global functions must have initial map"); | 3416 Abort("Global functions must have initial map"); |
| 2853 bind(&ok); | 3417 bind(&ok); |
| 2854 } | 3418 } |
| 2855 } | 3419 } |
| 2856 | 3420 |
| 2857 | 3421 |
| (...skipping 11 matching lines...) Expand all Loading... |
| 2869 | 3433 |
| 2870 | 3434 |
| 2871 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | 3435 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
| 2872 mov(sp, fp); | 3436 mov(sp, fp); |
| 2873 lw(fp, MemOperand(sp, 0 * kPointerSize)); | 3437 lw(fp, MemOperand(sp, 0 * kPointerSize)); |
| 2874 lw(ra, MemOperand(sp, 1 * kPointerSize)); | 3438 lw(ra, MemOperand(sp, 1 * kPointerSize)); |
| 2875 addiu(sp, sp, 2 * kPointerSize); | 3439 addiu(sp, sp, 2 * kPointerSize); |
| 2876 } | 3440 } |
| 2877 | 3441 |
| 2878 | 3442 |
| 2879 void MacroAssembler::EnterExitFrame(Register hold_argc, | 3443 void MacroAssembler::EnterExitFrame(bool save_doubles, |
| 2880 Register hold_argv, | 3444 int stack_space) { |
| 2881 Register hold_function, | 3445 // Setup the frame structure on the stack. |
| 2882 bool save_doubles) { | 3446 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); |
| 2883 // a0 is argc. | 3447 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); |
| 2884 sll(t8, a0, kPointerSizeLog2); | 3448 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); |
| 2885 addu(hold_argv, sp, t8); | |
| 2886 addiu(hold_argv, hold_argv, -kPointerSize); | |
| 2887 | 3449 |
| 2888 // Compute callee's stack pointer before making changes and save it as | 3450 // This is how the stack will look: |
| 2889 // t9 register so that it is restored as sp register on exit, thereby | 3451 // fp + 2 (==kCallerSPDisplacement) - old stack's end |
| 2890 // popping the args. | 3452 // [fp + 1 (==kCallerPCOffset)] - saved old ra |
| 2891 // t9 = sp + kPointerSize * #args | 3453 // [fp + 0 (==kCallerFPOffset)] - saved old fp |
| 2892 addu(t9, sp, t8); | 3454 // [fp - 1 (==kSPOffset)] - sp of the called function |
| 2893 | 3455 // [fp - 2 (==kCodeOffset)] - CodeObject |
| 2894 // Compute the argv pointer and keep it in a callee-saved register. | 3456 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the |
| 2895 // This only seems to be needed for crankshaft and may cause problems | 3457 // new stack (will contain saved ra) |
| 2896 // so it's disabled for now. | |
| 2897 // Subu(s6, t9, Operand(kPointerSize)); | |
| 2898 | |
| 2899 // Align the stack at this point. | |
| 2900 AlignStack(0); | |
| 2901 | 3458 |
| 2902 // Save registers. | 3459 // Save registers. |
| 2903 addiu(sp, sp, -12); | 3460 addiu(sp, sp, -4 * kPointerSize); |
| 2904 sw(t9, MemOperand(sp, 8)); | 3461 sw(ra, MemOperand(sp, 3 * kPointerSize)); |
| 2905 sw(ra, MemOperand(sp, 4)); | 3462 sw(fp, MemOperand(sp, 2 * kPointerSize)); |
| 2906 sw(fp, MemOperand(sp, 0)); | 3463 addiu(fp, sp, 2 * kPointerSize); // Setup new frame pointer. |
| 2907 mov(fp, sp); // Setup new frame pointer. | |
| 2908 | 3464 |
| 2909 li(t8, Operand(CodeObject())); | 3465 if (emit_debug_code()) { |
| 2910 Push(t8); // Accessed from ExitFrame::code_slot. | 3466 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
| 3467 } |
| 3468 |
| 3469 li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot. |
| 3470 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); |
| 2911 | 3471 |
| 2912 // Save the frame pointer and the context in top. | 3472 // Save the frame pointer and the context in top. |
| 2913 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); | 3473 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); |
| 2914 sw(fp, MemOperand(t8)); | 3474 sw(fp, MemOperand(t8)); |
| 2915 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate()))); | 3475 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate()))); |
| 2916 sw(cp, MemOperand(t8)); | 3476 sw(cp, MemOperand(t8)); |
| 2917 | 3477 |
| 2918 // Setup argc and the builtin function in callee-saved registers. | 3478 // Ensure we are not saving doubles, since it's not implemented yet. |
| 2919 mov(hold_argc, a0); | 3479 ASSERT(save_doubles == 0); |
| 2920 mov(hold_function, a1); | |
| 2921 | 3480 |
| 2922 // Optionally save all double registers. | 3481 // Reserve place for the return address, stack space and an optional slot |
| 2923 if (save_doubles) { | 3482 // (used by the DirectCEntryStub to hold the return value if a struct is |
| 2924 #ifdef DEBUG | 3483 // returned) and align the frame preparing for calling the runtime function. |
| 2925 int frame_alignment = ActivationFrameAlignment(); | 3484 ASSERT(stack_space >= 0); |
| 2926 #endif | 3485 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); |
| 2927 // The stack alignment code above made sp unaligned, so add space for one | 3486 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize)); |
| 2928 // more double register and use aligned addresses. | 3487 if (frame_alignment > 0) { |
| 2929 ASSERT(kDoubleSize == frame_alignment); | 3488 ASSERT(IsPowerOf2(frame_alignment)); |
| 2930 // Mark the frame as containing doubles by pushing a non-valid return | 3489 And(sp, sp, Operand(-frame_alignment)); // Align stack. |
| 2931 // address, i.e. 0. | |
| 2932 ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize); | |
| 2933 push(zero_reg); // Marker and alignment word. | |
| 2934 int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize; | |
| 2935 Subu(sp, sp, Operand(space)); | |
| 2936 // Remember: we only need to save every 2nd double FPU value. | |
| 2937 for (int i = 0; i < FPURegister::kNumRegisters; i+=2) { | |
| 2938 FPURegister reg = FPURegister::from_code(i); | |
| 2939 sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize)); | |
| 2940 } | |
| 2941 // Note that f0 will be accessible at fp - 2*kPointerSize - | |
| 2942 // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the | |
| 2943 // alignment word were pushed after the fp. | |
| 2944 } | 3490 } |
| 3491 |
| 3492 // Set the exit frame sp value to point just before the return address |
| 3493 // location. |
| 3494 addiu(at, sp, kPointerSize); |
| 3495 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
| 2945 } | 3496 } |
| 2946 | 3497 |
| 2947 | 3498 |
| 2948 void MacroAssembler::LeaveExitFrame(bool save_doubles) { | 3499 void MacroAssembler::LeaveExitFrame(bool save_doubles, |
| 2949 // Optionally restore all double registers. | 3500 Register argument_count) { |
| 2950 if (save_doubles) { | 3501 // Ensure we are not restoring doubles, since it's not implemented yet. |
| 2951 // TODO(regis): Use vldrm instruction. | 3502 ASSERT(save_doubles == 0); |
| 2952 // Remember: we only need to restore every 2nd double FPU value. | |
| 2953 for (int i = 0; i < FPURegister::kNumRegisters; i+=2) { | |
| 2954 FPURegister reg = FPURegister::from_code(i); | |
| 2955 // Register f30-f31 is just below the marker. | |
| 2956 const int offset = ExitFrameConstants::kMarkerOffset; | |
| 2957 ldc1(reg, MemOperand(fp, | |
| 2958 (i - FPURegister::kNumRegisters) * kDoubleSize + offset)); | |
| 2959 } | |
| 2960 } | |
| 2961 | 3503 |
| 2962 // Clear top frame. | 3504 // Clear top frame. |
| 2963 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); | 3505 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); |
| 2964 sw(zero_reg, MemOperand(t8)); | 3506 sw(zero_reg, MemOperand(t8)); |
| 2965 | 3507 |
| 2966 // Restore current context from top and clear it in debug mode. | 3508 // Restore current context from top and clear it in debug mode. |
| 2967 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate()))); | 3509 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate()))); |
| 2968 lw(cp, MemOperand(t8)); | 3510 lw(cp, MemOperand(t8)); |
| 2969 #ifdef DEBUG | 3511 #ifdef DEBUG |
| 2970 sw(a3, MemOperand(t8)); | 3512 sw(a3, MemOperand(t8)); |
| 2971 #endif | 3513 #endif |
| 2972 | 3514 |
| 2973 // Pop the arguments, restore registers, and return. | 3515 // Pop the arguments, restore registers, and return. |
| 2974 mov(sp, fp); // Respect ABI stack constraint. | 3516 mov(sp, fp); // Respect ABI stack constraint. |
| 2975 lw(fp, MemOperand(sp, 0)); | 3517 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); |
| 2976 lw(ra, MemOperand(sp, 4)); | 3518 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); |
| 2977 lw(sp, MemOperand(sp, 8)); | 3519 addiu(sp, sp, 8); |
| 2978 jr(ra); | 3520 if (argument_count.is_valid()) { |
| 2979 nop(); // Branch delay slot nop. | 3521 sll(t8, argument_count, kPointerSizeLog2); |
| 3522 addu(sp, sp, t8); |
| 3523 } |
| 2980 } | 3524 } |
| 2981 | 3525 |
| 2982 | 3526 |
| 2983 void MacroAssembler::InitializeNewString(Register string, | 3527 void MacroAssembler::InitializeNewString(Register string, |
| 2984 Register length, | 3528 Register length, |
| 2985 Heap::RootListIndex map_index, | 3529 Heap::RootListIndex map_index, |
| 2986 Register scratch1, | 3530 Register scratch1, |
| 2987 Register scratch2) { | 3531 Register scratch2) { |
| 2988 sll(scratch1, length, kSmiTagSize); | 3532 sll(scratch1, length, kSmiTagSize); |
| 2989 LoadRoot(scratch2, map_index); | 3533 LoadRoot(scratch2, map_index); |
| (...skipping 13 matching lines...) Expand all Loading... |
| 3003 return OS::ActivationFrameAlignment(); | 3547 return OS::ActivationFrameAlignment(); |
| 3004 #else // defined(V8_HOST_ARCH_MIPS) | 3548 #else // defined(V8_HOST_ARCH_MIPS) |
| 3005 // If we are using the simulator then we should always align to the expected | 3549 // If we are using the simulator then we should always align to the expected |
| 3006 // alignment. As the simulator is used to generate snapshots we do not know | 3550 // alignment. As the simulator is used to generate snapshots we do not know |
| 3007 // if the target platform will need alignment, so this is controlled from a | 3551 // if the target platform will need alignment, so this is controlled from a |
| 3008 // flag. | 3552 // flag. |
| 3009 return FLAG_sim_stack_alignment; | 3553 return FLAG_sim_stack_alignment; |
| 3010 #endif // defined(V8_HOST_ARCH_MIPS) | 3554 #endif // defined(V8_HOST_ARCH_MIPS) |
| 3011 } | 3555 } |
| 3012 | 3556 |
| 3557 void MacroAssembler::AssertStackIsAligned() { |
| 3558 if (emit_debug_code()) { |
| 3559 const int frame_alignment = ActivationFrameAlignment(); |
| 3560 const int frame_alignment_mask = frame_alignment - 1; |
| 3013 | 3561 |
| 3014 void MacroAssembler::AlignStack(int offset) { | 3562 if (frame_alignment > kPointerSize) { |
| 3015 // On MIPS an offset of 0 aligns to 0 modulo 8 bytes, | 3563 Label alignment_as_expected; |
| 3016 // and an offset of 1 aligns to 4 modulo 8 bytes. | 3564 ASSERT(IsPowerOf2(frame_alignment)); |
| 3017 #if defined(V8_HOST_ARCH_MIPS) | 3565 andi(at, sp, frame_alignment_mask); |
| 3018 // Running on the real platform. Use the alignment as mandated by the local | 3566 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); |
| 3019 // environment. | 3567 // Don't use Check here, as it will call Runtime_Abort re-entering here. |
| 3020 // Note: This will break if we ever start generating snapshots on one MIPS | 3568 stop("Unexpected stack alignment"); |
| 3021 // platform for another MIPS platform with a different alignment. | 3569 bind(&alignment_as_expected); |
| 3022 int activation_frame_alignment = OS::ActivationFrameAlignment(); | 3570 } |
| 3023 #else // defined(V8_HOST_ARCH_MIPS) | |
| 3024 // If we are using the simulator then we should always align to the expected | |
| 3025 // alignment. As the simulator is used to generate snapshots we do not know | |
| 3026 // if the target platform will need alignment, so we will always align at | |
| 3027 // this point here. | |
| 3028 int activation_frame_alignment = 2 * kPointerSize; | |
| 3029 #endif // defined(V8_HOST_ARCH_MIPS) | |
| 3030 if (activation_frame_alignment != kPointerSize) { | |
| 3031 // This code needs to be made more general if this assert doesn't hold. | |
| 3032 ASSERT(activation_frame_alignment == 2 * kPointerSize); | |
| 3033 if (offset == 0) { | |
| 3034 andi(t8, sp, activation_frame_alignment - 1); | |
| 3035 Push(zero_reg, eq, t8, zero_reg); | |
| 3036 } else { | |
| 3037 andi(t8, sp, activation_frame_alignment - 1); | |
| 3038 addiu(t8, t8, -4); | |
| 3039 Push(zero_reg, eq, t8, zero_reg); | |
| 3040 } | 3571 } |
| 3041 } | |
| 3042 } | 3572 } |
| 3043 | 3573 |
| 3044 | 3574 |
| 3045 | |
| 3046 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( | 3575 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( |
| 3047 Register reg, | 3576 Register reg, |
| 3048 Register scratch, | 3577 Register scratch, |
| 3049 Label* not_power_of_two_or_zero) { | 3578 Label* not_power_of_two_or_zero) { |
| 3050 Subu(scratch, reg, Operand(1)); | 3579 Subu(scratch, reg, Operand(1)); |
| 3051 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, | 3580 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, |
| 3052 scratch, Operand(zero_reg)); | 3581 scratch, Operand(zero_reg)); |
| 3053 and_(at, scratch, reg); // In the delay slot. | 3582 and_(at, scratch, reg); // In the delay slot. |
| 3054 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); | 3583 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); |
| 3055 } | 3584 } |
| (...skipping 29 matching lines...) Expand all Loading... |
| 3085 } | 3614 } |
| 3086 | 3615 |
| 3087 | 3616 |
| 3088 void MacroAssembler::AbortIfNotSmi(Register object) { | 3617 void MacroAssembler::AbortIfNotSmi(Register object) { |
| 3089 STATIC_ASSERT(kSmiTag == 0); | 3618 STATIC_ASSERT(kSmiTag == 0); |
| 3090 andi(at, object, kSmiTagMask); | 3619 andi(at, object, kSmiTagMask); |
| 3091 Assert(eq, "Operand is a smi", at, Operand(zero_reg)); | 3620 Assert(eq, "Operand is a smi", at, Operand(zero_reg)); |
| 3092 } | 3621 } |
| 3093 | 3622 |
| 3094 | 3623 |
| 3624 void MacroAssembler::AbortIfNotString(Register object) { |
| 3625 STATIC_ASSERT(kSmiTag == 0); |
| 3626 And(t0, object, Operand(kSmiTagMask)); |
| 3627 Assert(ne, "Operand is not a string", t0, Operand(zero_reg)); |
| 3628 push(object); |
| 3629 lw(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 3630 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); |
| 3631 Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE)); |
| 3632 pop(object); |
| 3633 } |
| 3634 |
| 3635 |
| 3095 void MacroAssembler::AbortIfNotRootValue(Register src, | 3636 void MacroAssembler::AbortIfNotRootValue(Register src, |
| 3096 Heap::RootListIndex root_value_index, | 3637 Heap::RootListIndex root_value_index, |
| 3097 const char* message) { | 3638 const char* message) { |
| 3098 ASSERT(!src.is(at)); | 3639 ASSERT(!src.is(at)); |
| 3099 LoadRoot(at, root_value_index); | 3640 LoadRoot(at, root_value_index); |
| 3100 Assert(eq, message, src, Operand(at)); | 3641 Assert(eq, message, src, Operand(at)); |
| 3101 } | 3642 } |
| 3102 | 3643 |
| 3103 | 3644 |
| 3104 void MacroAssembler::JumpIfNotHeapNumber(Register object, | 3645 void MacroAssembler::JumpIfNotHeapNumber(Register object, |
| (...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3176 And(scratch, type, Operand(kFlatAsciiStringMask)); | 3717 And(scratch, type, Operand(kFlatAsciiStringMask)); |
| 3177 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag)); | 3718 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag)); |
| 3178 } | 3719 } |
| 3179 | 3720 |
| 3180 | 3721 |
| 3181 static const int kRegisterPassedArguments = 4; | 3722 static const int kRegisterPassedArguments = 4; |
| 3182 | 3723 |
| 3183 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { | 3724 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { |
| 3184 int frame_alignment = ActivationFrameAlignment(); | 3725 int frame_alignment = ActivationFrameAlignment(); |
| 3185 | 3726 |
| 3186 // Reserve space for Isolate address which is always passed as last parameter | |
| 3187 num_arguments += 1; | |
| 3188 | |
| 3189 // Up to four simple arguments are passed in registers a0..a3. | 3727 // Up to four simple arguments are passed in registers a0..a3. |
| 3190 // Those four arguments must have reserved argument slots on the stack for | 3728 // Those four arguments must have reserved argument slots on the stack for |
| 3191 // mips, even though those argument slots are not normally used. | 3729 // mips, even though those argument slots are not normally used. |
| 3192 // Remaining arguments are pushed on the stack, above (higher address than) | 3730 // Remaining arguments are pushed on the stack, above (higher address than) |
| 3193 // the argument slots. | 3731 // the argument slots. |
| 3194 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); | 3732 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); |
| 3195 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? | 3733 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? |
| 3196 0 : num_arguments - kRegisterPassedArguments) + | 3734 0 : num_arguments - kRegisterPassedArguments) + |
| 3197 (StandardFrameConstants::kCArgsSlotsSize / | 3735 (StandardFrameConstants::kCArgsSlotsSize / |
| 3198 kPointerSize); | 3736 kPointerSize); |
| 3199 if (frame_alignment > kPointerSize) { | 3737 if (frame_alignment > kPointerSize) { |
| 3200 // Make stack end at alignment and make room for num_arguments - 4 words | 3738 // Make stack end at alignment and make room for num_arguments - 4 words |
| 3201 // and the original value of sp. | 3739 // and the original value of sp. |
| 3202 mov(scratch, sp); | 3740 mov(scratch, sp); |
| 3203 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); | 3741 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); |
| 3204 ASSERT(IsPowerOf2(frame_alignment)); | 3742 ASSERT(IsPowerOf2(frame_alignment)); |
| 3205 And(sp, sp, Operand(-frame_alignment)); | 3743 And(sp, sp, Operand(-frame_alignment)); |
| 3206 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3744 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
| 3207 } else { | 3745 } else { |
| 3208 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 3746 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); |
| 3209 } | 3747 } |
| 3210 } | 3748 } |
| 3211 | 3749 |
| 3212 | 3750 |
| 3213 void MacroAssembler::CallCFunction(ExternalReference function, | 3751 void MacroAssembler::CallCFunction(ExternalReference function, |
| 3214 int num_arguments) { | 3752 int num_arguments) { |
| 3215 CallCFunctionHelper(no_reg, function, at, num_arguments); | 3753 CallCFunctionHelper(no_reg, function, t8, num_arguments); |
| 3216 } | 3754 } |
| 3217 | 3755 |
| 3218 | 3756 |
| 3219 void MacroAssembler::CallCFunction(Register function, | 3757 void MacroAssembler::CallCFunction(Register function, |
| 3220 Register scratch, | 3758 Register scratch, |
| 3221 int num_arguments) { | 3759 int num_arguments) { |
| 3222 CallCFunctionHelper(function, | 3760 CallCFunctionHelper(function, |
| 3223 ExternalReference::the_hole_value_location(isolate()), | 3761 ExternalReference::the_hole_value_location(isolate()), |
| 3224 scratch, | 3762 scratch, |
| 3225 num_arguments); | 3763 num_arguments); |
| 3226 } | 3764 } |
| 3227 | 3765 |
| 3228 | 3766 |
| 3229 void MacroAssembler::CallCFunctionHelper(Register function, | 3767 void MacroAssembler::CallCFunctionHelper(Register function, |
| 3230 ExternalReference function_reference, | 3768 ExternalReference function_reference, |
| 3231 Register scratch, | 3769 Register scratch, |
| 3232 int num_arguments) { | 3770 int num_arguments) { |
| 3233 // Push Isolate address as the last argument. | |
| 3234 if (num_arguments < kRegisterPassedArguments) { | |
| 3235 Register arg_to_reg[] = {a0, a1, a2, a3}; | |
| 3236 Register r = arg_to_reg[num_arguments]; | |
| 3237 li(r, Operand(ExternalReference::isolate_address())); | |
| 3238 } else { | |
| 3239 int stack_passed_arguments = num_arguments - kRegisterPassedArguments + | |
| 3240 (StandardFrameConstants::kCArgsSlotsSize / | |
| 3241 kPointerSize); | |
| 3242 // Push Isolate address on the stack after the arguments. | |
| 3243 li(scratch, Operand(ExternalReference::isolate_address())); | |
| 3244 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | |
| 3245 } | |
| 3246 num_arguments += 1; | |
| 3247 | |
| 3248 // Make sure that the stack is aligned before calling a C function unless | 3771 // Make sure that the stack is aligned before calling a C function unless |
| 3249 // running in the simulator. The simulator has its own alignment check which | 3772 // running in the simulator. The simulator has its own alignment check which |
| 3250 // provides more information. | 3773 // provides more information. |
| 3251 // The argument stots are presumed to have been set up by | 3774 // The argument stots are presumed to have been set up by |
| 3252 // PrepareCallCFunction. The C function must be called via t9, for mips ABI. | 3775 // PrepareCallCFunction. The C function must be called via t9, for mips ABI. |
| 3253 | 3776 |
| 3254 #if defined(V8_HOST_ARCH_MIPS) | 3777 #if defined(V8_HOST_ARCH_MIPS) |
| 3255 if (emit_debug_code()) { | 3778 if (emit_debug_code()) { |
| 3256 int frame_alignment = OS::ActivationFrameAlignment(); | 3779 int frame_alignment = OS::ActivationFrameAlignment(); |
| 3257 int frame_alignment_mask = frame_alignment - 1; | 3780 int frame_alignment_mask = frame_alignment - 1; |
| 3258 if (frame_alignment > kPointerSize) { | 3781 if (frame_alignment > kPointerSize) { |
| 3259 ASSERT(IsPowerOf2(frame_alignment)); | 3782 ASSERT(IsPowerOf2(frame_alignment)); |
| 3260 Label alignment_as_expected; | 3783 Label alignment_as_expected; |
| 3261 And(at, sp, Operand(frame_alignment_mask)); | 3784 And(at, sp, Operand(frame_alignment_mask)); |
| 3262 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); | 3785 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); |
| 3263 // Don't use Check here, as it will call Runtime_Abort possibly | 3786 // Don't use Check here, as it will call Runtime_Abort possibly |
| 3264 // re-entering here. | 3787 // re-entering here. |
| 3265 stop("Unexpected alignment in CallCFunction"); | 3788 stop("Unexpected alignment in CallCFunction"); |
| 3266 bind(&alignment_as_expected); | 3789 bind(&alignment_as_expected); |
| 3267 } | 3790 } |
| 3268 } | 3791 } |
| 3269 #endif // V8_HOST_ARCH_MIPS | 3792 #endif // V8_HOST_ARCH_MIPS |
| 3270 | 3793 |
| 3271 // Just call directly. The function called cannot cause a GC, or | 3794 // Just call directly. The function called cannot cause a GC, or |
| 3272 // allow preemption, so the return address in the link register | 3795 // allow preemption, so the return address in the link register |
| 3273 // stays correct. | 3796 // stays correct. |
| 3274 if (!function.is(t9)) { | 3797 |
| 3798 if (function.is(no_reg)) { |
| 3799 function = t9; |
| 3800 li(function, Operand(function_reference)); |
| 3801 } else if (!function.is(t9)) { |
| 3275 mov(t9, function); | 3802 mov(t9, function); |
| 3276 function = t9; | 3803 function = t9; |
| 3277 } | 3804 } |
| 3278 | 3805 |
| 3279 if (function.is(no_reg)) { | |
| 3280 li(t9, Operand(function_reference)); | |
| 3281 function = t9; | |
| 3282 } | |
| 3283 | |
| 3284 Call(function); | 3806 Call(function); |
| 3285 | 3807 |
| 3286 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); | 3808 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); |
| 3287 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? | 3809 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? |
| 3288 0 : num_arguments - kRegisterPassedArguments) + | 3810 0 : num_arguments - kRegisterPassedArguments) + |
| 3289 (StandardFrameConstants::kCArgsSlotsSize / | 3811 (StandardFrameConstants::kCArgsSlotsSize / |
| 3290 kPointerSize); | 3812 kPointerSize); |
| 3291 | 3813 |
| 3292 if (OS::ActivationFrameAlignment() > kPointerSize) { | 3814 if (OS::ActivationFrameAlignment() > kPointerSize) { |
| 3293 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3815 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
| 3294 } else { | 3816 } else { |
| 3295 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); | 3817 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); |
| 3296 } | 3818 } |
| 3297 } | 3819 } |
| 3298 | 3820 |
| 3299 | 3821 |
| 3300 #undef BRANCH_ARGS_CHECK | 3822 #undef BRANCH_ARGS_CHECK |
| 3301 | 3823 |
| 3302 | 3824 |
| 3303 #ifdef ENABLE_DEBUGGER_SUPPORT | |
| 3304 CodePatcher::CodePatcher(byte* address, int instructions) | 3825 CodePatcher::CodePatcher(byte* address, int instructions) |
| 3305 : address_(address), | 3826 : address_(address), |
| 3306 instructions_(instructions), | 3827 instructions_(instructions), |
| 3307 size_(instructions * Assembler::kInstrSize), | 3828 size_(instructions * Assembler::kInstrSize), |
| 3308 masm_(address, size_ + Assembler::kGap) { | 3829 masm_(Isolate::Current(), address, size_ + Assembler::kGap) { |
| 3309 // Create a new macro assembler pointing to the address of the code to patch. | 3830 // Create a new macro assembler pointing to the address of the code to patch. |
| 3310 // The size is adjusted with kGap on order for the assembler to generate size | 3831 // The size is adjusted with kGap on order for the assembler to generate size |
| 3311 // bytes of instructions without failing with buffer size constraints. | 3832 // bytes of instructions without failing with buffer size constraints. |
| 3312 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 3833 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 3313 } | 3834 } |
| 3314 | 3835 |
| 3315 | 3836 |
| 3316 CodePatcher::~CodePatcher() { | 3837 CodePatcher::~CodePatcher() { |
| 3317 // Indicate that code has changed. | 3838 // Indicate that code has changed. |
| 3318 CPU::FlushICache(address_, size_); | 3839 CPU::FlushICache(address_, size_); |
| 3319 | 3840 |
| 3320 // Check that the code was patched as expected. | 3841 // Check that the code was patched as expected. |
| 3321 ASSERT(masm_.pc_ == address_ + size_); | 3842 ASSERT(masm_.pc_ == address_ + size_); |
| 3322 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 3843 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 3323 } | 3844 } |
| 3324 | 3845 |
| 3325 | 3846 |
| 3326 void CodePatcher::Emit(Instr x) { | 3847 void CodePatcher::Emit(Instr instr) { |
| 3327 masm()->emit(x); | 3848 masm()->emit(instr); |
| 3328 } | 3849 } |
| 3329 | 3850 |
| 3330 | 3851 |
| 3331 void CodePatcher::Emit(Address addr) { | 3852 void CodePatcher::Emit(Address addr) { |
| 3332 masm()->emit(reinterpret_cast<Instr>(addr)); | 3853 masm()->emit(reinterpret_cast<Instr>(addr)); |
| 3333 } | 3854 } |
| 3334 | 3855 |
| 3335 | 3856 |
| 3336 #endif // ENABLE_DEBUGGER_SUPPORT | 3857 void CodePatcher::ChangeBranchCondition(Condition cond) { |
| 3858 Instr instr = Assembler::instr_at(masm_.pc_); |
| 3859 ASSERT(Assembler::IsBranch(instr)); |
| 3860 uint32_t opcode = Assembler::GetOpcodeField(instr); |
| 3861 // Currently only the 'eq' and 'ne' cond values are supported and the simple |
| 3862 // branch instructions (with opcode being the branch type). |
| 3863 // There are some special cases (see Assembler::IsBranch()) so extending this |
| 3864 // would be tricky. |
| 3865 ASSERT(opcode == BEQ || |
| 3866 opcode == BNE || |
| 3867 opcode == BLEZ || |
| 3868 opcode == BGTZ || |
| 3869 opcode == BEQL || |
| 3870 opcode == BNEL || |
| 3871 opcode == BLEZL || |
| 3872 opcode == BGTZL); |
| 3873 opcode = (cond == eq) ? BEQ : BNE; |
| 3874 instr = (instr & ~kOpcodeMask) | opcode; |
| 3875 masm_.emit(instr); |
| 3876 } |
| 3337 | 3877 |
| 3338 | 3878 |
| 3339 } } // namespace v8::internal | 3879 } } // namespace v8::internal |
| 3340 | 3880 |
| 3341 #endif // V8_TARGET_ARCH_MIPS | 3881 #endif // V8_TARGET_ARCH_MIPS |
| OLD | NEW |