| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 32 matching lines...) |
| 43 // Registers t8, t9, and at are reserved for use by the MacroAssembler. | 43 // Registers t8, t9, and at are reserved for use by the MacroAssembler. |
| 44 // | 44 // |
| 45 // The programmer should know that the MacroAssembler may clobber these three, | 45 // The programmer should know that the MacroAssembler may clobber these three, |
| 46 // but won't touch other registers except in special cases. | 46 // but won't touch other registers except in special cases. |
| 47 // | 47 // |
| 48 // Per the MIPS ABI, register t9 must be used for indirect function call | 48 // Per the MIPS ABI, register t9 must be used for indirect function call |
| 49 // via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when | 49 // via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when |
| 50 // trying to update gp register for position-independent-code. Whenever | 50 // trying to update gp register for position-independent-code. Whenever |
| 51 // MIPS generated code calls C code, it must be via t9 register. | 51 // MIPS generated code calls C code, it must be via t9 register. |
| 52 | 52 |
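As a quick illustration of the t9 convention described above, here is a minimal, hedged sketch of how generated code would hand control to a C helper; `masm` and the `ExternalReference` value are assumed to be in scope, and real callers would normally go through `CallCFunction()` further down in this header.

```cpp
// Sketch only: the helper reference is illustrative, not a real V8 symbol.
void CallHelperViaT9(MacroAssembler* masm, ExternalReference helper) {
  masm->li(t9, Operand(helper));  // The callee's address must be in t9 so that
  masm->Call(t9);                 // 'jalr t9' lets a PIC callee recompute gp.
}
```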
| 53 // Registers aliases | 53 |
| 54 // Register aliases. |
| 54 // cp is assumed to be a callee saved register. | 55 // cp is assumed to be a callee saved register. |
| 56 const Register lithiumScratchReg = s3; // Scratch register. |
| 57 const Register lithiumScratchReg2 = s4; // Scratch register. |
| 58 const Register condReg = s5; // Simulated (partial) condition code for mips. |
| 55 const Register roots = s6; // Roots array pointer. | 59 const Register roots = s6; // Roots array pointer. |
| 56 const Register cp = s7; // JavaScript context pointer. | 60 const Register cp = s7; // JavaScript context pointer. |
| 57 const Register fp = s8_fp; // Alias for fp. | 61 const Register fp = s8_fp; // Alias for fp. |
| 58 // Registers used for condition evaluation. | 62 const DoubleRegister lithiumScratchDouble = f30; // Double scratch register. |
| 59 const Register condReg1 = s4; | |
| 60 const Register condReg2 = s5; | |
| 61 | |
| 62 | 63 |
| 63 // Flags used for the AllocateInNewSpace functions. | 64 // Flags used for the AllocateInNewSpace functions. |
| 64 enum AllocationFlags { | 65 enum AllocationFlags { |
| 65 // No special flags. | 66 // No special flags. |
| 66 NO_ALLOCATION_FLAGS = 0, | 67 NO_ALLOCATION_FLAGS = 0, |
| 67 // Return the pointer to the allocated object, already tagged as a heap object. | 68 // Return the pointer to the allocated object, already tagged as a heap object. |
| 68 TAG_OBJECT = 1 << 0, | 69 TAG_OBJECT = 1 << 0, |
| 69 // The content of the result register already contains the allocation top in | 70 // The content of the result register already contains the allocation top in |
| 70 // new space. | 71 // new space. |
| 71 RESULT_CONTAINS_TOP = 1 << 1, | 72 RESULT_CONTAINS_TOP = 1 << 1, |
| (...skipping 11 matching lines...) |
| 83 // Don't load NaNs or infinities, branch to the non number case instead. | 84 // Don't load NaNs or infinities, branch to the non number case instead. |
| 84 AVOID_NANS_AND_INFINITIES = 1 << 1 | 85 AVOID_NANS_AND_INFINITIES = 1 << 1 |
| 85 }; | 86 }; |
| 86 | 87 |
| 87 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls. | 88 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls. |
| 88 enum BranchDelaySlot { | 89 enum BranchDelaySlot { |
| 89 USE_DELAY_SLOT, | 90 USE_DELAY_SLOT, |
| 90 PROTECT | 91 PROTECT |
| 91 }; | 92 }; |
| 92 | 93 |
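A hedged sketch of how the two modes read in practice (`masm` assumed); with USE_DELAY_SLOT the caller takes responsibility for the instruction that lands in the slot:

```cpp
// PROTECT (the default): the MacroAssembler fills the delay slot with a nop.
masm->Ret();

// USE_DELAY_SLOT: the next emitted instruction sits in the delay slot of
// 'jr ra' and executes on the way out, saving the nop.
masm->Ret(USE_DELAY_SLOT);
masm->mov(v0, zero_reg);  // Runs in the delay slot.
```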
| 94 |
| 95 // ----------------------------------------------------------------------------- |
| 96 // Static helper functions. |
| 97 |
| 98 static MemOperand ContextOperand(Register context, int index) { |
| 99 return MemOperand(context, Context::SlotOffset(index)); |
| 100 } |
| 101 |
| 102 |
| 103 static inline MemOperand GlobalObjectOperand() { |
| 104 return ContextOperand(cp, Context::GLOBAL_INDEX); |
| 105 } |
| 106 |
| 107 |
| 108 // Generate a MemOperand for loading a field from an object. |
| 109 static inline MemOperand FieldMemOperand(Register object, int offset) { |
| 110 return MemOperand(object, offset - kHeapObjectTag); |
| 111 } |
| 112 |
| 113 |
| 114 // Generate a MemOperand for storing arguments 5..N on the stack |
| 115 // when calling CallCFunction(). |
| 116 static inline MemOperand CFunctionArgumentOperand(int index) { |
| 117 ASSERT(index > kCArgSlotCount); |
| 118 // Argument 5 takes the slot just past the four Arg-slots. |
| 119 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; |
| 120 return MemOperand(sp, offset); |
| 121 } |
| 122 |
| 123 |
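Typical uses of the helpers moved up here, as a hedged sketch (`masm` assumed; the field and context constants are the usual ones used elsewhere in this port):

```cpp
masm->lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));  // Tagged-pointer field load.
masm->lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));    // Context slot load.
masm->sw(t0, CFunctionArgumentOperand(5));                  // Fifth C-call argument.
```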
| 93 // MacroAssembler implements a collection of frequently used macros. | 124 // MacroAssembler implements a collection of frequently used macros. |
| 94 class MacroAssembler: public Assembler { | 125 class MacroAssembler: public Assembler { |
| 95 public: | 126 public: |
| 96 // The isolate parameter can be NULL if the macro assembler should | 127 // The isolate parameter can be NULL if the macro assembler should |
| 97 // not use isolate-dependent functionality. In this case, it's the | 128 // not use isolate-dependent functionality. In this case, it's the |
| 98 // responsibility of the caller to never invoke such function on the | 129 // responsibility of the caller to never invoke such function on the |
| 99 // macro assembler. | 130 // macro assembler. |
| 100 MacroAssembler(Isolate* isolate, void* buffer, int size); | 131 MacroAssembler(Isolate* isolate, void* buffer, int size); |
| 101 | 132 |
| 102 // Arguments macros. | 133 // Arguments macros. |
| (...skipping 28 matching lines...) |
| 131 | 162 |
| 132 | 163 |
| 133 // Jump, Call, and Ret pseudo instructions implementing inter-working. | 164 // Jump, Call, and Ret pseudo instructions implementing inter-working. |
| 134 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \ | 165 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \ |
| 135 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT | 166 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT |
| 136 | 167 |
| 137 void Jump(Register target, COND_ARGS); | 168 void Jump(Register target, COND_ARGS); |
| 138 void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS); | 169 void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS); |
| 139 void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS); | 170 void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS); |
| 140 void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS); | 171 void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS); |
| 141 int CallSize(Register target, COND_ARGS); | 172 static int CallSize(Register target, COND_ARGS); |
| 142 void Call(Register target, COND_ARGS); | 173 void Call(Register target, COND_ARGS); |
| 143 int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS); | 174 static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS); |
| 144 void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); | 175 void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); |
| 145 int CallSize(Handle<Code> code, | 176 static int CallSize(Handle<Code> code, |
| 146 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, | 177 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, |
| 147 unsigned ast_id = kNoASTId, | 178 unsigned ast_id = kNoASTId, |
| 148 COND_ARGS); | 179 COND_ARGS); |
| 149 void Call(Handle<Code> code, | 180 void Call(Handle<Code> code, |
| 150 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, | 181 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, |
| 151 unsigned ast_id = kNoASTId, | 182 unsigned ast_id = kNoASTId, |
| 152 COND_ARGS); | 183 COND_ARGS); |
| 153 void Ret(COND_ARGS); | 184 void Ret(COND_ARGS); |
| 154 inline void Ret(BranchDelaySlot bd) { | 185 inline void Ret(BranchDelaySlot bd, Condition cond = al, |
| 155 Ret(al, zero_reg, Operand(zero_reg), bd); | 186 Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) { |
| 187 Ret(cond, rs, rt, bd); |
| 156 } | 188 } |
| 157 | 189 |
| 158 #undef COND_ARGS | 190 #undef COND_ARGS |
| 159 | 191 |
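Making CallSize() static means the expected length of a call sequence can be computed without a MacroAssembler instance. A hedged sketch of the pattern this enables (`code` is some Handle&lt;Code&gt; assumed to be in scope):

```cpp
int expected = MacroAssembler::CallSize(code, RelocInfo::CODE_TARGET);
masm->Call(code, RelocInfo::CODE_TARGET);
// Callers can then check the size actually emitted against 'expected'.
```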
| 160 // Emit code to discard a non-negative number of pointer-sized elements | 192 // Emit code to discard a non-negative number of pointer-sized elements |
| 161 // from the stack, clobbering only the sp register. | 193 // from the stack, clobbering only the sp register. |
| 162 void Drop(int count, | 194 void Drop(int count, |
| 163 Condition cond = cc_always, | 195 Condition cond = cc_always, |
| 164 Register reg = no_reg, | 196 Register reg = no_reg, |
| 165 const Operand& op = Operand(no_reg)); | 197 const Operand& op = Operand(no_reg)); |
| (...skipping 24 matching lines...) |
| 190 inline void Move(Register dst_low, Register dst_high, FPURegister src) { | 222 inline void Move(Register dst_low, Register dst_high, FPURegister src) { |
| 191 mfc1(dst_low, src); | 223 mfc1(dst_low, src); |
| 192 mfc1(dst_high, FPURegister::from_code(src.code() + 1)); | 224 mfc1(dst_high, FPURegister::from_code(src.code() + 1)); |
| 193 } | 225 } |
| 194 | 226 |
| 195 inline void Move(FPURegister dst, Register src_low, Register src_high) { | 227 inline void Move(FPURegister dst, Register src_low, Register src_high) { |
| 196 mtc1(src_low, dst); | 228 mtc1(src_low, dst); |
| 197 mtc1(src_high, FPURegister::from_code(dst.code() + 1)); | 229 mtc1(src_high, FPURegister::from_code(dst.code() + 1)); |
| 198 } | 230 } |
| 199 | 231 |
| 232 void Move(FPURegister dst, double imm); |
| 233 |
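A hedged sketch of the Move() forms above, including the new immediate-double overload added by this patch (`masm` assumed; on MIPS32 a double occupies an even/odd FPU register pair):

```cpp
masm->Move(t0, t1, f0);  // t0 = low word, t1 = high word of the double in f0/f1.
masm->Move(f2, t0, t1);  // Rebuild the same double in f2/f3.
masm->Move(f4, 1.0);     // New in this patch: materialize an immediate double.
```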
| 200 // Jump unconditionally to given label. | 234 // Jump unconditionally to given label. |
| 201 // We NEED a nop in the branch delay slot, as it is used by v8, for example in | 235 // We NEED a nop in the branch delay slot, as it is used by v8, for example in |
| 202 // CodeGenerator::ProcessDeferred(). | 236 // CodeGenerator::ProcessDeferred(). |
| 203 // Currently the branch delay slot is filled by the MacroAssembler. | 237 // Currently the branch delay slot is filled by the MacroAssembler. |
| 204 // Prefer b(Label) for code generation. | 238 // Prefer b(Label) for code generation. |
| 205 void jmp(Label* L) { | 239 void jmp(Label* L) { |
| 206 Branch(L); | 240 Branch(L); |
| 207 } | 241 } |
| 208 | 242 |
| 209 // Load an object from the root table. | 243 // Load an object from the root table. |
| (...skipping 300 matching lines...) |
| 510 } | 544 } |
| 511 | 545 |
| 512 // Pop two registers. Pops rightmost register first (from lower address). | 546 // Pop two registers. Pops rightmost register first (from lower address). |
| 513 void Pop(Register src1, Register src2) { | 547 void Pop(Register src1, Register src2) { |
| 514 ASSERT(!src1.is(src2)); | 548 ASSERT(!src1.is(src2)); |
| 515 lw(src2, MemOperand(sp, 0 * kPointerSize)); | 549 lw(src2, MemOperand(sp, 0 * kPointerSize)); |
| 516 lw(src1, MemOperand(sp, 1 * kPointerSize)); | 550 lw(src1, MemOperand(sp, 1 * kPointerSize)); |
| 517 Addu(sp, sp, 2 * kPointerSize); | 551 Addu(sp, sp, 2 * kPointerSize); |
| 518 } | 552 } |
| 519 | 553 |
| 554 // Pop three registers. Pops rightmost register first (from lower address). |
| 555 void Pop(Register src1, Register src2, Register src3) { |
| 556 lw(src3, MemOperand(sp, 0 * kPointerSize)); |
| 557 lw(src2, MemOperand(sp, 1 * kPointerSize)); |
| 558 lw(src1, MemOperand(sp, 2 * kPointerSize)); |
| 559 Addu(sp, sp, 3 * kPointerSize); |
| 560 } |
| 561 |
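A hedged usage sketch, assuming the matching multi-register Push overloads from the elided section above; the same register list works for both directions:

```cpp
masm->Push(a0, a1, a2);  // a0 ends up at the highest address, a2 at the new sp.
// ... code that clobbers a0..a2 ...
masm->Pop(a0, a1, a2);   // Pops a2 first (lowest address), then a1, then a0.
```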
| 520 void Pop(uint32_t count = 1) { | 562 void Pop(uint32_t count = 1) { |
| 521 Addu(sp, sp, Operand(count * kPointerSize)); | 563 Addu(sp, sp, Operand(count * kPointerSize)); |
| 522 } | 564 } |
| 523 | 565 |
| 524 // Push and pop the registers that can hold pointers, as defined by the | 566 // Push and pop the registers that can hold pointers, as defined by the |
| 525 // RegList constant kSafepointSavedRegisters. | 567 // RegList constant kSafepointSavedRegisters. |
| 526 void PushSafepointRegisters(); | 568 void PushSafepointRegisters(); |
| 527 void PopSafepointRegisters(); | 569 void PopSafepointRegisters(); |
| 528 void PushSafepointRegistersAndDoubles(); | 570 void PushSafepointRegistersAndDoubles(); |
| 529 void PopSafepointRegistersAndDoubles(); | 571 void PopSafepointRegistersAndDoubles(); |
| 530 // Store value in register src in the safepoint stack slot for | 572 // Store value in register src in the safepoint stack slot for |
| 531 // register dst. | 573 // register dst. |
| 532 void StoreToSafepointRegisterSlot(Register src, Register dst); | 574 void StoreToSafepointRegisterSlot(Register src, Register dst); |
| 533 void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst); | 575 void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst); |
| 534 // Load the value of the src register from its safepoint stack slot | 576 // Load the value of the src register from its safepoint stack slot |
| 535 // into register dst. | 577 // into register dst. |
| 536 void LoadFromSafepointRegisterSlot(Register dst, Register src); | 578 void LoadFromSafepointRegisterSlot(Register dst, Register src); |
| 537 | 579 |
| 538 // MIPS32 R2 instruction macro. | 580 // MIPS32 R2 instruction macro. |
| 539 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); | 581 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); |
| 540 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); | 582 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); |
| 541 | 583 |
| 584 // --------------------------------------------------------------------------- |
| 585 // FPU macros. These do not handle special cases like NaN or +- inf. |
| 586 |
| 542 // Convert unsigned word to double. | 587 // Convert unsigned word to double. |
| 543 void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch); | 588 void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch); |
| 544 void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch); | 589 void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch); |
| 545 | 590 |
| 546 // Convert double to unsigned word. | 591 // Convert double to unsigned word. |
| 547 void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch); | 592 void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch); |
| 548 void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch); | 593 void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch); |
| 549 | 594 |
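A hedged round-trip sketch of the two conversions (register choices are arbitrary; as noted above, NaN and infinity are not handled):

```cpp
masm->Cvt_d_uw(f12, t0, f14);     // f12 = (double)(uint32_t)t0; f14 is scratch.
masm->Trunc_uw_d(f10, f12, f14);  // f10 = the unsigned word truncated back out.
```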
| 595 // Wrapper function for the different cmp/branch types. |
| 596 void BranchF(Label* target, |
| 597 Label* nan, |
| 598 Condition cc, |
| 599 FPURegister cmp1, |
| 600 FPURegister cmp2, |
| 601 BranchDelaySlot bd = PROTECT); |
| 602 |
| 603 // Alternate (inline) version for better readability with USE_DELAY_SLOT. |
| 604 inline void BranchF(BranchDelaySlot bd, |
| 605 Label* target, |
| 606 Label* nan, |
| 607 Condition cc, |
| 608 FPURegister cmp1, |
| 609 FPURegister cmp2) { |
| 610 BranchF(target, nan, cc, cmp1, cmp2, bd); |
| 611 } |
| 612 |
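A hedged sketch of a double comparison with an explicit NaN target (labels and registers are illustrative):

```cpp
Label below, nan;
// Branch to 'below' if f12 < f14, to 'nan' if either operand is NaN,
// and fall through otherwise.
masm->BranchF(&below, &nan, lt, f12, f14);
// The inline overload above only reorders the arguments, e.g.:
//   masm->BranchF(USE_DELAY_SLOT, &below, &nan, lt, f12, f14);
```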
| 550 // Convert the HeapNumber pointed to by source to a 32bits signed integer | 613 // Convert the HeapNumber pointed to by source to a 32bits signed integer |
| 551 // dest. If the HeapNumber does not fit into a 32bits signed integer branch | 614 // dest. If the HeapNumber does not fit into a 32bits signed integer branch |
| 552 // to not_int32 label. If FPU is available double_scratch is used but not | 615 // to not_int32 label. If FPU is available double_scratch is used but not |
| 553 // scratch2. | 616 // scratch2. |
| 554 void ConvertToInt32(Register source, | 617 void ConvertToInt32(Register source, |
| 555 Register dest, | 618 Register dest, |
| 556 Register scratch, | 619 Register scratch, |
| 557 Register scratch2, | 620 Register scratch2, |
| 558 FPURegister double_scratch, | 621 FPURegister double_scratch, |
| 559 Label *not_int32); | 622 Label *not_int32); |
| 560 | 623 |
| 624 // Truncates a double using a specific rounding mode. |
| 625 // The except_flag will contain any exceptions caused by the instruction. |
| 626 // If check_inexact is kDontCheckForInexactConversion, then the inexact |
| 627 // exception is masked. |
| 628 void EmitFPUTruncate(FPURoundingMode rounding_mode, |
| 629 FPURegister result, |
| 630 DoubleRegister double_input, |
| 631 Register scratch1, |
| 632 Register except_flag, |
| 633 CheckForInexactConversion check_inexact |
| 634 = kDontCheckForInexactConversion); |
| 635 |
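A hedged sketch of checking the exception flag after a truncation; kRoundToZero is assumed to be the FPURoundingMode spelling used elsewhere in the port:

```cpp
Label conversion_failed;
masm->EmitFPUTruncate(kRoundToZero, f10, f12, t0, t1);  // t1 <- exception bits.
masm->Branch(&conversion_failed, ne, t1, Operand(zero_reg));
```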
| 561 // Helper for EmitECMATruncate. | 636 // Helper for EmitECMATruncate. |
| 562 // This will truncate a floating-point value outside of the signed 32bit | 637 // This will truncate a floating-point value outside of the signed 32bit |
| 563 // integer range to a 32bit signed integer. | 638 // integer range to a 32bit signed integer. |
| 564 // Expects the double value loaded in input_high and input_low. | 639 // Expects the double value loaded in input_high and input_low. |
| 565 // Exits with the answer in 'result'. | 640 // Exits with the answer in 'result'. |
| 566 // Note that this code does not work for values in the 32bit range! | 641 // Note that this code does not work for values in the 32bit range! |
| 567 void EmitOutOfInt32RangeTruncate(Register result, | 642 void EmitOutOfInt32RangeTruncate(Register result, |
| 568 Register input_high, | 643 Register input_high, |
| 569 Register input_low, | 644 Register input_low, |
| 570 Register scratch); | 645 Register scratch); |
| 571 | 646 |
| 572 // Performs a truncating conversion of a floating point number as used by | 647 // Performs a truncating conversion of a floating point number as used by |
| 573 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. | 648 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. |
| 574 // Exits with 'result' holding the answer and all other registers clobbered. | 649 // Exits with 'result' holding the answer and all other registers clobbered. |
| 575 void EmitECMATruncate(Register result, | 650 void EmitECMATruncate(Register result, |
| 576 FPURegister double_input, | 651 FPURegister double_input, |
| 577 FPURegister single_scratch, | 652 FPURegister single_scratch, |
| 578 Register scratch, | 653 Register scratch, |
| 579 Register scratch2, | 654 Register scratch2, |
| 580 Register scratch3); | 655 Register scratch3); |
| 581 | 656 |
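For the ECMA truncation, a hedged one-liner showing the register roles (all scratch registers are clobbered, per the comment above):

```cpp
masm->EmitECMATruncate(v0, f12, f10, t0, t1, t2);  // v0 = ToInt32 of the double in f12.
```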
| 657 |
| 582 // ------------------------------------------------------------------------- | 658 // ------------------------------------------------------------------------- |
| 583 // Activation frames. | 659 // Activation frames. |
| 584 | 660 |
| 585 void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } | 661 void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } |
| 586 void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } | 662 void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } |
| 587 | 663 |
| 588 void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } | 664 void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } |
| 589 void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } | 665 void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } |
| 590 | 666 |
| 591 // Enter exit frame. | 667 // Enter exit frame. |
| (...skipping 155 matching lines...) |
| 747 void DispatchMap(Register obj, | 823 void DispatchMap(Register obj, |
| 748 Register scratch, | 824 Register scratch, |
| 749 Handle<Map> map, | 825 Handle<Map> map, |
| 750 Handle<Code> success, | 826 Handle<Code> success, |
| 751 SmiCheckType smi_check_type); | 827 SmiCheckType smi_check_type); |
| 752 | 828 |
| 753 // Generates code for reporting that an illegal operation has | 829 // Generates code for reporting that an illegal operation has |
| 754 // occurred. | 830 // occurred. |
| 755 void IllegalOperation(int num_arguments); | 831 void IllegalOperation(int num_arguments); |
| 756 | 832 |
| 833 |
| 834 // Load and check the instance type of an object for being a string. |
| 835 // Loads the type into the second argument register. |
| 836 // Returns a condition that will be enabled if the object was a string. |
| 837 Condition IsObjectStringType(Register obj, |
| 838 Register type, |
| 839 Register result) { |
| 840 lw(type, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 841 lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); |
| 842 And(type, type, Operand(kIsNotStringMask)); |
| 843 ASSERT_EQ(0, kStringTag); |
| 844 return eq; |
| 845 } |
| 846 |
| 847 |
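A hedged sketch of consuming the returned Condition together with the masked type bits left in the 'type' register ('result' is unused by the body above, so any free register will do):

```cpp
Label is_string;
Condition cond = masm->IsObjectStringType(a0, a1, a2);  // a1 <- type & kIsNotStringMask.
masm->Branch(&is_string, cond, a1, Operand(zero_reg));  // Taken when a0 is a string.
```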
| 757 // Picks out an array index from the hash field. | 848 // Picks out an array index from the hash field. |
| 758 // Register use: | 849 // Register use: |
| 759 // hash - holds the index's hash. Clobbered. | 850 // hash - holds the index's hash. Clobbered. |
| 760 // index - holds the overwritten index on exit. | 851 // index - holds the overwritten index on exit. |
| 761 void IndexFromHash(Register hash, Register index); | 852 void IndexFromHash(Register hash, Register index); |
| 762 | 853 |
| 763 // Get the number of least significant bits from a register. | 854 // Get the number of least significant bits from a register. |
| 764 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); | 855 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); |
| 765 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); | 856 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); |
| 766 | 857 |
| (...skipping 105 matching lines...) |
| 872 // necessary. Do not perform a GC but instead return a retry after GC | 963 // necessary. Do not perform a GC but instead return a retry after GC |
| 873 // failure. | 964 // failure. |
| 874 MUST_USE_RESULT MaybeObject* TryTailCallExternalReference( | 965 MUST_USE_RESULT MaybeObject* TryTailCallExternalReference( |
| 875 const ExternalReference& ext, int num_arguments, int result_size); | 966 const ExternalReference& ext, int num_arguments, int result_size); |
| 876 | 967 |
| 877 // Convenience function: tail call a runtime routine (jump). | 968 // Convenience function: tail call a runtime routine (jump). |
| 878 void TailCallRuntime(Runtime::FunctionId fid, | 969 void TailCallRuntime(Runtime::FunctionId fid, |
| 879 int num_arguments, | 970 int num_arguments, |
| 880 int result_size); | 971 int result_size); |
| 881 | 972 |
| 973 int CalculateStackPassedWords(int num_reg_arguments, |
| 974 int num_double_arguments); |
| 975 |
| 882 // Before calling a C-function from generated code, align arguments on stack | 976 // Before calling a C-function from generated code, align arguments on stack |
| 883 // and add space for the four mips argument slots. | 977 // and add space for the four mips argument slots. |
| 884 // After aligning the frame, non-register arguments must be stored on the | 978 // After aligning the frame, non-register arguments must be stored on the |
| 885 // stack, after the argument-slots using helper: CFunctionArgumentOperand(). | 979 // stack, after the argument-slots using helper: CFunctionArgumentOperand(). |
| 886 // The argument count assumes all arguments are word sized. | 980 // The argument count assumes all arguments are word sized. |
| 887 // Some compilers/platforms require the stack to be aligned when calling | 981 // Some compilers/platforms require the stack to be aligned when calling |
| 888 // C++ code. | 982 // C++ code. |
| 889 // Needs a scratch register to do some arithmetic. This register will be | 983 // Needs a scratch register to do some arithmetic. This register will be |
| 890 // trashed. | 984 // trashed. |
| 891 void PrepareCallCFunction(int num_arguments, Register scratch); | 985 void PrepareCallCFunction(int num_reg_arguments, |
| 986 int num_double_registers, |
| 987 Register scratch); |
| 988 void PrepareCallCFunction(int num_reg_arguments, |
| 989 Register scratch); |
| 892 | 990 |
| 893 // Arguments 1-4 are placed in registers a0 thru a3 respectively. | 991 // Arguments 1-4 are placed in registers a0 thru a3 respectively. |
| 894 // Arguments 5..n are stored to stack using following: | 992 // Arguments 5..n are stored to stack using following: |
| 895 // sw(t0, CFunctionArgumentOperand(5)); | 993 // sw(t0, CFunctionArgumentOperand(5)); |
| 896 | 994 |
| 897 // Calls a C function and cleans up the space for arguments allocated | 995 // Calls a C function and cleans up the space for arguments allocated |
| 898 // by PrepareCallCFunction. The called function is not allowed to trigger a | 996 // by PrepareCallCFunction. The called function is not allowed to trigger a |
| 899 // garbage collection, since that might move the code and invalidate the | 997 // garbage collection, since that might move the code and invalidate the |
| 900 // return address (unless this is somehow accounted for by the called | 998 // return address (unless this is somehow accounted for by the called |
| 901 // function). | 999 // function). |
| 902 void CallCFunction(ExternalReference function, int num_arguments); | 1000 void CallCFunction(ExternalReference function, int num_arguments); |
| 903 void CallCFunction(Register function, Register scratch, int num_arguments); | 1001 void CallCFunction(Register function, Register scratch, int num_arguments); |
| 1002 void CallCFunction(ExternalReference function, |
| 1003 int num_reg_arguments, |
| 1004 int num_double_arguments); |
| 1005 void CallCFunction(Register function, Register scratch, |
| 1006 int num_reg_arguments, |
| 1007 int num_double_arguments); |
| 904 void GetCFunctionDoubleResult(const DoubleRegister dst); | 1008 void GetCFunctionDoubleResult(const DoubleRegister dst); |
| 905 | 1009 |
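Pulling the pieces together, a hedged sketch of a call with six word-sized arguments; 'helper_ref' stands in for a real ExternalReference and is purely illustrative:

```cpp
masm->PrepareCallCFunction(6, t0);          // Align sp and reserve the arg slots.
// Arguments 1-4 are assumed to be in a0..a3 already; 5 and 6 go on the stack:
masm->sw(t1, CFunctionArgumentOperand(5));
masm->sw(t2, CFunctionArgumentOperand(6));
masm->CallCFunction(helper_ref, 6);         // 'helper_ref' is hypothetical.
```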
| 906 // There are two ways of passing double arguments on MIPS, depending on | 1010 // There are two ways of passing double arguments on MIPS, depending on |
| 907 // whether soft or hard floating point ABI is used. These functions | 1011 // whether soft or hard floating point ABI is used. These functions |
| 908 // abstract parameter passing for the three different ways we call | 1012 // abstract parameter passing for the three different ways we call |
| 909 // C functions from generated code. | 1013 // C functions from generated code. |
| 910 void SetCallCDoubleArguments(DoubleRegister dreg); | 1014 void SetCallCDoubleArguments(DoubleRegister dreg); |
| 911 void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2); | 1015 void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2); |
| 912 void SetCallCDoubleArguments(DoubleRegister dreg, Register reg); | 1016 void SetCallCDoubleArguments(DoubleRegister dreg, Register reg); |
| 913 | 1017 |
| (...skipping 82 matching lines...) |
| 996 // sets flags. | 1100 // sets flags. |
| 997 // This is only used by crankshaft atm so it is unimplemented on MIPS. | 1101 // This is only used by crankshaft atm so it is unimplemented on MIPS. |
| 998 void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) { | 1102 void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) { |
| 999 UNIMPLEMENTED_MIPS(); | 1103 UNIMPLEMENTED_MIPS(); |
| 1000 } | 1104 } |
| 1001 | 1105 |
| 1002 void SmiTag(Register reg) { | 1106 void SmiTag(Register reg) { |
| 1003 Addu(reg, reg, reg); | 1107 Addu(reg, reg, reg); |
| 1004 } | 1108 } |
| 1005 | 1109 |
| 1110 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). |
| 1111 void SmiTagCheckOverflow(Register reg, Register overflow) { |
| 1112 mov(overflow, reg); // Save original value. |
| 1113 addu(reg, reg, reg); |
| 1114 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0. |
| 1115 } |
| 1116 |
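A hedged sketch of the intended use: the value only fits in a smi if the overflow register stays non-negative.

```cpp
Label not_a_smi;
masm->SmiTagCheckOverflow(a0, t0);  // t0 < 0 iff the tag shift overflowed.
masm->Branch(&not_a_smi, lt, t0, Operand(zero_reg));
```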
| 1006 void SmiTag(Register dst, Register src) { | 1117 void SmiTag(Register dst, Register src) { |
| 1007 Addu(dst, src, src); | 1118 Addu(dst, src, src); |
| 1008 } | 1119 } |
| 1009 | 1120 |
| 1010 void SmiUntag(Register reg) { | 1121 void SmiUntag(Register reg) { |
| 1011 sra(reg, reg, kSmiTagSize); | 1122 sra(reg, reg, kSmiTagSize); |
| 1012 } | 1123 } |
| 1013 | 1124 |
| 1014 void SmiUntag(Register dst, Register src) { | 1125 void SmiUntag(Register dst, Register src) { |
| 1015 sra(dst, src, kSmiTagSize); | 1126 sra(dst, src, kSmiTagSize); |
| 1016 } | 1127 } |
| 1017 | 1128 |
| 1018 // Jump if the register contains a smi. | 1129 // Jump if the register contains a smi. |
| 1019 inline void JumpIfSmi(Register value, Label* smi_label, | 1130 inline void JumpIfSmi(Register value, Label* smi_label, |
| 1020 Register scratch = at) { | 1131 Register scratch = at, |
| 1132 BranchDelaySlot bd = PROTECT) { |
| 1021 ASSERT_EQ(0, kSmiTag); | 1133 ASSERT_EQ(0, kSmiTag); |
| 1022 andi(scratch, value, kSmiTagMask); | 1134 andi(scratch, value, kSmiTagMask); |
| 1023 Branch(smi_label, eq, scratch, Operand(zero_reg)); | 1135 Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); |
| 1024 } | 1136 } |
| 1025 | 1137 |
| 1026 // Jump if the register contains a non-smi. | 1138 // Jump if the register contains a non-smi. |
| 1027 inline void JumpIfNotSmi(Register value, Label* not_smi_label, | 1139 inline void JumpIfNotSmi(Register value, Label* not_smi_label, |
| 1028 Register scratch = at) { | 1140 Register scratch = at) { |
| 1029 ASSERT_EQ(0, kSmiTag); | 1141 ASSERT_EQ(0, kSmiTag); |
| 1030 andi(scratch, value, kSmiTagMask); | 1142 andi(scratch, value, kSmiTagMask); |
| 1031 Branch(not_smi_label, ne, scratch, Operand(zero_reg)); | 1143 Branch(not_smi_label, ne, scratch, Operand(zero_reg)); |
| 1032 } | 1144 } |
| 1033 | 1145 |
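The usual guard-then-untag pattern, as a hedged sketch:

```cpp
Label slow_path;
masm->JumpIfNotSmi(a0, &slow_path);  // Non-smi inputs take the slow path.
masm->SmiUntag(a0);                  // a0 now holds the untagged int32 value.
```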
| (...skipping 49 matching lines...) |
| 1083 Label* failure); | 1195 Label* failure); |
| 1084 | 1196 |
| 1085 // Test that both first and second are sequential ASCII strings. | 1197 // Test that both first and second are sequential ASCII strings. |
| 1086 // Check that they are non-smis. | 1198 // Check that they are non-smis. |
| 1087 void JumpIfNotBothSequentialAsciiStrings(Register first, | 1199 void JumpIfNotBothSequentialAsciiStrings(Register first, |
| 1088 Register second, | 1200 Register second, |
| 1089 Register scratch1, | 1201 Register scratch1, |
| 1090 Register scratch2, | 1202 Register scratch2, |
| 1091 Label* failure); | 1203 Label* failure); |
| 1092 | 1204 |
| 1205 void ClampUint8(Register output_reg, Register input_reg); |
| 1206 |
| 1207 void ClampDoubleToUint8(Register result_reg, |
| 1208 DoubleRegister input_reg, |
| 1209 DoubleRegister temp_double_reg); |
| 1210 |
| 1211 |
| 1093 void LoadInstanceDescriptors(Register map, Register descriptors); | 1212 void LoadInstanceDescriptors(Register map, Register descriptors); |
| 1094 | 1213 |
| 1095 private: | 1214 private: |
| 1096 void CallCFunctionHelper(Register function, | 1215 void CallCFunctionHelper(Register function, |
| 1097 ExternalReference function_reference, | 1216 ExternalReference function_reference, |
| 1098 Register scratch, | 1217 Register scratch, |
| 1099 int num_arguments); | 1218 int num_reg_arguments, |
| 1219 int num_double_arguments); |
| 1100 | 1220 |
| 1101 void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT); | 1221 void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT); |
| 1102 void BranchShort(int16_t offset, Condition cond, Register rs, | 1222 void BranchShort(int16_t offset, Condition cond, Register rs, |
| 1103 const Operand& rt, | 1223 const Operand& rt, |
| 1104 BranchDelaySlot bdslot = PROTECT); | 1224 BranchDelaySlot bdslot = PROTECT); |
| 1105 void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT); | 1225 void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT); |
| 1106 void BranchShort(Label* L, Condition cond, Register rs, | 1226 void BranchShort(Label* L, Condition cond, Register rs, |
| 1107 const Operand& rt, | 1227 const Operand& rt, |
| 1108 BranchDelaySlot bdslot = PROTECT); | 1228 BranchDelaySlot bdslot = PROTECT); |
| 1109 void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT); | 1229 void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT); |
| (...skipping 30 matching lines...) |
| 1140 Register length, | 1260 Register length, |
| 1141 Heap::RootListIndex map_index, | 1261 Heap::RootListIndex map_index, |
| 1142 Register scratch1, | 1262 Register scratch1, |
| 1143 Register scratch2); | 1263 Register scratch2); |
| 1144 | 1264 |
| 1145 // Compute memory operands for safepoint stack slots. | 1265 // Compute memory operands for safepoint stack slots. |
| 1146 static int SafepointRegisterStackIndex(int reg_code); | 1266 static int SafepointRegisterStackIndex(int reg_code); |
| 1147 MemOperand SafepointRegisterSlot(Register reg); | 1267 MemOperand SafepointRegisterSlot(Register reg); |
| 1148 MemOperand SafepointRegistersAndDoublesSlot(Register reg); | 1268 MemOperand SafepointRegistersAndDoublesSlot(Register reg); |
| 1149 | 1269 |
| 1150 bool UseAbsoluteCodePointers(); | |
| 1151 | |
| 1152 bool generating_stub_; | 1270 bool generating_stub_; |
| 1153 bool allow_stub_calls_; | 1271 bool allow_stub_calls_; |
| 1154 // This handle will be patched with the code object on installation. | 1272 // This handle will be patched with the code object on installation. |
| 1155 Handle<Object> code_object_; | 1273 Handle<Object> code_object_; |
| 1156 | 1274 |
| 1157 // Needs access to SafepointRegisterStackIndex for optimized frame | 1275 // Needs access to SafepointRegisterStackIndex for optimized frame |
| 1158 // traversal. | 1276 // traversal. |
| 1159 friend class OptimizedFrame; | 1277 friend class OptimizedFrame; |
| 1160 }; | 1278 }; |
| 1161 | 1279 |
| (...skipping 22 matching lines...) |
| 1184 void ChangeBranchCondition(Condition cond); | 1302 void ChangeBranchCondition(Condition cond); |
| 1185 | 1303 |
| 1186 private: | 1304 private: |
| 1187 byte* address_; // The address of the code being patched. | 1305 byte* address_; // The address of the code being patched. |
| 1188 int instructions_; // Number of instructions of the expected patch size. | 1306 int instructions_; // Number of instructions of the expected patch size. |
| 1189 int size_; // Number of bytes of the expected patch size. | 1307 int size_; // Number of bytes of the expected patch size. |
| 1190 MacroAssembler masm_; // Macro assembler used to generate the code. | 1308 MacroAssembler masm_; // Macro assembler used to generate the code. |
| 1191 }; | 1309 }; |
| 1192 | 1310 |
| 1193 | 1311 |
| 1194 // ----------------------------------------------------------------------------- | |
| 1195 // Static helper functions. | |
| 1196 | |
| 1197 static MemOperand ContextOperand(Register context, int index) { | |
| 1198 return MemOperand(context, Context::SlotOffset(index)); | |
| 1199 } | |
| 1200 | |
| 1201 | |
| 1202 static inline MemOperand GlobalObjectOperand() { | |
| 1203 return ContextOperand(cp, Context::GLOBAL_INDEX); | |
| 1204 } | |
| 1205 | |
| 1206 | |
| 1207 // Generate a MemOperand for loading a field from an object. | |
| 1208 static inline MemOperand FieldMemOperand(Register object, int offset) { | |
| 1209 return MemOperand(object, offset - kHeapObjectTag); | |
| 1210 } | |
| 1211 | |
| 1212 | |
| 1213 // Generate a MemOperand for storing arguments 5..N on the stack | |
| 1214 // when calling CallCFunction(). | |
| 1215 static inline MemOperand CFunctionArgumentOperand(int index) { | |
| 1216 ASSERT(index > kCArgSlotCount); | |
| 1217 // Argument 5 takes the slot just past the four Arg-slots. | |
| 1218 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; | |
| 1219 return MemOperand(sp, offset); | |
| 1220 } | |
| 1221 | |
| 1222 | 1312 |
| 1223 #ifdef GENERATED_CODE_COVERAGE | 1313 #ifdef GENERATED_CODE_COVERAGE |
| 1224 #define CODE_COVERAGE_STRINGIFY(x) #x | 1314 #define CODE_COVERAGE_STRINGIFY(x) #x |
| 1225 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) | 1315 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) |
| 1226 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) | 1316 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) |
| 1227 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> | 1317 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> |
| 1228 #else | 1318 #else |
| 1229 #define ACCESS_MASM(masm) masm-> | 1319 #define ACCESS_MASM(masm) masm-> |
| 1230 #endif | 1320 #endif |
| 1231 | 1321 |
| 1232 } } // namespace v8::internal | 1322 } } // namespace v8::internal |
| 1233 | 1323 |
| 1234 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ | 1324 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ |