OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 63 matching lines...)
74 // Don't load NaNs or infinities; branch to the non-number case instead. | 74 // Don't load NaNs or infinities; branch to the non-number case instead. |
75 AVOID_NANS_AND_INFINITIES = 1 << 1 | 75 AVOID_NANS_AND_INFINITIES = 1 << 1 |
76 }; | 76 }; |
77 | 77 |
78 // Allow the programmer to use the branch delay slot of branches, jumps and calls. | 78 // Allow the programmer to use the branch delay slot of branches, jumps and calls. |
79 enum BranchDelaySlot { | 79 enum BranchDelaySlot { |
80 USE_DELAY_SLOT, | 80 USE_DELAY_SLOT, |
81 PROTECT | 81 PROTECT |
82 }; | 82 }; |
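For illustration, a minimal call-site sketch. It assumes the usual convention in this header that Ret() (declared further down) takes an optional BranchDelaySlot argument, and that with USE_DELAY_SLOT the next emitted instruction lands in the delay slot:

  __ Ret();                // PROTECT (default): the macro-assembler fills the slot with a nop.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);          // The caller fills the delay slot explicitly.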
83 | 83 |
| 84 // Flags used for the li macro-assembler function. |
| 85 enum LiFlags { |
| 86 // If the constant value can be represented in just 16 bits, then |
| 87 // optimize the li to use a single instruction rather than a lui/ori pair. |
| 88 OPTIMIZE_SIZE = 0, |
| 89 // Always use 2 instructions (lui/ori pair), even if the constant could |
| 90 // be loaded with just one, so that this value is patchable later. |
| 91 CONSTANT_SIZE = 1 |
| 92 }; |
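A rough sketch of what the two modes mean for the emitted code, using the li() overloads declared later in this header (which single instruction OPTIMIZE_SIZE picks depends on the value):

  __ li(t0, Operand(0x1234), OPTIMIZE_SIZE);   // Fits in 16 bits: one instruction.
  __ li(t0, Operand(0x12345678));              // Needs both halves: lui/ori pair.
  __ li(t0, Operand(0x1234), CONSTANT_SIZE);   // Forced lui/ori so the site stays patchable.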
| 93 |
84 | 94 |
85 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; | 95 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; |
86 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; | 96 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; |
87 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; | 97 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; |
88 | 98 |
89 bool AreAliased(Register r1, Register r2, Register r3, Register r4); | 99 bool AreAliased(Register r1, Register r2, Register r3, Register r4); |
90 | 100 |
91 | 101 |
92 // ----------------------------------------------------------------------------- | 102 // ----------------------------------------------------------------------------- |
93 // Static helper functions. | 103 // Static helper functions. |
(...skipping 136 matching lines...)
230 | 240 |
231 // Jump unconditionally to given label. | 241 // Jump unconditionally to given label. |
232 // We NEED a nop in the branch delay slot, as it is used by v8, for example in | 242 // We NEED a nop in the branch delay slot, as it is used by v8, for example in |
233 // CodeGenerator::ProcessDeferred(). | 243 // CodeGenerator::ProcessDeferred(). |
234 // Currently the branch delay slot is filled by the MacroAssembler. | 244 // Currently the branch delay slot is filled by the MacroAssembler. |
235 // Prefer b(Label) for code generation. | 245 // Prefer b(Label) for code generation. |
236 void jmp(Label* L) { | 246 void jmp(Label* L) { |
237 Branch(L); | 247 Branch(L); |
238 } | 248 } |
239 | 249 |
240 | 250 int FindRootIndex(Object* heap_object); |
241 // Load an object from the root table. | 251 // Load an object from the root table. |
242 void LoadRoot(Register destination, | 252 void LoadRoot(Register destination, |
243 Heap::RootListIndex index); | 253 Heap::RootListIndex index); |
244 void LoadRoot(Register destination, | 254 void LoadRoot(Register destination, |
245 Heap::RootListIndex index, | 255 Heap::RootListIndex index, |
246 Condition cond, Register src1, const Operand& src2); | 256 Condition cond, Register src1, const Operand& src2); |
247 | 257 |
248 // Store an object to the root table. | 258 // Store an object to the root table. |
249 void StoreRoot(Register source, | 259 void StoreRoot(Register source, |
250 Heap::RootListIndex index); | 260 Heap::RootListIndex index); |
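For example, assuming the usual Heap::RootListIndex constants such as kUndefinedValueRootIndex, and reading the conditional overload as "load only when the condition on src1/src2 holds":

  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
  __ LoadRoot(v0, Heap::kTrueValueRootIndex, eq, t0, Operand(zero_reg));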
(...skipping 310 matching lines...)
561 #undef DEFINE_INSTRUCTION | 571 #undef DEFINE_INSTRUCTION |
562 #undef DEFINE_INSTRUCTION2 | 572 #undef DEFINE_INSTRUCTION2 |
563 | 573 |
564 | 574 |
565 // --------------------------------------------------------------------------- | 575 // --------------------------------------------------------------------------- |
566 // Pseudo-instructions. | 576 // Pseudo-instructions. |
567 | 577 |
568 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); } | 578 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); } |
569 | 579 |
570 // Load an int32 into the rd register. | 580 // Load an int32 into the rd register. |
571 void li(Register rd, Operand j, bool gen2instr = false); | 581 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); |
572 inline void li(Register rd, int32_t j, bool gen2instr = false) { | 582 inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) { |
573 li(rd, Operand(j), gen2instr); | 583 li(rd, Operand(j), mode); |
574 } | 584 } |
575 inline void li(Register dst, Handle<Object> value, bool gen2instr = false) { | 585 inline void li(Register dst, Handle<Object> value, |
576 li(dst, Operand(value), gen2instr); | 586 LiFlags mode = OPTIMIZE_SIZE) { |
| 587 li(dst, Operand(value), mode); |
577 } | 588 } |
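The migration from the old bool gen2instr flag is mechanical at call sites; a hypothetical before/after (value here is just a placeholder operand):

  __ li(a2, Operand(value), true);            // Old: force the two-instruction form.
  __ li(a2, Operand(value), CONSTANT_SIZE);   // New: same effect, self-documenting.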
578 | 589 |
579 // Push multiple registers on the stack. | 590 // Push multiple registers on the stack. |
580 // Registers are saved in numerical order, with higher-numbered registers | 591 // Registers are saved in numerical order, with higher-numbered registers |
581 // saved at higher memory addresses. | 592 // saved at higher memory addresses. |
582 void MultiPush(RegList regs); | 593 void MultiPush(RegList regs); |
583 void MultiPushReversed(RegList regs); | 594 void MultiPushReversed(RegList regs); |
584 | 595 |
585 void MultiPushFPU(RegList regs); | 596 void MultiPushFPU(RegList regs); |
586 void MultiPushReversedFPU(RegList regs); | 597 void MultiPushReversedFPU(RegList regs); |
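A brief usage sketch, assuming the standard RegList convention of combining Register::bit() masks (MultiPop is the matching restore, declared elsewhere in this header):

  __ MultiPush(a0.bit() | a1.bit() | ra.bit());   // ra (highest numbered) ends up at the highest address.
  // ... code that clobbers a0, a1 and ra ...
  __ MultiPop(a0.bit() | a1.bit() | ra.bit());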
(...skipping 883 matching lines...)
1470 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) | 1481 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) |
1471 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) | 1482 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) |
1472 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> | 1483 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> |
1473 #else | 1484 #else |
1474 #define ACCESS_MASM(masm) masm-> | 1485 #define ACCESS_MASM(masm) masm-> |
1475 #endif | 1486 #endif |
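Code generators use this through the conventional __ shorthand; when GENERATED_CODE_COVERAGE is defined, every __ additionally emits a stop() tagged with the file and line. A minimal sketch:

  #define __ ACCESS_MASM(masm)
  __ li(v0, Operand(1));
  __ Ret();
  #undef __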
1476 | 1487 |
1477 } } // namespace v8::internal | 1488 } } // namespace v8::internal |
1478 | 1489 |
1479 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ | 1490 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ |