Chromium Code Reviews

Side by Side Diff: src/mips/macro-assembler-mips.h

Issue 561072: MIPS port initial commit (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 10 months ago
1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
29 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
30
31 #include "assembler.h"
32 #include "mips/assembler-mips.h"
33
34 namespace v8 {
35 namespace internal {
36
37 // Forward declaration.
38 class JumpTarget;
39
40 // Register at is used for instruction generation, so it is not safe to use it
41 // unless we know exactly what we are doing.
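// Editorial sketch (not part of the original patch): macro branches with a
// relational condition are typically synthesized through at, roughly
//   slt  at, rs, rt          // at = (rs < rt) ? 1 : 0, clobbering at
//   bne  at, zero_reg, L     // branch when the relation held
// which is why generated code must not keep a live value in at across these
// macros.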
42
43 // Register aliases
44 const Register cp = s7; // JavaScript context pointer
45 const Register fp = s8_fp; // Alias fp
46
47 enum InvokeJSFlags {
48 CALL_JS,
49 JUMP_JS
50 };
51
52 // MacroAssembler implements a collection of frequently used macros.
53 class MacroAssembler: public Assembler {
54 public:
55 MacroAssembler(void* buffer, int size);
56
57 // Jump, Call, and Ret pseudo instructions implementing inter-working.
58 void Jump(const Operand& target,
59 Condition cond = cc_always,
60 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
61 void Call(const Operand& target,
62 Condition cond = cc_always,
63 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
64 void Jump(Register target,
65 Condition cond = cc_always,
66 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
67 void Jump(byte* target, RelocInfo::Mode rmode,
68 Condition cond = cc_always,
69 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
70 void Jump(Handle<Code> code, RelocInfo::Mode rmode,
71 Condition cond = cc_always,
72 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
73 void Call(Register target,
74 Condition cond = cc_always,
75 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
76 void Call(byte* target, RelocInfo::Mode rmode,
77 Condition cond = cc_always,
78 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
79 void Call(Handle<Code> code, RelocInfo::Mode rmode,
80 Condition cond = cc_always,
81 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
82 void Ret(Condition cond = cc_always,
83 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
84 void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
85 const Operand& rt = Operand(zero_reg), Register scratch = at);
86 void Branch(Condition cond, Label* L, Register rs = zero_reg,
87 const Operand& rt = Operand(zero_reg), Register scratch = at);
88 // Conditional branch and link.
89 void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
90 const Operand& rt = Operand(zero_reg),
91 Register scratch = at);
92 void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
93 const Operand& rt = Operand(zero_reg),
94 Register scratch = at);
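// Hedged usage sketch: the trailing (cond, rs/r1, rt/r2) arguments make each of
// these pseudo instructions conditional; the register and label names below are
// illustrative, not taken from this patch.
//   masm->Ret(eq, t0, Operand(zero_reg));            // return only when t0 == 0
//   masm->Branch(ne, &slow_case, a0, Operand(a1));   // branch when a0 != a1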
95
96 // Emit code to discard a non-negative number of pointer-sized elements
97 // from the stack, clobbering only the sp register.
98 void Drop(int count, Condition cond = cc_always);
99
100 void Call(Label* target);
101
102 // Jump unconditionally to the given label.
103 // We NEED a nop in the branch delay slot, as it is used by v8, for example in
104 // CodeGenerator::ProcessDeferred().
105 // Prefer b(Label) for ordinary code generation.
106 void jmp(Label* L) {
107 Branch(cc_always, L);
108 nop();
109 }
110
111 // Load an object from the root table.
112 void LoadRoot(Register destination,
113 Heap::RootListIndex index);
114 void LoadRoot(Register destination,
115 Heap::RootListIndex index,
116 Condition cond, Register src1, const Operand& src2);
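// Hypothetical usage sketch: load the undefined value from the root list into a
// temporary register (root index name taken from the shared Heap interface):
//   masm->LoadRoot(t0, Heap::kUndefinedValueRootIndex);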
117
118 // Sets the remembered set bit for [address+offset], where address is the
119 // address of the heap object 'object'. The address must be in the first 8K
120 // of an allocated page. The 'scratch' register is used in the
121 // implementation and all 3 registers are clobbered by the operation, as
122 // well as the ip register.
123 void RecordWrite(Register object, Register offset, Register scratch);
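// Hedged usage sketch (register choices are illustrative, not from this patch):
// after a pointer has been written into a heap object, record the write so the
// GC's remembered set stays consistent, e.g.
//   masm->RecordWrite(object_reg, offset_reg, scratch_reg);
// keeping in mind that all three registers are clobbered by the call.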
124
125
126 // ---------------------------------------------------------------------------
127 // Instruction macros
128
129 #define DEFINE_INSTRUCTION(instr) \
130 void instr(Register rd, Register rs, const Operand& rt); \
131 void instr(Register rd, Register rs, Register rt) { \
132 instr(rd, rs, Operand(rt)); \
133 } \
134 void instr(Register rs, Register rt, int32_t j) { \
135 instr(rs, rt, Operand(j)); \
136 }
137
138 #define DEFINE_INSTRUCTION2(instr) \
139 void instr(Register rs, const Operand& rt); \
140 void instr(Register rs, Register rt) { \
141 instr(rs, Operand(rt)); \
142 } \
143 void instr(Register rs, int32_t j) { \
144 instr(rs, Operand(j)); \
145 }
146
147 DEFINE_INSTRUCTION(Add);
148 DEFINE_INSTRUCTION(Addu);
149 DEFINE_INSTRUCTION(Mul);
150 DEFINE_INSTRUCTION2(Mult);
151 DEFINE_INSTRUCTION2(Multu);
152 DEFINE_INSTRUCTION2(Div);
153 DEFINE_INSTRUCTION2(Divu);
154
155 DEFINE_INSTRUCTION(And);
156 DEFINE_INSTRUCTION(Or);
157 DEFINE_INSTRUCTION(Xor);
158 DEFINE_INSTRUCTION(Nor);
159
160 DEFINE_INSTRUCTION(Slt);
161 DEFINE_INSTRUCTION(Sltu);
162
163 #undef DEFINE_INSTRUCTION
164 #undef DEFINE_INSTRUCTION2
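// Editorial expansion note: DEFINE_INSTRUCTION(Addu) above, for example, declares
// the three overloads
//   void Addu(Register rd, Register rs, const Operand& rt);
//   void Addu(Register rd, Register rs, Register rt);   // forwards Operand(rt)
//   void Addu(Register rs, Register rt, int32_t j);     // forwards Operand(j)
// so callers can pass either a register or an immediate as the last operand.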
165
166
167 //------------Pseudo-instructions-------------
168
169 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
170 // Move the logical one's complement of source to dest.
171 void movn(Register rd, Register rt);
172
173
174 // Load an int32 into the rd register.
175 void li(Register rd, Operand j, bool gen2instr = false);
176 inline void li(Register rd, int32_t j, bool gen2instr = false) {
177 li(rd, Operand(j), gen2instr);
178 }
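// Editorial sketch, assuming the usual MIPS constant-materialization idiom: a
// 32-bit immediate that does not fit a single instruction is commonly built as
//   lui  rd, (imm >> 16)          // upper half word
//   ori  rd, rd, (imm & 0xffff)   // lower half word
// and gen2instr presumably forces this two-instruction form even for small
// constants, so that the emitted sequence has a fixed, patchable size.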
179
180 // Exception-generating instructions and debugging support
181 void stop(const char* msg);
182
183
184 // Push multiple registers on the stack.
185 // With MultiPush, lower registers are pushed first on the stack.
186 // For example if you push t0, t1, s0, and ra you get:
187 // | |
188 // |-----------------------|
189 // | t0 | +
190 // |-----------------------| |
191 // | t1 | |
192 // |-----------------------| |
193 // | s0 | v
194 // |-----------------------| -
195 // | ra |
196 // |-----------------------|
197 // | |
198 void MultiPush(RegList regs);
199 void MultiPushReversed(RegList regs);
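// Hypothetical usage sketch for the diagram above (RegList is a bit mask of
// registers; Register::bit() is assumed to give the corresponding bit):
//   RegList saved = t0.bit() | t1.bit() | s0.bit() | ra.bit();
//   masm->MultiPush(saved);   // pushes t0, t1, s0, ra as drawn above
//   ...
//   masm->MultiPop(saved);    // restores them in the opposite order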
200 void Push(Register src) {
201 Addu(sp, sp, Operand(-kPointerSize));
202 sw(src, MemOperand(sp, 0));
203 }
204 inline void push(Register src) { Push(src); }
205
206 void Push(Register src, Condition cond, Register tst1, Register tst2) {
207 // Since we don't have conditional execution, we use a Branch.
208 Branch(cond, 3, tst1, Operand(tst2));
209 nop();
210 Addu(sp, sp, Operand(-kPointerSize));
211 sw(src, MemOperand(sp, 0));
212 }
213
214 // Pops multiple values from the stack and loads them into the
215 // registers specified in regs. Pop order is the opposite of MultiPush.
216 void MultiPop(RegList regs);
217 void MultiPopReversed(RegList regs);
218 void Pop(Register dst) {
219 lw(dst, MemOperand(sp, 0));
220 Addu(sp, sp, Operand(kPointerSize));
221 }
222 void Pop() {
223 Add(sp, sp, Operand(kPointerSize));
224 }
225
226
227 // ---------------------------------------------------------------------------
228 // Exception handling
229
230 // Push a new try handler and link it into the try handler chain.
231 // The return address must be passed in register ra.
232 // On exit, v0 contains TOS (code slot).
233 void PushTryHandler(CodeLocation try_location, HandlerType type);
234
235 // Unlink the stack handler on top of the stack from the try handler chain.
236 // Must preserve the result register.
237 void PopTryHandler();
238
239
240 // ---------------------------------------------------------------------------
241 // Support functions.
242
243 inline void BranchOnSmi(Register value, Label* smi_label,
244 Register scratch = at) {
245 ASSERT_EQ(0, kSmiTag);
246 andi(scratch, value, kSmiTagMask);
247 Branch(eq, smi_label, scratch, Operand(zero_reg));
248 }
249
250
251 inline void BranchOnNotSmi(Register value, Label* not_smi_label,
252 Register scratch = at) {
253 ASSERT_EQ(0, kSmiTag);
254 andi(scratch, value, kSmiTagMask);
255 Branch(ne, not_smi_label, scratch, Operand(zero_reg));
256 }
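// Editorial note: both helpers rely on V8's pointer tagging, where a small
// integer (smi) has its low tag bit clear (kSmiTag == 0), so masking the value
// with kSmiTagMask and testing against zero separates smis from heap object
// pointers. A hedged usage sketch with illustrative registers:
//   masm->BranchOnNotSmi(a0, &not_a_smi, t0);   // t0 serves as the scratch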
257
258
259 // ---------------------------------------------------------------------------
260 // Runtime calls
261
262 // Call a code stub.
263 void CallStub(CodeStub* stub, Condition cond = cc_always,
264 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
265 void CallJSExitStub(CodeStub* stub);
266
267 // Return from a code stub after popping its arguments.
268 void StubReturn(int argc);
269
270 // Call a runtime routine.
271 // Eventually this should be used for all C calls.
272 void CallRuntime(Runtime::Function* f, int num_arguments);
273
274 // Convenience function: Same as above, but takes the fid instead.
275 void CallRuntime(Runtime::FunctionId fid, int num_arguments);
276
277 // Tail call of a runtime routine (jump).
278 // Like JumpToRuntime, but also takes care of passing the number
279 // of parameters.
280 void TailCallRuntime(const ExternalReference& ext,
281 int num_arguments,
282 int result_size);
283
284 // Jump to the builtin routine.
285 void JumpToRuntime(const ExternalReference& builtin);
286
287 // Invoke specified builtin JavaScript function. Adds an entry to
288 // the unresolved list if the name does not resolve.
289 void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
290
291 // Store the code object for the given builtin in the target register and
292 // set up the function in a1.
293 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
294
295 struct Unresolved {
296 int pc;
297 uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
298 const char* name;
299 };
300 List<Unresolved>* unresolved() { return &unresolved_; }
301
302 Handle<Object> CodeObject() { return code_object_; }
303
304
305 // ---------------------------------------------------------------------------
306 // Stack limit support
307
308 void StackLimitCheck(Label* on_stack_limit_hit);
309
310
311 // ---------------------------------------------------------------------------
312 // StatsCounter support
313
314 void SetCounter(StatsCounter* counter, int value,
315 Register scratch1, Register scratch2);
316 void IncrementCounter(StatsCounter* counter, int value,
317 Register scratch1, Register scratch2);
318 void DecrementCounter(StatsCounter* counter, int value,
319 Register scratch1, Register scratch2);
320
321
322 // ---------------------------------------------------------------------------
323 // Debugging
324
325 // Calls Abort(msg) if the condition cc is not satisfied.
326 // Use --debug_code to enable.
327 void Assert(Condition cc, const char* msg, Register rs, Operand rt);
328
329 // Like Assert(), but always enabled.
330 void Check(Condition cc, const char* msg, Register rs, Operand rt);
331
332 // Print a message to stdout and abort execution.
333 void Abort(const char* msg);
334
335 // Verify restrictions about code generated in stubs.
336 void set_generating_stub(bool value) { generating_stub_ = value; }
337 bool generating_stub() { return generating_stub_; }
338 void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
339 bool allow_stub_calls() { return allow_stub_calls_; }
340
341 private:
342 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
343 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
344 void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
345 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
346
347 // Get the code for the given builtin. Indicates through the 'resolved' flag
348 // whether the function could be resolved.
349 Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
350
351 List<Unresolved> unresolved_;
352 bool generating_stub_;
353 bool allow_stub_calls_;
354 // This handle will be patched with the code object on installation.
355 Handle<Object> code_object_;
356 };
357
358
359 // -----------------------------------------------------------------------------
360 // Static helper functions.
361
362 // Generate a MemOperand for loading a field from an object.
363 static inline MemOperand FieldMemOperand(Register object, int offset) {
364 return MemOperand(object, offset - kHeapObjectTag);
365 }
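// Editorial sketch: heap object pointers carry kHeapObjectTag in their low bits,
// so FieldMemOperand subtracts the tag to reach the raw field address. A typical
// map load would look like (register names are illustrative):
//   masm->lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));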
366
367
368
369 #ifdef GENERATED_CODE_COVERAGE
370 #define CODE_COVERAGE_STRINGIFY(x) #x
371 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
372 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
373 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
374 #else
375 #define ACCESS_MASM(masm) masm->
376 #endif
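// Editorial note: code generators conventionally shorten this to
//   #define __ ACCESS_MASM(masm)
// so that, e.g., "__ li(v0, Operand(0));" emits the instruction directly, while a
// GENERATED_CODE_COVERAGE build first emits masm->stop(__FILE_LINE__) to record
// the emitting source location.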
377
378 } } // namespace v8::internal
379
380 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
381