Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(126)

Side by Side Diff: src/mips/macro-assembler-mips.h

Issue 1320006: Updates and fixes for MIPS support. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 19 matching lines...) Expand all
30 30
31 #include "assembler.h" 31 #include "assembler.h"
32 #include "mips/assembler-mips.h" 32 #include "mips/assembler-mips.h"
33 33
34 namespace v8 { 34 namespace v8 {
35 namespace internal { 35 namespace internal {
36 36
37 // Forward declaration. 37 // Forward declaration.
38 class JumpTarget; 38 class JumpTarget;
39 39
40 // Register at is used for instruction generation. So it is not safe to use it 40 // Register at is used for instruction generation. So it is not always safe to
41 // unless we know exactly what we do. 41 // use it. Instead t8 and t9 registers are used by the MacroAssembler when
42 // necessary.
43 // The programmer should know that the MacroAssembler may clobber these two,
44 // but won't touch other registers except in special cases.
45
46 // Therefore we create another scratch reg.
Søren Thygesen Gjesse 2010/05/25 09:00:56 Remove "Unless we know exactly what we do."?
47 const Register ip = t8; // Alias ip (equivalent to arm ip scratch register).
42 48
43 // Register aliases 49 // Register aliases
44 // cp is assumed to be a callee saved register. 50 // cp is assumed to be a callee saved register.
51 const Register roots = s6; // Roots array pointer.
45 const Register cp = s7; // JavaScript context pointer 52 const Register cp = s7; // JavaScript context pointer
46 const Register fp = s8_fp; // Alias fp 53 const Register fp = s8_fp; // Alias fp
47 54
48 enum InvokeJSFlags { 55 enum InvokeJSFlags {
49 CALL_JS, 56 CALL_JS,
50 JUMP_JS 57 JUMP_JS
51 }; 58 };
52 59
53 // MacroAssembler implements a collection of frequently used macros. 60 // MacroAssembler implements a collection of frequently used macros.
54 class MacroAssembler: public Assembler { 61 class MacroAssembler: public Assembler {
55 public: 62 public:
56 MacroAssembler(void* buffer, int size); 63 MacroAssembler(void* buffer, int size);
57 64
58 // Jump, Call, and Ret pseudo instructions implementing inter-working. 65 // Arguments macros
Søren Thygesen Gjesse 2010/05/25 09:00:56 Are you sure that using these macros here are bene
59 void Jump(const Operand& target, 66 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
60 Condition cond = cc_always, 67 #define COND_ARGS cond, r1, r2
61 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 68
62 void Call(const Operand& target, 69 // ** Prototypes
63 Condition cond = cc_always, 70
64 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 71 // * Prototypes for functions with no target (eg Ret()).
65 void Jump(Register target, 72 #define DECLARE_NOTARGET_PROTOTYPE(Name) \
66 Condition cond = cc_always, 73 void Name(bool ProtectBranchDelaySlot = true); \
67 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 74 void Name(COND_TYPED_ARGS, bool ProtectBranchDelaySlot = true); \
68 void Jump(byte* target, RelocInfo::Mode rmode, 75 inline void Name(bool ProtectBranchDelaySlot, COND_TYPED_ARGS) { \
69 Condition cond = cc_always, 76 Name(COND_ARGS, ProtectBranchDelaySlot); \
70 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 77 }
71 void Jump(Handle<Code> code, RelocInfo::Mode rmode, 78
72 Condition cond = cc_always, 79 // * Prototypes for functions with a target.
73 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 80
74 void Call(Register target, 81 // Cases when relocation may be needed.
75 Condition cond = cc_always, 82 #define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
76 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 83 void Name(target_type target, \
77 void Call(byte* target, RelocInfo::Mode rmode, 84 RelocInfo::Mode rmode, \
78 Condition cond = cc_always, 85 bool ProtectBranchDelaySlot = true); \
79 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 86 inline void Name(bool ProtectBranchDelaySlot, \
80 void Call(Handle<Code> code, RelocInfo::Mode rmode, 87 target_type target, \
81 Condition cond = cc_always, 88 RelocInfo::Mode rmode) { \
82 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 89 Name(target, rmode, ProtectBranchDelaySlot); \
83 void Ret(Condition cond = cc_always, 90 } \
84 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 91 void Name(target_type target, \
85 void Branch(Condition cond, int16_t offset, Register rs = zero_reg, 92 RelocInfo::Mode rmode, \
86 const Operand& rt = Operand(zero_reg), Register scratch = at); 93 COND_TYPED_ARGS, \
87 void Branch(Condition cond, Label* L, Register rs = zero_reg, 94 bool ProtectBranchDelaySlot = true); \
88 const Operand& rt = Operand(zero_reg), Register scratch = at); 95 inline void Name(bool ProtectBranchDelaySlot, \
89 // conditional branch and link 96 target_type target, \
90 void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg, 97 RelocInfo::Mode rmode, \
91 const Operand& rt = Operand(zero_reg), 98 COND_TYPED_ARGS) { \
92 Register scratch = at); 99 Name(target, rmode, COND_ARGS, ProtectBranchDelaySlot); \
93 void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg, 100 }
94 const Operand& rt = Operand(zero_reg), 101
95 Register scratch = at); 102 // Cases when relocation is not needed.
103 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
104 void Name(target_type target, bool ProtectBranchDelaySlot = true); \
105 inline void Name(bool ProtectBranchDelaySlot, target_type target) { \
106 Name(target, ProtectBranchDelaySlot); \
107 } \
108 void Name(target_type target, \
109 COND_TYPED_ARGS, \
110 bool ProtectBranchDelaySlot = true); \
111 inline void Name(bool ProtectBranchDelaySlot, \
112 target_type target, \
113 COND_TYPED_ARGS) { \
114 Name(target, COND_ARGS, ProtectBranchDelaySlot); \
115 }
116
117 // ** Target prototypes.
118
119 #define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
120 DECLARE_NORELOC_PROTOTYPE(Name, Register) \
121 DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
122 DECLARE_RELOC_PROTOTYPE(Name, byte*) \
123 DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
124
125 #define DECLARE_BRANCH_PROTOTYPES(Name) \
126 DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
127 DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
128
129
130 DECLARE_JUMP_CALL_PROTOTYPES(Jump)
131 DECLARE_JUMP_CALL_PROTOTYPES(Call)
132
133 DECLARE_BRANCH_PROTOTYPES(Branch)
134 DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
135
136 DECLARE_NOTARGET_PROTOTYPE(Ret)
137
138 #undef COND_TYPED_ARGS
139 #undef COND_ARGS
140 #undef DECLARE_NOTARGET_PROTOTYPE
141 #undef DECLARE_NORELOC_PROTOTYPE
142 #undef DECLARE_RELOC_PROTOTYPE
143 #undef DECLARE_JUMP_CALL_PROTOTYPES
144 #undef DECLARE_BRANCH_PROTOTYPES
96 145
97 // Emit code to discard a non-negative number of pointer-sized elements 146 // Emit code to discard a non-negative number of pointer-sized elements
98 // from the stack, clobbering only the sp register. 147 // from the stack, clobbering only the sp register.
99 void Drop(int count, Condition cond = cc_always); 148 void Drop(int count, Condition cond = cc_always);
100 149
101 void Call(Label* target); 150 void Call(Label* target);
102 151
103 // Jump unconditionally to given label. 152 // Jump unconditionally to given label.
104 // We NEED a nop in the branch delay slot, as it is used by v8, for example in 153 // We NEED a nop in the branch delay slot, as it is used by v8, for example in
105 // CodeGenerator::ProcessDeferred(). 154 // CodeGenerator::ProcessDeferred().
106 // Currently the branch delay slot is filled by the MacroAssembler. 155 // Currently the branch delay slot is filled by the MacroAssembler.
107 // Rather, use b(Label) for code generation. 156 // Rather, use b(Label) for code generation.
108 void jmp(Label* L) { 157 void jmp(Label* L) {
109 Branch(cc_always, L); 158 Branch(L);
110 } 159 }
111 160
112 // Load an object from the root table. 161 // Load an object from the root table.
113 void LoadRoot(Register destination, 162 void LoadRoot(Register destination,
114 Heap::RootListIndex index); 163 Heap::RootListIndex index);
115 void LoadRoot(Register destination, 164 void LoadRoot(Register destination,
116 Heap::RootListIndex index, 165 Heap::RootListIndex index,
117 Condition cond, Register src1, const Operand& src2); 166 Condition cond, Register src1, const Operand& src2);
118 167
119 // Load an external reference. 168 // Load an external reference.
120 void LoadExternalReference(Register reg, ExternalReference ext) { 169 void LoadExternalReference(Register reg, ExternalReference ext) {
121 li(reg, Operand(ext)); 170 li(reg, Operand(ext));
122 } 171 }
123 172
124 // Sets the remembered set bit for [address+offset]. 173 // Sets the remembered set bit for [address+offset].
125 void RecordWrite(Register object, Register offset, Register scratch); 174 void RecordWrite(Register object, Register offset, Register scratch);
126 175
127 176
128 // --------------------------------------------------------------------------- 177 // ---------------------------------------------------------------------------
129 // Instruction macros 178 // Instruction macros
130 179
131 #define DEFINE_INSTRUCTION(instr) \ 180 #define DEFINE_INSTRUCTION(instr) \
132 void instr(Register rd, Register rs, const Operand& rt); \ 181 void instr(Register rd, Register rs, const Operand& rt); \
133 void instr(Register rd, Register rs, Register rt) { \ 182 void instr(Register rd, Register rs, Register rt) { \
134 instr(rd, rs, Operand(rt)); \ 183 instr(rd, rs, Operand(rt)); \
135 } \ 184 } \
136 void instr(Register rs, Register rt, int32_t j) { \ 185 void instr(Register rs, Register rt, int32_t j) { \
137 instr(rs, rt, Operand(j)); \ 186 instr(rs, rt, Operand(j)); \
138 } 187 }
139 188
140 #define DEFINE_INSTRUCTION2(instr) \ 189 #define DEFINE_INSTRUCTION2(instr) \
141 void instr(Register rs, const Operand& rt); \ 190 void instr(Register rs, const Operand& rt); \
142 void instr(Register rs, Register rt) { \ 191 void instr(Register rs, Register rt) { \
143 instr(rs, Operand(rt)); \ 192 instr(rs, Operand(rt)); \
144 } \ 193 } \
145 void instr(Register rs, int32_t j) { \ 194 void instr(Register rs, int32_t j) { \
146 instr(rs, Operand(j)); \ 195 instr(rs, Operand(j)); \
147 } 196 }
148 197
149 DEFINE_INSTRUCTION(Add);
150 DEFINE_INSTRUCTION(Addu); 198 DEFINE_INSTRUCTION(Addu);
199 DEFINE_INSTRUCTION(Subu);
151 DEFINE_INSTRUCTION(Mul); 200 DEFINE_INSTRUCTION(Mul);
152 DEFINE_INSTRUCTION2(Mult); 201 DEFINE_INSTRUCTION2(Mult);
153 DEFINE_INSTRUCTION2(Multu); 202 DEFINE_INSTRUCTION2(Multu);
154 DEFINE_INSTRUCTION2(Div); 203 DEFINE_INSTRUCTION2(Div);
155 DEFINE_INSTRUCTION2(Divu); 204 DEFINE_INSTRUCTION2(Divu);
156 205
157 DEFINE_INSTRUCTION(And); 206 DEFINE_INSTRUCTION(And);
158 DEFINE_INSTRUCTION(Or); 207 DEFINE_INSTRUCTION(Or);
159 DEFINE_INSTRUCTION(Xor); 208 DEFINE_INSTRUCTION(Xor);
160 DEFINE_INSTRUCTION(Nor); 209 DEFINE_INSTRUCTION(Nor);
161 210
162 DEFINE_INSTRUCTION(Slt); 211 DEFINE_INSTRUCTION(Slt);
163 DEFINE_INSTRUCTION(Sltu); 212 DEFINE_INSTRUCTION(Sltu);
164 213
165 #undef DEFINE_INSTRUCTION 214 #undef DEFINE_INSTRUCTION
166 #undef DEFINE_INSTRUCTION2 215 #undef DEFINE_INSTRUCTION2
167 216
168 217
169 //------------Pseudo-instructions------------- 218 //------------Pseudo-instructions-------------
170 219
171 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); } 220 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
172 // Move the logical ones complement of source to dest.
173 void movn(Register rd, Register rt);
174
175 221
176 // load int32 in the rd register 222 // load int32 in the rd register
177 void li(Register rd, Operand j, bool gen2instr = false); 223 void li(Register rd, Operand j, bool gen2instr = false);
178 inline void li(Register rd, int32_t j, bool gen2instr = false) { 224 inline void li(Register rd, int32_t j, bool gen2instr = false) {
179 li(rd, Operand(j), gen2instr); 225 li(rd, Operand(j), gen2instr);
180 } 226 }
181 227
182 // Exception-generating instructions and debugging support 228 // Exception-generating instructions and debugging support
183 void stop(const char* msg); 229 void stop(const char* msg);
184 230
185 231
186 // Push multiple registers on the stack. 232 // Push multiple registers on the stack.
187 // Registers are saved in numerical order, with higher numbered registers 233 // Registers are saved in numerical order, with higher numbered registers
188 // saved in higher memory addresses 234 // saved in higher memory addresses
189 void MultiPush(RegList regs); 235 void MultiPush(RegList regs);
190 void MultiPushReversed(RegList regs); 236 void MultiPushReversed(RegList regs);
191 void Push(Register src) { 237 void Push(Register src) {
192 Addu(sp, sp, Operand(-kPointerSize)); 238 Addu(sp, sp, Operand(-kPointerSize));
193 sw(src, MemOperand(sp, 0)); 239 sw(src, MemOperand(sp, 0));
194 } 240 }
195 inline void push(Register src) { Push(src); } 241 inline void push(Register src) { Push(src); }
196 242
197 void Push(Register src, Condition cond, Register tst1, Register tst2) { 243 void Push(Register src, Condition cond, Register tst1, Register tst2) {
198 // Since we don't have conditional execution we use a Branch. 244 // Since we don't have conditional execution we use a Branch.
199 Branch(cond, 3, tst1, Operand(tst2)); 245 Branch(3, cond, tst1, Operand(tst2));
Søren Thygesen Gjesse 2010/05/25 09:00:56 The constant 3 relies on Addu only generates one i
200 Addu(sp, sp, Operand(-kPointerSize)); 246 Addu(sp, sp, Operand(-kPointerSize));
201 sw(src, MemOperand(sp, 0)); 247 sw(src, MemOperand(sp, 0));
202 } 248 }
203 249
204 // Pops multiple values from the stack and load them in the 250 // Pops multiple values from the stack and load them in the
205 // registers specified in regs. Pop order is the opposite as in MultiPush. 251 // registers specified in regs. Pop order is the opposite as in MultiPush.
206 void MultiPop(RegList regs); 252 void MultiPop(RegList regs);
207 void MultiPopReversed(RegList regs); 253 void MultiPopReversed(RegList regs);
208 void Pop(Register dst) { 254 void Pop(Register dst) {
209 lw(dst, MemOperand(sp, 0)); 255 lw(dst, MemOperand(sp, 0));
210 Addu(sp, sp, Operand(kPointerSize)); 256 Addu(sp, sp, Operand(kPointerSize));
211 } 257 }
212 void Pop() { 258 void Pop(uint32_t count = 1) {
213 Add(sp, sp, Operand(kPointerSize)); 259 Addu(sp, sp, Operand(count * kPointerSize));
214 } 260 }
215 261
216 262
217 // --------------------------------------------------------------------------- 263 // ---------------------------------------------------------------------------
218 // Activation frames 264 // Activation frames
219 265
220 void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } 266 void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
221 void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } 267 void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
222 268
223 // Enter specific kind of exit frame; either EXIT or 269 // Enter specific kind of exit frame; either EXIT or
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
273 RegList regs); 319 RegList regs);
274 void DebugBreak(); 320 void DebugBreak();
275 #endif 321 #endif
276 322
277 323
278 // --------------------------------------------------------------------------- 324 // ---------------------------------------------------------------------------
279 // Exception handling 325 // Exception handling
280 326
281 // Push a new try handler and link into try handler chain. 327 // Push a new try handler and link into try handler chain.
282 // The return address must be passed in register ra. 328 // The return address must be passed in register ra.
329 // Clobbers t0, t1, t2.
Søren Thygesen Gjesse 2010/05/25 09:00:56 Clobber -> Clobbers
283 void PushTryHandler(CodeLocation try_location, HandlerType type); 330 void PushTryHandler(CodeLocation try_location, HandlerType type);
284 331
285 // Unlink the stack handler on top of the stack from the try handler chain. 332 // Unlink the stack handler on top of the stack from the try handler chain.
286 // Must preserve the result register. 333 // Must preserve the result register.
287 void PopTryHandler(); 334 void PopTryHandler();
288 335
289 336
290 // --------------------------------------------------------------------------- 337 // ---------------------------------------------------------------------------
291 // Support functions. 338 // Support functions.
292 339
293 void GetObjectType(Register function, 340 void GetObjectType(Register function,
294 Register map, 341 Register map,
295 Register type_reg); 342 Register type_reg);
296 343
297 inline void BranchOnSmi(Register value, Label* smi_label, 344 inline void BranchOnSmi(Register value, Label* smi_label,
298 Register scratch = at) { 345 Register scratch = at) {
299 ASSERT_EQ(0, kSmiTag); 346 ASSERT_EQ(0, kSmiTag);
300 andi(scratch, value, kSmiTagMask); 347 andi(scratch, value, kSmiTagMask);
301 Branch(eq, smi_label, scratch, Operand(zero_reg)); 348 Branch(smi_label, eq, scratch, Operand(zero_reg));
302 } 349 }
303 350
304 351
305 inline void BranchOnNotSmi(Register value, Label* not_smi_label, 352 inline void BranchOnNotSmi(Register value, Label* not_smi_label,
306 Register scratch = at) { 353 Register scratch = at) {
307 ASSERT_EQ(0, kSmiTag); 354 ASSERT_EQ(0, kSmiTag);
308 andi(scratch, value, kSmiTagMask); 355 andi(scratch, value, kSmiTagMask);
309 Branch(ne, not_smi_label, scratch, Operand(zero_reg)); 356 Branch(not_smi_label, ne, scratch, Operand(zero_reg));
310 } 357 }
311 358
312 void CallBuiltin(ExternalReference builtin_entry); 359 void CallBuiltin(ExternalReference builtin_entry);
313 void CallBuiltin(Register target); 360 void CallBuiltin(Register target);
314 void JumpToBuiltin(ExternalReference builtin_entry); 361 void JumpToBuiltin(ExternalReference builtin_entry);
315 void JumpToBuiltin(Register target); 362 void JumpToBuiltin(Register target);
316 363
317 // Generates code for reporting that an illegal operation has 364 // Generates code for reporting that an illegal operation has
318 // occurred. 365 // occurred.
319 void IllegalOperation(int num_arguments); 366 void IllegalOperation(int num_arguments);
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
405 void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } 452 void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
406 bool allow_stub_calls() { return allow_stub_calls_; } 453 bool allow_stub_calls() { return allow_stub_calls_; }
407 454
408 private: 455 private:
409 List<Unresolved> unresolved_; 456 List<Unresolved> unresolved_;
410 bool generating_stub_; 457 bool generating_stub_;
411 bool allow_stub_calls_; 458 bool allow_stub_calls_;
412 // This handle will be patched with the code object on installation. 459 // This handle will be patched with the code object on installation.
413 Handle<Object> code_object_; 460 Handle<Object> code_object_;
414 461
462 void Jump(intptr_t target, RelocInfo::Mode rmode,
463 bool ProtectBranchDelaySlot = true);
415 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always, 464 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
416 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 465 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
466 bool ProtectBranchDelaySlot = true);
467 void Call(intptr_t target, RelocInfo::Mode rmode,
468 bool ProtectBranchDelaySlot = true);
417 void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always, 469 void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
418 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); 470 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
471 bool ProtectBranchDelaySlot = true);
419 472
420 // Helper functions for generating invokes. 473 // Helper functions for generating invokes.
421 void InvokePrologue(const ParameterCount& expected, 474 void InvokePrologue(const ParameterCount& expected,
422 const ParameterCount& actual, 475 const ParameterCount& actual,
423 Handle<Code> code_constant, 476 Handle<Code> code_constant,
424 Register code_reg, 477 Register code_reg,
425 Label* done, 478 Label* done,
426 InvokeFlag flag); 479 InvokeFlag flag);
427 480
428 // Get the code for the given builtin. Returns if able to resolve 481 // Get the code for the given builtin. Returns if able to resolve
429 // the function in the 'resolved' flag. 482 // the function in the 'resolved' flag.
430 Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved); 483 Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
431 484
432 // Activation support. 485 // Activation support.
433 // EnterFrame clobbers t0 and t1.
434 void EnterFrame(StackFrame::Type type); 486 void EnterFrame(StackFrame::Type type);
435 void LeaveFrame(StackFrame::Type type); 487 void LeaveFrame(StackFrame::Type type);
436 }; 488 };
437 489
438 490
439 // ----------------------------------------------------------------------------- 491 // -----------------------------------------------------------------------------
440 // Static helper functions. 492 // Static helper functions.
441 493
442 // Generate a MemOperand for loading a field from an object. 494 // Generate a MemOperand for loading a field from an object.
443 static inline MemOperand FieldMemOperand(Register object, int offset) { 495 static inline MemOperand FieldMemOperand(Register object, int offset) {
444 return MemOperand(object, offset - kHeapObjectTag); 496 return MemOperand(object, offset - kHeapObjectTag);
445 } 497 }
446 498
447 499
448 500
449 #ifdef GENERATED_CODE_COVERAGE 501 #ifdef GENERATED_CODE_COVERAGE
450 #define CODE_COVERAGE_STRINGIFY(x) #x 502 #define CODE_COVERAGE_STRINGIFY(x) #x
451 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) 503 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
452 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) 504 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
453 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> 505 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
454 #else 506 #else
455 #define ACCESS_MASM(masm) masm-> 507 #define ACCESS_MASM(masm) masm->
456 #endif 508 #endif
457 509
458 } } // namespace v8::internal 510 } } // namespace v8::internal
459 511
460 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ 512 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
461 513
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698