Chromium Code Reviews

Side by Side Diff: src/ppc/macro-assembler-ppc.h

Issue 571173003: PowerPC specific sub-directories (Closed) | Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove IBM copyright, update code to later level | Created 6 years, 2 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_ 5 #ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ 6 #define V8_PPC_MACRO_ASSEMBLER_PPC_H_
7 7
8 #include "src/assembler.h" 8 #include "src/assembler.h"
9 #include "src/bailout-reason.h"
9 #include "src/frames.h" 10 #include "src/frames.h"
10 #include "src/globals.h" 11 #include "src/globals.h"
11 12
12 namespace v8 { 13 namespace v8 {
13 namespace internal { 14 namespace internal {
14 15
15 // ---------------------------------------------------------------------------- 16 // ----------------------------------------------------------------------------
16 // Static helper functions 17 // Static helper functions
17 18
18 // Generate a MemOperand for loading a field from an object. 19 // Generate a MemOperand for loading a field from an object.
19 inline MemOperand FieldMemOperand(Register object, int offset) { 20 inline MemOperand FieldMemOperand(Register object, int offset) {
20 return MemOperand(object, offset - kHeapObjectTag); 21 return MemOperand(object, offset - kHeapObjectTag);
21 } 22 }
22 23
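    FieldMemOperand folds the heap-object tag into the displacement, so field offsets from objects.h can be used directly on tagged pointers. A minimal usage sketch (registers and the __ ACCESS_MASM shorthand are illustrative, not part of this patch):

        // Load the map of a tagged object without manually untagging the pointer.
        __ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));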
23 24
24 // Give alias names to registers
25 const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
26 const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
27 const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
28
29 // Flags used for AllocateHeapNumber 25 // Flags used for AllocateHeapNumber
30 enum TaggingMode { 26 enum TaggingMode {
31 // Tag the result. 27 // Tag the result.
32 TAG_RESULT, 28 TAG_RESULT,
33 // Don't tag 29 // Don't tag
34 DONT_TAG_RESULT 30 DONT_TAG_RESULT
35 }; 31 };
36 32
37 33
38 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; 34 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
39 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; 35 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
40 enum PointersToHereCheck { 36 enum PointersToHereCheck {
41 kPointersToHereMaybeInteresting, 37 kPointersToHereMaybeInteresting,
42 kPointersToHereAreAlwaysInteresting 38 kPointersToHereAreAlwaysInteresting
43 }; 39 };
44 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; 40 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
45 41
46 42
47 Register GetRegisterThatIsNotOneOf(Register reg1, 43 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
48 Register reg2 = no_reg,
49 Register reg3 = no_reg, 44 Register reg3 = no_reg,
50 Register reg4 = no_reg, 45 Register reg4 = no_reg,
51 Register reg5 = no_reg, 46 Register reg5 = no_reg,
52 Register reg6 = no_reg); 47 Register reg6 = no_reg);
53 48
54 49
55 #ifdef DEBUG 50 #ifdef DEBUG
56 bool AreAliased(Register reg1, 51 bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
57 Register reg2, 52 Register reg4 = no_reg, Register reg5 = no_reg,
58 Register reg3 = no_reg, 53 Register reg6 = no_reg, Register reg7 = no_reg,
59 Register reg4 = no_reg,
60 Register reg5 = no_reg,
61 Register reg6 = no_reg,
62 Register reg7 = no_reg,
63 Register reg8 = no_reg); 54 Register reg8 = no_reg);
64 #endif 55 #endif
65 56
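    A hedged sketch of how these helpers are typically paired to pick and validate a scratch register (register names illustrative; AreAliased is debug-only, so the check compiles away in release builds):

        Register scratch = GetRegisterThatIsNotOneOf(object, value, address);
        DCHECK(!AreAliased(object, value, address, scratch));  // debug-only alias check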
 57 // These exist to provide portability between 32-bit and 64-bit
58 #if V8_TARGET_ARCH_PPC64
59 #define LoadPU ldu
60 #define LoadPX ldx
61 #define LoadPUX ldux
62 #define StorePU stdu
63 #define StorePX stdx
64 #define StorePUX stdux
65 #define ShiftLeftImm sldi
66 #define ShiftRightImm srdi
67 #define ClearLeftImm clrldi
68 #define ClearRightImm clrrdi
69 #define ShiftRightArithImm sradi
70 #define ShiftLeft_ sld
71 #define ShiftRight_ srd
72 #define ShiftRightArith srad
73 #define Mul mulld
74 #define Div divd
75 #else
76 #define LoadPU lwzu
77 #define LoadPX lwzx
78 #define LoadPUX lwzux
79 #define StorePU stwu
80 #define StorePX stwx
81 #define StorePUX stwux
82 #define ShiftLeftImm slwi
83 #define ShiftRightImm srwi
84 #define ClearLeftImm clrlwi
85 #define ClearRightImm clrrwi
86 #define ShiftRightArithImm srawi
87 #define ShiftLeft_ slw
88 #define ShiftRight_ srw
89 #define ShiftRightArith sraw
90 #define Mul mullw
91 #define Div divw
92 #endif
66 93
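    These width-neutral mnemonics let the same macro-assembler code emit doubleword forms (ld/std/sldi) on PPC64 and word forms (lwz/stw/slwi) on 32-bit PPC. A minimal sketch, with illustrative registers, written as it would appear inside a MacroAssembler method so the macros resolve to the inherited assembler mnemonics:

        // dst = base[index], scaling the index by the pointer width.
        ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));  // sldi on PPC64, slwi on PPC32
        LoadPX(dst, MemOperand(base, scratch));                   // ldx on PPC64, lwzx on PPC32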
67 enum TargetAddressStorageMode {
68 CAN_INLINE_TARGET_ADDRESS,
69 NEVER_INLINE_TARGET_ADDRESS
70 };
71 94
72 // MacroAssembler implements a collection of frequently used macros. 95 // MacroAssembler implements a collection of frequently used macros.
73 class MacroAssembler: public Assembler { 96 class MacroAssembler : public Assembler {
74 public: 97 public:
75 // The isolate parameter can be NULL if the macro assembler should 98 // The isolate parameter can be NULL if the macro assembler should
76 // not use isolate-dependent functionality. In this case, it's the 99 // not use isolate-dependent functionality. In this case, it's the
 77 // responsibility of the caller to never invoke such a function on the 100 // responsibility of the caller to never invoke such a function on the
78 // macro assembler. 101 // macro assembler.
79 MacroAssembler(Isolate* isolate, void* buffer, int size); 102 MacroAssembler(Isolate* isolate, void* buffer, int size);
80 103
81 104
82 // Returns the size of a call in instructions. Note, the value returned is 105 // Returns the size of a call in instructions. Note, the value returned is
83 // only valid as long as no entries are added to the constant pool between 106 // only valid as long as no entries are added to the constant pool between
84 // checking the call size and emitting the actual call. 107 // checking the call size and emitting the actual call.
85 static int CallSize(Register target, Condition cond = al); 108 static int CallSize(Register target);
86 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); 109 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
87 int CallStubSize(CodeStub* stub, 110 static int CallSizeNotPredictableCodeSize(Address target,
88 TypeFeedbackId ast_id = TypeFeedbackId::None(),
89 Condition cond = al);
90 static int CallSizeNotPredictableCodeSize(Isolate* isolate,
91 Address target,
92 RelocInfo::Mode rmode, 111 RelocInfo::Mode rmode,
93 Condition cond = al); 112 Condition cond = al);
94 113
95 // Jump, Call, and Ret pseudo instructions implementing inter-working. 114 // Jump, Call, and Ret pseudo instructions implementing inter-working.
96 void Jump(Register target, Condition cond = al); 115 void Jump(Register target);
97 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); 116 void JumpToJSEntry(Register target);
117 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
118 CRegister cr = cr7);
98 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); 119 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
99 void Call(Register target, Condition cond = al); 120 void Call(Register target);
100 void Call(Address target, RelocInfo::Mode rmode, 121 void CallJSEntry(Register target);
101 Condition cond = al, 122 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
102 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
103 int CallSize(Handle<Code> code, 123 int CallSize(Handle<Code> code,
104 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, 124 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
105 TypeFeedbackId ast_id = TypeFeedbackId::None(), 125 TypeFeedbackId ast_id = TypeFeedbackId::None(),
106 Condition cond = al); 126 Condition cond = al);
107 void Call(Handle<Code> code, 127 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
108 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
109 TypeFeedbackId ast_id = TypeFeedbackId::None(), 128 TypeFeedbackId ast_id = TypeFeedbackId::None(),
110 Condition cond = al, 129 Condition cond = al);
111 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
112 void Ret(Condition cond = al); 130 void Ret(Condition cond = al);
113 131
114 // Emit code to discard a non-negative number of pointer-sized elements 132 // Emit code to discard a non-negative number of pointer-sized elements
115 // from the stack, clobbering only the sp register. 133 // from the stack, clobbering only the sp register.
116 void Drop(int count, Condition cond = al); 134 void Drop(int count, Condition cond = al);
117 135
118 void Ret(int drop, Condition cond = al); 136 void Ret(int drop, Condition cond = al);
119 137
120 // Swap two registers. If the scratch register is omitted then a slightly 138 void Call(Label* target);
121 // less efficient form using xor instead of mov is emitted.
122 void Swap(Register reg1,
123 Register reg2,
124 Register scratch = no_reg,
125 Condition cond = al);
126 139
127 void Mls(Register dst, Register src1, Register src2, Register srcA, 140 // Emit call to the code we are currently generating.
128 Condition cond = al); 141 void CallSelf() {
129 void And(Register dst, Register src1, const Operand& src2, 142 Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
130 Condition cond = al); 143 Call(self, RelocInfo::CODE_TARGET);
131 void Ubfx(Register dst, Register src, int lsb, int width, 144 }
132 Condition cond = al);
133 void Sbfx(Register dst, Register src, int lsb, int width,
134 Condition cond = al);
135 // The scratch register is not used for ARMv7.
136 // scratch can be the same register as src (in which case it is trashed), but
137 // not the same as dst.
138 void Bfi(Register dst,
139 Register src,
140 Register scratch,
141 int lsb,
142 int width,
143 Condition cond = al);
144 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
145 void Usat(Register dst, int satpos, const Operand& src,
146 Condition cond = al);
147
148 void Call(Label* target);
149 void Push(Register src) { push(src); }
150 void Pop(Register dst) { pop(dst); }
151 145
152 // Register move. May do nothing if the registers are identical. 146 // Register move. May do nothing if the registers are identical.
153 void Move(Register dst, Handle<Object> value); 147 void Move(Register dst, Handle<Object> value);
154 void Move(Register dst, Register src, Condition cond = al); 148 void Move(Register dst, Register src, Condition cond = al);
155 void Move(Register dst, const Operand& src, SBit sbit = LeaveCC, 149 void Move(DoubleRegister dst, DoubleRegister src);
156 Condition cond = al) {
157 if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
158 mov(dst, src, sbit, cond);
159 }
160 }
161 void Move(DwVfpRegister dst, DwVfpRegister src);
162 150
163 void Load(Register dst, const MemOperand& src, Representation r); 151 void MultiPush(RegList regs);
164 void Store(Register src, const MemOperand& dst, Representation r); 152 void MultiPop(RegList regs);
165 153
166 // Load an object from the root table. 154 // Load an object from the root table.
167 void LoadRoot(Register destination, 155 void LoadRoot(Register destination, Heap::RootListIndex index,
168 Heap::RootListIndex index,
169 Condition cond = al); 156 Condition cond = al);
170 // Store an object to the root table. 157 // Store an object to the root table.
171 void StoreRoot(Register source, 158 void StoreRoot(Register source, Heap::RootListIndex index,
172 Heap::RootListIndex index,
173 Condition cond = al); 159 Condition cond = al);
174 160
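    For example, materializing a root constant typically looks like the following sketch (the destination register and root index are illustrative):

        __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);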
175 // --------------------------------------------------------------------------- 161 // ---------------------------------------------------------------------------
176 // GC Support 162 // GC Support
177 163
178 void IncrementalMarkingRecordWriteHelper(Register object, 164 void IncrementalMarkingRecordWriteHelper(Register object, Register value,
179 Register value,
180 Register address); 165 Register address);
181 166
182 enum RememberedSetFinalAction { 167 enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
183 kReturnAtEnd,
184 kFallThroughAtEnd
185 };
186 168
187 // Record in the remembered set the fact that we have a pointer to new space 169 // Record in the remembered set the fact that we have a pointer to new space
188 // at the address pointed to by the addr register. Only works if addr is not 170 // at the address pointed to by the addr register. Only works if addr is not
189 // in new space. 171 // in new space.
190 void RememberedSetHelper(Register object, // Used for debug code. 172 void RememberedSetHelper(Register object, // Used for debug code.
191 Register addr, 173 Register addr, Register scratch,
192 Register scratch,
193 SaveFPRegsMode save_fp, 174 SaveFPRegsMode save_fp,
194 RememberedSetFinalAction and_then); 175 RememberedSetFinalAction and_then);
195 176
196 void CheckPageFlag(Register object, 177 void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
197 Register scratch,
198 int mask,
199 Condition cc,
200 Label* condition_met); 178 Label* condition_met);
201 179
202 void CheckMapDeprecated(Handle<Map> map, 180 void CheckMapDeprecated(Handle<Map> map, Register scratch,
203 Register scratch,
204 Label* if_deprecated); 181 Label* if_deprecated);
205 182
206 // Check if object is in new space. Jumps if the object is not in new space. 183 // Check if object is in new space. Jumps if the object is not in new space.
207 // The register scratch can be object itself, but scratch will be clobbered. 184 // The register scratch can be object itself, but scratch will be clobbered.
208 void JumpIfNotInNewSpace(Register object, 185 void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
209 Register scratch,
210 Label* branch) {
211 InNewSpace(object, scratch, ne, branch); 186 InNewSpace(object, scratch, ne, branch);
212 } 187 }
213 188
214 // Check if object is in new space. Jumps if the object is in new space. 189 // Check if object is in new space. Jumps if the object is in new space.
215 // The register scratch can be object itself, but it will be clobbered. 190 // The register scratch can be object itself, but it will be clobbered.
216 void JumpIfInNewSpace(Register object, 191 void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
217 Register scratch,
218 Label* branch) {
219 InNewSpace(object, scratch, eq, branch); 192 InNewSpace(object, scratch, eq, branch);
220 } 193 }
221 194
222 // Check if an object has a given incremental marking color. 195 // Check if an object has a given incremental marking color.
223 void HasColor(Register object, 196 void HasColor(Register object, Register scratch0, Register scratch1,
224 Register scratch0, 197 Label* has_color, int first_bit, int second_bit);
225 Register scratch1,
226 Label* has_color,
227 int first_bit,
228 int second_bit);
229 198
230 void JumpIfBlack(Register object, 199 void JumpIfBlack(Register object, Register scratch0, Register scratch1,
231 Register scratch0,
232 Register scratch1,
233 Label* on_black); 200 Label* on_black);
234 201
235 // Checks the color of an object. If the object is already grey or black 202 // Checks the color of an object. If the object is already grey or black
236 // then we just fall through, since it is already live. If it is white and 203 // then we just fall through, since it is already live. If it is white and
237 // we can determine that it doesn't need to be scanned, then we just mark it 204 // we can determine that it doesn't need to be scanned, then we just mark it
238 // black and fall through. For the rest we jump to the label so the 205 // black and fall through. For the rest we jump to the label so the
239 // incremental marker can fix its assumptions. 206 // incremental marker can fix its assumptions.
240 void EnsureNotWhite(Register object, 207 void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
241 Register scratch1, 208 Register scratch3, Label* object_is_white_and_not_data);
242 Register scratch2,
243 Register scratch3,
244 Label* object_is_white_and_not_data);
245 209
246 // Detects conservatively whether an object is data-only, i.e. it does not need to 210 // Detects conservatively whether an object is data-only, i.e. it does not need to
247 // be scanned by the garbage collector. 211 // be scanned by the garbage collector.
248 void JumpIfDataObject(Register value, 212 void JumpIfDataObject(Register value, Register scratch,
249 Register scratch,
250 Label* not_data_object); 213 Label* not_data_object);
251 214
252 // Notify the garbage collector that we wrote a pointer into an object. 215 // Notify the garbage collector that we wrote a pointer into an object.
253 // |object| is the object being stored into, |value| is the object being 216 // |object| is the object being stored into, |value| is the object being
254 // stored. value and scratch registers are clobbered by the operation. 217 // stored. value and scratch registers are clobbered by the operation.
255 // The offset is the offset from the start of the object, not the offset from 218 // The offset is the offset from the start of the object, not the offset from
256 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off). 219 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
257 void RecordWriteField( 220 void RecordWriteField(
258 Register object, 221 Register object, int offset, Register value, Register scratch,
259 int offset, 222 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
260 Register value,
261 Register scratch,
262 LinkRegisterStatus lr_status,
263 SaveFPRegsMode save_fp,
264 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 223 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
265 SmiCheck smi_check = INLINE_SMI_CHECK, 224 SmiCheck smi_check = INLINE_SMI_CHECK,
266 PointersToHereCheck pointers_to_here_check_for_value = 225 PointersToHereCheck pointers_to_here_check_for_value =
267 kPointersToHereMaybeInteresting); 226 kPointersToHereMaybeInteresting);
268 227
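    A hedged sketch of the usual pairing of a field store with its write barrier; the field offset, registers and flags below are illustrative only:

        __ StoreP(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
        __ RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
                            kLRHasBeenSaved, kDontSaveFPRegs);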
269 // As above, but the offset has the tag presubtracted. For use with 228 // As above, but the offset has the tag presubtracted. For use with
270 // MemOperand(reg, off). 229 // MemOperand(reg, off).
271 inline void RecordWriteContextSlot( 230 inline void RecordWriteContextSlot(
272 Register context, 231 Register context, int offset, Register value, Register scratch,
273 int offset, 232 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
274 Register value,
275 Register scratch,
276 LinkRegisterStatus lr_status,
277 SaveFPRegsMode save_fp,
278 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 233 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
279 SmiCheck smi_check = INLINE_SMI_CHECK, 234 SmiCheck smi_check = INLINE_SMI_CHECK,
280 PointersToHereCheck pointers_to_here_check_for_value = 235 PointersToHereCheck pointers_to_here_check_for_value =
281 kPointersToHereMaybeInteresting) { 236 kPointersToHereMaybeInteresting) {
282 RecordWriteField(context, 237 RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
283 offset + kHeapObjectTag, 238 lr_status, save_fp, remembered_set_action, smi_check,
284 value,
285 scratch,
286 lr_status,
287 save_fp,
288 remembered_set_action,
289 smi_check,
290 pointers_to_here_check_for_value); 239 pointers_to_here_check_for_value);
291 } 240 }
292 241
293 void RecordWriteForMap( 242 void RecordWriteForMap(Register object, Register map, Register dst,
294 Register object, 243 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
295 Register map,
296 Register dst,
297 LinkRegisterStatus lr_status,
298 SaveFPRegsMode save_fp);
299 244
300 // For a given |object| notify the garbage collector that the slot |address| 245 // For a given |object| notify the garbage collector that the slot |address|
301 // has been written. |value| is the object being stored. The value and 246 // has been written. |value| is the object being stored. The value and
302 // address registers are clobbered by the operation. 247 // address registers are clobbered by the operation.
303 void RecordWrite( 248 void RecordWrite(
304 Register object, 249 Register object, Register address, Register value,
305 Register address, 250 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
306 Register value,
307 LinkRegisterStatus lr_status,
308 SaveFPRegsMode save_fp,
309 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 251 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
310 SmiCheck smi_check = INLINE_SMI_CHECK, 252 SmiCheck smi_check = INLINE_SMI_CHECK,
311 PointersToHereCheck pointers_to_here_check_for_value = 253 PointersToHereCheck pointers_to_here_check_for_value =
312 kPointersToHereMaybeInteresting); 254 kPointersToHereMaybeInteresting);
313 255
256 void Push(Register src) { push(src); }
257
314 // Push a handle. 258 // Push a handle.
315 void Push(Handle<Object> handle); 259 void Push(Handle<Object> handle);
316 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } 260 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
317 261
318 // Push two registers. Pushes leftmost register first (to highest address). 262 // Push two registers. Pushes leftmost register first (to highest address).
319 void Push(Register src1, Register src2, Condition cond = al) { 263 void Push(Register src1, Register src2) {
320 DCHECK(!src1.is(src2)); 264 StorePU(src2, MemOperand(sp, -2 * kPointerSize));
321 if (src1.code() > src2.code()) { 265 StoreP(src1, MemOperand(sp, kPointerSize));
322 stm(db_w, sp, src1.bit() | src2.bit(), cond);
323 } else {
324 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
325 str(src2, MemOperand(sp, 4, NegPreIndex), cond);
326 }
327 } 266 }
328 267
329 // Push three registers. Pushes leftmost register first (to highest address). 268 // Push three registers. Pushes leftmost register first (to highest address).
330 void Push(Register src1, Register src2, Register src3, Condition cond = al) { 269 void Push(Register src1, Register src2, Register src3) {
331 DCHECK(!src1.is(src2)); 270 StorePU(src3, MemOperand(sp, -3 * kPointerSize));
332 DCHECK(!src2.is(src3)); 271 StoreP(src2, MemOperand(sp, kPointerSize));
333 DCHECK(!src1.is(src3)); 272 StoreP(src1, MemOperand(sp, 2 * kPointerSize));
334 if (src1.code() > src2.code()) {
335 if (src2.code() > src3.code()) {
336 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
337 } else {
338 stm(db_w, sp, src1.bit() | src2.bit(), cond);
339 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
340 }
341 } else {
342 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
343 Push(src2, src3, cond);
344 }
345 } 273 }
346 274
347 // Push four registers. Pushes leftmost register first (to highest address). 275 // Push four registers. Pushes leftmost register first (to highest address).
348 void Push(Register src1, 276 void Push(Register src1, Register src2, Register src3, Register src4) {
349 Register src2, 277 StorePU(src4, MemOperand(sp, -4 * kPointerSize));
350 Register src3, 278 StoreP(src3, MemOperand(sp, kPointerSize));
351 Register src4, 279 StoreP(src2, MemOperand(sp, 2 * kPointerSize));
352 Condition cond = al) { 280 StoreP(src1, MemOperand(sp, 3 * kPointerSize));
353 DCHECK(!src1.is(src2));
354 DCHECK(!src2.is(src3));
355 DCHECK(!src1.is(src3));
356 DCHECK(!src1.is(src4));
357 DCHECK(!src2.is(src4));
358 DCHECK(!src3.is(src4));
359 if (src1.code() > src2.code()) {
360 if (src2.code() > src3.code()) {
361 if (src3.code() > src4.code()) {
362 stm(db_w,
363 sp,
364 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
365 cond);
366 } else {
367 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
368 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
369 }
370 } else {
371 stm(db_w, sp, src1.bit() | src2.bit(), cond);
372 Push(src3, src4, cond);
373 }
374 } else {
375 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
376 Push(src2, src3, src4, cond);
377 }
378 } 281 }
379 282
283 // Push five registers. Pushes leftmost register first (to highest address).
284 void Push(Register src1, Register src2, Register src3, Register src4,
285 Register src5) {
286 StorePU(src5, MemOperand(sp, -5 * kPointerSize));
287 StoreP(src4, MemOperand(sp, kPointerSize));
288 StoreP(src3, MemOperand(sp, 2 * kPointerSize));
289 StoreP(src2, MemOperand(sp, 3 * kPointerSize));
290 StoreP(src1, MemOperand(sp, 4 * kPointerSize));
291 }
292
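    The net effect of the multi-register Push overloads above, shown as a sketch of what Push(r3, r4) expands to per the inline bodies: the leftmost register lands at the highest address, mirroring the Pop overloads below.

        StorePU(r4, MemOperand(sp, -2 * kPointerSize));  // sp -= 2 words; [sp] = r4
        StoreP(r3, MemOperand(sp, kPointerSize));        // [sp + 1 word] = r3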
293 void Pop(Register dst) { pop(dst); }
294
380 // Pop two registers. Pops rightmost register first (from lower address). 295 // Pop two registers. Pops rightmost register first (from lower address).
381 void Pop(Register src1, Register src2, Condition cond = al) { 296 void Pop(Register src1, Register src2) {
382 DCHECK(!src1.is(src2)); 297 LoadP(src2, MemOperand(sp, 0));
383 if (src1.code() > src2.code()) { 298 LoadP(src1, MemOperand(sp, kPointerSize));
384 ldm(ia_w, sp, src1.bit() | src2.bit(), cond); 299 addi(sp, sp, Operand(2 * kPointerSize));
385 } else {
386 ldr(src2, MemOperand(sp, 4, PostIndex), cond);
387 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
388 }
389 } 300 }
390 301
391 // Pop three registers. Pops rightmost register first (from lower address). 302 // Pop three registers. Pops rightmost register first (from lower address).
392 void Pop(Register src1, Register src2, Register src3, Condition cond = al) { 303 void Pop(Register src1, Register src2, Register src3) {
393 DCHECK(!src1.is(src2)); 304 LoadP(src3, MemOperand(sp, 0));
394 DCHECK(!src2.is(src3)); 305 LoadP(src2, MemOperand(sp, kPointerSize));
395 DCHECK(!src1.is(src3)); 306 LoadP(src1, MemOperand(sp, 2 * kPointerSize));
396 if (src1.code() > src2.code()) { 307 addi(sp, sp, Operand(3 * kPointerSize));
397 if (src2.code() > src3.code()) {
398 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
399 } else {
400 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
401 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
402 }
403 } else {
404 Pop(src2, src3, cond);
405 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
406 }
407 } 308 }
408 309
409 // Pop four registers. Pops rightmost register first (from lower address). 310 // Pop four registers. Pops rightmost register first (from lower address).
410 void Pop(Register src1, 311 void Pop(Register src1, Register src2, Register src3, Register src4) {
411 Register src2, 312 LoadP(src4, MemOperand(sp, 0));
412 Register src3, 313 LoadP(src3, MemOperand(sp, kPointerSize));
413 Register src4, 314 LoadP(src2, MemOperand(sp, 2 * kPointerSize));
414 Condition cond = al) { 315 LoadP(src1, MemOperand(sp, 3 * kPointerSize));
415 DCHECK(!src1.is(src2)); 316 addi(sp, sp, Operand(4 * kPointerSize));
416 DCHECK(!src2.is(src3));
417 DCHECK(!src1.is(src3));
418 DCHECK(!src1.is(src4));
419 DCHECK(!src2.is(src4));
420 DCHECK(!src3.is(src4));
421 if (src1.code() > src2.code()) {
422 if (src2.code() > src3.code()) {
423 if (src3.code() > src4.code()) {
424 ldm(ia_w,
425 sp,
426 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
427 cond);
428 } else {
429 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
430 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
431 }
432 } else {
433 Pop(src3, src4, cond);
434 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
435 }
436 } else {
437 Pop(src2, src3, src4, cond);
438 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
439 }
440 } 317 }
441 318
442 // Push a fixed frame, consisting of lr, fp, constant pool (if 319 // Pop five registers. Pops rightmost register first (from lower address).
443 // FLAG_enable_ool_constant_pool), context and JS function / marker id if 320 void Pop(Register src1, Register src2, Register src3, Register src4,
444 // marker_reg is a valid register. 321 Register src5) {
322 LoadP(src5, MemOperand(sp, 0));
323 LoadP(src4, MemOperand(sp, kPointerSize));
324 LoadP(src3, MemOperand(sp, 2 * kPointerSize));
325 LoadP(src2, MemOperand(sp, 3 * kPointerSize));
326 LoadP(src1, MemOperand(sp, 4 * kPointerSize));
327 addi(sp, sp, Operand(5 * kPointerSize));
328 }
329
330 // Push a fixed frame, consisting of lr, fp, context and
331 // JS function / marker id if marker_reg is a valid register.
445 void PushFixedFrame(Register marker_reg = no_reg); 332 void PushFixedFrame(Register marker_reg = no_reg);
446 void PopFixedFrame(Register marker_reg = no_reg); 333 void PopFixedFrame(Register marker_reg = no_reg);
447 334
448 // Push and pop the registers that can hold pointers, as defined by the 335 // Push and pop the registers that can hold pointers, as defined by the
449 // RegList constant kSafepointSavedRegisters. 336 // RegList constant kSafepointSavedRegisters.
450 void PushSafepointRegisters(); 337 void PushSafepointRegisters();
451 void PopSafepointRegisters(); 338 void PopSafepointRegisters();
452 // Store value in register src in the safepoint stack slot for 339 // Store value in register src in the safepoint stack slot for
453 // register dst. 340 // register dst.
454 void StoreToSafepointRegisterSlot(Register src, Register dst); 341 void StoreToSafepointRegisterSlot(Register src, Register dst);
455 // Load the value of the src register from its safepoint stack slot 342 // Load the value of the src register from its safepoint stack slot
456 // into register dst. 343 // into register dst.
457 void LoadFromSafepointRegisterSlot(Register dst, Register src); 344 void LoadFromSafepointRegisterSlot(Register dst, Register src);
458 345
459 // Load two consecutive registers with two consecutive memory locations. 346 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
460 void Ldrd(Register dst1, 347 // from C.
461 Register dst2, 348 // Does not handle errors.
462 const MemOperand& src, 349 void FlushICache(Register address, size_t size, Register scratch);
463 Condition cond = al);
464
465 // Store two consecutive registers to two consecutive memory locations.
466 void Strd(Register src1,
467 Register src2,
468 const MemOperand& dst,
469 Condition cond = al);
470
471 // Ensure that FPSCR contains values needed by JavaScript.
472 // We need the NaNModeControlBit to be sure that operations like
473 // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
474 // In VFP3 it will be always the Canonical NaN.
475 // In VFP2 it will be either the Canonical NaN or the negative version
476 // of the Canonical NaN. It doesn't matter if we have two values. The aim
477 // is to be sure to never generate the hole NaN.
478 void VFPEnsureFPSCRState(Register scratch);
479 350
480 // If the value is a NaN, canonicalize the value; otherwise, do nothing. 351 // If the value is a NaN, canonicalize the value; otherwise, do nothing.
481 void VFPCanonicalizeNaN(const DwVfpRegister dst, 352 void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
482 const DwVfpRegister src, 353 void CanonicalizeNaN(const DoubleRegister value) {
483 const Condition cond = al); 354 CanonicalizeNaN(value, value);
484 void VFPCanonicalizeNaN(const DwVfpRegister value,
485 const Condition cond = al) {
486 VFPCanonicalizeNaN(value, value, cond);
487 } 355 }
488 356
489 // Compare double values and move the result to the normal condition flags. 357 // Converts the integer (untagged smi) in |src| to a double, storing
490 void VFPCompareAndSetFlags(const DwVfpRegister src1, 358 // the result to |double_dst|
491 const DwVfpRegister src2, 359 void ConvertIntToDouble(Register src, DoubleRegister double_dst);
492 const Condition cond = al);
493 void VFPCompareAndSetFlags(const DwVfpRegister src1,
494 const double src2,
495 const Condition cond = al);
496 360
497 // Compare double values and then load the fpscr flags to a register. 361 // Converts the unsigned integer (untagged smi) in |src| to
498 void VFPCompareAndLoadFlags(const DwVfpRegister src1, 362 // a double, storing the result to |double_dst|
499 const DwVfpRegister src2, 363 void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
500 const Register fpscr_flags,
501 const Condition cond = al);
502 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
503 const double src2,
504 const Register fpscr_flags,
505 const Condition cond = al);
506 364
507 void Vmov(const DwVfpRegister dst, 365 // Converts the integer (untagged smi) in |src| to
508 const double imm, 366 // a float, storing the result in |dst|
509 const Register scratch = no_reg); 367 // Warning: The value in |int_scratch| will be changed in the process!
368 void ConvertIntToFloat(const DoubleRegister dst, const Register src,
369 const Register int_scratch);
510 370
511 void VmovHigh(Register dst, DwVfpRegister src); 371 // Converts the double_input to an integer. Note that, upon return,
512 void VmovHigh(DwVfpRegister dst, Register src); 372 // the contents of double_dst will also hold the fixed point representation.
513 void VmovLow(Register dst, DwVfpRegister src); 373 void ConvertDoubleToInt64(const DoubleRegister double_input,
514 void VmovLow(DwVfpRegister dst, Register src); 374 #if !V8_TARGET_ARCH_PPC64
515 375 const Register dst_hi,
516 // Loads the number from object into dst register. 376 #endif
517 // If |object| is neither smi nor heap number, |not_number| is jumped to 377 const Register dst, const DoubleRegister double_dst,
518 // with |object| still intact. 378 FPRoundingMode rounding_mode = kRoundToZero);
519 void LoadNumber(Register object,
520 LowDwVfpRegister dst,
521 Register heap_number_map,
522 Register scratch,
523 Label* not_number);
524
525 // Loads the number from object into double_dst in the double format.
526 // Control will jump to not_int32 if the value cannot be exactly represented
527 // by a 32-bit integer.
528 // Floating point value in the 32-bit integer range that are not exact integer
529 // won't be loaded.
530 void LoadNumberAsInt32Double(Register object,
531 DwVfpRegister double_dst,
532 Register heap_number_map,
533 Register scratch,
534 LowDwVfpRegister double_scratch,
535 Label* not_int32);
536
537 // Loads the number from object into dst as a 32-bit integer.
538 // Control will jump to not_int32 if the object cannot be exactly represented
539 // by a 32-bit integer.
540 // Floating point value in the 32-bit integer range that are not exact integer
541 // won't be converted.
542 void LoadNumberAsInt32(Register object,
543 Register dst,
544 Register heap_number_map,
545 Register scratch,
546 DwVfpRegister double_scratch0,
547 LowDwVfpRegister double_scratch1,
548 Label* not_int32);
549 379
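    A minimal sketch of calling the ConvertDoubleToInt64 helper declared above; the register choices are illustrative, and on 32-bit PPC the extra dst_hi argument receives the high word of the 64-bit result:

        #if V8_TARGET_ARCH_PPC64
          __ ConvertDoubleToInt64(d1, r3, d0);      // r3 = (int64_t)d1; d0 holds the fixed-point bits
        #else
          __ ConvertDoubleToInt64(d1, r4, r3, d0);  // r4:r3 = (int64_t)d1
        #endif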
550 // Generates function and stub prologue code. 380 // Generates function and stub prologue code.
551 void StubPrologue(); 381 void StubPrologue(int prologue_offset = 0);
552 void Prologue(bool code_pre_aging); 382 void Prologue(bool code_pre_aging, int prologue_offset = 0);
553 383
554 // Enter exit frame. 384 // Enter exit frame.
555 // stack_space - extra stack space, used for alignment before call to C. 385 // stack_space - extra stack space, used for alignment before call to C.
556 void EnterExitFrame(bool save_doubles, int stack_space = 0); 386 void EnterExitFrame(bool save_doubles, int stack_space = 0);
557 387
558 // Leave the current exit frame. Expects the return value in r0. 388 // Leave the current exit frame. Expects the return value in r0.
559 // Expect the number of values, pushed prior to the exit frame, to 389 // Expect the number of values, pushed prior to the exit frame, to
560 // remove in a register (or no_reg, if there is nothing to remove). 390 // remove in a register (or no_reg, if there is nothing to remove).
561 void LeaveExitFrame(bool save_doubles, 391 void LeaveExitFrame(bool save_doubles, Register argument_count,
562 Register argument_count,
563 bool restore_context); 392 bool restore_context);
564 393
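    Sketch of the typical bracket around a call into C, assuming no extra stack space and no double registers to save (all choices illustrative):

        __ EnterExitFrame(false, 0);             // save_doubles = false, no extra slots
        // ... marshal arguments and call the C function ...
        __ LeaveExitFrame(false, no_reg, true);  // nothing to drop; restore the context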
565 // Get the actual activation frame alignment for target environment. 394 // Get the actual activation frame alignment for target environment.
566 static int ActivationFrameAlignment(); 395 static int ActivationFrameAlignment();
567 396
568 void LoadContext(Register dst, int context_chain_length); 397 void LoadContext(Register dst, int context_chain_length);
569 398
570 // Conditionally load the cached Array transitioned map of type 399 // Conditionally load the cached Array transitioned map of type
571 // transitioned_kind from the native context if the map in register 400 // transitioned_kind from the native context if the map in register
572 // map_in_out is the cached Array map in the native context of 401 // map_in_out is the cached Array map in the native context of
573 // expected_kind. 402 // expected_kind.
574 void LoadTransitionedArrayMapConditional( 403 void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
575 ElementsKind expected_kind, 404 ElementsKind transitioned_kind,
576 ElementsKind transitioned_kind, 405 Register map_in_out,
577 Register map_in_out, 406 Register scratch,
578 Register scratch, 407 Label* no_map_match);
579 Label* no_map_match);
580 408
581 void LoadGlobalFunction(int index, Register function); 409 void LoadGlobalFunction(int index, Register function);
582 410
583 // Load the initial map from the global function. The registers 411 // Load the initial map from the global function. The registers
584 // function and map can be the same, function is then overwritten. 412 // function and map can be the same, function is then overwritten.
585 void LoadGlobalFunctionInitialMap(Register function, 413 void LoadGlobalFunctionInitialMap(Register function, Register map,
586 Register map,
587 Register scratch); 414 Register scratch);
588 415
589 void InitializeRootRegister() { 416 void InitializeRootRegister() {
590 ExternalReference roots_array_start = 417 ExternalReference roots_array_start =
591 ExternalReference::roots_array_start(isolate()); 418 ExternalReference::roots_array_start(isolate());
592 mov(kRootRegister, Operand(roots_array_start)); 419 mov(kRootRegister, Operand(roots_array_start));
593 } 420 }
594 421
422 // ----------------------------------------------------------------
423 // New PPC macro-assembler interfaces that are slightly higher level
424 // than assembler-ppc and may generate variable-length sequences
425
426 // load a literal signed int value <value> to GPR <dst>
427 void LoadIntLiteral(Register dst, int value);
428
429 // load an SMI value <value> to GPR <dst>
430 void LoadSmiLiteral(Register dst, Smi* smi);
431
432 // load a literal double value <value> to FPR <result>
433 void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
434
435 void LoadWord(Register dst, const MemOperand& mem, Register scratch);
436
437 void LoadWordArith(Register dst, const MemOperand& mem,
438 Register scratch = no_reg);
439
440 void StoreWord(Register src, const MemOperand& mem, Register scratch);
441
442 void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
443
444 void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
445
446 void LoadByte(Register dst, const MemOperand& mem, Register scratch);
447
448 void StoreByte(Register src, const MemOperand& mem, Register scratch);
449
450 void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
451 Register scratch = no_reg);
452
453 void StoreRepresentation(Register src, const MemOperand& mem,
454 Representation r, Register scratch = no_reg);
455
456 // Move values between integer and floating point registers.
457 void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
458 void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
459 Register scratch);
460 void MovInt64ToDouble(DoubleRegister dst,
461 #if !V8_TARGET_ARCH_PPC64
462 Register src_hi,
463 #endif
464 Register src);
465 #if V8_TARGET_ARCH_PPC64
466 void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
467 Register src_lo, Register scratch);
468 #endif
469 void MovDoubleLowToInt(Register dst, DoubleRegister src);
470 void MovDoubleHighToInt(Register dst, DoubleRegister src);
471 void MovDoubleToInt64(
472 #if !V8_TARGET_ARCH_PPC64
473 Register dst_hi,
474 #endif
475 Register dst, DoubleRegister src);
476
477 void Add(Register dst, Register src, intptr_t value, Register scratch);
478 void Cmpi(Register src1, const Operand& src2, Register scratch,
479 CRegister cr = cr7);
480 void Cmpli(Register src1, const Operand& src2, Register scratch,
481 CRegister cr = cr7);
482 void Cmpwi(Register src1, const Operand& src2, Register scratch,
483 CRegister cr = cr7);
484 void Cmplwi(Register src1, const Operand& src2, Register scratch,
485 CRegister cr = cr7);
486 void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
487 void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
488 void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
489
490 void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
491 void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
492 void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
493 CRegister cr = cr7);
494 void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
495 CRegister cr = cr7);
496 void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
497 RCBit rc = LeaveRC);
498
499 // Set new rounding mode RN to FPSCR
500 void SetRoundingMode(FPRoundingMode RN);
501
502 // reset rounding mode to default (kRoundToNearest)
503 void ResetRoundingMode();
504
505 // These exist to provide portability between 32-bit and 64-bit
506 void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
507 void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
508
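    A hedged sketch of these variable-length helpers in use; whether each emits a single instruction or a short sequence depends on whether the operand fits PPC's 16-bit immediate fields (registers below are illustrative):

        __ LoadIntLiteral(r4, 0x12345678);    // may expand to a lis/ori pair
        __ Cmpi(r3, Operand(100000), r0);     // moves the immediate into r0 first if it does not fit
        __ LoadP(r5, MemOperand(fp, 8), r0);  // pointer-width load; scratch used only if needed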
595 // --------------------------------------------------------------------------- 509 // ---------------------------------------------------------------------------
596 // JavaScript invokes 510 // JavaScript invokes
597 511
598 // Invoke the JavaScript function code by either calling or jumping. 512 // Invoke the JavaScript function code by either calling or jumping.
599 void InvokeCode(Register code, 513 void InvokeCode(Register code, const ParameterCount& expected,
600 const ParameterCount& expected, 514 const ParameterCount& actual, InvokeFlag flag,
601 const ParameterCount& actual,
602 InvokeFlag flag,
603 const CallWrapper& call_wrapper); 515 const CallWrapper& call_wrapper);
604 516
605 // Invoke the JavaScript function in the given register. Changes the 517 // Invoke the JavaScript function in the given register. Changes the
606 // current context to the context in the function before invoking. 518 // current context to the context in the function before invoking.
607 void InvokeFunction(Register function, 519 void InvokeFunction(Register function, const ParameterCount& actual,
608 const ParameterCount& actual, 520 InvokeFlag flag, const CallWrapper& call_wrapper);
609 InvokeFlag flag,
610 const CallWrapper& call_wrapper);
611 521
612 void InvokeFunction(Register function, 522 void InvokeFunction(Register function, const ParameterCount& expected,
613 const ParameterCount& expected, 523 const ParameterCount& actual, InvokeFlag flag,
614 const ParameterCount& actual,
615 InvokeFlag flag,
616 const CallWrapper& call_wrapper); 524 const CallWrapper& call_wrapper);
617 525
618 void InvokeFunction(Handle<JSFunction> function, 526 void InvokeFunction(Handle<JSFunction> function,
619 const ParameterCount& expected, 527 const ParameterCount& expected,
620 const ParameterCount& actual, 528 const ParameterCount& actual, InvokeFlag flag,
621 InvokeFlag flag,
622 const CallWrapper& call_wrapper); 529 const CallWrapper& call_wrapper);
623 530
624 void IsObjectJSObjectType(Register heap_object, 531 void IsObjectJSObjectType(Register heap_object, Register map,
625 Register map, 532 Register scratch, Label* fail);
626 Register scratch,
627 Label* fail);
628 533
629 void IsInstanceJSObjectType(Register map, 534 void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
630 Register scratch,
631 Label* fail);
632 535
633 void IsObjectJSStringType(Register object, 536 void IsObjectJSStringType(Register object, Register scratch, Label* fail);
634 Register scratch,
635 Label* fail);
636 537
637 void IsObjectNameType(Register object, 538 void IsObjectNameType(Register object, Register scratch, Label* fail);
638 Register scratch,
639 Label* fail);
640 539
641 // --------------------------------------------------------------------------- 540 // ---------------------------------------------------------------------------
642 // Debugger Support 541 // Debugger Support
643 542
644 void DebugBreak(); 543 void DebugBreak();
645 544
646 // --------------------------------------------------------------------------- 545 // ---------------------------------------------------------------------------
647 // Exception handling 546 // Exception handling
648 547
649 // Push a new try handler and link into try handler chain. 548 // Push a new try handler and link into try handler chain.
650 void PushTryHandler(StackHandler::Kind kind, int handler_index); 549 void PushTryHandler(StackHandler::Kind kind, int handler_index);
651 550
652 // Unlink the stack handler on top of the stack from the try handler chain. 551 // Unlink the stack handler on top of the stack from the try handler chain.
653 // Must preserve the result register. 552 // Must preserve the result register.
654 void PopTryHandler(); 553 void PopTryHandler();
655 554
656 // Passes thrown value to the handler of top of the try handler chain. 555 // Passes thrown value to the handler of top of the try handler chain.
657 void Throw(Register value); 556 void Throw(Register value);
658 557
659 // Propagates an uncatchable exception to the top of the current JS stack's 558 // Propagates an uncatchable exception to the top of the current JS stack's
660 // handler chain. 559 // handler chain.
661 void ThrowUncatchable(Register value); 560 void ThrowUncatchable(Register value);
662 561
663 // --------------------------------------------------------------------------- 562 // ---------------------------------------------------------------------------
664 // Inline caching support 563 // Inline caching support
665 564
666 // Generate code for checking access rights - used for security checks 565 // Generate code for checking access rights - used for security checks
667 // on access to global objects across environments. The holder register 566 // on access to global objects across environments. The holder register
668 // is left untouched, whereas both scratch registers are clobbered. 567 // is left untouched, whereas both scratch registers are clobbered.
669 void CheckAccessGlobalProxy(Register holder_reg, 568 void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
670 Register scratch,
671 Label* miss); 569 Label* miss);
672 570
673 void GetNumberHash(Register t0, Register scratch); 571 void GetNumberHash(Register t0, Register scratch);
674 572
675 void LoadFromNumberDictionary(Label* miss, 573 void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
676 Register elements, 574 Register result, Register t0, Register t1,
677 Register key,
678 Register result,
679 Register t0,
680 Register t1,
681 Register t2); 575 Register t2);
682 576
683 577
684 inline void MarkCode(NopMarkerTypes type) { 578 inline void MarkCode(NopMarkerTypes type) { nop(type); }
685 nop(type);
686 }
687 579
688 // Check if the given instruction is a 'type' marker. 580 // Check if the given instruction is a 'type' marker.
689 // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)) 581 // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
690 // These instructions are generated to mark special locations in the code, 582 // These instructions are generated to mark special locations in the code,
691 // like some special IC code. 583 // like some special IC code.
692 static inline bool IsMarkedCode(Instr instr, int type) { 584 static inline bool IsMarkedCode(Instr instr, int type) {
693 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)); 585 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
694 return IsNop(instr, type); 586 return IsNop(instr, type);
695 } 587 }
696 588
697 589
698 static inline int GetCodeMarker(Instr instr) { 590 static inline int GetCodeMarker(Instr instr) {
699 int dst_reg_offset = 12; 591 int dst_reg_offset = 12;
700 int dst_mask = 0xf << dst_reg_offset; 592 int dst_mask = 0xf << dst_reg_offset;
701 int src_mask = 0xf; 593 int src_mask = 0xf;
702 int dst_reg = (instr & dst_mask) >> dst_reg_offset; 594 int dst_reg = (instr & dst_mask) >> dst_reg_offset;
703 int src_reg = instr & src_mask; 595 int src_reg = instr & src_mask;
704 uint32_t non_register_mask = ~(dst_mask | src_mask); 596 uint32_t non_register_mask = ~(dst_mask | src_mask);
705 uint32_t mov_mask = al | 13 << 21; 597 uint32_t mov_mask = al | 13 << 21;
706 598
707 // Return <n> if we have a mov rn rn, else return -1. 599 // Return <n> if we have a mov rn rn, else return -1.
708 int type = ((instr & non_register_mask) == mov_mask) && 600 int type = ((instr & non_register_mask) == mov_mask) &&
709 (dst_reg == src_reg) && 601 (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
710 (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER) 602 (dst_reg < LAST_CODE_MARKER)
711 ? src_reg 603 ? src_reg
712 : -1; 604 : -1;
713 DCHECK((type == -1) || 605 DCHECK((type == -1) ||
714 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER))); 606 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
715 return type; 607 return type;
716 } 608 }
717 609
718 610
719 // --------------------------------------------------------------------------- 611 // ---------------------------------------------------------------------------
720 // Allocation support 612 // Allocation support
721 613
722 // Allocate an object in new space or old pointer space. The object_size is 614 // Allocate an object in new space or old pointer space. The object_size is
723 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS 615 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
724 // is passed. If the space is exhausted control continues at the gc_required 616 // is passed. If the space is exhausted control continues at the gc_required
725 // label. The allocated object is returned in result. If the flag 617 // label. The allocated object is returned in result. If the flag
726 // tag_allocated_object is true the result is tagged as a heap object. 618 // tag_allocated_object is true the result is tagged as a heap object.
727 // All registers are clobbered also when control continues at the gc_required 619 // All registers are clobbered also when control continues at the gc_required
728 // label. 620 // label.
729 void Allocate(int object_size, 621 void Allocate(int object_size, Register result, Register scratch1,
730 Register result, 622 Register scratch2, Label* gc_required, AllocationFlags flags);
731 Register scratch1,
732 Register scratch2,
733 Label* gc_required,
734 AllocationFlags flags);
735 623
736 void Allocate(Register object_size, 624 void Allocate(Register object_size, Register result, Register scratch1,
737 Register result, 625 Register scratch2, Label* gc_required, AllocationFlags flags);
738 Register scratch1,
739 Register scratch2,
740 Label* gc_required,
741 AllocationFlags flags);
742 626
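    For instance, allocating a fixed-size object in new space might look like the following sketch (size, registers and flags are illustrative; TAG_OBJECT requests a tagged result):

        Label gc_required;
        __ Allocate(HeapNumber::kSize, result, scratch1, scratch2, &gc_required,
                    TAG_OBJECT);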
743 // Undo allocation in new space. The object passed and objects allocated after 627 // Undo allocation in new space. The object passed and objects allocated after
744 // it will no longer be allocated. The caller must make sure that no pointers 628 // it will no longer be allocated. The caller must make sure that no pointers
745 // are left to the object(s) no longer allocated as they would be invalid when 629 // are left to the object(s) no longer allocated as they would be invalid when
746 // allocation is undone. 630 // allocation is undone.
747 void UndoAllocationInNewSpace(Register object, Register scratch); 631 void UndoAllocationInNewSpace(Register object, Register scratch);
748 632
749 633
750 void AllocateTwoByteString(Register result, 634 void AllocateTwoByteString(Register result, Register length,
751 Register length, 635 Register scratch1, Register scratch2,
752 Register scratch1, 636 Register scratch3, Label* gc_required);
753 Register scratch2,
754 Register scratch3,
755 Label* gc_required);
756 void AllocateOneByteString(Register result, Register length, 637 void AllocateOneByteString(Register result, Register length,
757 Register scratch1, Register scratch2, 638 Register scratch1, Register scratch2,
758 Register scratch3, Label* gc_required); 639 Register scratch3, Label* gc_required);
759 void AllocateTwoByteConsString(Register result, 640 void AllocateTwoByteConsString(Register result, Register length,
760 Register length, 641 Register scratch1, Register scratch2,
761 Register scratch1,
762 Register scratch2,
763 Label* gc_required); 642 Label* gc_required);
764 void AllocateOneByteConsString(Register result, Register length, 643 void AllocateOneByteConsString(Register result, Register length,
765 Register scratch1, Register scratch2, 644 Register scratch1, Register scratch2,
766 Label* gc_required); 645 Label* gc_required);
767 void AllocateTwoByteSlicedString(Register result, 646 void AllocateTwoByteSlicedString(Register result, Register length,
768 Register length, 647 Register scratch1, Register scratch2,
769 Register scratch1,
770 Register scratch2,
771 Label* gc_required); 648 Label* gc_required);
772 void AllocateOneByteSlicedString(Register result, Register length, 649 void AllocateOneByteSlicedString(Register result, Register length,
773 Register scratch1, Register scratch2, 650 Register scratch1, Register scratch2,
774 Label* gc_required); 651 Label* gc_required);
775 652
776 // Allocates a heap number or jumps to the gc_required label if the young 653 // Allocates a heap number or jumps to the gc_required label if the young
777 // space is full and a scavenge is needed. All registers are clobbered also 654 // space is full and a scavenge is needed. All registers are clobbered also
778 // when control continues at the gc_required label. 655 // when control continues at the gc_required label.
779 void AllocateHeapNumber(Register result, 656 void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
780 Register scratch1, 657 Register heap_number_map, Label* gc_required,
781 Register scratch2,
782 Register heap_number_map,
783 Label* gc_required,
784 TaggingMode tagging_mode = TAG_RESULT, 658 TaggingMode tagging_mode = TAG_RESULT,
785 MutableMode mode = IMMUTABLE); 659 MutableMode mode = IMMUTABLE);
786 void AllocateHeapNumberWithValue(Register result, 660 void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
787 DwVfpRegister value, 661 Register scratch1, Register scratch2,
788 Register scratch1,
789 Register scratch2,
790 Register heap_number_map, 662 Register heap_number_map,
791 Label* gc_required); 663 Label* gc_required);
792 664
793 // Copies a fixed number of fields of heap objects from src to dst. 665 // Copies a fixed number of fields of heap objects from src to dst.
794 void CopyFields(Register dst, 666 void CopyFields(Register dst, Register src, RegList temps, int field_count);
795 Register src,
796 LowDwVfpRegister double_scratch,
797 int field_count);
798 667
799 // Copies a number of bytes from src to dst. All registers are clobbered. On 668 // Copies a number of bytes from src to dst. All registers are clobbered. On
800 // exit src and dst will point to the place just after where the last byte was 669 // exit src and dst will point to the place just after where the last byte was
801 // read or written and length will be zero. 670 // read or written and length will be zero.
802 void CopyBytes(Register src, 671 void CopyBytes(Register src, Register dst, Register length, Register scratch);
803 Register dst, 672
804 Register length, 673 // Initialize fields with filler values. |count| fields starting at
805 Register scratch); 674 // |start_offset| are overwritten with the value in |filler|. At the end of
675 // the loop, |start_offset| points at the next uninitialized field. |count| is
676 // assumed to be non-zero.
677 void InitializeNFieldsWithFiller(Register start_offset, Register count,
678 Register filler);
806 679
807 // Initialize fields with filler values. Fields starting at |start_offset| 680 // Initialize fields with filler values. Fields starting at |start_offset|
808 // not including end_offset are overwritten with the value in |filler|. At 681 // not including end_offset are overwritten with the value in |filler|. At
809 // the end of the loop, |start_offset| takes the value of |end_offset|. 682 // the end of the loop, |start_offset| takes the value of |end_offset|.
810 void InitializeFieldsWithFiller(Register start_offset, 683 void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
811 Register end_offset,
812 Register filler); 684 Register filler);
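A minimal usage sketch for the filler helpers above (the register choices and the LoadRoot call are assumptions for illustration, not part of this patch; `__` stands for the usual ACCESS_MASM shorthand used in V8 codegen):

    // Fill [start_offset, end_offset) of a freshly allocated object with
    // undefined so the GC never observes uninitialized fields.
    __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);  // filler value (assumed register)
    __ InitializeFieldsWithFiller(r6, r7, r8);        // start_offset, end_offset, filler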
813 685
814 // --------------------------------------------------------------------------- 686 // ---------------------------------------------------------------------------
815 // Support functions. 687 // Support functions.
816 688
817 // Tries to get the function prototype of a function and puts the value in 689 // Tries to get the function prototype of a function and puts the value in
818 // the result register. Checks that the function really is a 690 // the result register. Checks that the function really is a
819 // function and jumps to the miss label if the fast checks fail. The 691 // function and jumps to the miss label if the fast checks fail. The
820 // function register will be untouched; the other registers may be 692 // function register will be untouched; the other registers may be
821 // clobbered. 693 // clobbered.
822 void TryGetFunctionPrototype(Register function, 694 void TryGetFunctionPrototype(Register function, Register result,
823 Register result, 695 Register scratch, Label* miss,
824 Register scratch,
825 Label* miss,
826 bool miss_on_bound_function = false); 696 bool miss_on_bound_function = false);
827 697
828 // Compare object type for heap object. heap_object contains a non-Smi 698 // Compare object type for heap object. heap_object contains a non-Smi
829 // whose object type should be compared with the given type. This both 699 // whose object type should be compared with the given type. This both
830 // sets the flags and leaves the object type in the type_reg register. 700 // sets the flags and leaves the object type in the type_reg register.
831 // It leaves the map in the map register (unless the type_reg and map register 701 // It leaves the map in the map register (unless the type_reg and map register
832 // are the same register). It leaves the heap object in the heap_object 702 // are the same register). It leaves the heap object in the heap_object
833 // register unless the heap_object register is the same register as one of the 703 // register unless the heap_object register is the same register as one of the
834 // other registers. 704 // other registers.
835 // Type_reg can be no_reg. In that case ip is used. 705 // Type_reg can be no_reg. In that case ip is used.
836 void CompareObjectType(Register heap_object, 706 void CompareObjectType(Register heap_object, Register map, Register type_reg,
837 Register map,
838 Register type_reg,
839 InstanceType type); 707 InstanceType type);
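A hedged sketch of how the type check is typically consumed (the registers, the label, and the JS_FUNCTION_TYPE constant are illustrative, and bne is assumed to default to cr7 as elsewhere in this file):

    Label not_a_function;
    __ CompareObjectType(r3, r4, r5, JS_FUNCTION_TYPE);  // heap_object, map, type_reg
    __ bne(&not_a_function);  // flags were set by the compare above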
840 708
841 // Compare object type for heap object. Branch to false_label if type 709 // Compare object type for heap object. Branch to false_label if type
842 // is lower than min_type or greater than max_type. 710 // is lower than min_type or greater than max_type.
843 // Load map into the register map. 711 // Load map into the register map.
844 void CheckObjectTypeRange(Register heap_object, 712 void CheckObjectTypeRange(Register heap_object, Register map,
845 Register map, 713 InstanceType min_type, InstanceType max_type,
846 InstanceType min_type,
847 InstanceType max_type,
848 Label* false_label); 714 Label* false_label);
849 715
850 // Compare instance type in a map. map contains a valid map object whose 716 // Compare instance type in a map. map contains a valid map object whose
851 // object type should be compared with the given type. This both 717 // object type should be compared with the given type. This both
852 // sets the flags and leaves the object type in the type_reg register. 718 // sets the flags and leaves the object type in the type_reg register.
853 void CompareInstanceType(Register map, 719 void CompareInstanceType(Register map, Register type_reg, InstanceType type);
854 Register type_reg,
855 InstanceType type);
856 720
857 721
858 // Check if a map for a JSObject indicates that the object has fast elements. 722 // Check if a map for a JSObject indicates that the object has fast elements.
859 // Jump to the specified label if it does not. 723 // Jump to the specified label if it does not.
860 void CheckFastElements(Register map, 724 void CheckFastElements(Register map, Register scratch, Label* fail);
861 Register scratch,
862 Label* fail);
863 725
864 // Check if a map for a JSObject indicates that the object can have both smi 726 // Check if a map for a JSObject indicates that the object can have both smi
865 // and HeapObject elements. Jump to the specified label if it does not. 727 // and HeapObject elements. Jump to the specified label if it does not.
866 void CheckFastObjectElements(Register map, 728 void CheckFastObjectElements(Register map, Register scratch, Label* fail);
867 Register scratch,
868 Label* fail);
869 729
870 // Check if a map for a JSObject indicates that the object has fast smi only 730 // Check if a map for a JSObject indicates that the object has fast smi only
871 // elements. Jump to the specified label if it does not. 731 // elements. Jump to the specified label if it does not.
872 void CheckFastSmiElements(Register map, 732 void CheckFastSmiElements(Register map, Register scratch, Label* fail);
873 Register scratch,
874 Label* fail);
875 733
876 // Check to see if maybe_number can be stored as a double in 734 // Check to see if maybe_number can be stored as a double in
877 // FastDoubleElements. If it can, store it at the index specified by key in 735 // FastDoubleElements. If it can, store it at the index specified by key in
878 // the FastDoubleElements array elements. Otherwise jump to fail. 736 // the FastDoubleElements array elements. Otherwise jump to fail.
879 void StoreNumberToDoubleElements(Register value_reg, 737 void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
880 Register key_reg, 738 Register elements_reg, Register scratch1,
881 Register elements_reg, 739 DoubleRegister double_scratch, Label* fail,
882 Register scratch1,
883 LowDwVfpRegister double_scratch,
884 Label* fail,
885 int elements_offset = 0); 740 int elements_offset = 0);
886 741
887 // Compare an object's map with the specified map and its transitioned 742 // Compare an object's map with the specified map and its transitioned
888 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are 743 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
889 // set with result of map compare. If multiple map compares are required, the 744 // set with result of map compare. If multiple map compares are required, the
890 // compare sequence branches to early_success. 745 // compare sequence branches to early_success.
891 void CompareMap(Register obj, 746 void CompareMap(Register obj, Register scratch, Handle<Map> map,
892 Register scratch,
893 Handle<Map> map,
894 Label* early_success); 747 Label* early_success);
895 748
896 // As above, but the map of the object is already loaded into the register 749 // As above, but the map of the object is already loaded into the register
897 // which is preserved by the code generated. 750 // which is preserved by the code generated.
898 void CompareMap(Register obj_map, 751 void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
899 Handle<Map> map,
900 Label* early_success);
901 752
902 // Check if the map of an object is equal to a specified map and branch to 753 // Check if the map of an object is equal to a specified map and branch to
903 // label if not. Skip the smi check if not required (object is known to be a 754 // label if not. Skip the smi check if not required (object is known to be a
904 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match 755 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
905 // against maps that are ElementsKind transition maps of the specified map. 756 // against maps that are ElementsKind transition maps of the specified map.
906 void CheckMap(Register obj, 757 void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
907 Register scratch,
908 Handle<Map> map,
909 Label* fail,
910 SmiCheckType smi_check_type); 758 SmiCheckType smi_check_type);
911 759
912 760
913 void CheckMap(Register obj, 761 void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
914 Register scratch, 762 Label* fail, SmiCheckType smi_check_type);
915 Heap::RootListIndex index,
916 Label* fail,
917 SmiCheckType smi_check_type);
918 763
919 764
920 // Check if the map of an object is equal to a specified map and branch to a 765 // Check if the map of an object is equal to a specified map and branch to a
921 // specified target if equal. Skip the smi check if not required (object is 766 // specified target if equal. Skip the smi check if not required (object is
922 // known to be a heap object) 767 // known to be a heap object)
923 void DispatchMap(Register obj, 768 void DispatchMap(Register obj, Register scratch, Handle<Map> map,
924 Register scratch, 769 Handle<Code> success, SmiCheckType smi_check_type);
925 Handle<Map> map,
926 Handle<Code> success,
927 SmiCheckType smi_check_type);
928 770
929 771
930 // Compare the object in a register to a value from the root list. 772 // Compare the object in a register to a value from the root list.
931 // Uses the ip register as scratch. 773 // Uses the ip register as scratch.
932 void CompareRoot(Register obj, Heap::RootListIndex index); 774 void CompareRoot(Register obj, Heap::RootListIndex index);
933 775
934 776
935 // Load and check the instance type of an object for being a string. 777 // Load and check the instance type of an object for being a string.
936 // Loads the type into the second argument register. 778 // Loads the type into the second argument register.
937 // Returns a condition that will be enabled if the object was a string 779 // Returns a condition that will be enabled if the object was a string.
938 // and the passed-in condition passed. If the passed-in condition failed 780 Condition IsObjectStringType(Register obj, Register type) {
939 // then flags remain unchanged. 781 LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
940 Condition IsObjectStringType(Register obj, 782 lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
941 Register type, 783 andi(r0, type, Operand(kIsNotStringMask));
942 Condition cond = al) {
943 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
944 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
945 tst(type, Operand(kIsNotStringMask), cond);
946 DCHECK_EQ(0, kStringTag); 784 DCHECK_EQ(0, kStringTag);
947 return eq; 785 return eq;
948 } 786 }
949 787
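A sketch of consuming the string check above (registers and the label are assumptions; the record-form andi in the helper leaves its result in cr0, and the returned condition is eq):

    Label is_string;
    __ IsObjectStringType(r3, r4);  // r4 receives the instance type, cr0 is set
    __ beq(&is_string, cr0);        // taken when r3 holds a string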
950 788
951 // Picks out an array index from the hash field. 789 // Picks out an array index from the hash field.
952 // Register use: 790 // Register use:
953 // hash - holds the index's hash. Clobbered. 791 // hash - holds the index's hash. Clobbered.
954 // index - holds the overwritten index on exit. 792 // index - holds the overwritten index on exit.
955 void IndexFromHash(Register hash, Register index); 793 void IndexFromHash(Register hash, Register index);
956 794
957 // Get the number of least significant bits from a register 795 // Get the number of least significant bits from a register
958 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); 796 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
959 void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits); 797 void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
960 798
961 // Load the value of a smi object into a double register. 799 // Load the value of a smi object into a double register.
962 // The register value must be between d0 and d15. 800 void SmiToDouble(DoubleRegister value, Register smi);
963 void SmiToDouble(LowDwVfpRegister value, Register smi);
964 801
965 // Check if a double can be exactly represented as a signed 32-bit integer. 802 // Check if a double can be exactly represented as a signed 32-bit integer.
966 // Z flag set to one if true. 803 // CR_EQ in cr7 is set if true.
967 void TestDoubleIsInt32(DwVfpRegister double_input, 804 void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
968 LowDwVfpRegister double_scratch); 805 Register scratch2, DoubleRegister double_scratch);
969 806
970 // Try to convert a double to a signed 32-bit integer. 807 // Try to convert a double to a signed 32-bit integer.
971 // Z flag set to one and result assigned if the conversion is exact. 808 // CR_EQ in cr7 is set and result assigned if the conversion is exact.
972 void TryDoubleToInt32Exact(Register result, 809 void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
973 DwVfpRegister double_input, 810 Register scratch, DoubleRegister double_scratch);
974 LowDwVfpRegister double_scratch);
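A usage sketch for the CR-based protocol described above (registers and the label are assumptions, and bne is assumed to default to cr7):

    Label not_exact;
    __ TryDoubleToInt32Exact(r3, d1, r0, d0);  // result, double_input, scratch, double_scratch
    __ bne(&not_exact);  // CR_EQ in cr7 is only set when the conversion was exact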
975 811
976 // Floors a double and writes the value to the result register. 812 // Floors a double and writes the value to the result register.
977 // Go to exact if the conversion is exact (to be able to test -0), 813 // Go to exact if the conversion is exact (to be able to test -0),
978 // fall through calling code if an overflow occurred, else go to done. 814 // fall through calling code if an overflow occurred, else go to done.
979 // On return, input_high is loaded with high bits of input. 815 // On return, input_high is loaded with high bits of input.
980 void TryInt32Floor(Register result, 816 void TryInt32Floor(Register result, DoubleRegister double_input,
981 DwVfpRegister double_input, 817 Register input_high, Register scratch,
982 Register input_high, 818 DoubleRegister double_scratch, Label* done, Label* exact);
983 LowDwVfpRegister double_scratch,
984 Label* done,
985 Label* exact);
986 819
987 // Performs a truncating conversion of a floating point number as used by 820 // Performs a truncating conversion of a floating point number as used by
988 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it 821 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
989 // succeeds, otherwise falls through if result is saturated. On return 822 // succeeds, otherwise falls through if result is saturated. On return
990 // 'result' either holds answer, or is clobbered on fall through. 823 // 'result' either holds answer, or is clobbered on fall through.
991 // 824 //
992 // Only public for the test code in test-code-stubs-arm.cc. 825 // Only public for the test code in test-code-stubs-arm.cc.
993 void TryInlineTruncateDoubleToI(Register result, 826 void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
994 DwVfpRegister input,
995 Label* done); 827 Label* done);
996 828
997 // Performs a truncating conversion of a floating point number as used by 829 // Performs a truncating conversion of a floating point number as used by
998 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 830 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
999 // Exits with 'result' holding the answer. 831 // Exits with 'result' holding the answer.
1000 void TruncateDoubleToI(Register result, DwVfpRegister double_input); 832 void TruncateDoubleToI(Register result, DoubleRegister double_input);
1001 833
1002 // Performs a truncating conversion of a heap number as used by 834 // Performs a truncating conversion of a heap number as used by
1003 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' 835 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1004 // must be different registers. Exits with 'result' holding the answer. 836 // must be different registers. Exits with 'result' holding the answer.
1005 void TruncateHeapNumberToI(Register result, Register object); 837 void TruncateHeapNumberToI(Register result, Register object);
1006 838
1007 // Converts the smi or heap number in object to an int32 using the rules 839 // Converts the smi or heap number in object to an int32 using the rules
1008 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated 840 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1009 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be 841 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1010 // different registers. 842 // different registers.
1011 void TruncateNumberToI(Register object, 843 void TruncateNumberToI(Register object, Register result,
1012 Register result, 844 Register heap_number_map, Register scratch1,
1013 Register heap_number_map,
1014 Register scratch1,
1015 Label* not_int32); 845 Label* not_int32);
1016 846
1017 // Check whether d16-d31 are available on the CPU. The result is given by the 847 // Overflow handling functions.
1018 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. 848 // Usage: call the appropriate arithmetic function and then call one of the
1019 void CheckFor32DRegs(Register scratch); 849 // flow control functions with the corresponding label.
1020 850
1021 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double 851 // Compute dst = left + right, setting condition codes. dst may be the same as
1022 // values to location, saving [d0..(d15|d31)]. 852 // either left or right (or a unique register). left and right must not be
1023 void SaveFPRegs(Register location, Register scratch); 853 // the same register.
854 void AddAndCheckForOverflow(Register dst, Register left, Register right,
855 Register overflow_dst, Register scratch = r0);
856 void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
857 Register overflow_dst, Register scratch = r0);
1024 858
1025 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double 859 // Compute dst = left - right, setting condition codes. dst may be the same as
1026 // values to location, restoring [d0..(d15|d31)]. 860 // either left or right (or a unique register). left and right must not be
1027 void RestoreFPRegs(Register location, Register scratch); 861 // the same register.
862 void SubAndCheckForOverflow(Register dst, Register left, Register right,
863 Register overflow_dst, Register scratch = r0);
864
865 void BranchOnOverflow(Label* label) { blt(label, cr0); }
866
867 void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
868
869 void RetOnOverflow(void) {
870 Label label;
871
872 blt(&label, cr0);
873 Ret();
874 bind(&label);
875 }
876
877 void RetOnNoOverflow(void) {
878 Label label;
879
880 bge(&label, cr0);
881 Ret();
882 bind(&label);
883 }
884
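The intended call pattern, following the usage comment above (registers and the label are chosen for illustration):

    Label overflowed;
    __ AddAndCheckForOverflow(r3, r4, r5, r6, r0);  // dst, left, right, overflow_dst, scratch
    __ BranchOnOverflow(&overflowed);               // expands to blt on cr0, per the inline body above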
885 // Pushes <count> double values to <location>, starting from d<first>.
886 void SaveFPRegs(Register location, int first, int count);
887
888 // Pops <count> double values from <location>, starting from d<first>.
889 void RestoreFPRegs(Register location, int first, int count);
1028 890
1029 // --------------------------------------------------------------------------- 891 // ---------------------------------------------------------------------------
1030 // Runtime calls 892 // Runtime calls
1031 893
1032 // Call a code stub. 894 // Call a code stub.
1033 void CallStub(CodeStub* stub, 895 void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
1034 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1035 Condition cond = al); 896 Condition cond = al);
1036 897
1037 // Call a code stub. 898 // Call a code stub.
1038 void TailCallStub(CodeStub* stub, Condition cond = al); 899 void TailCallStub(CodeStub* stub, Condition cond = al);
1039 900
1040 // Call a runtime routine. 901 // Call a runtime routine.
1041 void CallRuntime(const Runtime::Function* f, 902 void CallRuntime(const Runtime::Function* f, int num_arguments,
1042 int num_arguments,
1043 SaveFPRegsMode save_doubles = kDontSaveFPRegs); 903 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1044 void CallRuntimeSaveDoubles(Runtime::FunctionId id) { 904 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1045 const Runtime::Function* function = Runtime::FunctionForId(id); 905 const Runtime::Function* function = Runtime::FunctionForId(id);
1046 CallRuntime(function, function->nargs, kSaveFPRegs); 906 CallRuntime(function, function->nargs, kSaveFPRegs);
1047 } 907 }
1048 908
1049 // Convenience function: Same as above, but takes the fid instead. 909 // Convenience function: Same as above, but takes the fid instead.
1050 void CallRuntime(Runtime::FunctionId id, 910 void CallRuntime(Runtime::FunctionId id, int num_arguments,
1051 int num_arguments,
1052 SaveFPRegsMode save_doubles = kDontSaveFPRegs) { 911 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1053 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles); 912 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1054 } 913 }
1055 914
1056 // Convenience function: call an external reference. 915 // Convenience function: call an external reference.
1057 void CallExternalReference(const ExternalReference& ext, 916 void CallExternalReference(const ExternalReference& ext, int num_arguments);
1058 int num_arguments);
1059 917
1060 // Tail call of a runtime routine (jump). 918 // Tail call of a runtime routine (jump).
1061 // Like JumpToExternalReference, but also takes care of passing the number 919 // Like JumpToExternalReference, but also takes care of passing the number
1062 // of parameters. 920 // of parameters.
1063 void TailCallExternalReference(const ExternalReference& ext, 921 void TailCallExternalReference(const ExternalReference& ext,
1064 int num_arguments, 922 int num_arguments, int result_size);
1065 int result_size);
1066 923
1067 // Convenience function: tail call a runtime routine (jump). 924 // Convenience function: tail call a runtime routine (jump).
1068 void TailCallRuntime(Runtime::FunctionId fid, 925 void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
1069 int num_arguments,
1070 int result_size); 926 int result_size);
1071 927
1072 int CalculateStackPassedWords(int num_reg_arguments, 928 int CalculateStackPassedWords(int num_reg_arguments,
1073 int num_double_arguments); 929 int num_double_arguments);
1074 930
1075 // Before calling a C-function from generated code, align arguments on stack. 931 // Before calling a C-function from generated code, align arguments on stack.
1076 // After aligning the frame, non-register arguments must be stored in 932 // After aligning the frame, non-register arguments must be stored in
1077 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments 933 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1078 // are word sized. If double arguments are used, this function assumes that 934 // are word sized. If double arguments are used, this function assumes that
1079 // all double arguments are stored before core registers; otherwise the 935 // all double arguments are stored before core registers; otherwise the
1080 // correct alignment of the double values is not guaranteed. 936 // correct alignment of the double values is not guaranteed.
1081 // Some compilers/platforms require the stack to be aligned when calling 937 // Some compilers/platforms require the stack to be aligned when calling
1082 // C++ code. 938 // C++ code.
1083 // Needs a scratch register to do some arithmetic. This register will be 939 // Needs a scratch register to do some arithmetic. This register will be
1084 // trashed. 940 // trashed.
1085 void PrepareCallCFunction(int num_reg_arguments, 941 void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
1086 int num_double_registers,
1087 Register scratch); 942 Register scratch);
1088 void PrepareCallCFunction(int num_reg_arguments, 943 void PrepareCallCFunction(int num_reg_arguments, Register scratch);
1089 Register scratch);
1090 944
1091 // There are two ways of passing double arguments on ARM, depending on 945 // There are two ways of passing double arguments on ARM, depending on
1092 // whether soft or hard floating point ABI is used. These functions 946 // whether soft or hard floating point ABI is used. These functions
1093 // abstract parameter passing for the three different ways we call 947 // abstract parameter passing for the three different ways we call
1094 // C functions from generated code. 948 // C functions from generated code.
1095 void MovToFloatParameter(DwVfpRegister src); 949 void MovToFloatParameter(DoubleRegister src);
1096 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2); 950 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1097 void MovToFloatResult(DwVfpRegister src); 951 void MovToFloatResult(DoubleRegister src);
1098 952
1099 // Calls a C function and cleans up the space for arguments allocated 953 // Calls a C function and cleans up the space for arguments allocated
1100 // by PrepareCallCFunction. The called function is not allowed to trigger a 954 // by PrepareCallCFunction. The called function is not allowed to trigger a
1101 // garbage collection, since that might move the code and invalidate the 955 // garbage collection, since that might move the code and invalidate the
1102 // return address (unless this is somehow accounted for by the called 956 // return address (unless this is somehow accounted for by the called
1103 // function). 957 // function).
1104 void CallCFunction(ExternalReference function, int num_arguments); 958 void CallCFunction(ExternalReference function, int num_arguments);
1105 void CallCFunction(Register function, int num_arguments); 959 void CallCFunction(Register function, int num_arguments);
1106 void CallCFunction(ExternalReference function, 960 void CallCFunction(ExternalReference function, int num_reg_arguments,
1107 int num_reg_arguments,
1108 int num_double_arguments); 961 int num_double_arguments);
1109 void CallCFunction(Register function, 962 void CallCFunction(Register function, int num_reg_arguments,
1110 int num_reg_arguments,
1111 int num_double_arguments); 963 int num_double_arguments);
1112 964
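A sketch of the C-call sequence these helpers support (the argument register and the register holding the target address are assumptions; only the helper signatures above come from the patch):

    __ PrepareCallCFunction(1, r0);  // one word-sized argument, r0 used as scratch
    __ mr(r3, r4);                   // assumed: first integer argument travels in r3
    __ CallCFunction(ip, 1);         // function address assumed to already be in ip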
1113 void MovFromFloatParameter(DwVfpRegister dst); 965 void MovFromFloatParameter(DoubleRegister dst);
1114 void MovFromFloatResult(DwVfpRegister dst); 966 void MovFromFloatResult(DoubleRegister dst);
1115 967
1116 // Calls an API function. Allocates HandleScope, extracts returned value 968 // Calls an API function. Allocates HandleScope, extracts returned value
1117 // from handle and propagates exceptions. Restores context. stack_space 969 // from handle and propagates exceptions. Restores context. stack_space
1118 // - space to be unwound on exit (includes the call JS arguments space and 970 // - space to be unwound on exit (includes the call JS arguments space and
1119 // the additional space allocated for the fast call). 971 // the additional space allocated for the fast call).
1120 void CallApiFunctionAndReturn(Register function_address, 972 void CallApiFunctionAndReturn(Register function_address,
1121 ExternalReference thunk_ref, 973 ExternalReference thunk_ref, int stack_space,
1122 int stack_space,
1123 MemOperand return_value_operand, 974 MemOperand return_value_operand,
1124 MemOperand* context_restore_operand); 975 MemOperand* context_restore_operand);
1125 976
1126 // Jump to a runtime routine. 977 // Jump to a runtime routine.
1127 void JumpToExternalReference(const ExternalReference& builtin); 978 void JumpToExternalReference(const ExternalReference& builtin);
1128 979
1129 // Invoke specified builtin JavaScript function. Adds an entry to 980 // Invoke specified builtin JavaScript function. Adds an entry to
1130 // the unresolved list if the name does not resolve. 981 // the unresolved list if the name does not resolve.
1131 void InvokeBuiltin(Builtins::JavaScript id, 982 void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
1132 InvokeFlag flag,
1133 const CallWrapper& call_wrapper = NullCallWrapper()); 983 const CallWrapper& call_wrapper = NullCallWrapper());
1134 984
1135 // Store the code object for the given builtin in the target register and 985 // Store the code object for the given builtin in the target register and
1136 // setup the function in r1. 986 // setup the function in r1.
1137 void GetBuiltinEntry(Register target, Builtins::JavaScript id); 987 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1138 988
1139 // Store the function for the given builtin in the target register. 989 // Store the function for the given builtin in the target register.
1140 void GetBuiltinFunction(Register target, Builtins::JavaScript id); 990 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1141 991
1142 Handle<Object> CodeObject() { 992 Handle<Object> CodeObject() {
1143 DCHECK(!code_object_.is_null()); 993 DCHECK(!code_object_.is_null());
1144 return code_object_; 994 return code_object_;
1145 } 995 }
1146 996
1147 997
1148 // Emit code for a truncating division by a constant. The dividend register is 998 // Emit code for a truncating division by a constant. The dividend register is
1149 // unchanged and ip gets clobbered. Dividend and result must be different. 999 // unchanged and ip gets clobbered. Dividend and result must be different.
1150 void TruncatingDiv(Register result, Register dividend, int32_t divisor); 1000 void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1151 1001
1152 // --------------------------------------------------------------------------- 1002 // ---------------------------------------------------------------------------
1153 // StatsCounter support 1003 // StatsCounter support
1154 1004
1155 void SetCounter(StatsCounter* counter, int value, 1005 void SetCounter(StatsCounter* counter, int value, Register scratch1,
1156 Register scratch1, Register scratch2); 1006 Register scratch2);
1157 void IncrementCounter(StatsCounter* counter, int value, 1007 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1158 Register scratch1, Register scratch2); 1008 Register scratch2);
1159 void DecrementCounter(StatsCounter* counter, int value, 1009 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1160 Register scratch1, Register scratch2); 1010 Register scratch2);
1161 1011
1162 1012
1163 // --------------------------------------------------------------------------- 1013 // ---------------------------------------------------------------------------
1164 // Debugging 1014 // Debugging
1165 1015
1166 // Calls Abort(msg) if the condition cond is not satisfied. 1016 // Calls Abort(msg) if the condition cond is not satisfied.
1167 // Use --debug_code to enable. 1017 // Use --debug_code to enable.
1168 void Assert(Condition cond, BailoutReason reason); 1018 void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
1169 void AssertFastElements(Register elements); 1019 void AssertFastElements(Register elements);
1170 1020
1171 // Like Assert(), but always enabled. 1021 // Like Assert(), but always enabled.
1172 void Check(Condition cond, BailoutReason reason); 1022 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
1173 1023
1174 // Print a message to stdout and abort execution. 1024 // Print a message to stdout and abort execution.
1175 void Abort(BailoutReason msg); 1025 void Abort(BailoutReason reason);
1176 1026
1177 // Verify restrictions about code generated in stubs. 1027 // Verify restrictions about code generated in stubs.
1178 void set_generating_stub(bool value) { generating_stub_ = value; } 1028 void set_generating_stub(bool value) { generating_stub_ = value; }
1179 bool generating_stub() { return generating_stub_; } 1029 bool generating_stub() { return generating_stub_; }
1180 void set_has_frame(bool value) { has_frame_ = value; } 1030 void set_has_frame(bool value) { has_frame_ = value; }
1181 bool has_frame() { return has_frame_; } 1031 bool has_frame() { return has_frame_; }
1182 inline bool AllowThisStubCall(CodeStub* stub); 1032 inline bool AllowThisStubCall(CodeStub* stub);
1183 1033
1184 // EABI variant for double arguments in use.
1185 bool use_eabi_hardfloat() {
1186 #ifdef __arm__
1187 return base::OS::ArmUsingHardFloat();
1188 #elif USE_EABI_HARDFLOAT
1189 return true;
1190 #else
1191 return false;
1192 #endif
1193 }
1194
1195 // --------------------------------------------------------------------------- 1034 // ---------------------------------------------------------------------------
1196 // Number utilities 1035 // Number utilities
1197 1036
1198 // Check whether the value of reg is a power of two and not zero. If not 1037 // Check whether the value of reg is a power of two and not zero. If not
1199 // control continues at the label not_power_of_two. If reg is a power of two 1038 // control continues at the label not_power_of_two. If reg is a power of two
1200 // the register scratch contains the value of (reg - 1) when control falls 1039 // the register scratch contains the value of (reg - 1) when control falls
1201 // through. 1040 // through.
1202 void JumpIfNotPowerOfTwoOrZero(Register reg, 1041 void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
1203 Register scratch,
1204 Label* not_power_of_two_or_zero); 1042 Label* not_power_of_two_or_zero);
1205 // Check whether the value of reg is a power of two and not zero. 1043 // Check whether the value of reg is a power of two and not zero.
1206 // Control falls through if it is, with scratch containing the mask 1044 // Control falls through if it is, with scratch containing the mask
1207 // value (reg - 1). 1045 // value (reg - 1).
1208 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is 1046 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1209 // zero or negative, or jumps to the 'not_power_of_two' label if the value is 1047 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1210 // strictly positive but not a power of two. 1048 // strictly positive but not a power of two.
1211 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, 1049 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
1212 Register scratch,
1213 Label* zero_and_neg, 1050 Label* zero_and_neg,
1214 Label* not_power_of_two); 1051 Label* not_power_of_two);
1215 1052
1216 // --------------------------------------------------------------------------- 1053 // ---------------------------------------------------------------------------
1217 // Smi utilities 1054 // Bit testing/extraction
1055 //
1056 // Bit numbering is such that the least significant bit is bit 0
1057 // (for consistency between 32/64-bit).
1218 1058
1219 void SmiTag(Register reg, SBit s = LeaveCC) { 1059 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
1220 add(reg, reg, Operand(reg), s); 1060 // and place them into the least significant bits of dst.
1221 } 1061 inline void ExtractBitRange(Register dst, Register src, int rangeStart,
1222 void SmiTag(Register dst, Register src, SBit s = LeaveCC) { 1062 int rangeEnd, RCBit rc = LeaveRC) {
1223 add(dst, src, Operand(src), s); 1063 DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
1064 int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
1065 int width = rangeStart - rangeEnd + 1;
1066 #if V8_TARGET_ARCH_PPC64
1067 rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
1068 #else
1069 rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc);
1070 #endif
1224 } 1071 }
1225 1072
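A worked example of the range semantics (the registers and values are illustrative): with rangeStart 7 and rangeEnd 4, bits 7..4 of src end up right-justified in dst, so src = 0xA5 yields dst = 0xA:

    __ ExtractBitRange(r4, r3, 7, 4);  // r4 = (r3 >> 4) & 0xF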
1226 // Try to convert int32 to smi. If the value is too large, preserve 1073 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
1227 // the original value and jump to not_a_smi. Destroys scratch and 1074 RCBit rc = LeaveRC) {
1228 // sets flags. 1075 ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
1229 void TrySmiTag(Register reg, Label* not_a_smi) {
1230 TrySmiTag(reg, reg, not_a_smi);
1231 } 1076 }
1232 void TrySmiTag(Register reg, Register src, Label* not_a_smi) { 1077
1233 SmiTag(ip, src, SetCC); 1078 // Extract consecutive bits (defined by mask) from src and place them
1234 b(vs, not_a_smi); 1079 // into the least significant bits of dst.
1235 mov(reg, ip); 1080 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
1081 RCBit rc = LeaveRC) {
1082 int start = kBitsPerPointer - 1;
1083 int end;
1084 uintptr_t bit = (1L << start);
1085
1086 while (bit && (mask & bit) == 0) {
1087 start--;
1088 bit >>= 1;
1089 }
1090 end = start;
1091 bit >>= 1;
1092
1093 while (bit && (mask & bit)) {
1094 end--;
1095 bit >>= 1;
1096 }
1097
1098 // 1-bits in mask must be contiguous
1099 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1100
1101 ExtractBitRange(dst, src, start, end, rc);
1102 }
1103
1104 // Test single bit in value.
1105 inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
1106 ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
1107 }
1108
1109 // Test consecutive bit range in value. Range is defined by
1110 // rangeStart - rangeEnd.
1111 inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
1112 Register scratch = r0) {
1113 ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
1114 }
1115
1116 // Test consecutive bit range in value. Range is defined by mask.
1117 inline void TestBitMask(Register value, uintptr_t mask,
1118 Register scratch = r0) {
1119 ExtractBitMask(scratch, value, mask, SetRC);
1236 } 1120 }
1237 1121
1238 1122
1239 void SmiUntag(Register reg, SBit s = LeaveCC) { 1123 // ---------------------------------------------------------------------------
1240 mov(reg, Operand::SmiUntag(reg), s); 1124 // Smi utilities
1125
1126 // Shift left by 1
1127 void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
1128 void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
1129 ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
1241 } 1130 }
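A worked example of the tagging arithmetic (kSmiShift is kSmiTagSize + kSmiShiftSize, declared near the end of this class, which works out to 1 on 32-bit and 32 on PPC64; registers are illustrative):

    __ SmiTag(r4, r3);  // r3 = 5: r4 becomes 10 (5 << 1) on 32-bit, 5 << 32 on PPC64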
1242 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) { 1131
1243 mov(dst, Operand::SmiUntag(src), s); 1132 #if !V8_TARGET_ARCH_PPC64
1133 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1134 void SmiTagCheckOverflow(Register reg, Register overflow);
1135 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1136
1137 inline void JumpIfNotSmiCandidate(Register value, Register scratch,
1138 Label* not_smi_label) {
1139 // High bits must be identical to fit into an Smi
1140 addis(scratch, value, Operand(0x40000000u >> 16));
1141 cmpi(scratch, Operand::Zero());
1142 blt(not_smi_label);
1143 }
1144 #endif
1145 inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
1146 // The test is different for unsigned int values. Since we need
1147 // the value to be in the range of a positive smi, we can't
1148 // handle any of the high bits being set in the value.
1149 TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
1150 scratch);
1151 }
1152 inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
1153 Label* not_smi_label) {
1154 TestUnsignedSmiCandidate(value, scratch);
1155 bne(not_smi_label, cr0);
1156 }
1157
1158 void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
1159
1160 void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
1161 ShiftRightArithImm(dst, src, kSmiShift, rc);
1162 }
1163
1164 void SmiToPtrArrayOffset(Register dst, Register src) {
1165 #if V8_TARGET_ARCH_PPC64
1166 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
1167 ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
1168 #else
1169 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
1170 ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
1171 #endif
1172 }
1173
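A worked example of the offset computation (the smi value and registers are illustrative): for smi index 5, the PPC64 path shifts the tagged value (5 << 32) right by kSmiShift - kPointerSizeLog2 = 29, giving 40 bytes (5 * 8), while the 32-bit path shifts (5 << 1) left by 1, giving 20 bytes (5 * 4):

    __ SmiToPtrArrayOffset(r4, r3);  // r4 = byte offset of element r3 (a smi) in a pointer-sized array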
1174 void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
1175
1176 void SmiToShortArrayOffset(Register dst, Register src) {
1177 #if V8_TARGET_ARCH_PPC64
1178 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
1179 ShiftRightArithImm(dst, src, kSmiShift - 1);
1180 #else
1181 STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
1182 if (!dst.is(src)) {
1183 mr(dst, src);
1184 }
1185 #endif
1186 }
1187
1188 void SmiToIntArrayOffset(Register dst, Register src) {
1189 #if V8_TARGET_ARCH_PPC64
1190 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
1191 ShiftRightArithImm(dst, src, kSmiShift - 2);
1192 #else
1193 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
1194 ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
1195 #endif
1196 }
1197
1198 #define SmiToFloatArrayOffset SmiToIntArrayOffset
1199
1200 void SmiToDoubleArrayOffset(Register dst, Register src) {
1201 #if V8_TARGET_ARCH_PPC64
1202 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
1203 ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
1204 #else
1205 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
1206 ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
1207 #endif
1208 }
1209
1210 void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
1211 if (kSmiShift < elementSizeLog2) {
1212 ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
1213 } else if (kSmiShift > elementSizeLog2) {
1214 ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
1215 } else if (!dst.is(src)) {
1216 mr(dst, src);
1217 }
1218 }
1219
1220 void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
1221 bool isSmi) {
1222 if (isSmi) {
1223 SmiToArrayOffset(dst, src, elementSizeLog2);
1224 } else {
1225 ShiftLeftImm(dst, src, Operand(elementSizeLog2));
1226 }
1244 } 1227 }
1245 1228
1246 // Untag the source value into destination and jump if source is a smi. 1229 // Untag the source value into destination and jump if source is a smi.
1247 // Source and destination can be the same register. 1230 // Source and destination can be the same register.
1248 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case); 1231 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1249 1232
1250 // Untag the source value into destination and jump if source is not a smi. 1233 // Untag the source value into destination and jump if source is not a smi.
1251 // Source and destination can be the same register. 1234 // Source and destination can be the same register.
1252 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); 1235 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1253 1236
1254 // Test if the register contains a smi (Z == 0 (eq) if true). 1237 inline void TestIfSmi(Register value, Register scratch) {
1255 inline void SmiTst(Register value) { 1238 TestBit(value, 0, scratch); // tst(value, Operand(kSmiTagMask));
1256 tst(value, Operand(kSmiTagMask));
1257 } 1239 }
1258 inline void NonNegativeSmiTst(Register value) { 1240
1259 tst(value, Operand(kSmiTagMask | kSmiSignMask)); 1241 inline void TestIfPositiveSmi(Register value, Register scratch) {
1242 STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
1243 (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
1244 #if V8_TARGET_ARCH_PPC64
1245 rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC);
1246 #else
1247 rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC);
1248 #endif
1260 } 1249 }
1261 // Jump if the register contains a smi. 1250
1251 // Jump if the register contains a smi.
1262 inline void JumpIfSmi(Register value, Label* smi_label) { 1252 inline void JumpIfSmi(Register value, Label* smi_label) {
1263 tst(value, Operand(kSmiTagMask)); 1253 TestIfSmi(value, r0);
1264 b(eq, smi_label); 1254 beq(smi_label, cr0); // branch if SMI
1265 } 1255 }
1266 // Jump if the register contains a non-smi. 1256 // Jump if the register contains a non-smi.
1267 inline void JumpIfNotSmi(Register value, Label* not_smi_label) { 1257 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1268 tst(value, Operand(kSmiTagMask)); 1258 TestIfSmi(value, r0);
1269 b(ne, not_smi_label); 1259 bne(not_smi_label, cr0);
1270 } 1260 }
1271 // Jump if either of the registers contain a non-smi. 1261 // Jump if either of the registers contain a non-smi.
1272 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); 1262 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1273 // Jump if either of the registers contain a smi. 1263 // Jump if either of the registers contain a smi.
1274 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi); 1264 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1275 1265
1276 // Abort execution if argument is a smi, enabled via --debug-code. 1266 // Abort execution if argument is a smi, enabled via --debug-code.
1277 void AssertNotSmi(Register object); 1267 void AssertNotSmi(Register object);
1278 void AssertSmi(Register object); 1268 void AssertSmi(Register object);
1279 1269
1270
1271 #if V8_TARGET_ARCH_PPC64
1272 inline void TestIfInt32(Register value, Register scratch1, Register scratch2,
1273 CRegister cr = cr7) {
1274 // High bits must be identical to fit into a 32-bit integer
1275 srawi(scratch1, value, 31);
1276 sradi(scratch2, value, 32);
1277 cmp(scratch1, scratch2, cr);
1278 }
1279 #else
1280 inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
1281 CRegister cr = cr7) {
1282 // High bits must be identical to fit into a 32-bit integer
1283 srawi(scratch, lo_word, 31);
1284 cmp(scratch, hi_word, cr);
1285 }
1286 #endif
1287
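A usage sketch for the int32 check (registers and the label are assumptions): the helper compares the value's sign extension from bit 31 with its high word, so equality in cr7 means the value is representable as an int32:

    Label not_int32;
    __ TestIfInt32(r3, r4, r5);  // PPC64 form: value, scratch1, scratch2, cr7 by default
    __ bne(&not_int32);          // taken when the value does not fit in 32 bits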
1280 // Abort execution if argument is not a string, enabled via --debug-code. 1288 // Abort execution if argument is not a string, enabled via --debug-code.
1281 void AssertString(Register object); 1289 void AssertString(Register object);
1282 1290
1283 // Abort execution if argument is not a name, enabled via --debug-code. 1291 // Abort execution if argument is not a name, enabled via --debug-code.
1284 void AssertName(Register object); 1292 void AssertName(Register object);
1285 1293
1286 // Abort execution if argument is not undefined or an AllocationSite, enabled 1294 // Abort execution if argument is not undefined or an AllocationSite, enabled
1287 // via --debug-code. 1295 // via --debug-code.
1288 void AssertUndefinedOrAllocationSite(Register object, Register scratch); 1296 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1289 1297
1290 // Abort execution if reg is not the root value with the given index, 1298 // Abort execution if reg is not the root value with the given index,
1291 // enabled via --debug-code. 1299 // enabled via --debug-code.
1292 void AssertIsRoot(Register reg, Heap::RootListIndex index); 1300 void AssertIsRoot(Register reg, Heap::RootListIndex index);
1293 1301
1294 // --------------------------------------------------------------------------- 1302 // ---------------------------------------------------------------------------
1295 // HeapNumber utilities 1303 // HeapNumber utilities
1296 1304
1297 void JumpIfNotHeapNumber(Register object, 1305 void JumpIfNotHeapNumber(Register object, Register heap_number_map,
1298 Register heap_number_map, 1306 Register scratch, Label* on_not_heap_number);
1299 Register scratch,
1300 Label* on_not_heap_number);
1301 1307
1302 // --------------------------------------------------------------------------- 1308 // ---------------------------------------------------------------------------
1303 // String utilities 1309 // String utilities
1304 1310
1305 // Generate code to do a lookup in the number string cache. If the number in 1311 // Generate code to do a lookup in the number string cache. If the number in
1306 // the register object is found in the cache the generated code falls through 1312 // the register object is found in the cache the generated code falls through
1307 // with the result in the result register. The object and the result register 1313 // with the result in the result register. The object and the result register
1308 // can be the same. If the number is not found in the cache the code jumps to 1314 // can be the same. If the number is not found in the cache the code jumps to
1309 // the label not_found with only the content of register object unchanged. 1315 // the label not_found with only the content of register object unchanged.
1310 void LookupNumberStringCache(Register object, 1316 void LookupNumberStringCache(Register object, Register result,
1311 Register result, 1317 Register scratch1, Register scratch2,
1312 Register scratch1, 1318 Register scratch3, Label* not_found);
1313 Register scratch2,
1314 Register scratch3,
1315 Label* not_found);
1316 1319
1317 // Checks if both objects are sequential one-byte strings and jumps to label 1320 // Checks if both objects are sequential one-byte strings and jumps to label
1318 // if either is not. Assumes that neither object is a smi. 1321 // if either is not. Assumes that neither object is a smi.
1319 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1, 1322 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
1320 Register object2, 1323 Register object2,
1321 Register scratch1, 1324 Register scratch1,
1322 Register scratch2, 1325 Register scratch2,
1323 Label* failure); 1326 Label* failure);
1324 1327
1325 // Checks if both objects are sequential one-byte strings and jumps to label 1328 // Checks if both objects are sequential one-byte strings and jumps to label
1326 // if either is not. 1329 // if either is not.
1327 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second, 1330 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
1328 Register scratch1, 1331 Register scratch1,
1329 Register scratch2, 1332 Register scratch2,
1330 Label* not_flat_one_byte_strings); 1333 Label* not_flat_one_byte_strings);
1331 1334
1332 // Checks if both instance types are sequential one-byte strings and jumps to 1335 // Checks if both instance types are sequential one-byte strings and jumps to
1333 // label if either is not. 1336 // label if either is not.
1334 void JumpIfBothInstanceTypesAreNotSequentialOneByte( 1337 void JumpIfBothInstanceTypesAreNotSequentialOneByte(
1335 Register first_object_instance_type, Register second_object_instance_type, 1338 Register first_object_instance_type, Register second_object_instance_type,
1336 Register scratch1, Register scratch2, Label* failure); 1339 Register scratch1, Register scratch2, Label* failure);
1337 1340
1338 // Check if instance type is sequential one-byte string and jump to label if 1341 // Check if instance type is sequential one-byte string and jump to label if
1339 // it is not. 1342 // it is not.
1340 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, 1343 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
1341 Label* failure); 1344 Label* failure);
1342 1345
1343 void JumpIfNotUniqueName(Register reg, Label* not_unique_name); 1346 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
1344 1347
1345 void EmitSeqStringSetCharCheck(Register string, 1348 void EmitSeqStringSetCharCheck(Register string, Register index,
1346 Register index, 1349 Register value, uint32_t encoding_mask);
1347 Register value,
1348 uint32_t encoding_mask);
1349 1350
1350 // --------------------------------------------------------------------------- 1351 // ---------------------------------------------------------------------------
1351 // Patching helpers. 1352 // Patching helpers.
1352 1353
1353 // Get the location of a relocated constant (its address in the constant pool) 1354 // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
1354 // from its load site. 1355 void GetRelocatedValue(Register location, Register result, Register scratch);
1355 void GetRelocatedValueLocation(Register ldr_location, Register result, 1356 void SetRelocatedValue(Register location, Register scratch,
1356 Register scratch); 1357 Register new_value);
1357
1358 1358
1359 void ClampUint8(Register output_reg, Register input_reg); 1359 void ClampUint8(Register output_reg, Register input_reg);
1360 1360
1361 void ClampDoubleToUint8(Register result_reg, 1361 // Saturate a value into 8-bit unsigned integer
1362 DwVfpRegister input_reg, 1362 // if input_value < 0, output_value is 0
1363 LowDwVfpRegister double_scratch); 1363 // if input_value > 255, output_value is 255
1364 // otherwise output_value is the (int)input_value (round to nearest)
1365 void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
1366 DoubleRegister temp_double_reg);
1364 1367
1365 1368
1366 void LoadInstanceDescriptors(Register map, Register descriptors); 1369 void LoadInstanceDescriptors(Register map, Register descriptors);
1367 void EnumLength(Register dst, Register map); 1370 void EnumLength(Register dst, Register map);
1368 void NumberOfOwnDescriptors(Register dst, Register map); 1371 void NumberOfOwnDescriptors(Register dst, Register map);
1369 1372
1370 template<typename Field> 1373 template <typename Field>
1371 void DecodeField(Register dst, Register src) { 1374 void DecodeField(Register dst, Register src) {
1372 Ubfx(dst, src, Field::kShift, Field::kSize); 1375 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
1373 } 1376 }
1374 1377
1375 template<typename Field> 1378 template <typename Field>
1376 void DecodeField(Register reg) { 1379 void DecodeField(Register reg) {
1377 DecodeField<Field>(reg, reg); 1380 DecodeField<Field>(reg, reg);
1378 } 1381 }
1379 1382
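A usage sketch for the field decoder (the concrete BitField is an assumption for illustration; any BitField-style type providing kShift and kSize works):

    __ DecodeField<Map::ElementsKindBits>(r4, r3);  // leaves the elements kind in the low bits of r4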
1380 template<typename Field> 1383 template <typename Field>
1381 void DecodeFieldToSmi(Register dst, Register src) { 1384 void DecodeFieldToSmi(Register dst, Register src) {
1382 static const int shift = Field::kShift; 1385 #if V8_TARGET_ARCH_PPC64
1383 static const int mask = Field::kMask >> shift << kSmiTagSize; 1386 DecodeField<Field>(dst, src);
1384 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); 1387 SmiTag(dst);
1385 STATIC_ASSERT(kSmiTag == 0); 1388 #else
1386 if (shift < kSmiTagSize) { 1389 // 32-bit can do this in one instruction:
1387 mov(dst, Operand(src, LSL, kSmiTagSize - shift)); 1390 int start = Field::kSize + kSmiShift - 1;
1388 and_(dst, dst, Operand(mask)); 1391 int end = kSmiShift;
1389 } else if (shift > kSmiTagSize) { 1392 int rotate = kSmiShift - Field::kShift;
1390 mov(dst, Operand(src, LSR, shift - kSmiTagSize)); 1393 if (rotate < 0) {
1391 and_(dst, dst, Operand(mask)); 1394 rotate += kBitsPerPointer;
1392 } else {
1393 and_(dst, src, Operand(mask));
1394 } 1395 }
1396 rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
1397 kBitsPerPointer - end - 1);
1398 #endif
1395 } 1399 }
1396 1400
1397 template<typename Field> 1401 template <typename Field>
1398 void DecodeFieldToSmi(Register reg) { 1402 void DecodeFieldToSmi(Register reg) {
1399 DecodeField<Field>(reg, reg); 1403 DecodeFieldToSmi<Field>(reg, reg);
1400 } 1404 }
1401 1405
1402 // Activation support. 1406 // Activation support.
1403 void EnterFrame(StackFrame::Type type, bool load_constant_pool = false); 1407 void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
1404 // Returns the pc offset at which the frame ends. 1408 // Returns the pc offset at which the frame ends.
1405 int LeaveFrame(StackFrame::Type type); 1409 int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
1406 1410
1407 // Expects object in r0 and returns map with validated enum cache 1411 // Expects object in r0 and returns map with validated enum cache
1408 // in r0. Assumes that any other register can be used as a scratch. 1412 // in r0. Assumes that any other register can be used as a scratch.
1409 void CheckEnumCache(Register null_value, Label* call_runtime); 1413 void CheckEnumCache(Register null_value, Label* call_runtime);
1410 1414
1411 // AllocationMemento support. Arrays may have an associated 1415 // AllocationMemento support. Arrays may have an associated
1412 // AllocationMemento object that can be checked for in order to pretransition 1416 // AllocationMemento object that can be checked for in order to pretransition
1413 // to another type. 1417 // to another type.
1414 // On entry, receiver_reg should point to the array object. 1418 // On entry, receiver_reg should point to the array object.
1415 // scratch_reg gets clobbered. 1419 // scratch_reg gets clobbered.
1416 // If allocation info is present, condition flags are set to eq. 1420 // If allocation info is present, condition flags are set to eq.
1417 void TestJSArrayForAllocationMemento(Register receiver_reg, 1421 void TestJSArrayForAllocationMemento(Register receiver_reg,
1418 Register scratch_reg, 1422 Register scratch_reg,
1419 Label* no_memento_found); 1423 Label* no_memento_found);
1420 1424
1421 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, 1425 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1422 Register scratch_reg, 1426 Register scratch_reg,
1423 Label* memento_found) { 1427 Label* memento_found) {
1424 Label no_memento_found; 1428 Label no_memento_found;
1425 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, 1429 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1426 &no_memento_found); 1430 &no_memento_found);
1427 b(eq, memento_found); 1431 beq(memento_found);
1428 bind(&no_memento_found); 1432 bind(&no_memento_found);
1429 } 1433 }
1430 1434
1431 // Jumps to found label if a prototype map has dictionary elements. 1435 // Jumps to found label if a prototype map has dictionary elements.
1432 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, 1436 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1433 Register scratch1, Label* found); 1437 Register scratch1, Label* found);
1434 1438
1435 private: 1439 private:
1436 void CallCFunctionHelper(Register function, 1440 static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1437 int num_reg_arguments, 1441
1442 void CallCFunctionHelper(Register function, int num_reg_arguments,
1438 int num_double_arguments); 1443 int num_double_arguments);
1439 1444
1440 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); 1445 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
1446 CRegister cr = cr7);
1441 1447
1442 // Helper functions for generating invokes. 1448 // Helper functions for generating invokes.
1443 void InvokePrologue(const ParameterCount& expected, 1449 void InvokePrologue(const ParameterCount& expected,
1444 const ParameterCount& actual, 1450 const ParameterCount& actual, Handle<Code> code_constant,
1445 Handle<Code> code_constant, 1451 Register code_reg, Label* done,
1446 Register code_reg, 1452 bool* definitely_mismatches, InvokeFlag flag,
1447 Label* done,
1448 bool* definitely_mismatches,
1449 InvokeFlag flag,
1450 const CallWrapper& call_wrapper); 1453 const CallWrapper& call_wrapper);
1451 1454
1452 void InitializeNewString(Register string, 1455 void InitializeNewString(Register string, Register length,
1453 Register length, 1456 Heap::RootListIndex map_index, Register scratch1,
1454 Heap::RootListIndex map_index,
1455 Register scratch1,
1456 Register scratch2); 1457 Register scratch2);
1457 1458
1458 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. 1459 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1459 void InNewSpace(Register object, 1460 void InNewSpace(Register object, Register scratch,
1460 Register scratch,
1461 Condition cond, // eq for new space, ne otherwise. 1461 Condition cond, // eq for new space, ne otherwise.
1462 Label* branch); 1462 Label* branch);
1463 1463
1464 // Helper for finding the mark bits for an address. Afterwards, the 1464 // Helper for finding the mark bits for an address. Afterwards, the
1465 // bitmap register points at the word with the mark bits and the mask 1465 // bitmap register points at the word with the mark bits and the mask
1466 // the position of the first bit. Leaves addr_reg unchanged. 1466 // the position of the first bit. Leaves addr_reg unchanged.
1467 inline void GetMarkBits(Register addr_reg, 1467 inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
1468 Register bitmap_reg,
1469 Register mask_reg); 1468 Register mask_reg);
1470 1469
1471 // Helper for throwing exceptions. Compute a handler address and jump to 1470 // Helper for throwing exceptions. Compute a handler address and jump to
1472 // it. See the implementation for register usage. 1471 // it. See the implementation for register usage.
1473 void JumpToHandlerEntry(); 1472 void JumpToHandlerEntry();
1474 1473
1475 // Compute memory operands for safepoint stack slots. 1474 // Compute memory operands for safepoint stack slots.
1476 static int SafepointRegisterStackIndex(int reg_code); 1475 static int SafepointRegisterStackIndex(int reg_code);
1477 MemOperand SafepointRegisterSlot(Register reg); 1476 MemOperand SafepointRegisterSlot(Register reg);
1478 MemOperand SafepointRegistersAndDoublesSlot(Register reg); 1477 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1479 1478
1480 // Loads the constant pool pointer (pp) register. 1479 #if V8_OOL_CONSTANT_POOL
1481 void LoadConstantPoolPointerRegister(); 1480 // Loads the constant pool pointer (kConstantPoolRegister).
1481 enum CodeObjectAccessMethod { CAN_USE_IP, CONSTRUCT_INTERNAL_REFERENCE };
1482 void LoadConstantPoolPointerRegister(CodeObjectAccessMethod access_method,
1483 int ip_code_entry_delta = 0);
1484 #endif
1482 1485
1483 bool generating_stub_; 1486 bool generating_stub_;
1484 bool has_frame_; 1487 bool has_frame_;
1485 // This handle will be patched with the code object on installation. 1488 // This handle will be patched with the code object on installation.
1486 Handle<Object> code_object_; 1489 Handle<Object> code_object_;
1487 1490
1488 // Needs access to SafepointRegisterStackIndex for compiled frame 1491 // Needs access to SafepointRegisterStackIndex for compiled frame
1489 // traversal. 1492 // traversal.
1490 friend class StandardFrame; 1493 friend class StandardFrame;
1491 }; 1494 };
1492 1495
1493 1496
1494 // The code patcher is used to patch (typically) small parts of code e.g. for 1497 // The code patcher is used to patch (typically) small parts of code e.g. for
1495 // debugging and other types of instrumentation. When using the code patcher 1498 // debugging and other types of instrumentation. When using the code patcher
1496 // the exact number of bytes specified must be emitted. It is not legal to emit 1499 // the exact number of bytes specified must be emitted. It is not legal to emit
1497 // relocation information. If any of these constraints are violated it causes 1500 // relocation information. If any of these constraints are violated it causes
1498 // an assertion to fail. 1501 // an assertion to fail.
1499 class CodePatcher { 1502 class CodePatcher {
1500 public: 1503 public:
1501 enum FlushICache { 1504 enum FlushICache { FLUSH, DONT_FLUSH };
1502 FLUSH,
1503 DONT_FLUSH
1504 };
1505 1505
1506 CodePatcher(byte* address, 1506 CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
1507 int instructions,
1508 FlushICache flush_cache = FLUSH);
1509 virtual ~CodePatcher(); 1507 virtual ~CodePatcher();
1510 1508
1511 // Macro assembler to emit code. 1509 // Macro assembler to emit code.
1512 MacroAssembler* masm() { return &masm_; } 1510 MacroAssembler* masm() { return &masm_; }
1513 1511
1514 // Emit an instruction directly. 1512 // Emit an instruction directly.
1515 void Emit(Instr instr); 1513 void Emit(Instr instr);
1516 1514
1517 // Emit an address directly.
1518 void Emit(Address addr);
1519
1520 // Emit the condition part of an instruction leaving the rest of the current 1515 // Emit the condition part of an instruction leaving the rest of the current
1521 // instruction unchanged. 1516 // instruction unchanged.
1522 void EmitCondition(Condition cond); 1517 void EmitCondition(Condition cond);
1523 1518
1524 private: 1519 private:
1525 byte* address_; // The address of the code being patched. 1520 byte* address_; // The address of the code being patched.
1526 int size_; // Number of bytes of the expected patch size. 1521 int size_; // Number of bytes of the expected patch size.
1527 MacroAssembler masm_; // Macro assembler used to generate the code. 1522 MacroAssembler masm_; // Macro assembler used to generate the code.
1528 FlushICache flush_cache_; // Whether to flush the I cache after patching. 1523 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1529 }; 1524 };
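Reviewer note (not part of the patch): a minimal usage sketch of the patching contract described above. Here pc and kNopInstr are hypothetical (a patchable code address and a pre-encoded nop instruction word); the destructor DCHECKs that exactly the declared number of instructions was emitted and, with the default FLUSH, flushes the instruction cache.

{
  CodePatcher patcher(pc, 2);   // exactly two instructions must be emitted
  patcher.Emit(kNopInstr);      // emit a raw, pre-encoded instruction word
  patcher.masm()->nop();        // or emit through the embedded macro assembler
}                               // ~CodePatcher: size check + i-cache flush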
1530 1525
1531 1526
1527 #if V8_OOL_CONSTANT_POOL
1532 class FrameAndConstantPoolScope { 1528 class FrameAndConstantPoolScope {
1533 public: 1529 public:
1534 FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type) 1530 FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
1535 : masm_(masm), 1531 : masm_(masm),
1536 type_(type), 1532 type_(type),
1537 old_has_frame_(masm->has_frame()), 1533 old_has_frame_(masm->has_frame()),
1538 old_constant_pool_available_(masm->is_constant_pool_available()) { 1534 old_constant_pool_available_(masm->is_constant_pool_available()) {
1539 // We only want to enable constant pool access for non-manual frame scopes 1535 // We only want to enable constant pool access for non-manual frame scopes
1540 // to ensure the constant pool pointer is valid throughout the scope. 1536 // to ensure the constant pool pointer is valid throughout the scope.
1541 DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); 1537 DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
1542 masm->set_has_frame(true); 1538 masm->set_has_frame(true);
1543 masm->set_constant_pool_available(true); 1539 masm->set_constant_pool_available(true);
1544 masm->EnterFrame(type, !old_constant_pool_available_); 1540 masm->EnterFrame(type, !old_constant_pool_available_);
1545 } 1541 }
1546 1542
1547 ~FrameAndConstantPoolScope() { 1543 ~FrameAndConstantPoolScope() {
1548 masm_->LeaveFrame(type_); 1544 masm_->LeaveFrame(type_);
1549 masm_->set_has_frame(old_has_frame_); 1545 masm_->set_has_frame(old_has_frame_);
1550 masm_->set_constant_pool_available(old_constant_pool_available_); 1546 masm_->set_constant_pool_available(old_constant_pool_available_);
1551 } 1547 }
1552 1548
1553 // Normally we generate the leave-frame code when this object goes 1549 // Normally we generate the leave-frame code when this object goes
1554 // out of scope. Sometimes we may need to generate the code somewhere else 1550 // out of scope. Sometimes we may need to generate the code somewhere else
1555 // in addition. Calling this will achieve that, but the object stays in 1551 // in addition. Calling this will achieve that, but the object stays in
1556 // scope, the MacroAssembler is still marked as being in a frame scope, and 1552 // scope, the MacroAssembler is still marked as being in a frame scope, and
1557 // the code will be generated again when it goes out of scope. 1553 // the code will be generated again when it goes out of scope.
1558 void GenerateLeaveFrame() { 1554 void GenerateLeaveFrame(int stack_adjustment = 0) {
1559 DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); 1555 DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
1560 masm_->LeaveFrame(type_); 1556 masm_->LeaveFrame(type_, stack_adjustment);
1561 } 1557 }
1562 1558
1563 private: 1559 private:
1564 MacroAssembler* masm_; 1560 MacroAssembler* masm_;
1565 StackFrame::Type type_; 1561 StackFrame::Type type_;
1566 bool old_has_frame_; 1562 bool old_has_frame_;
1567 bool old_constant_pool_available_; 1563 bool old_constant_pool_available_;
1568 1564
1569 DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope); 1565 DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
1570 }; 1566 };
1567 #else
1568 #define FrameAndConstantPoolScope FrameScope
1569 #endif
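Reviewer note (not part of the patch): a sketch of how this scope is typically used, with masm a hypothetical MacroAssembler*. On V8_OOL_CONSTANT_POOL builds the constructor emits EnterFrame and marks the constant pool available; the destructor emits the matching LeaveFrame. When the flag is not defined, the #define above makes the same code compile against plain FrameScope.

{
  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
  // ... emit code that needs a frame (and constant pool access) here ...
}  // destructor emits LeaveFrame(StackFrame::INTERNAL)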
1571 1570
1572 1571
1572 #if V8_OOL_CONSTANT_POOL
1573 // Class for scoping the unavailability of constant pool access. 1573 // Class for scoping the unavailability of constant pool access.
1574 class ConstantPoolUnavailableScope { 1574 class ConstantPoolUnavailableScope {
1575 public: 1575 public:
1576 explicit ConstantPoolUnavailableScope(MacroAssembler* masm) 1576 explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
1577 : masm_(masm), 1577 : masm_(masm),
1578 old_constant_pool_available_(masm->is_constant_pool_available()) { 1578 old_constant_pool_available_(masm->is_constant_pool_available()) {
1579 if (FLAG_enable_ool_constant_pool) { 1579 if (FLAG_enable_ool_constant_pool) {
1580 masm_->set_constant_pool_available(false); 1580 masm_->set_constant_pool_available(false);
1581 } 1581 }
1582 } 1582 }
1583 ~ConstantPoolUnavailableScope() { 1583 ~ConstantPoolUnavailableScope() {
1584 if (FLAG_enable_ool_constant_pool) { 1584 if (FLAG_enable_ool_constant_pool) {
1585 masm_->set_constant_pool_available(old_constant_pool_available_); 1585 masm_->set_constant_pool_available(old_constant_pool_available_);
1586 } 1586 }
1587 } 1587 }
1588 1588
1589 private: 1589 private:
1590 MacroAssembler* masm_; 1590 MacroAssembler* masm_;
1591 int old_constant_pool_available_; 1591 int old_constant_pool_available_;
1592 1592
1593 DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope); 1593 DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
1594 }; 1594 };
1595 #endif
1595 1596
1596 1597
1597 // ----------------------------------------------------------------------------- 1598 // -----------------------------------------------------------------------------
1598 // Static helper functions. 1599 // Static helper functions.
1599 1600
1600 inline MemOperand ContextOperand(Register context, int index) { 1601 inline MemOperand ContextOperand(Register context, int index) {
1601 return MemOperand(context, Context::SlotOffset(index)); 1602 return MemOperand(context, Context::SlotOffset(index));
1602 } 1603 }
1603 1604
1604 1605
1605 inline MemOperand GlobalObjectOperand() { 1606 inline MemOperand GlobalObjectOperand() {
1606 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); 1607 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1607 } 1608 }
1608 1609
1609 1610
1610 #ifdef GENERATED_CODE_COVERAGE 1611 #ifdef GENERATED_CODE_COVERAGE
1611 #define CODE_COVERAGE_STRINGIFY(x) #x 1612 #define CODE_COVERAGE_STRINGIFY(x) #x
1612 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) 1613 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1613 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) 1614 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1614 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> 1615 #define ACCESS_MASM(masm) \
1616 masm->stop(__FILE_LINE__); \
1617 masm->
1615 #else 1618 #else
1616 #define ACCESS_MASM(masm) masm-> 1619 #define ACCESS_MASM(masm) masm->
1617 #endif 1620 #endif
1621 }
1622 } // namespace v8::internal
1618 1623
1619 1624 #endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
1620 } } // namespace v8::internal
1621
1622 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_