OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // |
| 3 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 4 // |
2 // Use of this source code is governed by a BSD-style license that can be | 5 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 6 // found in the LICENSE file. |
4 | 7 |
| 8 #include <assert.h> // For assert |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 9 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 10 |
7 #include "src/v8.h" | 11 #include "src/v8.h" |
8 | 12 |
9 #if V8_TARGET_ARCH_ARM | 13 #if V8_TARGET_ARCH_PPC |
10 | 14 |
11 #include "src/bootstrapper.h" | 15 #include "src/bootstrapper.h" |
12 #include "src/codegen.h" | 16 #include "src/codegen.h" |
13 #include "src/cpu-profiler.h" | 17 #include "src/cpu-profiler.h" |
14 #include "src/debug.h" | 18 #include "src/debug.h" |
15 #include "src/isolate-inl.h" | 19 #include "src/isolate-inl.h" |
16 #include "src/runtime.h" | 20 #include "src/runtime.h" |
17 | 21 |
18 namespace v8 { | 22 namespace v8 { |
19 namespace internal { | 23 namespace internal { |
20 | 24 |
21 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) | 25 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) |
22 : Assembler(arg_isolate, buffer, size), | 26 : Assembler(arg_isolate, buffer, size), |
23 generating_stub_(false), | 27 generating_stub_(false), |
24 has_frame_(false) { | 28 has_frame_(false) { |
25 if (isolate() != NULL) { | 29 if (isolate() != NULL) { |
26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 30 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
27 isolate()); | 31 isolate()); |
28 } | 32 } |
29 } | 33 } |
30 | 34 |
31 | 35 |
32 void MacroAssembler::Jump(Register target, Condition cond) { | 36 void MacroAssembler::Jump(Register target, Condition cond) { |
33 bx(target, cond); | 37 DCHECK(cond == al); |
| 38 mtctr(target); |
| 39 bctr(); |
34 } | 40 } |
35 | 41 |
36 | 42 |
37 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, | 43 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, |
38 Condition cond) { | 44 Condition cond, CRegister cr) { |
39 DCHECK(RelocInfo::IsCodeTarget(rmode)); | 45 Label skip; |
40 mov(pc, Operand(target, rmode), LeaveCC, cond); | 46 |
| 47 if (cond != al) b(NegateCondition(cond), &skip, cr); |
| 48 |
| 49 DCHECK(rmode == RelocInfo::CODE_TARGET || |
| 50 rmode == RelocInfo::RUNTIME_ENTRY); |
| 51 |
| 52 mov(r0, Operand(target, rmode)); |
| 53 mtctr(r0); |
| 54 bctr(); |
| 55 |
| 56 bind(&skip); |
| 57 // mov(pc, Operand(target, rmode), LeaveCC, cond); |
41 } | 58 } |
42 | 59 |
43 | 60 |
44 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, | 61 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, |
45 Condition cond) { | 62 Condition cond, CRegister cr) { |
46 DCHECK(!RelocInfo::IsCodeTarget(rmode)); | 63 DCHECK(!RelocInfo::IsCodeTarget(rmode)); |
47 Jump(reinterpret_cast<intptr_t>(target), rmode, cond); | 64 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr); |
48 } | 65 } |
49 | 66 |
50 | 67 |
51 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, | 68 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, |
52 Condition cond) { | 69 Condition cond) { |
53 DCHECK(RelocInfo::IsCodeTarget(rmode)); | 70 DCHECK(RelocInfo::IsCodeTarget(rmode)); |
54 // 'code' is always generated ARM code, never THUMB code | 71 // 'code' is always generated ppc code, never THUMB code |
55 AllowDeferredHandleDereference embedding_raw_address; | 72 AllowDeferredHandleDereference embedding_raw_address; |
56 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); | 73 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); |
57 } | 74 } |
58 | 75 |
59 | 76 |
60 int MacroAssembler::CallSize(Register target, Condition cond) { | 77 int MacroAssembler::CallSize(Register target, Condition cond) { |
61 return kInstrSize; | 78 return 2 * kInstrSize; |
62 } | 79 } |
63 | 80 |
64 | 81 |
65 void MacroAssembler::Call(Register target, Condition cond) { | 82 void MacroAssembler::Call(Register target, Condition cond) { |
66 // Block constant pool for the call instruction sequence. | 83 BlockTrampolinePoolScope block_trampoline_pool(this); |
67 BlockConstPoolScope block_const_pool(this); | |
68 Label start; | 84 Label start; |
69 bind(&start); | 85 bind(&start); |
70 blx(target, cond); | 86 DCHECK(cond == al); // in preparation for removal of condition |
| 87 |
| 88 // Statement positions are expected to be recorded when the target |
| 89 // address is loaded. |
| 90 positions_recorder()->WriteRecordedPositions(); |
| 91 |
| 92 // branch via link register and set LK bit for return point |
| 93 mtlr(target); |
| 94 bclr(BA, SetLK); |
| 95 |
71 DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); | 96 DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); |
72 } | 97 } |
73 | 98 |
74 | 99 |
75 int MacroAssembler::CallSize( | 100 int MacroAssembler::CallSize( |
76 Address target, RelocInfo::Mode rmode, Condition cond) { | 101 Address target, RelocInfo::Mode rmode, Condition cond) { |
77 Instr mov_instr = cond | MOV | LeaveCC; | |
78 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); | 102 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); |
79 return kInstrSize + | 103 return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize; |
80 mov_operand.instructions_required(this, mov_instr) * kInstrSize; | |
81 } | 104 } |
82 | 105 |
83 | 106 |
84 int MacroAssembler::CallStubSize( | 107 int MacroAssembler::CallSizeNotPredictableCodeSize( |
85 CodeStub* stub, TypeFeedbackId ast_id, Condition cond) { | 108 Address target, RelocInfo::Mode rmode, Condition cond) { |
86 return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); | 109 return (2 + kMovInstructionsNoConstantPool) * kInstrSize; |
87 } | |
88 | |
89 | |
90 int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate, | |
91 Address target, | |
92 RelocInfo::Mode rmode, | |
93 Condition cond) { | |
94 Instr mov_instr = cond | MOV | LeaveCC; | |
95 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); | |
96 return kInstrSize + | |
97 mov_operand.instructions_required(NULL, mov_instr) * kInstrSize; | |
98 } | 110 } |
99 | 111 |
100 | 112 |
101 void MacroAssembler::Call(Address target, | 113 void MacroAssembler::Call(Address target, |
102 RelocInfo::Mode rmode, | 114 RelocInfo::Mode rmode, |
103 Condition cond, | 115 Condition cond) { |
104 TargetAddressStorageMode mode) { | 116 BlockTrampolinePoolScope block_trampoline_pool(this); |
105 // Block constant pool for the call instruction sequence. | 117 DCHECK(cond == al); |
106 BlockConstPoolScope block_const_pool(this); | |
107 Label start; | |
108 bind(&start); | |
109 | |
110 bool old_predictable_code_size = predictable_code_size(); | |
111 if (mode == NEVER_INLINE_TARGET_ADDRESS) { | |
112 set_predictable_code_size(true); | |
113 } | |
114 | 118 |
115 #ifdef DEBUG | 119 #ifdef DEBUG |
116 // Check the expected size before generating code to ensure we assume the same | 120 // Check the expected size before generating code to ensure we assume the same |
117 // constant pool availability (e.g., whether constant pool is full or not). | 121 // constant pool availability (e.g., whether constant pool is full or not). |
118 int expected_size = CallSize(target, rmode, cond); | 122 int expected_size = CallSize(target, rmode, cond); |
| 123 Label start; |
| 124 bind(&start); |
119 #endif | 125 #endif |
120 | 126 |
121 // Call sequence on V7 or later may be : | |
122 // movw ip, #... @ call address low 16 | |
123 // movt ip, #... @ call address high 16 | |
124 // blx ip | |
125 // @ return address | |
126 // Or for pre-V7 or values that may be back-patched | |
127 // to avoid ICache flushes: | |
128 // ldr ip, [pc, #...] @ call address | |
129 // blx ip | |
130 // @ return address | |
131 | |
132 // Statement positions are expected to be recorded when the target | 127 // Statement positions are expected to be recorded when the target |
133 // address is loaded. The mov method will automatically record | 128 // address is loaded. |
134 // positions when pc is the target, since this is not the case here | |
135 // we have to do it explicitly. | |
136 positions_recorder()->WriteRecordedPositions(); | 129 positions_recorder()->WriteRecordedPositions(); |
137 | 130 |
138 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode)); | 131 // This can likely be optimized to make use of bc() with a 24-bit relative offset |
139 blx(ip, cond); | 132 // |
| 133 // RecordRelocInfo(x.rmode_, x.imm_); |
| 134 // bc( BA, .... offset, LKset); |
| 135 // |
| 136 |
| 137 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode)); |
| 138 mtlr(ip); |
| 139 bclr(BA, SetLK); |
140 | 140 |
141 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); | 141 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); |
142 if (mode == NEVER_INLINE_TARGET_ADDRESS) { | |
143 set_predictable_code_size(old_predictable_code_size); | |
144 } | |
145 } | 142 } |
146 | 143 |
147 | 144 |
148 int MacroAssembler::CallSize(Handle<Code> code, | 145 int MacroAssembler::CallSize(Handle<Code> code, |
149 RelocInfo::Mode rmode, | 146 RelocInfo::Mode rmode, |
150 TypeFeedbackId ast_id, | 147 TypeFeedbackId ast_id, |
151 Condition cond) { | 148 Condition cond) { |
152 AllowDeferredHandleDereference using_raw_address; | 149 AllowDeferredHandleDereference using_raw_address; |
153 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); | 150 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); |
154 } | 151 } |
155 | 152 |
156 | 153 |
157 void MacroAssembler::Call(Handle<Code> code, | 154 void MacroAssembler::Call(Handle<Code> code, |
158 RelocInfo::Mode rmode, | 155 RelocInfo::Mode rmode, |
159 TypeFeedbackId ast_id, | 156 TypeFeedbackId ast_id, |
160 Condition cond, | 157 Condition cond) { |
161 TargetAddressStorageMode mode) { | 158 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 159 DCHECK(RelocInfo::IsCodeTarget(rmode)); |
| 160 |
| 161 #ifdef DEBUG |
| 162 // Check the expected size before generating code to ensure we assume the same |
| 163 // constant pool availability (e.g., whether constant pool is full or not). |
| 164 int expected_size = CallSize(code, rmode, ast_id, cond); |
162 Label start; | 165 Label start; |
163 bind(&start); | 166 bind(&start); |
164 DCHECK(RelocInfo::IsCodeTarget(rmode)); | 167 #endif |
| 168 |
165 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { | 169 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { |
166 SetRecordedAstId(ast_id); | 170 SetRecordedAstId(ast_id); |
167 rmode = RelocInfo::CODE_TARGET_WITH_ID; | 171 rmode = RelocInfo::CODE_TARGET_WITH_ID; |
168 } | 172 } |
169 // 'code' is always generated ARM code, never THUMB code | 173 AllowDeferredHandleDereference using_raw_address; |
170 AllowDeferredHandleDereference embedding_raw_address; | 174 Call(reinterpret_cast<Address>(code.location()), rmode, cond); |
171 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode); | 175 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); |
172 } | 176 } |
173 | 177 |
174 | 178 |
175 void MacroAssembler::Ret(Condition cond) { | 179 void MacroAssembler::Ret(Condition cond) { |
176 bx(lr, cond); | 180 DCHECK(cond == al); |
| 181 blr(); |
177 } | 182 } |
178 | 183 |
179 | 184 |
180 void MacroAssembler::Drop(int count, Condition cond) { | 185 void MacroAssembler::Drop(int count, Condition cond) { |
| 186 DCHECK(cond == al); |
181 if (count > 0) { | 187 if (count > 0) { |
182 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond); | 188 Add(sp, sp, count * kPointerSize, r0); |
183 } | 189 } |
184 } | 190 } |
185 | 191 |
186 | 192 |
187 void MacroAssembler::Ret(int drop, Condition cond) { | 193 void MacroAssembler::Ret(int drop, Condition cond) { |
188 Drop(drop, cond); | 194 Drop(drop, cond); |
189 Ret(cond); | 195 Ret(cond); |
190 } | 196 } |
191 | 197 |
192 | 198 |
193 void MacroAssembler::Swap(Register reg1, | |
194 Register reg2, | |
195 Register scratch, | |
196 Condition cond) { | |
197 if (scratch.is(no_reg)) { | |
198 eor(reg1, reg1, Operand(reg2), LeaveCC, cond); | |
199 eor(reg2, reg2, Operand(reg1), LeaveCC, cond); | |
200 eor(reg1, reg1, Operand(reg2), LeaveCC, cond); | |
201 } else { | |
202 mov(scratch, reg1, LeaveCC, cond); | |
203 mov(reg1, reg2, LeaveCC, cond); | |
204 mov(reg2, scratch, LeaveCC, cond); | |
205 } | |
206 } | |
207 | |
208 | |
209 void MacroAssembler::Call(Label* target) { | 199 void MacroAssembler::Call(Label* target) { |
210 bl(target); | 200 b(target, SetLK); |
211 } | 201 } |
212 | 202 |
213 | 203 |
214 void MacroAssembler::Push(Handle<Object> handle) { | 204 void MacroAssembler::Push(Handle<Object> handle) { |
215 mov(ip, Operand(handle)); | 205 mov(ip, Operand(handle)); |
216 push(ip); | 206 push(ip); |
217 } | 207 } |
218 | 208 |
219 | 209 |
220 void MacroAssembler::Move(Register dst, Handle<Object> value) { | 210 void MacroAssembler::Move(Register dst, Handle<Object> value) { |
221 AllowDeferredHandleDereference smi_check; | 211 AllowDeferredHandleDereference smi_check; |
222 if (value->IsSmi()) { | 212 if (value->IsSmi()) { |
223 mov(dst, Operand(value)); | 213 LoadSmiLiteral(dst, reinterpret_cast<Smi *>(*value)); |
224 } else { | 214 } else { |
225 DCHECK(value->IsHeapObject()); | 215 DCHECK(value->IsHeapObject()); |
226 if (isolate()->heap()->InNewSpace(*value)) { | 216 if (isolate()->heap()->InNewSpace(*value)) { |
227 Handle<Cell> cell = isolate()->factory()->NewCell(value); | 217 Handle<Cell> cell = isolate()->factory()->NewCell(value); |
228 mov(dst, Operand(cell)); | 218 mov(dst, Operand(cell)); |
229 ldr(dst, FieldMemOperand(dst, Cell::kValueOffset)); | 219 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset)); |
230 } else { | 220 } else { |
231 mov(dst, Operand(value)); | 221 mov(dst, Operand(value)); |
232 } | 222 } |
233 } | 223 } |
234 } | 224 } |
235 | 225 |
236 | 226 |
237 void MacroAssembler::Move(Register dst, Register src, Condition cond) { | 227 void MacroAssembler::Move(Register dst, Register src, Condition cond) { |
| 228 DCHECK(cond == al); |
238 if (!dst.is(src)) { | 229 if (!dst.is(src)) { |
239 mov(dst, src, LeaveCC, cond); | 230 mr(dst, src); |
240 } | 231 } |
241 } | 232 } |
242 | 233 |
243 | 234 |
244 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { | 235 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { |
245 if (!dst.is(src)) { | 236 if (!dst.is(src)) { |
246 vmov(dst, src); | 237 fmr(dst, src); |
247 } | 238 } |
248 } | 239 } |
249 | 240 |
250 | 241 |
251 void MacroAssembler::Mls(Register dst, Register src1, Register src2, | 242 void MacroAssembler::MultiPush(RegList regs) { |
252 Register srcA, Condition cond) { | 243 int16_t num_to_push = NumberOfBitsSet(regs); |
253 if (CpuFeatures::IsSupported(MLS)) { | 244 int16_t stack_offset = num_to_push * kPointerSize; |
254 CpuFeatureScope scope(this, MLS); | 245 |
255 mls(dst, src1, src2, srcA, cond); | 246 subi(sp, sp, Operand(stack_offset)); |
256 } else { | 247 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
257 DCHECK(!srcA.is(ip)); | 248 if ((regs & (1 << i)) != 0) { |
258 mul(ip, src1, src2, LeaveCC, cond); | 249 stack_offset -= kPointerSize; |
259 sub(dst, srcA, ip, LeaveCC, cond); | 250 StoreP(ToRegister(i), MemOperand(sp, stack_offset)); |
| 251 } |
260 } | 252 } |
261 } | 253 } |
262 | 254 |
263 | 255 |
264 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, | 256 void MacroAssembler::MultiPop(RegList regs) { |
265 Condition cond) { | 257 int16_t stack_offset = 0; |
266 if (!src2.is_reg() && | 258 |
267 !src2.must_output_reloc_info(this) && | 259 for (int16_t i = 0; i < kNumRegisters; i++) { |
268 src2.immediate() == 0) { | 260 if ((regs & (1 << i)) != 0) { |
269 mov(dst, Operand::Zero(), LeaveCC, cond); | 261 LoadP(ToRegister(i), MemOperand(sp, stack_offset)); |
270 } else if (!(src2.instructions_required(this) == 1) && | 262 stack_offset += kPointerSize; |
271 !src2.must_output_reloc_info(this) && | 263 } |
272 CpuFeatures::IsSupported(ARMv7) && | |
273 IsPowerOf2(src2.immediate() + 1)) { | |
274 ubfx(dst, src1, 0, | |
275 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); | |
276 } else { | |
277 and_(dst, src1, src2, LeaveCC, cond); | |
278 } | 264 } |
279 } | 265 addi(sp, sp, Operand(stack_offset)); |
280 | |
281 | |
282 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, | |
283 Condition cond) { | |
284 DCHECK(lsb < 32); | |
285 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
286 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
287 and_(dst, src1, Operand(mask), LeaveCC, cond); | |
288 if (lsb != 0) { | |
289 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); | |
290 } | |
291 } else { | |
292 ubfx(dst, src1, lsb, width, cond); | |
293 } | |
294 } | |
295 | |
296 | |
297 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, | |
298 Condition cond) { | |
299 DCHECK(lsb < 32); | |
300 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
301 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
302 and_(dst, src1, Operand(mask), LeaveCC, cond); | |
303 int shift_up = 32 - lsb - width; | |
304 int shift_down = lsb + shift_up; | |
305 if (shift_up != 0) { | |
306 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond); | |
307 } | |
308 if (shift_down != 0) { | |
309 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); | |
310 } | |
311 } else { | |
312 sbfx(dst, src1, lsb, width, cond); | |
313 } | |
314 } | |
315 | |
316 | |
317 void MacroAssembler::Bfi(Register dst, | |
318 Register src, | |
319 Register scratch, | |
320 int lsb, | |
321 int width, | |
322 Condition cond) { | |
323 DCHECK(0 <= lsb && lsb < 32); | |
324 DCHECK(0 <= width && width < 32); | |
325 DCHECK(lsb + width < 32); | |
326 DCHECK(!scratch.is(dst)); | |
327 if (width == 0) return; | |
328 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
329 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
330 bic(dst, dst, Operand(mask)); | |
331 and_(scratch, src, Operand((1 << width) - 1)); | |
332 mov(scratch, Operand(scratch, LSL, lsb)); | |
333 orr(dst, dst, scratch); | |
334 } else { | |
335 bfi(dst, src, lsb, width, cond); | |
336 } | |
337 } | |
338 | |
339 | |
340 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, | |
341 Condition cond) { | |
342 DCHECK(lsb < 32); | |
343 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
344 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
345 bic(dst, src, Operand(mask)); | |
346 } else { | |
347 Move(dst, src, cond); | |
348 bfc(dst, lsb, width, cond); | |
349 } | |
350 } | |
351 | |
352 | |
353 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, | |
354 Condition cond) { | |
355 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
356 DCHECK(!dst.is(pc) && !src.rm().is(pc)); | |
357 DCHECK((satpos >= 0) && (satpos <= 31)); | |
358 | |
359 // These asserts are required to ensure compatibility with the ARMv7 | |
360 // implementation. | |
361 DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL)); | |
362 DCHECK(src.rs().is(no_reg)); | |
363 | |
364 Label done; | |
365 int satval = (1 << satpos) - 1; | |
366 | |
367 if (cond != al) { | |
368 b(NegateCondition(cond), &done); // Skip saturate if !condition. | |
369 } | |
370 if (!(src.is_reg() && dst.is(src.rm()))) { | |
371 mov(dst, src); | |
372 } | |
373 tst(dst, Operand(~satval)); | |
374 b(eq, &done); | |
375 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative. | |
376 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive. | |
377 bind(&done); | |
378 } else { | |
379 usat(dst, satpos, src, cond); | |
380 } | |
381 } | |
382 | |
383 | |
384 void MacroAssembler::Load(Register dst, | |
385 const MemOperand& src, | |
386 Representation r) { | |
387 DCHECK(!r.IsDouble()); | |
388 if (r.IsInteger8()) { | |
389 ldrsb(dst, src); | |
390 } else if (r.IsUInteger8()) { | |
391 ldrb(dst, src); | |
392 } else if (r.IsInteger16()) { | |
393 ldrsh(dst, src); | |
394 } else if (r.IsUInteger16()) { | |
395 ldrh(dst, src); | |
396 } else { | |
397 ldr(dst, src); | |
398 } | |
399 } | |
400 | |
401 | |
402 void MacroAssembler::Store(Register src, | |
403 const MemOperand& dst, | |
404 Representation r) { | |
405 DCHECK(!r.IsDouble()); | |
406 if (r.IsInteger8() || r.IsUInteger8()) { | |
407 strb(src, dst); | |
408 } else if (r.IsInteger16() || r.IsUInteger16()) { | |
409 strh(src, dst); | |
410 } else { | |
411 if (r.IsHeapObject()) { | |
412 AssertNotSmi(src); | |
413 } else if (r.IsSmi()) { | |
414 AssertSmi(src); | |
415 } | |
416 str(src, dst); | |
417 } | |
418 } | 266 } |
419 | 267 |
420 | 268 |
421 void MacroAssembler::LoadRoot(Register destination, | 269 void MacroAssembler::LoadRoot(Register destination, |
422 Heap::RootListIndex index, | 270 Heap::RootListIndex index, |
423 Condition cond) { | 271 Condition cond) { |
424 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && | 272 DCHECK(cond == al); |
425 isolate()->heap()->RootCanBeTreatedAsConstant(index) && | 273 LoadP(destination, MemOperand(kRootRegister, |
426 !predictable_code_size()) { | 274 index << kPointerSizeLog2), r0); |
427 // The CPU supports fast immediate values, and this root will never | |
428 // change. We will load it as a relocatable immediate value. | |
429 Handle<Object> root(&isolate()->heap()->roots_array_start()[index]); | |
430 mov(destination, Operand(root), LeaveCC, cond); | |
431 return; | |
432 } | |
433 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); | |
434 } | 275 } |
435 | 276 |
436 | 277 |
437 void MacroAssembler::StoreRoot(Register source, | 278 void MacroAssembler::StoreRoot(Register source, |
438 Heap::RootListIndex index, | 279 Heap::RootListIndex index, |
439 Condition cond) { | 280 Condition cond) { |
440 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); | 281 DCHECK(cond == al); |
| 282 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0); |
441 } | 283 } |
442 | 284 |
443 | 285 |
444 void MacroAssembler::InNewSpace(Register object, | 286 void MacroAssembler::InNewSpace(Register object, |
445 Register scratch, | 287 Register scratch, |
446 Condition cond, | 288 Condition cond, |
447 Label* branch) { | 289 Label* branch) { |
| 290 // N.B. scratch may be the same register as object |
448 DCHECK(cond == eq || cond == ne); | 291 DCHECK(cond == eq || cond == ne); |
449 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); | 292 mov(r0, Operand(ExternalReference::new_space_mask(isolate()))); |
450 cmp(scratch, Operand(ExternalReference::new_space_start(isolate()))); | 293 and_(scratch, object, r0); |
| 294 mov(r0, Operand(ExternalReference::new_space_start(isolate()))); |
| 295 cmp(scratch, r0); |
451 b(cond, branch); | 296 b(cond, branch); |
452 } | 297 } |
453 | 298 |
454 | 299 |
455 void MacroAssembler::RecordWriteField( | 300 void MacroAssembler::RecordWriteField( |
456 Register object, | 301 Register object, |
457 int offset, | 302 int offset, |
458 Register value, | 303 Register value, |
459 Register dst, | 304 Register dst, |
460 LinkRegisterStatus lr_status, | 305 LinkRegisterStatus lr_status, |
461 SaveFPRegsMode save_fp, | 306 SaveFPRegsMode save_fp, |
462 RememberedSetAction remembered_set_action, | 307 RememberedSetAction remembered_set_action, |
463 SmiCheck smi_check, | 308 SmiCheck smi_check, |
464 PointersToHereCheck pointers_to_here_check_for_value) { | 309 PointersToHereCheck pointers_to_here_check_for_value) { |
465 // First, check if a write barrier is even needed. The tests below | 310 // First, check if a write barrier is even needed. The tests below |
466 // catch stores of Smis. | 311 // catch stores of Smis. |
467 Label done; | 312 Label done; |
468 | 313 |
469 // Skip barrier if writing a smi. | 314 // Skip barrier if writing a smi. |
470 if (smi_check == INLINE_SMI_CHECK) { | 315 if (smi_check == INLINE_SMI_CHECK) { |
471 JumpIfSmi(value, &done); | 316 JumpIfSmi(value, &done); |
472 } | 317 } |
473 | 318 |
474 // Although the object register is tagged, the offset is relative to the start | 319 // Although the object register is tagged, the offset is relative to the start |
475 // of the object, so the offset must be a multiple of kPointerSize. | 320 // of the object, so the offset must be a multiple of kPointerSize. |
476 DCHECK(IsAligned(offset, kPointerSize)); | 321 DCHECK(IsAligned(offset, kPointerSize)); |
477 | 322 |
478 add(dst, object, Operand(offset - kHeapObjectTag)); | 323 Add(dst, object, offset - kHeapObjectTag, r0); |
479 if (emit_debug_code()) { | 324 if (emit_debug_code()) { |
480 Label ok; | 325 Label ok; |
481 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); | 326 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1)); |
482 b(eq, &ok); | 327 beq(&ok, cr0); |
483 stop("Unaligned cell in write barrier"); | 328 stop("Unaligned cell in write barrier"); |
484 bind(&ok); | 329 bind(&ok); |
485 } | 330 } |
486 | 331 |
487 RecordWrite(object, | 332 RecordWrite(object, |
488 dst, | 333 dst, |
489 value, | 334 value, |
490 lr_status, | 335 lr_status, |
491 save_fp, | 336 save_fp, |
492 remembered_set_action, | 337 remembered_set_action, |
493 OMIT_SMI_CHECK, | 338 OMIT_SMI_CHECK, |
494 pointers_to_here_check_for_value); | 339 pointers_to_here_check_for_value); |
495 | 340 |
496 bind(&done); | 341 bind(&done); |
497 | 342 |
498 // Clobber clobbered input registers when running with the debug-code flag | 343 // Clobber clobbered input registers when running with the debug-code flag |
499 // turned on to provoke errors. | 344 // turned on to provoke errors. |
500 if (emit_debug_code()) { | 345 if (emit_debug_code()) { |
501 mov(value, Operand(BitCast<int32_t>(kZapValue + 4))); | 346 mov(value, Operand(BitCast<intptr_t>(kZapValue + 4))); |
502 mov(dst, Operand(BitCast<int32_t>(kZapValue + 8))); | 347 mov(dst, Operand(BitCast<intptr_t>(kZapValue + 8))); |
503 } | 348 } |
504 } | 349 } |
505 | 350 |
506 | 351 |
507 // Will clobber 4 registers: object, map, dst, ip. The | 352 // Will clobber 4 registers: object, map, dst, ip. The |
508 // register 'object' contains a heap object pointer. | 353 // register 'object' contains a heap object pointer. |
509 void MacroAssembler::RecordWriteForMap(Register object, | 354 void MacroAssembler::RecordWriteForMap(Register object, |
510 Register map, | 355 Register map, |
511 Register dst, | 356 Register dst, |
512 LinkRegisterStatus lr_status, | 357 LinkRegisterStatus lr_status, |
513 SaveFPRegsMode fp_mode) { | 358 SaveFPRegsMode fp_mode) { |
514 if (emit_debug_code()) { | 359 if (emit_debug_code()) { |
515 ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset)); | 360 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset)); |
516 cmp(dst, Operand(isolate()->factory()->meta_map())); | 361 Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0); |
517 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 362 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
518 } | 363 } |
519 | 364 |
520 if (!FLAG_incremental_marking) { | 365 if (!FLAG_incremental_marking) { |
521 return; | 366 return; |
522 } | 367 } |
523 | 368 |
524 if (emit_debug_code()) { | 369 if (emit_debug_code()) { |
525 ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset)); | 370 LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset)); |
526 cmp(ip, map); | 371 cmp(ip, map); |
527 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 372 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
528 } | 373 } |
529 | 374 |
530 Label done; | 375 Label done; |
531 | 376 |
532 // A single check of the map's pages interesting flag suffices, since it is | 377 // A single check of the map's pages interesting flag suffices, since it is |
533 // only set during incremental collection, and then it's also guaranteed that | 378 // only set during incremental collection, and then it's also guaranteed that |
534 // the from object's page's interesting flag is also set. This optimization | 379 // the from object's page's interesting flag is also set. This optimization |
535 // relies on the fact that maps can never be in new space. | 380 // relies on the fact that maps can never be in new space. |
536 CheckPageFlag(map, | 381 CheckPageFlag(map, |
537 map, // Used as scratch. | 382 map, // Used as scratch. |
538 MemoryChunk::kPointersToHereAreInterestingMask, | 383 MemoryChunk::kPointersToHereAreInterestingMask, |
539 eq, | 384 eq, |
540 &done); | 385 &done); |
541 | 386 |
542 add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); | 387 addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); |
543 if (emit_debug_code()) { | 388 if (emit_debug_code()) { |
544 Label ok; | 389 Label ok; |
545 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); | 390 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1)); |
546 b(eq, &ok); | 391 beq(&ok, cr0); |
547 stop("Unaligned cell in write barrier"); | 392 stop("Unaligned cell in write barrier"); |
548 bind(&ok); | 393 bind(&ok); |
549 } | 394 } |
550 | 395 |
551 // Record the actual write. | 396 // Record the actual write. |
552 if (lr_status == kLRHasNotBeenSaved) { | 397 if (lr_status == kLRHasNotBeenSaved) { |
553 push(lr); | 398 mflr(r0); |
| 399 push(r0); |
554 } | 400 } |
555 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, | 401 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, |
556 fp_mode); | 402 fp_mode); |
557 CallStub(&stub); | 403 CallStub(&stub); |
558 if (lr_status == kLRHasNotBeenSaved) { | 404 if (lr_status == kLRHasNotBeenSaved) { |
559 pop(lr); | 405 pop(r0); |
| 406 mtlr(r0); |
560 } | 407 } |
561 | 408 |
562 bind(&done); | 409 bind(&done); |
563 | 410 |
564 // Count number of write barriers in generated code. | 411 // Count number of write barriers in generated code. |
565 isolate()->counters()->write_barriers_static()->Increment(); | 412 isolate()->counters()->write_barriers_static()->Increment(); |
566 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst); | 413 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst); |
567 | 414 |
568 // Clobber clobbered registers when running with the debug-code flag | 415 // Clobber clobbered registers when running with the debug-code flag |
569 // turned on to provoke errors. | 416 // turned on to provoke errors. |
570 if (emit_debug_code()) { | 417 if (emit_debug_code()) { |
571 mov(dst, Operand(BitCast<int32_t>(kZapValue + 12))); | 418 mov(dst, Operand(BitCast<intptr_t>(kZapValue + 12))); |
572 mov(map, Operand(BitCast<int32_t>(kZapValue + 16))); | 419 mov(map, Operand(BitCast<intptr_t>(kZapValue + 16))); |
573 } | 420 } |
574 } | 421 } |
575 | 422 |
576 | 423 |
577 // Will clobber 4 registers: object, address, scratch, ip. The | 424 // Will clobber 4 registers: object, address, scratch, ip. The |
578 // register 'object' contains a heap object pointer. The heap object | 425 // register 'object' contains a heap object pointer. The heap object |
579 // tag is shifted away. | 426 // tag is shifted away. |
580 void MacroAssembler::RecordWrite( | 427 void MacroAssembler::RecordWrite( |
581 Register object, | 428 Register object, |
582 Register address, | 429 Register address, |
583 Register value, | 430 Register value, |
584 LinkRegisterStatus lr_status, | 431 LinkRegisterStatus lr_status, |
585 SaveFPRegsMode fp_mode, | 432 SaveFPRegsMode fp_mode, |
586 RememberedSetAction remembered_set_action, | 433 RememberedSetAction remembered_set_action, |
587 SmiCheck smi_check, | 434 SmiCheck smi_check, |
588 PointersToHereCheck pointers_to_here_check_for_value) { | 435 PointersToHereCheck pointers_to_here_check_for_value) { |
589 DCHECK(!object.is(value)); | 436 DCHECK(!object.is(value)); |
590 if (emit_debug_code()) { | 437 if (emit_debug_code()) { |
591 ldr(ip, MemOperand(address)); | 438 LoadP(ip, MemOperand(address)); |
592 cmp(ip, value); | 439 cmp(ip, value); |
593 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 440 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
594 } | 441 } |
595 | 442 |
596 if (remembered_set_action == OMIT_REMEMBERED_SET && | 443 if (remembered_set_action == OMIT_REMEMBERED_SET && |
597 !FLAG_incremental_marking) { | 444 !FLAG_incremental_marking) { |
598 return; | 445 return; |
599 } | 446 } |
600 | 447 |
601 // First, check if a write barrier is even needed. The tests below | 448 // First, check if a write barrier is even needed. The tests below |
(...skipping 12 matching lines...) |
614 &done); | 461 &done); |
615 } | 462 } |
616 CheckPageFlag(object, | 463 CheckPageFlag(object, |
617 value, // Used as scratch. | 464 value, // Used as scratch. |
618 MemoryChunk::kPointersFromHereAreInterestingMask, | 465 MemoryChunk::kPointersFromHereAreInterestingMask, |
619 eq, | 466 eq, |
620 &done); | 467 &done); |
621 | 468 |
622 // Record the actual write. | 469 // Record the actual write. |
623 if (lr_status == kLRHasNotBeenSaved) { | 470 if (lr_status == kLRHasNotBeenSaved) { |
624 push(lr); | 471 mflr(r0); |
| 472 push(r0); |
625 } | 473 } |
626 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, | 474 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, |
627 fp_mode); | 475 fp_mode); |
628 CallStub(&stub); | 476 CallStub(&stub); |
629 if (lr_status == kLRHasNotBeenSaved) { | 477 if (lr_status == kLRHasNotBeenSaved) { |
630 pop(lr); | 478 pop(r0); |
| 479 mtlr(r0); |
631 } | 480 } |
632 | 481 |
633 bind(&done); | 482 bind(&done); |
634 | 483 |
635 // Count number of write barriers in generated code. | 484 // Count number of write barriers in generated code. |
636 isolate()->counters()->write_barriers_static()->Increment(); | 485 isolate()->counters()->write_barriers_static()->Increment(); |
637 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, | 486 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, |
638 value); | 487 value); |
639 | 488 |
640 // Clobber clobbered registers when running with the debug-code flag | 489 // Clobber clobbered registers when running with the debug-code flag |
641 // turned on to provoke errors. | 490 // turned on to provoke errors. |
642 if (emit_debug_code()) { | 491 if (emit_debug_code()) { |
643 mov(address, Operand(BitCast<int32_t>(kZapValue + 12))); | 492 mov(address, Operand(BitCast<intptr_t>(kZapValue + 12))); |
644 mov(value, Operand(BitCast<int32_t>(kZapValue + 16))); | 493 mov(value, Operand(BitCast<intptr_t>(kZapValue + 16))); |
645 } | 494 } |
646 } | 495 } |
647 | 496 |
648 | 497 |
649 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | 498 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. |
650 Register address, | 499 Register address, |
651 Register scratch, | 500 Register scratch, |
652 SaveFPRegsMode fp_mode, | 501 SaveFPRegsMode fp_mode, |
653 RememberedSetFinalAction and_then) { | 502 RememberedSetFinalAction and_then) { |
654 Label done; | 503 Label done; |
655 if (emit_debug_code()) { | 504 if (emit_debug_code()) { |
656 Label ok; | 505 Label ok; |
657 JumpIfNotInNewSpace(object, scratch, &ok); | 506 JumpIfNotInNewSpace(object, scratch, &ok); |
658 stop("Remembered set pointer is in new space"); | 507 stop("Remembered set pointer is in new space"); |
659 bind(&ok); | 508 bind(&ok); |
660 } | 509 } |
661 // Load store buffer top. | 510 // Load store buffer top. |
662 ExternalReference store_buffer = | 511 ExternalReference store_buffer = |
663 ExternalReference::store_buffer_top(isolate()); | 512 ExternalReference::store_buffer_top(isolate()); |
664 mov(ip, Operand(store_buffer)); | 513 mov(ip, Operand(store_buffer)); |
665 ldr(scratch, MemOperand(ip)); | 514 LoadP(scratch, MemOperand(ip)); |
666 // Store pointer to buffer and increment buffer top. | 515 // Store pointer to buffer and increment buffer top. |
667 str(address, MemOperand(scratch, kPointerSize, PostIndex)); | 516 StoreP(address, MemOperand(scratch)); |
| 517 addi(scratch, scratch, Operand(kPointerSize)); |
668 // Write back new top of buffer. | 518 // Write back new top of buffer. |
669 str(scratch, MemOperand(ip)); | 519 StoreP(scratch, MemOperand(ip)); |
670 // Call stub on end of buffer. | 520 // Call stub on end of buffer. |
671 // Check for end of buffer. | 521 // Check for end of buffer. |
672 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); | 522 mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit)); |
| 523 and_(r0, scratch, r0, SetRC); |
| 524 |
673 if (and_then == kFallThroughAtEnd) { | 525 if (and_then == kFallThroughAtEnd) { |
674 b(eq, &done); | 526 beq(&done, cr0); |
675 } else { | 527 } else { |
676 DCHECK(and_then == kReturnAtEnd); | 528 DCHECK(and_then == kReturnAtEnd); |
677 Ret(eq); | 529 beq(&done, cr0); |
678 } | 530 } |
679 push(lr); | 531 mflr(r0); |
| 532 push(r0); |
680 StoreBufferOverflowStub store_buffer_overflow = | 533 StoreBufferOverflowStub store_buffer_overflow = |
681 StoreBufferOverflowStub(isolate(), fp_mode); | 534 StoreBufferOverflowStub(isolate(), fp_mode); |
682 CallStub(&store_buffer_overflow); | 535 CallStub(&store_buffer_overflow); |
683 pop(lr); | 536 pop(r0); |
| 537 mtlr(r0); |
684 bind(&done); | 538 bind(&done); |
685 if (and_then == kReturnAtEnd) { | 539 if (and_then == kReturnAtEnd) { |
686 Ret(); | 540 Ret(); |
687 } | 541 } |
688 } | 542 } |
689 | 543 |
690 | 544 |
691 void MacroAssembler::PushFixedFrame(Register marker_reg) { | 545 void MacroAssembler::PushFixedFrame(Register marker_reg) { |
692 DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code()); | 546 mflr(r0); |
693 stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | | 547 #if V8_OOL_CONSTANT_POOL |
694 cp.bit() | | 548 if (marker_reg.is_valid()) { |
695 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | | 549 Push(r0, fp, kConstantPoolRegister, cp, marker_reg); |
696 fp.bit() | | 550 } else { |
697 lr.bit()); | 551 Push(r0, fp, kConstantPoolRegister, cp); |
| 552 } |
| 553 #else |
| 554 if (marker_reg.is_valid()) { |
| 555 Push(r0, fp, cp, marker_reg); |
| 556 } else { |
| 557 Push(r0, fp, cp); |
| 558 } |
| 559 #endif |
698 } | 560 } |
699 | 561 |
700 | 562 |
701 void MacroAssembler::PopFixedFrame(Register marker_reg) { | 563 void MacroAssembler::PopFixedFrame(Register marker_reg) { |
702 DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code()); | 564 #if V8_OOL_CONSTANT_POOL |
703 ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | | 565 if (marker_reg.is_valid()) { |
704 cp.bit() | | 566 Pop(r0, fp, kConstantPoolRegister, cp, marker_reg); |
705 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | | 567 } else { |
706 fp.bit() | | 568 Pop(r0, fp, kConstantPoolRegister, cp); |
707 lr.bit()); | 569 } |
| 570 #else |
| 571 if (marker_reg.is_valid()) { |
| 572 Pop(r0, fp, cp, marker_reg); |
| 573 } else { |
| 574 Pop(r0, fp, cp); |
| 575 } |
| 576 #endif |
| 577 mtlr(r0); |
708 } | 578 } |
709 | 579 |
710 | 580 |
711 // Push and pop all registers that can hold pointers. | 581 // Push and pop all registers that can hold pointers. |
712 void MacroAssembler::PushSafepointRegisters() { | 582 void MacroAssembler::PushSafepointRegisters() { |
713 // Safepoints expect a block of contiguous register values starting with r0: | |
714 DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters); | |
715 // Safepoints expect a block of kNumSafepointRegisters values on the | 583 // Safepoints expect a block of kNumSafepointRegisters values on the |
716 // stack, so adjust the stack for unsaved registers. | 584 // stack, so adjust the stack for unsaved registers. |
717 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 585 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
718 DCHECK(num_unsaved >= 0); | 586 DCHECK(num_unsaved >= 0); |
719 sub(sp, sp, Operand(num_unsaved * kPointerSize)); | 587 if (num_unsaved > 0) { |
720 stm(db_w, sp, kSafepointSavedRegisters); | 588 subi(sp, sp, Operand(num_unsaved * kPointerSize)); |
| 589 } |
| 590 MultiPush(kSafepointSavedRegisters); |
721 } | 591 } |
722 | 592 |
723 | 593 |
724 void MacroAssembler::PopSafepointRegisters() { | 594 void MacroAssembler::PopSafepointRegisters() { |
725 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 595 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
726 ldm(ia_w, sp, kSafepointSavedRegisters); | 596 MultiPop(kSafepointSavedRegisters); |
727 add(sp, sp, Operand(num_unsaved * kPointerSize)); | 597 if (num_unsaved > 0) { |
| 598 addi(sp, sp, Operand(num_unsaved * kPointerSize)); |
| 599 } |
728 } | 600 } |
729 | 601 |
730 | 602 |
731 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { | 603 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { |
732 str(src, SafepointRegisterSlot(dst)); | 604 StoreP(src, SafepointRegisterSlot(dst)); |
733 } | 605 } |
734 | 606 |
735 | 607 |
736 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { | 608 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { |
737 ldr(dst, SafepointRegisterSlot(src)); | 609 LoadP(dst, SafepointRegisterSlot(src)); |
738 } | 610 } |
739 | 611 |
740 | 612 |
741 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 613 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
742 // The registers are pushed starting with the highest encoding, | 614 // The registers are pushed starting with the highest encoding, |
743 // which means that lowest encodings are closest to the stack pointer. | 615 // which means that lowest encodings are closest to the stack pointer. |
744 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters); | 616 RegList regs = kSafepointSavedRegisters; |
745 return reg_code; | 617 int index = 0; |
| 618 |
| 619 DCHECK(reg_code >= 0 && reg_code < kNumRegisters); |
| 620 |
| 621 for (int16_t i = 0; i < reg_code; i++) { |
| 622 if ((regs & (1 << i)) != 0) { |
| 623 index++; |
| 624 } |
| 625 } |
| 626 |
| 627 return index; |
746 } | 628 } |
747 | 629 |
748 | 630 |
749 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { | 631 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { |
750 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); | 632 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); |
751 } | 633 } |
752 | 634 |
753 | 635 |
754 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { | 636 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { |
755 // Number of d-regs not known at snapshot time. | |
756 DCHECK(!serializer_enabled()); | |
757 // General purpose registers are pushed last on the stack. | 637 // General purpose registers are pushed last on the stack. |
758 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; | 638 int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize; |
759 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; | 639 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; |
760 return MemOperand(sp, doubles_size + register_offset); | 640 return MemOperand(sp, doubles_size + register_offset); |
761 } | 641 } |
762 | 642 |
763 | 643 |
764 void MacroAssembler::Ldrd(Register dst1, Register dst2, | 644 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst, |
765 const MemOperand& src, Condition cond) { | 645 const DoubleRegister src) { |
766 DCHECK(src.rm().is(no_reg)); | 646 Label done; |
767 DCHECK(!dst1.is(lr)); // r14. | 647 |
768 | 648 // Test for NaN |
769 // V8 does not use this addressing mode, so the fallback code | 649 fcmpu(src, src); |
770 // below doesn't support it yet. | 650 |
771 DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex)); | 651 if (dst.is(src)) { |
772 | 652 bordered(&done); |
773 // Generate two ldr instructions if ldrd is not available. | |
774 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && | |
775 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) { | |
776 CpuFeatureScope scope(this, ARMv7); | |
777 ldrd(dst1, dst2, src, cond); | |
778 } else { | 653 } else { |
779 if ((src.am() == Offset) || (src.am() == NegOffset)) { | 654 Label is_nan; |
780 MemOperand src2(src); | 655 bunordered(&is_nan); |
781 src2.set_offset(src2.offset() + 4); | 656 fmr(dst, src); |
782 if (dst1.is(src.rn())) { | 657 b(&done); |
783 ldr(dst2, src2, cond); | 658 bind(&is_nan); |
784 ldr(dst1, src, cond); | 659 } |
785 } else { | 660 |
786 ldr(dst1, src, cond); | 661 // Replace with canonical NaN. |
787 ldr(dst2, src2, cond); | 662 double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double(); |
788 } | 663 LoadDoubleLiteral(dst, nan_value, r0); |
789 } else { // PostIndex or NegPostIndex. | 664 |
790 DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex)); | 665 bind(&done); |
791 if (dst1.is(src.rn())) { | 666 } |
792 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond); | 667 |
793 ldr(dst1, src, cond); | 668 |
794 } else { | 669 void MacroAssembler::ConvertIntToDouble(Register src, |
795 MemOperand src2(src); | 670 DoubleRegister double_dst) { |
796 src2.set_offset(src2.offset() - 4); | 671 MovIntToDouble(double_dst, src, r0); |
797 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond); | 672 fcfid(double_dst, double_dst); |
798 ldr(dst2, src2, cond); | 673 } |
799 } | 674 |
800 } | 675 |
801 } | 676 void MacroAssembler::ConvertUnsignedIntToDouble(Register src, |
802 } | 677 DoubleRegister double_dst) { |
803 | 678 MovUnsignedIntToDouble(double_dst, src, r0); |
804 | 679 fcfid(double_dst, double_dst); |
805 void MacroAssembler::Strd(Register src1, Register src2, | 680 } |
806 const MemOperand& dst, Condition cond) { | 681 |
807 DCHECK(dst.rm().is(no_reg)); | 682 |
808 DCHECK(!src1.is(lr)); // r14. | 683 void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst, |
809 | 684 const Register src, |
810 // V8 does not use this addressing mode, so the fallback code | 685 const Register int_scratch) { |
811 // below doesn't support it yet. | 686 MovIntToDouble(dst, src, int_scratch); |
812 DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); | 687 fcfid(dst, dst); |
813 | 688 frsp(dst, dst); |
814 // Generate two str instructions if strd is not available. | 689 } |
815 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && | 690 |
816 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) { | 691 |
817 CpuFeatureScope scope(this, ARMv7); | 692 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, |
818 strd(src1, src2, dst, cond); | 693 #if !V8_TARGET_ARCH_PPC64 |
| 694 const Register dst_hi, |
| 695 #endif |
| 696 const Register dst, |
| 697 const DoubleRegister double_dst, |
| 698 FPRoundingMode rounding_mode) { |
| 699 if (rounding_mode == kRoundToZero) { |
| 700 fctidz(double_dst, double_input); |
819 } else { | 701 } else { |
820 MemOperand dst2(dst); | 702 SetRoundingMode(rounding_mode); |
821 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { | 703 fctid(double_dst, double_input); |
822 dst2.set_offset(dst2.offset() + 4); | 704 ResetRoundingMode(); |
823 str(src1, dst, cond); | 705 } |
824 str(src2, dst2, cond); | 706 |
825 } else { // PostIndex or NegPostIndex. | 707 MovDoubleToInt64( |
826 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); | 708 #if !V8_TARGET_ARCH_PPC64 |
827 dst2.set_offset(dst2.offset() - 4); | 709 dst_hi, |
828 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); | 710 #endif |
829 str(src2, dst2, cond); | 711 dst, double_dst); |
830 } | 712 } |
831 } | 713 |
832 } | 714 |
833 | 715 #if V8_OOL_CONSTANT_POOL |
834 | |
835 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { | |
836 // If needed, restore wanted bits of FPSCR. | |
837 Label fpscr_done; | |
838 vmrs(scratch); | |
839 if (emit_debug_code()) { | |
840 Label rounding_mode_correct; | |
841 tst(scratch, Operand(kVFPRoundingModeMask)); | |
842 b(eq, &rounding_mode_correct); | |
843 // Don't call Assert here, since Runtime_Abort could re-enter here. | |
844 stop("Default rounding mode not set"); | |
845 bind(&rounding_mode_correct); | |
846 } | |
847 tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); | |
848 b(ne, &fpscr_done); | |
849 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); | |
850 vmsr(scratch); | |
851 bind(&fpscr_done); | |
852 } | |
853 | |
854 | |
855 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, | |
856 const DwVfpRegister src, | |
857 const Condition cond) { | |
858 vsub(dst, src, kDoubleRegZero, cond); | |
859 } | |
860 | |
861 | |
862 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, | |
863 const DwVfpRegister src2, | |
864 const Condition cond) { | |
865 // Compare and move FPSCR flags to the normal condition flags. | |
866 VFPCompareAndLoadFlags(src1, src2, pc, cond); | |
867 } | |
868 | |
869 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, | |
870 const double src2, | |
871 const Condition cond) { | |
872 // Compare and move FPSCR flags to the normal condition flags. | |
873 VFPCompareAndLoadFlags(src1, src2, pc, cond); | |
874 } | |
875 | |
876 | |
877 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, | |
878 const DwVfpRegister src2, | |
879 const Register fpscr_flags, | |
880 const Condition cond) { | |
881 // Compare and load FPSCR. | |
882 vcmp(src1, src2, cond); | |
883 vmrs(fpscr_flags, cond); | |
884 } | |
885 | |
886 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, | |
887 const double src2, | |
888 const Register fpscr_flags, | |
889 const Condition cond) { | |
890 // Compare and load FPSCR. | |
891 vcmp(src1, src2, cond); | |
892 vmrs(fpscr_flags, cond); | |
893 } | |
894 | |
895 void MacroAssembler::Vmov(const DwVfpRegister dst, | |
896 const double imm, | |
897 const Register scratch) { | |
898 static const DoubleRepresentation minus_zero(-0.0); | |
899 static const DoubleRepresentation zero(0.0); | |
900 DoubleRepresentation value_rep(imm); | |
901 // Handle special values first. | |
902 if (value_rep == zero) { | |
903 vmov(dst, kDoubleRegZero); | |
904 } else if (value_rep == minus_zero) { | |
905 vneg(dst, kDoubleRegZero); | |
906 } else { | |
907 vmov(dst, imm, scratch); | |
908 } | |
909 } | |
910 | |
911 | |
912 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) { | |
913 if (src.code() < 16) { | |
914 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); | |
915 vmov(dst, loc.high()); | |
916 } else { | |
917 vmov(dst, VmovIndexHi, src); | |
918 } | |
919 } | |
920 | |
921 | |
922 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) { | |
923 if (dst.code() < 16) { | |
924 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); | |
925 vmov(loc.high(), src); | |
926 } else { | |
927 vmov(dst, VmovIndexHi, src); | |
928 } | |
929 } | |
930 | |
931 | |
932 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) { | |
933 if (src.code() < 16) { | |
934 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); | |
935 vmov(dst, loc.low()); | |
936 } else { | |
937 vmov(dst, VmovIndexLo, src); | |
938 } | |
939 } | |
940 | |
941 | |
942 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { | |
943 if (dst.code() < 16) { | |
944 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); | |
945 vmov(loc.low(), src); | |
946 } else { | |
947 vmov(dst, VmovIndexLo, src); | |
948 } | |
949 } | |
950 | |
951 | |
952 void MacroAssembler::LoadConstantPoolPointerRegister() { | 716 void MacroAssembler::LoadConstantPoolPointerRegister() { |
953 if (FLAG_enable_ool_constant_pool) { | 717 ConstantPoolUnavailableScope constant_pool_unavailable(this); |
954 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize - | 718 |
955 pc_offset() - Instruction::kPCReadOffset; | 720 // the buffer if needed to avoid splitting the relocation and instructions |
956 DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset)); | 720 // the buffer if needed to avoid spliting the relocation and instructions |
957 ldr(pp, MemOperand(pc, constant_pool_offset)); | 721 EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize); |
958 } | 722 |
959 } | 723 uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset(); |
| 724 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize; |
| 725 mov(kConstantPoolRegister, |
| 726 Operand(code_start, RelocInfo::INTERNAL_REFERENCE)); |
| 727 LoadP(kConstantPoolRegister, |
| 728 MemOperand(kConstantPoolRegister, constant_pool_offset)); |
| 729 } |
| 730 #endif |
960 | 731 |
961 | 732 |
962 void MacroAssembler::StubPrologue() { | 733 void MacroAssembler::StubPrologue() { |
963 PushFixedFrame(); | 734 PushFixedFrame(); |
964 Push(Smi::FromInt(StackFrame::STUB)); | 735 Push(Smi::FromInt(StackFrame::STUB)); |
965 // Adjust FP to point to saved FP. | 736 // Adjust FP to point to saved FP. |
966 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 737 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
967 if (FLAG_enable_ool_constant_pool) { | 738 #if V8_OOL_CONSTANT_POOL |
968 LoadConstantPoolPointerRegister(); | 739 LoadConstantPoolPointerRegister(); |
969 set_constant_pool_available(true); | 740 set_constant_pool_available(true); |
970 } | 741 #endif |
971 } | 742 } |
972 | 743 |
973 | 744 |
974 void MacroAssembler::Prologue(bool code_pre_aging) { | 745 void MacroAssembler::Prologue(bool code_pre_aging) { |
975 { PredictableCodeSizeScope predictible_code_size_scope( | 746 { PredictableCodeSizeScope predictible_code_size_scope( |
976 this, kNoCodeAgeSequenceLength); | 747 this, kNoCodeAgeSequenceLength); |
977 // The following three instructions must remain together and unmodified | 748 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this); |
| 749 // The following instructions must remain together and unmodified |
978 // for code aging to work properly. | 750 // for code aging to work properly. |
979 if (code_pre_aging) { | 751 if (code_pre_aging) { |
980 // Pre-age the code. | 752 // Pre-age the code. |
| 753 // This matches the code found in PatchPlatformCodeAge() |
981 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); | 754 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); |
982 add(r0, pc, Operand(-8)); | 755 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start()); |
983 ldr(pc, MemOperand(pc, -4)); | 756 mflr(ip); |
984 emit_code_stub_address(stub); | 757 mov(r3, Operand(target)); |
| 758 Call(r3); |
| 759 for (int i = 0; i < kCodeAgingSequenceNops; i++) { |
| 760 nop(); |
| 761 } |
985 } else { | 762 } else { |
986 PushFixedFrame(r1); | 763 // This matches the code found in GetNoCodeAgeSequence() |
987 nop(ip.code()); | 764 PushFixedFrame(r4); |
988 // Adjust FP to point to saved FP. | 765 // Adjust fp to point to saved fp. |
989 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 766 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
| 767 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) { |
| 768 nop(); |
| 769 } |
990 } | 770 } |
991 } | 771 } |
992 if (FLAG_enable_ool_constant_pool) { | 772 #if V8_OOL_CONSTANT_POOL |
993 LoadConstantPoolPointerRegister(); | 773 LoadConstantPoolPointerRegister(); |
994 set_constant_pool_available(true); | 774 set_constant_pool_available(true); |
995 } | 775 #endif |
996 } | 776 } |
997 | 777 |
998 | 778 |
999 void MacroAssembler::EnterFrame(StackFrame::Type type, | 779 void MacroAssembler::EnterFrame(StackFrame::Type type, |
1000 bool load_constant_pool) { | 780 bool load_constant_pool) { |
1001 // r0-r3: preserved | |
1002 PushFixedFrame(); | 781 PushFixedFrame(); |
1003 if (FLAG_enable_ool_constant_pool && load_constant_pool) { | 782 #if V8_OOL_CONSTANT_POOL |
| 783 if (load_constant_pool) { |
1004 LoadConstantPoolPointerRegister(); | 784 LoadConstantPoolPointerRegister(); |
1005 } | 785 } |
1006 mov(ip, Operand(Smi::FromInt(type))); | 786 #endif |
1007 push(ip); | 787 LoadSmiLiteral(r0, Smi::FromInt(type)); |
1008 mov(ip, Operand(CodeObject())); | 788 push(r0); |
1009 push(ip); | 789 mov(r0, Operand(CodeObject())); |
| 790 push(r0); |
1010 // Adjust FP to point to saved FP. | 791 // Adjust FP to point to saved FP. |
1011 add(fp, sp, | 792 addi(fp, sp, |
1012 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); | 793 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); |
1013 } | 794 } |
1014 | 795 |
1015 | 796 |
1016 int MacroAssembler::LeaveFrame(StackFrame::Type type) { | 797 int MacroAssembler::LeaveFrame(StackFrame::Type type) { |
1017 // r0: preserved | 798 // r3: preserved |
1018 // r1: preserved | 799 // r4: preserved |
1019 // r2: preserved | 800 // r5: preserved |
1020 | 801 |
1021 // Drop the execution stack down to the frame pointer and restore | 802 // Drop the execution stack down to the frame pointer and restore |
1022 // the caller frame pointer, return address and constant pool pointer | 803 // the caller frame pointer, return address and constant pool pointer. |
1023 // (if FLAG_enable_ool_constant_pool). | |
1024 int frame_ends; | 804 int frame_ends; |
1025 if (FLAG_enable_ool_constant_pool) { | 805 #if V8_OOL_CONSTANT_POOL |
1026 add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset)); | 806 addi(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset)); |
1027 frame_ends = pc_offset(); | 807 frame_ends = pc_offset(); |
1028 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit()); | 808 Pop(r0, fp, kConstantPoolRegister); |
1029 } else { | 809 #else |
1030 mov(sp, fp); | 810 mr(sp, fp); |
1031 frame_ends = pc_offset(); | 811 frame_ends = pc_offset(); |
1032 ldm(ia_w, sp, fp.bit() | lr.bit()); | 812 Pop(r0, fp); |
1033 } | 813 #endif |
| 814 mtlr(r0); |
1034 return frame_ends; | 815 return frame_ends; |
1035 } | 816 } |
1036 | 817 |
1037 | 818 |
| 819 // ExitFrame layout (approximate; still needs updating) |
| 820 // |
| 821 // SP -> previousSP |
| 822 // LK reserved |
| 823 // code |
| 824 // sp_on_exit (for debug?) |
| 825 // oldSP->prev SP |
| 826 // LK |
| 827 // <parameters on stack> |
| 828 |
| 829 // Prior to calling EnterExitFrame, a number of parameters have been |
| 830 // pushed on the stack that we need to wrap a real frame around. First |
| 831 // we reserve a slot for LK and push the previous SP, which is captured |
| 832 // in the fp register (r31). |
| 833 // Then we allocate the new frame. |
| 834 |
1038 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { | 835 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { |
1039 // Set up the frame structure on the stack. | 836 // Set up the frame structure on the stack. |
1040 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); | 837 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); |
1041 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); | 838 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); |
1042 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); | 839 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); |
1043 Push(lr, fp); | 840 DCHECK(stack_space > 0); |
1044 mov(fp, Operand(sp)); // Set up new frame pointer. | 841 |
| 842 // This is an opportunity to build a frame to wrap |
| 843 // all of the pushes that have happened inside of V8 |
| 844 // since we were called from C code |
| 845 |
| 846 // Replicate the ARM frame for now. TODO: follow the PPC ABI more closely. |
| 847 mflr(r0); |
| 848 Push(r0, fp); |
| 849 mr(fp, sp); |
1045 // Reserve room for saved entry sp and code object. | 850 // Reserve room for saved entry sp and code object. |
1046 sub(sp, sp, Operand(ExitFrameConstants::kFrameSize)); | 851 subi(sp, sp, Operand(ExitFrameConstants::kFrameSize)); |
| 852 |
1047 if (emit_debug_code()) { | 853 if (emit_debug_code()) { |
1048 mov(ip, Operand::Zero()); | 854 li(r8, Operand::Zero()); |
1049 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 855 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
1050 } | 856 } |
1051 if (FLAG_enable_ool_constant_pool) { | 857 #if V8_OOL_CONSTANT_POOL |
1052 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); | 858 StoreP(kConstantPoolRegister, |
1053 } | 859 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); |
1054 mov(ip, Operand(CodeObject())); | 860 #endif |
1055 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); | 861 mov(r8, Operand(CodeObject())); |
| 862 StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); |
1056 | 863 |
1057 // Save the frame pointer and the context in top. | 864 // Save the frame pointer and the context in top. |
1058 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 865 mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
1059 str(fp, MemOperand(ip)); | 866 StoreP(fp, MemOperand(r8)); |
1060 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 867 mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
1061 str(cp, MemOperand(ip)); | 868 StoreP(cp, MemOperand(r8)); |
1062 | 869 |
1063 // Optionally save all double registers. | 870 // Optionally save all volatile double registers. |
1064 if (save_doubles) { | 871 if (save_doubles) { |
1065 SaveFPRegs(sp, ip); | 872 SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters); |
1066 // Note that d0 will be accessible at | 873 // Note that d0 will be accessible at |
1067 // fp - ExitFrameConstants::kFrameSize - | 874 // fp - ExitFrameConstants::kFrameSize - |
1068 // DwVfpRegister::kMaxNumRegisters * kDoubleSize, | 875 // kNumVolatileRegisters * kDoubleSize, |
1069 // since the sp slot, code slot and constant pool slot (if | 876 // since the sp slot and code slot were pushed after the fp. |
1070 // FLAG_enable_ool_constant_pool) were pushed after the fp. | 877 } |
1071 } | 878 |
1072 | 879 addi(sp, sp, Operand(-stack_space * kPointerSize)); |
1073 // Reserve place for the return address and stack space and align the frame | 880 |
1074 // preparing for calling the runtime function. | 881 // Allocate and align the frame preparing for calling the runtime |
1075 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 882 // function. |
1076 sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); | 883 const int frame_alignment = ActivationFrameAlignment(); |
1077 if (frame_alignment > 0) { | 884 if (frame_alignment > kPointerSize) { |
1078 DCHECK(IsPowerOf2(frame_alignment)); | 885 DCHECK(IsPowerOf2(frame_alignment)); |
1079 and_(sp, sp, Operand(-frame_alignment)); | 886 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); |
1080 } | 887 } |
| 888 li(r0, Operand::Zero()); |
| 889 StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize)); |
1081 | 890 |
1082 // Set the exit frame sp value to point just before the return address | 891 // Set the exit frame sp value to point just before the return address |
1083 // location. | 892 // location. |
1084 add(ip, sp, Operand(kPointerSize)); | 893 addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize)); |
1085 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 894 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
1086 } | 895 } |
1087 | 896 |
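The frame-alignment step above (the ClearRightImm on sp) rounds the stack pointer down to a multiple of the activation frame alignment. A minimal C++ sketch of the same operation, not part of the change:

  #include <assert.h>
  #include <stdint.h>

  // Clearing the low log2(alignment) bits rounds sp down to an aligned
  // address; alignment must be a power of two, as the DCHECK above requires.
  static inline uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
    assert((alignment & (alignment - 1)) == 0);
    return sp & ~(alignment - 1);
  }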
1088 | 897 |
1089 void MacroAssembler::InitializeNewString(Register string, | 898 void MacroAssembler::InitializeNewString(Register string, |
1090 Register length, | 899 Register length, |
1091 Heap::RootListIndex map_index, | 900 Heap::RootListIndex map_index, |
1092 Register scratch1, | 901 Register scratch1, |
1093 Register scratch2) { | 902 Register scratch2) { |
1094 SmiTag(scratch1, length); | 903 SmiTag(scratch1, length); |
1095 LoadRoot(scratch2, map_index); | 904 LoadRoot(scratch2, map_index); |
1096 str(scratch1, FieldMemOperand(string, String::kLengthOffset)); | 905 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0); |
1097 mov(scratch1, Operand(String::kEmptyHashField)); | 906 li(scratch1, Operand(String::kEmptyHashField)); |
1098 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); | 907 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0); |
1099 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); | 908 StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0); |
1100 } | 909 } |
1101 | 910 |
1102 | 911 |
1103 int MacroAssembler::ActivationFrameAlignment() { | 912 int MacroAssembler::ActivationFrameAlignment() { |
1104 #if V8_HOST_ARCH_ARM | 913 #if !defined(USE_SIMULATOR) |
1105 // Running on the real platform. Use the alignment as mandated by the local | 914 // Running on the real platform. Use the alignment as mandated by the local |
1106 // environment. | 915 // environment. |
1107 // Note: This will break if we ever start generating snapshots on one ARM | 916 // Note: This will break if we ever start generating snapshots on one PPC |
1108 // platform for another ARM platform with a different alignment. | 917 // platform for another PPC platform with a different alignment. |
1109 return base::OS::ActivationFrameAlignment(); | 918 return base::OS::ActivationFrameAlignment(); |
1110 #else // V8_HOST_ARCH_ARM | 919 #else // Simulated |
1111 // If we are using the simulator then we should always align to the expected | 920 // If we are using the simulator then we should always align to the expected |
1112 // alignment. As the simulator is used to generate snapshots we do not know | 921 // alignment. As the simulator is used to generate snapshots we do not know |
1113 // if the target platform will need alignment, so this is controlled from a | 922 // if the target platform will need alignment, so this is controlled from a |
1114 // flag. | 923 // flag. |
1115 return FLAG_sim_stack_alignment; | 924 return FLAG_sim_stack_alignment; |
1116 #endif // V8_HOST_ARCH_ARM | 925 #endif |
1117 } | 926 } |
1118 | 927 |
1119 | 928 |
1120 void MacroAssembler::LeaveExitFrame(bool save_doubles, | 929 void MacroAssembler::LeaveExitFrame(bool save_doubles, |
1121 Register argument_count, | 930 Register argument_count, |
1122 bool restore_context) { | 931 bool restore_context) { |
| 932 #if V8_OOL_CONSTANT_POOL |
1123 ConstantPoolUnavailableScope constant_pool_unavailable(this); | 933 ConstantPoolUnavailableScope constant_pool_unavailable(this); |
1124 | 934 #endif |
1125 // Optionally restore all double registers. | 935 // Optionally restore all double registers. |
1126 if (save_doubles) { | 936 if (save_doubles) { |
1127 // Calculate the stack location of the saved doubles and restore them. | 937 // Calculate the stack location of the saved doubles and restore them. |
1128 const int offset = ExitFrameConstants::kFrameSize; | 938 const int kNumRegs = DoubleRegister::kNumVolatileRegisters; |
1129 sub(r3, fp, | 939 const int offset = (ExitFrameConstants::kFrameSize + |
1130 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); | 940 kNumRegs * kDoubleSize); |
1131 RestoreFPRegs(r3, ip); | 941 addi(r6, fp, Operand(-offset)); |
| 942 RestoreFPRegs(r6, 0, kNumRegs); |
1132 } | 943 } |
1133 | 944 |
1134 // Clear top frame. | 945 // Clear top frame. |
1135 mov(r3, Operand::Zero()); | 946 li(r6, Operand::Zero()); |
1136 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 947 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
1137 str(r3, MemOperand(ip)); | 948 StoreP(r6, MemOperand(ip)); |
1138 | 949 |
1139 // Restore current context from top and clear it in debug mode. | 950 // Restore current context from top and clear it in debug mode. |
1140 if (restore_context) { | 951 if (restore_context) { |
1141 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 952 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
1142 ldr(cp, MemOperand(ip)); | 953 LoadP(cp, MemOperand(ip)); |
1143 } | 954 } |
1144 #ifdef DEBUG | 955 #ifdef DEBUG |
1145 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 956 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
1146 str(r3, MemOperand(ip)); | 957 StoreP(r6, MemOperand(ip)); |
1147 #endif | 958 #endif |
1148 | 959 |
1149 // Tear down the exit frame, pop the arguments, and return. | 960 // Tear down the exit frame, pop the arguments, and return. |
1150 if (FLAG_enable_ool_constant_pool) { | 961 #if V8_OOL_CONSTANT_POOL |
1151 ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); | 962 LoadP(kConstantPoolRegister, |
1152 } | 963 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); |
1153 mov(sp, Operand(fp)); | 964 #endif |
1154 ldm(ia_w, sp, fp.bit() | lr.bit()); | 965 mr(sp, fp); |
| 966 pop(fp); |
| 967 pop(r0); |
| 968 mtlr(r0); |
| 969 |
1155 if (argument_count.is_valid()) { | 970 if (argument_count.is_valid()) { |
1156 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); | 971 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2)); |
| 972 add(sp, sp, argument_count); |
1157 } | 973 } |
1158 } | 974 } |
1159 | 975 |
1160 | 976 |
1161 void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) { | 977 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { |
1162 if (use_eabi_hardfloat()) { | 978 Move(dst, d1); |
1163 Move(dst, d0); | |
1164 } else { | |
1165 vmov(dst, r0, r1); | |
1166 } | |
1167 } | 979 } |
1168 | 980 |
1169 | 981 |
1170 // On ARM this is just a synonym to make the purpose clear. | 982 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { |
1171 void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) { | 983 Move(dst, d1); |
1172 MovFromFloatResult(dst); | |
1173 } | 984 } |
1174 | 985 |
1175 | 986 |
1176 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | 987 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
1177 const ParameterCount& actual, | 988 const ParameterCount& actual, |
1178 Handle<Code> code_constant, | 989 Handle<Code> code_constant, |
1179 Register code_reg, | 990 Register code_reg, |
1180 Label* done, | 991 Label* done, |
1181 bool* definitely_mismatches, | 992 bool* definitely_mismatches, |
1182 InvokeFlag flag, | 993 InvokeFlag flag, |
1183 const CallWrapper& call_wrapper) { | 994 const CallWrapper& call_wrapper) { |
1184 bool definitely_matches = false; | 995 bool definitely_matches = false; |
1185 *definitely_mismatches = false; | 996 *definitely_mismatches = false; |
1186 Label regular_invoke; | 997 Label regular_invoke; |
1187 | 998 |
1188 // Check whether the expected and actual arguments count match. If not, | 999 // Check whether the expected and actual arguments count match. If not, |
1189 // setup registers according to contract with ArgumentsAdaptorTrampoline: | 1000 // setup registers according to contract with ArgumentsAdaptorTrampoline: |
1190 // r0: actual arguments count | 1001 // r3: actual arguments count |
1191 // r1: function (passed through to callee) | 1002 // r4: function (passed through to callee) |
1192 // r2: expected arguments count | 1003 // r5: expected arguments count |
1193 | 1004 |
1194 // The code below is made a lot easier because the calling code already sets | 1005 // The code below is made a lot easier because the calling code already sets |
1195 // up actual and expected registers according to the contract if values are | 1006 // up actual and expected registers according to the contract if values are |
1196 // passed in registers. | 1007 // passed in registers. |
1197 DCHECK(actual.is_immediate() || actual.reg().is(r0)); | 1008 |
1198 DCHECK(expected.is_immediate() || expected.reg().is(r2)); | 1009 // TODO: the following three checks are temporarily disabled |
1199 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3)); | 1010 // DCHECK(actual.is_immediate() || actual.reg().is(r3)); |
| 1011 // DCHECK(expected.is_immediate() || expected.reg().is(r5)); |
| 1012 // DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) |
| 1013 // || code_reg.is(r6)); |
1200 | 1014 |
1201 if (expected.is_immediate()) { | 1015 if (expected.is_immediate()) { |
1202 DCHECK(actual.is_immediate()); | 1016 DCHECK(actual.is_immediate()); |
1203 if (expected.immediate() == actual.immediate()) { | 1017 if (expected.immediate() == actual.immediate()) { |
1204 definitely_matches = true; | 1018 definitely_matches = true; |
1205 } else { | 1019 } else { |
1206 mov(r0, Operand(actual.immediate())); | 1020 mov(r3, Operand(actual.immediate())); |
1207 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; | 1021 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
1208 if (expected.immediate() == sentinel) { | 1022 if (expected.immediate() == sentinel) { |
1209 // Don't worry about adapting arguments for builtins that | 1023 // Don't worry about adapting arguments for builtins that |
1210 // don't want that done. Skip adaption code by making it look | 1024 // don't want that done. Skip adaption code by making it look |
1211 // like we have a match between expected and actual number of | 1025 // like we have a match between expected and actual number of |
1212 // arguments. | 1026 // arguments. |
1213 definitely_matches = true; | 1027 definitely_matches = true; |
1214 } else { | 1028 } else { |
1215 *definitely_mismatches = true; | 1029 *definitely_mismatches = true; |
1216 mov(r2, Operand(expected.immediate())); | 1030 mov(r5, Operand(expected.immediate())); |
1217 } | 1031 } |
1218 } | 1032 } |
1219 } else { | 1033 } else { |
1220 if (actual.is_immediate()) { | 1034 if (actual.is_immediate()) { |
1221 cmp(expected.reg(), Operand(actual.immediate())); | 1035 cmpi(expected.reg(), Operand(actual.immediate())); |
1222 b(eq, ®ular_invoke); | 1036 beq(®ular_invoke); |
1223 mov(r0, Operand(actual.immediate())); | 1037 mov(r3, Operand(actual.immediate())); |
1224 } else { | 1038 } else { |
1225 cmp(expected.reg(), Operand(actual.reg())); | 1039 cmp(expected.reg(), actual.reg()); |
1226 b(eq, ®ular_invoke); | 1040 beq(®ular_invoke); |
1227 } | 1041 } |
1228 } | 1042 } |
1229 | 1043 |
1230 if (!definitely_matches) { | 1044 if (!definitely_matches) { |
1231 if (!code_constant.is_null()) { | 1045 if (!code_constant.is_null()) { |
1232 mov(r3, Operand(code_constant)); | 1046 mov(r6, Operand(code_constant)); |
1233 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1047 addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); |
1234 } | 1048 } |
1235 | 1049 |
1236 Handle<Code> adaptor = | 1050 Handle<Code> adaptor = |
1237 isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 1051 isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
1238 if (flag == CALL_FUNCTION) { | 1052 if (flag == CALL_FUNCTION) { |
1239 call_wrapper.BeforeCall(CallSize(adaptor)); | 1053 call_wrapper.BeforeCall(CallSize(adaptor)); |
1240 Call(adaptor); | 1054 Call(adaptor); |
1241 call_wrapper.AfterCall(); | 1055 call_wrapper.AfterCall(); |
1242 if (!*definitely_mismatches) { | 1056 if (!*definitely_mismatches) { |
1243 b(done); | 1057 b(done); |
(...skipping 36 matching lines...) |
1280 } | 1094 } |
1281 | 1095 |
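The argument-count matching in InvokePrologue above boils down to a three-way decision. A sketch of that logic only (the names below are illustrative, not the V8 API):

  enum class InvokeKind { kDirect, kViaArgumentsAdaptor };

  // Direct invoke when the counts match or the callee opted out of argument
  // adaptation via the sentinel; otherwise go through the adaptor trampoline.
  InvokeKind ClassifyInvoke(int expected, int actual, int dont_adapt_sentinel) {
    if (expected == actual) return InvokeKind::kDirect;
    if (expected == dont_adapt_sentinel) return InvokeKind::kDirect;
    return InvokeKind::kViaArgumentsAdaptor;
  }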
1282 | 1096 |
1283 void MacroAssembler::InvokeFunction(Register fun, | 1097 void MacroAssembler::InvokeFunction(Register fun, |
1284 const ParameterCount& actual, | 1098 const ParameterCount& actual, |
1285 InvokeFlag flag, | 1099 InvokeFlag flag, |
1286 const CallWrapper& call_wrapper) { | 1100 const CallWrapper& call_wrapper) { |
1287 // You can't call a function without a valid frame. | 1101 // You can't call a function without a valid frame. |
1288 DCHECK(flag == JUMP_FUNCTION || has_frame()); | 1102 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
1289 | 1103 |
1290 // Contract with called JS functions requires that function is passed in r1. | 1104 // Contract with called JS functions requires that function is passed in r4. |
1291 DCHECK(fun.is(r1)); | 1105 DCHECK(fun.is(r4)); |
1292 | 1106 |
1293 Register expected_reg = r2; | 1107 Register expected_reg = r5; |
1294 Register code_reg = r3; | 1108 Register code_reg = r6; |
1295 | 1109 |
1296 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 1110 LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); |
1297 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 1111 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); |
1298 ldr(expected_reg, | 1112 LoadWordArith(expected_reg, |
1299 FieldMemOperand(code_reg, | 1113 FieldMemOperand(code_reg, |
1300 SharedFunctionInfo::kFormalParameterCountOffset)); | 1114 SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1115 #if !defined(V8_TARGET_ARCH_PPC64) |
1301 SmiUntag(expected_reg); | 1116 SmiUntag(expected_reg); |
1302 ldr(code_reg, | 1117 #endif |
1303 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 1118 LoadP(code_reg, |
| 1119 FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
1304 | 1120 |
1305 ParameterCount expected(expected_reg); | 1121 ParameterCount expected(expected_reg); |
1306 InvokeCode(code_reg, expected, actual, flag, call_wrapper); | 1122 InvokeCode(code_reg, expected, actual, flag, call_wrapper); |
1307 } | 1123 } |
1308 | 1124 |
1309 | 1125 |
1310 void MacroAssembler::InvokeFunction(Register function, | 1126 void MacroAssembler::InvokeFunction(Register function, |
1311 const ParameterCount& expected, | 1127 const ParameterCount& expected, |
1312 const ParameterCount& actual, | 1128 const ParameterCount& actual, |
1313 InvokeFlag flag, | 1129 InvokeFlag flag, |
1314 const CallWrapper& call_wrapper) { | 1130 const CallWrapper& call_wrapper) { |
1315 // You can't call a function without a valid frame. | 1131 // You can't call a function without a valid frame. |
1316 DCHECK(flag == JUMP_FUNCTION || has_frame()); | 1132 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
1317 | 1133 |
1318 // Contract with called JS functions requires that function is passed in r1. | 1134 // Contract with called JS functions requires that function is passed in r4. |
1319 DCHECK(function.is(r1)); | 1135 DCHECK(function.is(r4)); |
1320 | 1136 |
1321 // Get the function and setup the context. | 1137 // Get the function and setup the context. |
1322 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 1138 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); |
1323 | 1139 |
1324 // We call indirectly through the code field in the function to | 1140 // We call indirectly through the code field in the function to |
1325 // allow recompilation to take effect without changing any of the | 1141 // allow recompilation to take effect without changing any of the |
1326 // call sites. | 1142 // call sites. |
1327 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 1143 LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
1328 InvokeCode(r3, expected, actual, flag, call_wrapper); | 1144 InvokeCode(r6, expected, actual, flag, call_wrapper); |
1329 } | 1145 } |
1330 | 1146 |
1331 | 1147 |
1332 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 1148 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
1333 const ParameterCount& expected, | 1149 const ParameterCount& expected, |
1334 const ParameterCount& actual, | 1150 const ParameterCount& actual, |
1335 InvokeFlag flag, | 1151 InvokeFlag flag, |
1336 const CallWrapper& call_wrapper) { | 1152 const CallWrapper& call_wrapper) { |
1337 Move(r1, function); | 1153 Move(r4, function); |
1338 InvokeFunction(r1, expected, actual, flag, call_wrapper); | 1154 InvokeFunction(r4, expected, actual, flag, call_wrapper); |
1339 } | 1155 } |
1340 | 1156 |
1341 | 1157 |
1342 void MacroAssembler::IsObjectJSObjectType(Register heap_object, | 1158 void MacroAssembler::IsObjectJSObjectType(Register heap_object, |
1343 Register map, | 1159 Register map, |
1344 Register scratch, | 1160 Register scratch, |
1345 Label* fail) { | 1161 Label* fail) { |
1346 ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); | 1162 LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); |
1347 IsInstanceJSObjectType(map, scratch, fail); | 1163 IsInstanceJSObjectType(map, scratch, fail); |
1348 } | 1164 } |
1349 | 1165 |
1350 | 1166 |
1351 void MacroAssembler::IsInstanceJSObjectType(Register map, | 1167 void MacroAssembler::IsInstanceJSObjectType(Register map, |
1352 Register scratch, | 1168 Register scratch, |
1353 Label* fail) { | 1169 Label* fail) { |
1354 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 1170 lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
1355 cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 1171 cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
1356 b(lt, fail); | 1172 blt(fail); |
1357 cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 1173 cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
1358 b(gt, fail); | 1174 bgt(fail); |
1359 } | 1175 } |
1360 | 1176 |
1361 | 1177 |
1362 void MacroAssembler::IsObjectJSStringType(Register object, | 1178 void MacroAssembler::IsObjectJSStringType(Register object, |
1363 Register scratch, | 1179 Register scratch, |
1364 Label* fail) { | 1180 Label* fail) { |
1365 DCHECK(kNotStringTag != 0); | 1181 DCHECK(kNotStringTag != 0); |
1366 | 1182 |
1367 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 1183 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
1368 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 1184 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
1369 tst(scratch, Operand(kIsNotStringMask)); | 1185 andi(r0, scratch, Operand(kIsNotStringMask)); |
1370 b(ne, fail); | 1186 bne(fail, cr0); |
1371 } | 1187 } |
1372 | 1188 |
1373 | 1189 |
1374 void MacroAssembler::IsObjectNameType(Register object, | 1190 void MacroAssembler::IsObjectNameType(Register object, |
1375 Register scratch, | 1191 Register scratch, |
1376 Label* fail) { | 1192 Label* fail) { |
1377 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 1193 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
1378 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 1194 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
1379 cmp(scratch, Operand(LAST_NAME_TYPE)); | 1195 cmpi(scratch, Operand(LAST_NAME_TYPE)); |
1380 b(hi, fail); | 1196 bgt(fail); |
1381 } | 1197 } |
1382 | 1198 |
1383 | 1199 |
1384 void MacroAssembler::DebugBreak() { | 1200 void MacroAssembler::DebugBreak() { |
1385 mov(r0, Operand::Zero()); | 1201 li(r3, Operand::Zero()); |
1386 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); | 1202 mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); |
1387 CEntryStub ces(isolate(), 1); | 1203 CEntryStub ces(isolate(), 1); |
1388 DCHECK(AllowThisStubCall(&ces)); | 1204 DCHECK(AllowThisStubCall(&ces)); |
1389 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); | 1205 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); |
1390 } | 1206 } |
1391 | 1207 |
1392 | 1208 |
1393 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, | 1209 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, |
1394 int handler_index) { | 1210 int handler_index) { |
1395 // Adjust this code if not the case. | 1211 // Adjust this code if not the case. |
1396 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 1212 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
1397 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 1213 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
1398 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 1214 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
1399 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 1215 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
1400 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 1216 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
1401 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 1217 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
1402 | 1218 |
1403 // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available. | 1219 // For the JSEntry handler, we must preserve r1-r7; r0 and r8-r15 are available. |
1404 // We will build up the handler from the bottom by pushing on the stack. | 1220 // We want the stack to look like |
1405 // Set up the code object (r5) and the state (r6) for pushing. | 1221 // sp -> NextOffset |
| 1222 // CodeObject |
| 1223 // state |
| 1224 // context |
| 1225 // frame pointer |
| 1226 |
| 1227 // Link the current handler as the next handler. |
| 1228 mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 1229 LoadP(r0, MemOperand(r8)); |
| 1230 StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize)); |
| 1231 // Set this new handler as the current one. |
| 1232 StoreP(sp, MemOperand(r8)); |
| 1233 |
| 1234 if (kind == StackHandler::JS_ENTRY) { |
| 1235 li(r8, Operand::Zero()); // NULL frame pointer. |
| 1236 StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset)); |
| 1237 LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context. |
| 1238 StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset)); |
| 1239 } else { |
| 1240 // TODO: verify that fp is the right value to store here |
| 1241 StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset)); |
| 1242 StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset)); |
| 1243 } |
1406 unsigned state = | 1244 unsigned state = |
1407 StackHandler::IndexField::encode(handler_index) | | 1245 StackHandler::IndexField::encode(handler_index) | |
1408 StackHandler::KindField::encode(kind); | 1246 StackHandler::KindField::encode(kind); |
1409 mov(r5, Operand(CodeObject())); | 1247 LoadIntLiteral(r8, state); |
1410 mov(r6, Operand(state)); | 1248 StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset)); |
1411 | 1249 mov(r8, Operand(CodeObject())); |
1412 // Push the frame pointer, context, state, and code object. | 1250 StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset)); |
1413 if (kind == StackHandler::JS_ENTRY) { | |
1414 mov(cp, Operand(Smi::FromInt(0))); // Indicates no context. | |
1415 mov(ip, Operand::Zero()); // NULL frame pointer. | |
1416 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit()); | |
1417 } else { | |
1418 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit()); | |
1419 } | |
1420 | |
1421 // Link the current handler as the next handler. | |
1422 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | |
1423 ldr(r5, MemOperand(r6)); | |
1424 push(r5); | |
1425 // Set this new handler as the current one. | |
1426 str(sp, MemOperand(r6)); | |
1427 } | 1251 } |
1428 | 1252 |
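The STATIC_ASSERTs above fix the stack handler layout that PushTryHandler builds. As a sketch only (the struct and its field names are illustrative):

  #include <stdint.h>

  // Five pointer-sized slots per handler, lowest address first.
  struct StackHandlerSketch {
    intptr_t next;     // kNextOffset    == 0 * kPointerSize, link to previous handler
    intptr_t code;     // kCodeOffset    == 1 * kPointerSize
    intptr_t state;    // kStateOffset   == 2 * kPointerSize, index and kind bits
    intptr_t context;  // kContextOffset == 3 * kPointerSize
    intptr_t fp;       // kFPOffset      == 4 * kPointerSize
  };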
1429 | 1253 |
1430 void MacroAssembler::PopTryHandler() { | 1254 void MacroAssembler::PopTryHandler() { |
1431 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 1255 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
1432 pop(r1); | 1256 pop(r4); |
1433 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 1257 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
1434 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); | 1258 addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); |
1435 str(r1, MemOperand(ip)); | 1259 StoreP(r4, MemOperand(ip)); |
1436 } | 1260 } |
1437 | 1261 |
1438 | 1262 |
| 1263 // PPC - make use of ip as a temporary register |
1439 void MacroAssembler::JumpToHandlerEntry() { | 1264 void MacroAssembler::JumpToHandlerEntry() { |
1440 // Compute the handler entry address and jump to it. The handler table is | 1265 // Compute the handler entry address and jump to it. The handler table is |
1441 // a fixed array of (smi-tagged) code offsets. | 1266 // a fixed array of (smi-tagged) code offsets. |
1442 // r0 = exception, r1 = code object, r2 = state. | 1267 // r3 = exception, r4 = code object, r5 = state. |
1443 | 1268 #if V8_OOL_CONSTANT_POOL |
1444 ConstantPoolUnavailableScope constant_pool_unavailable(this); | 1269 ConstantPoolUnavailableScope constant_pool_unavailable(this); |
1445 if (FLAG_enable_ool_constant_pool) { | 1270 LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset)); |
1446 ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool. | 1271 #endif |
1447 } | 1272 LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table. |
1448 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table. | 1273 addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
1449 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 1274 srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index. |
1450 mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index. | 1275 slwi(ip, r5, Operand(kPointerSizeLog2)); |
1451 ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset. | 1276 add(ip, r6, ip); |
1452 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. | 1277 LoadP(r5, MemOperand(ip)); // Smi-tagged offset. |
1453 add(pc, r1, Operand::SmiUntag(r2)); // Jump | 1278 addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. |
| 1279 SmiUntag(ip, r5); |
| 1280 add(r0, r4, ip); |
| 1281 mtctr(r0); |
| 1282 bctr(); |
1454 } | 1283 } |
1455 | 1284 |
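What the sequence above computes, as scalar C++ (a sketch; smi-untagging of the table entry is elided):

  #include <stdint.h>

  // 'state' packs the handler index above the kind bits, so shifting by the
  // kind width recovers the index; the handler table maps that index to a
  // byte offset from the start of the code object.
  uintptr_t HandlerEntry(uintptr_t code_start, const int* table_offsets,
                         unsigned state, unsigned kind_width) {
    unsigned index = state >> kind_width;
    return code_start + table_offsets[index];
  }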
1456 | 1285 |
1457 void MacroAssembler::Throw(Register value) { | 1286 void MacroAssembler::Throw(Register value) { |
1458 // Adjust this code if not the case. | 1287 // Adjust this code if not the case. |
1459 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 1288 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
1460 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 1289 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
1461 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 1290 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
1462 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 1291 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
1463 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 1292 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
1464 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 1293 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
| 1294 Label skip; |
1465 | 1295 |
1466 // The exception is expected in r0. | 1296 // The exception is expected in r3. |
1467 if (!value.is(r0)) { | 1297 if (!value.is(r3)) { |
1468 mov(r0, value); | 1298 mr(r3, value); |
1469 } | 1299 } |
1470 // Drop the stack pointer to the top of the top handler. | 1300 // Drop the stack pointer to the top of the top handler. |
1471 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 1301 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
1472 ldr(sp, MemOperand(r3)); | 1302 LoadP(sp, MemOperand(r6)); |
1473 // Restore the next handler. | 1303 // Restore the next handler. |
1474 pop(r2); | 1304 pop(r5); |
1475 str(r2, MemOperand(r3)); | 1305 StoreP(r5, MemOperand(r6)); |
1476 | 1306 |
1477 // Get the code object (r1) and state (r2). Restore the context and frame | 1307 // Get the code object (r4) and state (r5). Restore the context and frame |
1478 // pointer. | 1308 // pointer. |
1479 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit()); | 1309 pop(r4); |
| 1310 pop(r5); |
| 1311 pop(cp); |
| 1312 pop(fp); |
1480 | 1313 |
1481 // If the handler is a JS frame, restore the context to the frame. | 1314 // If the handler is a JS frame, restore the context to the frame. |
1482 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp | 1315 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp |
1483 // or cp. | 1316 // or cp. |
1484 tst(cp, cp); | 1317 cmpi(cp, Operand::Zero()); |
1485 str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | 1318 beq(&skip); |
| 1319 StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 1320 bind(&skip); |
1486 | 1321 |
1487 JumpToHandlerEntry(); | 1322 JumpToHandlerEntry(); |
1488 } | 1323 } |
1489 | 1324 |
1490 | 1325 |
1491 void MacroAssembler::ThrowUncatchable(Register value) { | 1326 void MacroAssembler::ThrowUncatchable(Register value) { |
1492 // Adjust this code if not the case. | 1327 // Adjust this code if not the case. |
1493 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 1328 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
1494 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 1329 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
1495 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 1330 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
1496 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 1331 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
1497 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 1332 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
1498 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 1333 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
1499 | 1334 |
1500 // The exception is expected in r0. | 1335 // The exception is expected in r3. |
1501 if (!value.is(r0)) { | 1336 if (!value.is(r3)) { |
1502 mov(r0, value); | 1337 mr(r3, value); |
1503 } | 1338 } |
1504 // Drop the stack pointer to the top of the top stack handler. | 1339 // Drop the stack pointer to the top of the top stack handler. |
1505 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 1340 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
1506 ldr(sp, MemOperand(r3)); | 1341 LoadP(sp, MemOperand(r6)); |
1507 | 1342 |
1508 // Unwind the handlers until the ENTRY handler is found. | 1343 // Unwind the handlers until the ENTRY handler is found. |
1509 Label fetch_next, check_kind; | 1344 Label fetch_next, check_kind; |
1510 jmp(&check_kind); | 1345 b(&check_kind); |
1511 bind(&fetch_next); | 1346 bind(&fetch_next); |
1512 ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); | 1347 LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); |
1513 | 1348 |
1514 bind(&check_kind); | 1349 bind(&check_kind); |
1515 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); | 1350 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); |
1516 ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset)); | 1351 LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset)); |
1517 tst(r2, Operand(StackHandler::KindField::kMask)); | 1352 andi(r0, r5, Operand(StackHandler::KindField::kMask)); |
1518 b(ne, &fetch_next); | 1353 bne(&fetch_next, cr0); |
1519 | 1354 |
1520 // Set the top handler address to next handler past the top ENTRY handler. | 1355 // Set the top handler address to next handler past the top ENTRY handler. |
1521 pop(r2); | 1356 pop(r5); |
1522 str(r2, MemOperand(r3)); | 1357 StoreP(r5, MemOperand(r6)); |
1523 // Get the code object (r1) and state (r2). Clear the context and frame | 1358 // Get the code object (r4) and state (r5). Clear the context and frame |
1524 // pointer (0 was saved in the handler). | 1359 // pointer (0 was saved in the handler). |
1525 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit()); | 1360 pop(r4); |
| 1361 pop(r5); |
| 1362 pop(cp); |
| 1363 pop(fp); |
1526 | 1364 |
1527 JumpToHandlerEntry(); | 1365 JumpToHandlerEntry(); |
1528 } | 1366 } |
1529 | 1367 |
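The unwind loop above walks the handler chain through the kNextOffset links until it reaches a JS_ENTRY handler (kind bits zero). A scalar sketch using the slot layout from the STATIC_ASSERTs:

  #include <stdint.h>

  // 'kind_mask' corresponds to StackHandler::KindField::kMask; slot 0 is the
  // next-handler link and slot 2 is the state word, per the asserts above.
  uintptr_t* FindEntryHandler(uintptr_t* handler, uintptr_t kind_mask) {
    const int kNextSlot = 0, kStateSlot = 2;
    while ((handler[kStateSlot] & kind_mask) != 0) {
      handler = reinterpret_cast<uintptr_t*>(handler[kNextSlot]);
    }
    return handler;
  }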
1530 | 1368 |
1531 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 1369 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
1532 Register scratch, | 1370 Register scratch, |
1533 Label* miss) { | 1371 Label* miss) { |
1534 Label same_contexts; | 1372 Label same_contexts; |
1535 | 1373 |
1536 DCHECK(!holder_reg.is(scratch)); | 1374 DCHECK(!holder_reg.is(scratch)); |
1537 DCHECK(!holder_reg.is(ip)); | 1375 DCHECK(!holder_reg.is(ip)); |
1538 DCHECK(!scratch.is(ip)); | 1376 DCHECK(!scratch.is(ip)); |
1539 | 1377 |
1540 // Load current lexical context from the stack frame. | 1378 // Load current lexical context from the stack frame. |
1541 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1379 LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
1542 // In debug mode, make sure the lexical context is set. | 1380 // In debug mode, make sure the lexical context is set. |
1543 #ifdef DEBUG | 1381 #ifdef DEBUG |
1544 cmp(scratch, Operand::Zero()); | 1382 cmpi(scratch, Operand::Zero()); |
1545 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | 1383 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); |
1546 #endif | 1384 #endif |
1547 | 1385 |
1548 // Load the native context of the current context. | 1386 // Load the native context of the current context. |
1549 int offset = | 1387 int offset = |
1550 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; | 1388 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
1551 ldr(scratch, FieldMemOperand(scratch, offset)); | 1389 LoadP(scratch, FieldMemOperand(scratch, offset)); |
1552 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 1390 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
1553 | 1391 |
1554 // Check the context is a native context. | 1392 // Check the context is a native context. |
1555 if (emit_debug_code()) { | 1393 if (emit_debug_code()) { |
1556 // Cannot use ip as a temporary in this verification code. Due to the fact | 1394 // Cannot use ip as a temporary in this verification code. Due to the fact |
1557 // that ip is clobbered as part of cmp with an object Operand. | 1395 // that ip is clobbered as part of cmp with an object Operand. |
1558 push(holder_reg); // Temporarily save holder on the stack. | 1396 push(holder_reg); // Temporarily save holder on the stack. |
1559 // Read the first word and compare to the native_context_map. | 1397 // Read the first word and compare to the native_context_map. |
1560 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 1398 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
1561 LoadRoot(ip, Heap::kNativeContextMapRootIndex); | 1399 LoadRoot(ip, Heap::kNativeContextMapRootIndex); |
1562 cmp(holder_reg, ip); | 1400 cmp(holder_reg, ip); |
1563 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); | 1401 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); |
1564 pop(holder_reg); // Restore holder. | 1402 pop(holder_reg); // Restore holder. |
1565 } | 1403 } |
1566 | 1404 |
1567 // Check if both contexts are the same. | 1405 // Check if both contexts are the same. |
1568 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 1406 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); |
1569 cmp(scratch, Operand(ip)); | 1407 cmp(scratch, ip); |
1570 b(eq, &same_contexts); | 1408 beq(&same_contexts); |
1571 | 1409 |
1572 // Check the context is a native context. | 1410 // Check the context is a native context. |
1573 if (emit_debug_code()) { | 1411 if (emit_debug_code()) { |
1574 // Cannot use ip as a temporary in this verification code. Due to the fact | 1412 // Cannot use ip as a temporary in this verification code. Due to the fact |
1575 // that ip is clobbered as part of cmp with an object Operand. | 1413 // that ip is clobbered as part of cmp with an object Operand. |
1576 push(holder_reg); // Temporarily save holder on the stack. | 1414 push(holder_reg); // Temporarily save holder on the stack. |
1577 mov(holder_reg, ip); // Move ip to its holding place. | 1415 mr(holder_reg, ip); // Move ip to its holding place. |
1578 LoadRoot(ip, Heap::kNullValueRootIndex); | 1416 LoadRoot(ip, Heap::kNullValueRootIndex); |
1579 cmp(holder_reg, ip); | 1417 cmp(holder_reg, ip); |
1580 Check(ne, kJSGlobalProxyContextShouldNotBeNull); | 1418 Check(ne, kJSGlobalProxyContextShouldNotBeNull); |
1581 | 1419 |
1582 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); | 1420 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); |
1583 LoadRoot(ip, Heap::kNativeContextMapRootIndex); | 1421 LoadRoot(ip, Heap::kNativeContextMapRootIndex); |
1584 cmp(holder_reg, ip); | 1422 cmp(holder_reg, ip); |
1585 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); | 1423 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); |
1586 // Restore ip is not needed. ip is reloaded below. | 1424 // Restore ip is not needed. ip is reloaded below. |
1587 pop(holder_reg); // Restore holder. | 1425 pop(holder_reg); // Restore holder. |
1588 // Restore ip to holder's context. | 1426 // Restore ip to holder's context. |
1589 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 1427 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); |
1590 } | 1428 } |
1591 | 1429 |
1592 // Check that the security token in the calling global object is | 1430 // Check that the security token in the calling global object is |
1593 // compatible with the security token in the receiving global | 1431 // compatible with the security token in the receiving global |
1594 // object. | 1432 // object. |
1595 int token_offset = Context::kHeaderSize + | 1433 int token_offset = Context::kHeaderSize + |
1596 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 1434 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
1597 | 1435 |
1598 ldr(scratch, FieldMemOperand(scratch, token_offset)); | 1436 LoadP(scratch, FieldMemOperand(scratch, token_offset)); |
1599 ldr(ip, FieldMemOperand(ip, token_offset)); | 1437 LoadP(ip, FieldMemOperand(ip, token_offset)); |
1600 cmp(scratch, Operand(ip)); | 1438 cmp(scratch, ip); |
1601 b(ne, miss); | 1439 bne(miss); |
1602 | 1440 |
1603 bind(&same_contexts); | 1441 bind(&same_contexts); |
1604 } | 1442 } |
1605 | 1443 |
1606 | 1444 |
1607 // Compute the hash code from the untagged key. This must be kept in sync with | 1445 // Compute the hash code from the untagged key. This must be kept in sync with |
1608 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in | 1446 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in |
1609 // code-stub-hydrogen.cc | 1447 // code-stub-hydrogen.cc |
1610 void MacroAssembler::GetNumberHash(Register t0, Register scratch) { | 1448 void MacroAssembler::GetNumberHash(Register t0, Register scratch) { |
1611 // First of all we assign the hash seed to scratch. | 1449 // First of all we assign the hash seed to scratch. |
1612 LoadRoot(scratch, Heap::kHashSeedRootIndex); | 1450 LoadRoot(scratch, Heap::kHashSeedRootIndex); |
1613 SmiUntag(scratch); | 1451 SmiUntag(scratch); |
1614 | 1452 |
1615 // Xor original key with a seed. | 1453 // Xor original key with a seed. |
1616 eor(t0, t0, Operand(scratch)); | 1454 xor_(t0, t0, scratch); |
1617 | 1455 |
1618 // Compute the hash code from the untagged key. This must be kept in sync | 1456 // Compute the hash code from the untagged key. This must be kept in sync |
1619 // with ComputeIntegerHash in utils.h. | 1457 // with ComputeIntegerHash in utils.h. |
1620 // | 1458 // |
1621 // hash = ~hash + (hash << 15); | 1459 // hash = ~hash + (hash << 15); |
1622 mvn(scratch, Operand(t0)); | 1460 notx(scratch, t0); |
1623 add(t0, scratch, Operand(t0, LSL, 15)); | 1461 slwi(t0, t0, Operand(15)); |
| 1462 add(t0, scratch, t0); |
1624 // hash = hash ^ (hash >> 12); | 1463 // hash = hash ^ (hash >> 12); |
1625 eor(t0, t0, Operand(t0, LSR, 12)); | 1464 srwi(scratch, t0, Operand(12)); |
| 1465 xor_(t0, t0, scratch); |
1626 // hash = hash + (hash << 2); | 1466 // hash = hash + (hash << 2); |
1627 add(t0, t0, Operand(t0, LSL, 2)); | 1467 slwi(scratch, t0, Operand(2)); |
| 1468 add(t0, t0, scratch); |
1628 // hash = hash ^ (hash >> 4); | 1469 // hash = hash ^ (hash >> 4); |
1629 eor(t0, t0, Operand(t0, LSR, 4)); | 1470 srwi(scratch, t0, Operand(4)); |
| 1471 xor_(t0, t0, scratch); |
1630 // hash = hash * 2057; | 1472 // hash = hash * 2057; |
1631 mov(scratch, Operand(t0, LSL, 11)); | 1473 mr(r0, t0); |
1632 add(t0, t0, Operand(t0, LSL, 3)); | 1474 slwi(scratch, t0, Operand(3)); |
| 1475 add(t0, t0, scratch); |
| 1476 slwi(scratch, r0, Operand(11)); |
1633 add(t0, t0, scratch); | 1477 add(t0, t0, scratch); |
1634 // hash = hash ^ (hash >> 16); | 1478 // hash = hash ^ (hash >> 16); |
1635 eor(t0, t0, Operand(t0, LSR, 16)); | 1479 srwi(scratch, t0, Operand(16)); |
| 1480 xor_(t0, t0, scratch); |
1636 } | 1481 } |
1637 | 1482 |
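For reference, the scalar computation the sequence above emits, matching the step-by-step comments (a sketch mirroring ComputeIntegerHash):

  #include <stdint.h>

  // Seeded integer hash; 'seed' is the untagged value loaded from
  // Heap::kHashSeedRootIndex.
  uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
    uint32_t hash = key ^ seed;
    hash = ~hash + (hash << 15);
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;  // emitted as hash + (hash << 3) + (hash << 11)
    hash = hash ^ (hash >> 16);
    return hash;
  }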
1638 | 1483 |
1639 void MacroAssembler::LoadFromNumberDictionary(Label* miss, | 1484 void MacroAssembler::LoadFromNumberDictionary(Label* miss, |
1640 Register elements, | 1485 Register elements, |
1641 Register key, | 1486 Register key, |
1642 Register result, | 1487 Register result, |
1643 Register t0, | 1488 Register t0, |
1644 Register t1, | 1489 Register t1, |
1645 Register t2) { | 1490 Register t2) { |
(...skipping 15 matching lines...) |
1661 // t0 - holds the untagged key on entry and holds the hash once computed. | 1506 // t0 - holds the untagged key on entry and holds the hash once computed. |
1662 // | 1507 // |
1663 // t1 - used to hold the capacity mask of the dictionary | 1508 // t1 - used to hold the capacity mask of the dictionary |
1664 // | 1509 // |
1665 // t2 - used for the index into the dictionary. | 1510 // t2 - used for the index into the dictionary. |
1666 Label done; | 1511 Label done; |
1667 | 1512 |
1668 GetNumberHash(t0, t1); | 1513 GetNumberHash(t0, t1); |
1669 | 1514 |
1670 // Compute the capacity mask. | 1515 // Compute the capacity mask. |
1671 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); | 1516 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); |
1672 SmiUntag(t1); | 1517 SmiUntag(t1); |
1673 sub(t1, t1, Operand(1)); | 1518 subi(t1, t1, Operand(1)); |
1674 | 1519 |
1675 // Generate an unrolled loop that performs a few probes before giving up. | 1520 // Generate an unrolled loop that performs a few probes before giving up. |
1676 for (int i = 0; i < kNumberDictionaryProbes; i++) { | 1521 for (int i = 0; i < kNumberDictionaryProbes; i++) { |
1677 // Use t2 for index calculations and keep the hash intact in t0. | 1522 // Use t2 for index calculations and keep the hash intact in t0. |
1678 mov(t2, t0); | 1523 mr(t2, t0); |
1679 // Compute the masked index: (hash + i + i * i) & mask. | 1524 // Compute the masked index: (hash + i + i * i) & mask. |
1680 if (i > 0) { | 1525 if (i > 0) { |
1681 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); | 1526 addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); |
1682 } | 1527 } |
1683 and_(t2, t2, Operand(t1)); | 1528 and_(t2, t2, t1); |
1684 | 1529 |
1685 // Scale the index by multiplying by the element size. | 1530 // Scale the index by multiplying by the element size. |
1686 DCHECK(SeededNumberDictionary::kEntrySize == 3); | 1531 DCHECK(SeededNumberDictionary::kEntrySize == 3); |
1687 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 | 1532 slwi(ip, t2, Operand(1)); |
| 1533 add(t2, t2, ip); // t2 = t2 * 3 |
1688 | 1534 |
1689 // Check if the key is identical to the name. | 1535 // Check if the key is identical to the name. |
1690 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2)); | 1536 slwi(t2, t2, Operand(kPointerSizeLog2)); |
1691 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); | 1537 add(t2, elements, t2); |
1692 cmp(key, Operand(ip)); | 1538 LoadP(ip, |
| 1539 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); |
| 1540 cmp(key, ip); |
1693 if (i != kNumberDictionaryProbes - 1) { | 1541 if (i != kNumberDictionaryProbes - 1) { |
1694 b(eq, &done); | 1542 beq(&done); |
1695 } else { | 1543 } else { |
1696 b(ne, miss); | 1544 bne(miss); |
1697 } | 1545 } |
1698 } | 1546 } |
1699 | 1547 |
1700 bind(&done); | 1548 bind(&done); |
1701 // Check that the value is a normal property. | 1549 // Check that the value is a normal property. |
1702 // t2: elements + (index * kPointerSize) | 1550 // t2: elements + (index * kPointerSize) |
1703 const int kDetailsOffset = | 1551 const int kDetailsOffset = |
1704 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; | 1552 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; |
1705 ldr(t1, FieldMemOperand(t2, kDetailsOffset)); | 1553 LoadP(t1, FieldMemOperand(t2, kDetailsOffset)); |
1706 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask))); | 1554 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask)); |
1707 b(ne, miss); | 1555 and_(r0, t1, ip, SetRC); |
| 1556 bne(miss, cr0); |
1708 | 1557 |
1709 // Get the value at the masked, scaled index and return. | 1558 // Get the value at the masked, scaled index and return. |
1710 const int kValueOffset = | 1559 const int kValueOffset = |
1711 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 1560 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
1712 ldr(result, FieldMemOperand(t2, kValueOffset)); | 1561 LoadP(result, FieldMemOperand(t2, kValueOffset)); |
1713 } | 1562 } |
1714 | 1563 |
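The unrolled loop above probes the dictionary a fixed number of times, three pointer-sized slots per entry (key, value, details) as the DCHECK states. A scalar sketch of the probing only; GetProbeOffset and the other names are illustrative:

  #include <stdint.h>

  // 'elements' points at the first entry's key slot and 'capacity' is a power
  // of two. Returns a pointer to the value slot on a hit, or nullptr on miss.
  const intptr_t* ProbeNumberDictionary(const intptr_t* elements, int capacity,
                                        uint32_t hash, intptr_t key,
                                        int (*GetProbeOffset)(int),
                                        int num_probes) {
    const int kEntrySlots = 3;  // key, value, details
    const uint32_t mask = capacity - 1;
    for (int i = 0; i < num_probes; i++) {
      uint32_t index = hash;
      if (i > 0) index += GetProbeOffset(i);
      index &= mask;
      const intptr_t* entry = elements + index * kEntrySlots;
      if (entry[0] == key) return &entry[1];  // value follows the key slot
    }
    return nullptr;  // caller would branch to the miss label
  }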
1715 | 1564 |
1716 void MacroAssembler::Allocate(int object_size, | 1565 void MacroAssembler::Allocate(int object_size, |
1717 Register result, | 1566 Register result, |
1718 Register scratch1, | 1567 Register scratch1, |
1719 Register scratch2, | 1568 Register scratch2, |
1720 Label* gc_required, | 1569 Label* gc_required, |
1721 AllocationFlags flags) { | 1570 AllocationFlags flags) { |
1722 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); | 1571 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); |
1723 if (!FLAG_inline_new) { | 1572 if (!FLAG_inline_new) { |
1724 if (emit_debug_code()) { | 1573 if (emit_debug_code()) { |
1725 // Trash the registers to simulate an allocation failure. | 1574 // Trash the registers to simulate an allocation failure. |
1726 mov(result, Operand(0x7091)); | 1575 li(result, Operand(0x7091)); |
1727 mov(scratch1, Operand(0x7191)); | 1576 li(scratch1, Operand(0x7191)); |
1728 mov(scratch2, Operand(0x7291)); | 1577 li(scratch2, Operand(0x7291)); |
1729 } | 1578 } |
1730 jmp(gc_required); | 1579 b(gc_required); |
1731 return; | 1580 return; |
1732 } | 1581 } |
1733 | 1582 |
1734 DCHECK(!result.is(scratch1)); | 1583 DCHECK(!result.is(scratch1)); |
1735 DCHECK(!result.is(scratch2)); | 1584 DCHECK(!result.is(scratch2)); |
1736 DCHECK(!scratch1.is(scratch2)); | 1585 DCHECK(!scratch1.is(scratch2)); |
1737 DCHECK(!scratch1.is(ip)); | 1586 DCHECK(!scratch1.is(ip)); |
1738 DCHECK(!scratch2.is(ip)); | 1587 DCHECK(!scratch2.is(ip)); |
1739 | 1588 |
1740 // Make object size into bytes. | 1589 // Make object size into bytes. |
1741 if ((flags & SIZE_IN_WORDS) != 0) { | 1590 if ((flags & SIZE_IN_WORDS) != 0) { |
1742 object_size *= kPointerSize; | 1591 object_size *= kPointerSize; |
1743 } | 1592 } |
1744 DCHECK_EQ(0, object_size & kObjectAlignmentMask); | 1593 DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask)); |
1745 | 1594 |
1746 // Check relative positions of allocation top and limit addresses. | 1595 // Check relative positions of allocation top and limit addresses. |
1747 // The values must be adjacent in memory to allow the use of LDM. | |
1748 // Also, assert that the registers are numbered such that the values | |
1749 // are loaded in the correct order. | |
1750 ExternalReference allocation_top = | 1596 ExternalReference allocation_top = |
1751 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 1597 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
1752 ExternalReference allocation_limit = | 1598 ExternalReference allocation_limit = |
1753 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1599 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1754 | 1600 |
1755 intptr_t top = | 1601 intptr_t top = |
1756 reinterpret_cast<intptr_t>(allocation_top.address()); | 1602 reinterpret_cast<intptr_t>(allocation_top.address()); |
1757 intptr_t limit = | 1603 intptr_t limit = |
1758 reinterpret_cast<intptr_t>(allocation_limit.address()); | 1604 reinterpret_cast<intptr_t>(allocation_limit.address()); |
1759 DCHECK((limit - top) == kPointerSize); | 1605 DCHECK((limit - top) == kPointerSize); |
1760 DCHECK(result.code() < ip.code()); | |
1761 | 1606 |
1762 // Set up allocation top address register. | 1607 // Set up allocation top address register. |
1763 Register topaddr = scratch1; | 1608 Register topaddr = scratch1; |
1764 mov(topaddr, Operand(allocation_top)); | 1609 mov(topaddr, Operand(allocation_top)); |
1765 | 1610 |
1766 // This code stores a temporary value in ip. This is OK, as the code below | 1611 // This code stores a temporary value in ip. This is OK, as the code below |
1767 // does not need ip for implicit literal generation. | 1612 // does not need ip for implicit literal generation. |
1768 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1613 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
1769 // Load allocation top into result and allocation limit into ip. | 1614 // Load allocation top into result and allocation limit into ip. |
1770 ldm(ia, topaddr, result.bit() | ip.bit()); | 1615 LoadP(result, MemOperand(topaddr)); |
| 1616 LoadP(ip, MemOperand(topaddr, kPointerSize)); |
1771 } else { | 1617 } else { |
1772 if (emit_debug_code()) { | 1618 if (emit_debug_code()) { |
1773 // Assert that result actually contains top on entry. ip is used | 1619 // Assert that result actually contains top on entry. ip is used |
1774 // immediately below so this use of ip does not cause difference with | 1620 // immediately below so this use of ip does not cause difference with |
1775 // respect to register content between debug and release mode. | 1621 // respect to register content between debug and release mode. |
1776 ldr(ip, MemOperand(topaddr)); | 1622 LoadP(ip, MemOperand(topaddr)); |
1777 cmp(result, ip); | 1623 cmp(result, ip); |
1778 Check(eq, kUnexpectedAllocationTop); | 1624 Check(eq, kUnexpectedAllocationTop); |
1779 } | 1625 } |
1780 // Load allocation limit into ip. Result already contains allocation top. | 1626 // Load allocation limit into ip. Result already contains allocation top. |
1781 ldr(ip, MemOperand(topaddr, limit - top)); | 1627 LoadP(ip, MemOperand(topaddr, limit - top), r0); |
1782 } | 1628 } |
1783 | 1629 |
1784 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1630 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1785 // Align the next allocation. Storing the filler map without checking top is | 1631 // Align the next allocation. Storing the filler map without checking top is |
1786 // safe in new-space because the limit of the heap is aligned there. | 1632 // safe in new-space because the limit of the heap is aligned there. |
1787 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1633 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
| 1634 #if V8_TARGET_ARCH_PPC64 |
| 1635 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 1636 #else |
1788 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1637 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
1789 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); | 1638 andi(scratch2, result, Operand(kDoubleAlignmentMask)); |
1790 Label aligned; | 1639 Label aligned; |
1791 b(eq, &aligned); | 1640 beq(&aligned, cr0); |
1792 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1641 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1793 cmp(result, Operand(ip)); | 1642 cmpl(result, ip); |
1794 b(hs, gc_required); | 1643 bge(gc_required); |
1795 } | 1644 } |
1796 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); | 1645 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); |
1797 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); | 1646 stw(scratch2, MemOperand(result)); |
| 1647 addi(result, result, Operand(kDoubleSize / 2)); |
1798 bind(&aligned); | 1648 bind(&aligned); |
| 1649 #endif |
1799 } | 1650 } |
1800 | 1651 |
1801 // Calculate new top and bail out if new space is exhausted. Use result | 1652 // Calculate new top and bail out if new space is exhausted. Use result |
1802 // to calculate the new top. We must preserve the ip register at this | 1653 // to calculate the new top. |
1803 // point, so we cannot just use add(). | 1654 li(r0, Operand(-1)); |
1804 DCHECK(object_size > 0); | 1655 if (is_int16(object_size)) { |
1805 Register source = result; | 1656 addic(scratch2, result, Operand(object_size)); |
1806 Condition cond = al; | 1657 } else { |
1807 int shift = 0; | 1658 mov(scratch2, Operand(object_size)); |
1808 while (object_size != 0) { | 1659 addc(scratch2, result, scratch2); |
1809 if (((object_size >> shift) & 0x03) == 0) { | |
1810 shift += 2; | |
1811 } else { | |
1812 int bits = object_size & (0xff << shift); | |
1813 object_size -= bits; | |
1814 shift += 8; | |
1815 Operand bits_operand(bits); | |
1816 DCHECK(bits_operand.instructions_required(this) == 1); | |
1817 add(scratch2, source, bits_operand, SetCC, cond); | |
1818 source = scratch2; | |
1819 cond = cc; | |
1820 } | |
1821 } | 1660 } |
1822 b(cs, gc_required); | 1661 addze(r0, r0, LeaveOE, SetRC); |
1823 cmp(scratch2, Operand(ip)); | 1662 beq(gc_required, cr0); |
1824 b(hi, gc_required); | 1663 cmpl(scratch2, ip); |
1825 str(scratch2, MemOperand(topaddr)); | 1664 bgt(gc_required); |
| 1665 StoreP(scratch2, MemOperand(topaddr)); |
1826 | 1666 |
1827 // Tag object if requested. | 1667 // Tag object if requested. |
1828 if ((flags & TAG_OBJECT) != 0) { | 1668 if ((flags & TAG_OBJECT) != 0) { |
1829 add(result, result, Operand(kHeapObjectTag)); | 1669 addi(result, result, Operand(kHeapObjectTag)); |
1830 } | 1670 } |
1831 } | 1671 } |
1832 | 1672 |
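Note on the new-top check above: the PPC side replaces ARM's add-with-SetCC / b(cs, ...) pair with li(r0, -1), addic/addc, addze(r0, r0, LeaveOE, SetRC) and beq(gc_required, cr0). addze folds the carry out of the addition into r0, so r0 becomes zero exactly when the add wrapped. A minimal C++ sketch of the property being tested (the function name is illustrative, not a V8 API):

    #include <stdint.h>

    // True when top + size wraps around, which is what the
    // addic/addze/beq(cr0) sequence detects via the carry bit.
    static bool NewTopOverflows(uintptr_t top, uintptr_t size, uintptr_t* new_top) {
      *new_top = top + size;
      return *new_top < top;  // carry out of the addition => overflow
    }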
1833 | 1673 |
1834 void MacroAssembler::Allocate(Register object_size, | 1674 void MacroAssembler::Allocate(Register object_size, |
1835 Register result, | 1675 Register result, |
1836 Register scratch1, | 1676 Register scratch1, |
1837 Register scratch2, | 1677 Register scratch2, |
1838 Label* gc_required, | 1678 Label* gc_required, |
1839 AllocationFlags flags) { | 1679 AllocationFlags flags) { |
1840 if (!FLAG_inline_new) { | 1680 if (!FLAG_inline_new) { |
1841 if (emit_debug_code()) { | 1681 if (emit_debug_code()) { |
1842 // Trash the registers to simulate an allocation failure. | 1682 // Trash the registers to simulate an allocation failure. |
1843 mov(result, Operand(0x7091)); | 1683 li(result, Operand(0x7091)); |
1844 mov(scratch1, Operand(0x7191)); | 1684 li(scratch1, Operand(0x7191)); |
1845 mov(scratch2, Operand(0x7291)); | 1685 li(scratch2, Operand(0x7291)); |
1846 } | 1686 } |
1847 jmp(gc_required); | 1687 b(gc_required); |
1848 return; | 1688 return; |
1849 } | 1689 } |
1850 | 1690 |
1851 // Assert that the register arguments are different and that none of | 1691 // Assert that the register arguments are different and that none of |
1852 // them are ip. ip is used explicitly in the code generated below. | 1692 // them are ip. ip is used explicitly in the code generated below. |
1853 DCHECK(!result.is(scratch1)); | 1693 DCHECK(!result.is(scratch1)); |
1854 DCHECK(!result.is(scratch2)); | 1694 DCHECK(!result.is(scratch2)); |
1855 DCHECK(!scratch1.is(scratch2)); | 1695 DCHECK(!scratch1.is(scratch2)); |
1856 DCHECK(!object_size.is(ip)); | 1696 DCHECK(!object_size.is(ip)); |
1857 DCHECK(!result.is(ip)); | 1697 DCHECK(!result.is(ip)); |
1858 DCHECK(!scratch1.is(ip)); | 1698 DCHECK(!scratch1.is(ip)); |
1859 DCHECK(!scratch2.is(ip)); | 1699 DCHECK(!scratch2.is(ip)); |
1860 | 1700 |
1861 // Check relative positions of allocation top and limit addresses. | 1701 // Check relative positions of allocation top and limit addresses. |
1862 // The values must be adjacent in memory to allow the use of LDM. | |
1863 // Also, assert that the registers are numbered such that the values | |
1864 // are loaded in the correct order. | |
1865 ExternalReference allocation_top = | 1702 ExternalReference allocation_top = |
1866 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 1703 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
1867 ExternalReference allocation_limit = | 1704 ExternalReference allocation_limit = |
1868 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1705 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1869 intptr_t top = | 1706 intptr_t top = |
1870 reinterpret_cast<intptr_t>(allocation_top.address()); | 1707 reinterpret_cast<intptr_t>(allocation_top.address()); |
1871 intptr_t limit = | 1708 intptr_t limit = |
1872 reinterpret_cast<intptr_t>(allocation_limit.address()); | 1709 reinterpret_cast<intptr_t>(allocation_limit.address()); |
1873 DCHECK((limit - top) == kPointerSize); | 1710 DCHECK((limit - top) == kPointerSize); |
1874 DCHECK(result.code() < ip.code()); | |
1875 | 1711 |
1876 // Set up allocation top address. | 1712 // Set up allocation top address. |
1877 Register topaddr = scratch1; | 1713 Register topaddr = scratch1; |
1878 mov(topaddr, Operand(allocation_top)); | 1714 mov(topaddr, Operand(allocation_top)); |
1879 | 1715 |
1880 // This code stores a temporary value in ip. This is OK, as the code below | 1716 // This code stores a temporary value in ip. This is OK, as the code below |
1881 // does not need ip for implicit literal generation. | 1717 // does not need ip for implicit literal generation. |
1882 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1718 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
1883 // Load allocation top into result and allocation limit into ip. | 1719 // Load allocation top into result and allocation limit into ip. |
1884 ldm(ia, topaddr, result.bit() | ip.bit()); | 1720 LoadP(result, MemOperand(topaddr)); |
| 1721 LoadP(ip, MemOperand(topaddr, kPointerSize)); |
1885 } else { | 1722 } else { |
1886 if (emit_debug_code()) { | 1723 if (emit_debug_code()) { |
1887 // Assert that result actually contains top on entry. ip is used | 1724 // Assert that result actually contains top on entry. ip is used |
1888 // immediately below so this use of ip does not cause difference with | 1725 // immediately below so this use of ip does not cause difference with |
1889 // respect to register content between debug and release mode. | 1726 // respect to register content between debug and release mode. |
1890 ldr(ip, MemOperand(topaddr)); | 1727 LoadP(ip, MemOperand(topaddr)); |
1891 cmp(result, ip); | 1728 cmp(result, ip); |
1892 Check(eq, kUnexpectedAllocationTop); | 1729 Check(eq, kUnexpectedAllocationTop); |
1893 } | 1730 } |
1894 // Load allocation limit into ip. Result already contains allocation top. | 1731 // Load allocation limit into ip. Result already contains allocation top. |
1895 ldr(ip, MemOperand(topaddr, limit - top)); | 1732 LoadP(ip, MemOperand(topaddr, limit - top)); |
1896 } | 1733 } |
1897 | 1734 |
1898 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1735 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1899 // Align the next allocation. Storing the filler map without checking top is | 1736 // Align the next allocation. Storing the filler map without checking top is |
1900 // safe in new-space because the limit of the heap is aligned there. | 1737 // safe in new-space because the limit of the heap is aligned there. |
1901 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1738 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
1902 DCHECK(kPointerAlignment * 2 == kDoubleAlignment); | 1739 #if V8_TARGET_ARCH_PPC64 |
1903 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); | 1740 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 1741 #else |
| 1742 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
| 1743 andi(scratch2, result, Operand(kDoubleAlignmentMask)); |
1904 Label aligned; | 1744 Label aligned; |
1905 b(eq, &aligned); | 1745 beq(&aligned, cr0); |
1906 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1746 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1907 cmp(result, Operand(ip)); | 1747 cmpl(result, ip); |
1908 b(hs, gc_required); | 1748 bge(gc_required); |
1909 } | 1749 } |
1910 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); | 1750 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); |
1911 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); | 1751 stw(scratch2, MemOperand(result)); |
| 1752 addi(result, result, Operand(kDoubleSize / 2)); |
1912 bind(&aligned); | 1753 bind(&aligned); |
| 1754 #endif |
1913 } | 1755 } |
1914 | 1756 |
1915 // Calculate new top and bail out if new space is exhausted. Use result | 1757 // Calculate new top and bail out if new space is exhausted. Use result |
1916 // to calculate the new top. Object size may be in words so a shift is | 1758 // to calculate the new top. Object size may be in words so a shift is |
1917 // required to get the number of bytes. | 1759 // required to get the number of bytes. |
| 1760 li(r0, Operand(-1)); |
1918 if ((flags & SIZE_IN_WORDS) != 0) { | 1761 if ((flags & SIZE_IN_WORDS) != 0) { |
1919 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC); | 1762 ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2)); |
| 1763 addc(scratch2, result, scratch2); |
1920 } else { | 1764 } else { |
1921 add(scratch2, result, Operand(object_size), SetCC); | 1765 addc(scratch2, result, object_size); |
1922 } | 1766 } |
1923 b(cs, gc_required); | 1767 addze(r0, r0, LeaveOE, SetRC); |
1924 cmp(scratch2, Operand(ip)); | 1768 beq(gc_required, cr0); |
1925 b(hi, gc_required); | 1769 cmpl(scratch2, ip); |
| 1770 bgt(gc_required); |
1926 | 1771 |
1927 // Update allocation top. result temporarily holds the new top. | 1772 // Update allocation top. result temporarily holds the new top. |
1928 if (emit_debug_code()) { | 1773 if (emit_debug_code()) { |
1929 tst(scratch2, Operand(kObjectAlignmentMask)); | 1774 andi(r0, scratch2, Operand(kObjectAlignmentMask)); |
1930 Check(eq, kUnalignedAllocationInNewSpace); | 1775 Check(eq, kUnalignedAllocationInNewSpace, cr0); |
1931 } | 1776 } |
1932 str(scratch2, MemOperand(topaddr)); | 1777 StoreP(scratch2, MemOperand(topaddr)); |
1933 | 1778 |
1934 // Tag object if requested. | 1779 // Tag object if requested. |
1935 if ((flags & TAG_OBJECT) != 0) { | 1780 if ((flags & TAG_OBJECT) != 0) { |
1936 add(result, result, Operand(kHeapObjectTag)); | 1781 addi(result, result, Operand(kHeapObjectTag)); |
1937 } | 1782 } |
1938 } | 1783 } |
1939 | 1784 |
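Where DOUBLE_ALIGNMENT is requested on 32-bit targets, both Allocate variants now realign by storing a one-pointer filler map and advancing the result by half a double (stw + addi), instead of ARM's str with PostIndex. A rough sketch of that fixup, assuming kDoubleAlignment == 8 and a 4-byte pointer; the helper name is made up for illustration:

    #include <stdint.h>

    // 'result' is already 4-byte aligned; if it is not 8-byte aligned, plant a
    // one-word filler so the object proper starts on a double boundary.
    static uintptr_t AlignAllocationForDouble(uintptr_t result, uint32_t filler_map) {
      if (result & 7) {                               // kDoubleAlignmentMask
        *reinterpret_cast<uint32_t*>(result) = filler_map;
        result += 4;                                  // kDoubleSize / 2
      }
      return result;
    }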
1940 | 1785 |
1941 void MacroAssembler::UndoAllocationInNewSpace(Register object, | 1786 void MacroAssembler::UndoAllocationInNewSpace(Register object, |
1942 Register scratch) { | 1787 Register scratch) { |
1943 ExternalReference new_space_allocation_top = | 1788 ExternalReference new_space_allocation_top = |
1944 ExternalReference::new_space_allocation_top_address(isolate()); | 1789 ExternalReference::new_space_allocation_top_address(isolate()); |
1945 | 1790 |
1946 // Make sure the object has no tag before resetting top. | 1791 // Make sure the object has no tag before resetting top. |
1947 and_(object, object, Operand(~kHeapObjectTagMask)); | 1792 mov(r0, Operand(~kHeapObjectTagMask)); |
| 1793 and_(object, object, r0); |
| 1794 // (andi cannot encode ~kHeapObjectTagMask, so the mask goes through r0.) |
1948 #ifdef DEBUG | 1795 #ifdef DEBUG |
1949 // Check that the object un-allocated is below the current top. | 1796 // Check that the object un-allocated is below the current top. |
1950 mov(scratch, Operand(new_space_allocation_top)); | 1797 mov(scratch, Operand(new_space_allocation_top)); |
1951 ldr(scratch, MemOperand(scratch)); | 1798 LoadP(scratch, MemOperand(scratch)); |
1952 cmp(object, scratch); | 1799 cmp(object, scratch); |
1953 Check(lt, kUndoAllocationOfNonAllocatedMemory); | 1800 Check(lt, kUndoAllocationOfNonAllocatedMemory); |
1954 #endif | 1801 #endif |
1955 // Write the address of the object to un-allocate as the current top. | 1802 // Write the address of the object to un-allocate as the current top. |
1956 mov(scratch, Operand(new_space_allocation_top)); | 1803 mov(scratch, Operand(new_space_allocation_top)); |
1957 str(object, MemOperand(scratch)); | 1804 StoreP(object, MemOperand(scratch)); |
1958 } | 1805 } |
1959 | 1806 |
1960 | 1807 |
1961 void MacroAssembler::AllocateTwoByteString(Register result, | 1808 void MacroAssembler::AllocateTwoByteString(Register result, |
1962 Register length, | 1809 Register length, |
1963 Register scratch1, | 1810 Register scratch1, |
1964 Register scratch2, | 1811 Register scratch2, |
1965 Register scratch3, | 1812 Register scratch3, |
1966 Label* gc_required) { | 1813 Label* gc_required) { |
1967 // Calculate the number of bytes needed for the characters in the string while | 1814 // Calculate the number of bytes needed for the characters in the string while |
1968 // observing object alignment. | 1815 // observing object alignment. |
1969 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1816 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
1970 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars. | 1817 slwi(scratch1, length, Operand(1)); // Length in bytes, not chars. |
1971 add(scratch1, scratch1, | 1818 addi(scratch1, scratch1, |
1972 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); | 1819 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); |
1973 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); | 1820 mov(r0, Operand(~kObjectAlignmentMask)); |
| 1821 and_(scratch1, scratch1, r0); |
1974 | 1822 |
1975 // Allocate two-byte string in new space. | 1823 // Allocate two-byte string in new space. |
1976 Allocate(scratch1, | 1824 Allocate(scratch1, |
1977 result, | 1825 result, |
1978 scratch2, | 1826 scratch2, |
1979 scratch3, | 1827 scratch3, |
1980 gc_required, | 1828 gc_required, |
1981 TAG_OBJECT); | 1829 TAG_OBJECT); |
1982 | 1830 |
1983 // Set the map, length and hash field. | 1831 // Set the map, length and hash field. |
1984 InitializeNewString(result, | 1832 InitializeNewString(result, |
1985 length, | 1833 length, |
1986 Heap::kStringMapRootIndex, | 1834 Heap::kStringMapRootIndex, |
1987 scratch1, | 1835 scratch1, |
1988 scratch2); | 1836 scratch2); |
1989 } | 1837 } |
1990 | 1838 |
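The slwi/addi/and_ sequence above computes the backing-store size: two bytes per character plus the header, rounded up to the object alignment. The same arithmetic in plain C++ (header size and alignment mask are stand-ins for the V8 constants, not values taken from this diff):

    // Round (2 * length + header) up to the next object-alignment boundary.
    static int TwoByteStringSize(int length, int header_size, int alignment_mask) {
      int size = 2 * length + header_size + alignment_mask;
      return size & ~alignment_mask;
    }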
1991 | 1839 |
1992 void MacroAssembler::AllocateAsciiString(Register result, | 1840 void MacroAssembler::AllocateAsciiString(Register result, |
1993 Register length, | 1841 Register length, |
1994 Register scratch1, | 1842 Register scratch1, |
1995 Register scratch2, | 1843 Register scratch2, |
1996 Register scratch3, | 1844 Register scratch3, |
1997 Label* gc_required) { | 1845 Label* gc_required) { |
1998 // Calculate the number of bytes needed for the characters in the string while | 1846 // Calculate the number of bytes needed for the characters in the string while |
1999 // observing object alignment. | 1847 // observing object alignment. |
2000 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1848 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
2001 DCHECK(kCharSize == 1); | 1849 DCHECK(kCharSize == 1); |
2002 add(scratch1, length, | 1850 addi(scratch1, length, |
2003 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); | 1851 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); |
2004 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); | 1852 li(r0, Operand(~kObjectAlignmentMask)); |
| 1853 and_(scratch1, scratch1, r0); |
2005 | 1854 |
2006 // Allocate ASCII string in new space. | 1855 // Allocate ASCII string in new space. |
2007 Allocate(scratch1, | 1856 Allocate(scratch1, |
2008 result, | 1857 result, |
2009 scratch2, | 1858 scratch2, |
2010 scratch3, | 1859 scratch3, |
2011 gc_required, | 1860 gc_required, |
2012 TAG_OBJECT); | 1861 TAG_OBJECT); |
2013 | 1862 |
2014 // Set the map, length and hash field. | 1863 // Set the map, length and hash field. |
(...skipping 72 matching lines...)
2087 scratch2); | 1936 scratch2); |
2088 } | 1937 } |
2089 | 1938 |
2090 | 1939 |
2091 void MacroAssembler::CompareObjectType(Register object, | 1940 void MacroAssembler::CompareObjectType(Register object, |
2092 Register map, | 1941 Register map, |
2093 Register type_reg, | 1942 Register type_reg, |
2094 InstanceType type) { | 1943 InstanceType type) { |
2095 const Register temp = type_reg.is(no_reg) ? ip : type_reg; | 1944 const Register temp = type_reg.is(no_reg) ? ip : type_reg; |
2096 | 1945 |
2097 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 1946 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
2098 CompareInstanceType(map, temp, type); | 1947 CompareInstanceType(map, temp, type); |
2099 } | 1948 } |
2100 | 1949 |
2101 | 1950 |
2102 void MacroAssembler::CheckObjectTypeRange(Register object, | 1951 void MacroAssembler::CheckObjectTypeRange(Register object, |
2103 Register map, | 1952 Register map, |
2104 InstanceType min_type, | 1953 InstanceType min_type, |
2105 InstanceType max_type, | 1954 InstanceType max_type, |
2106 Label* false_label) { | 1955 Label* false_label) { |
2107 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); | 1956 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); |
2108 STATIC_ASSERT(LAST_TYPE < 256); | 1957 STATIC_ASSERT(LAST_TYPE < 256); |
2109 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 1958 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
2110 ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 1959 lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
2111 sub(ip, ip, Operand(min_type)); | 1960 subi(ip, ip, Operand(min_type)); |
2112 cmp(ip, Operand(max_type - min_type)); | 1961 cmpli(ip, Operand(max_type - min_type)); |
2113 b(hi, false_label); | 1962 bgt(false_label); |
2114 } | 1963 } |
2115 | 1964 |
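CheckObjectTypeRange uses the usual single-compare range trick: subtract the lower bound, then one unsigned compare (cmpli/bgt on PPC, cmp/b(hi) on ARM) covers both ends. A small C++ illustration, assuming min_type <= max_type:

    #include <stdint.h>

    // True when min_type <= type <= max_type, using one unsigned comparison.
    static bool InstanceTypeInRange(uint8_t type, uint8_t min_type, uint8_t max_type) {
      return (uint8_t)(type - min_type) <= (uint8_t)(max_type - min_type);
    }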
2116 | 1965 |
2117 void MacroAssembler::CompareInstanceType(Register map, | 1966 void MacroAssembler::CompareInstanceType(Register map, |
2118 Register type_reg, | 1967 Register type_reg, |
2119 InstanceType type) { | 1968 InstanceType type) { |
2120 // Registers map and type_reg can be ip. These two lines assert | |
2121 // that ip can be used with the two instructions (the constants | |
2122 // will never need ip). | |
2123 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); | 1969 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); |
2124 STATIC_ASSERT(LAST_TYPE < 256); | 1970 STATIC_ASSERT(LAST_TYPE < 256); |
2125 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 1971 lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
2126 cmp(type_reg, Operand(type)); | 1972 cmpi(type_reg, Operand(type)); |
2127 } | 1973 } |
2128 | 1974 |
2129 | 1975 |
2130 void MacroAssembler::CompareRoot(Register obj, | 1976 void MacroAssembler::CompareRoot(Register obj, |
2131 Heap::RootListIndex index) { | 1977 Heap::RootListIndex index) { |
2132 DCHECK(!obj.is(ip)); | 1978 DCHECK(!obj.is(ip)); |
2133 LoadRoot(ip, index); | 1979 LoadRoot(ip, index); |
2134 cmp(obj, ip); | 1980 cmp(obj, ip); |
2135 } | 1981 } |
2136 | 1982 |
2137 | 1983 |
2138 void MacroAssembler::CheckFastElements(Register map, | 1984 void MacroAssembler::CheckFastElements(Register map, |
2139 Register scratch, | 1985 Register scratch, |
2140 Label* fail) { | 1986 Label* fail) { |
2141 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 1987 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
2142 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 1988 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
2143 STATIC_ASSERT(FAST_ELEMENTS == 2); | 1989 STATIC_ASSERT(FAST_ELEMENTS == 2); |
2144 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 1990 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
2145 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 1991 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
2146 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); | 1992 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000); |
2147 b(hi, fail); | 1993 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); |
| 1994 bgt(fail); |
2148 } | 1995 } |
2149 | 1996 |
2150 | 1997 |
2151 void MacroAssembler::CheckFastObjectElements(Register map, | 1998 void MacroAssembler::CheckFastObjectElements(Register map, |
2152 Register scratch, | 1999 Register scratch, |
2153 Label* fail) { | 2000 Label* fail) { |
2154 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 2001 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
2155 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 2002 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
2156 STATIC_ASSERT(FAST_ELEMENTS == 2); | 2003 STATIC_ASSERT(FAST_ELEMENTS == 2); |
2157 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 2004 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
2158 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 2005 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
2159 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 2006 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
2160 b(ls, fail); | 2007 ble(fail); |
2161 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); | 2008 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); |
2162 b(hi, fail); | 2009 bgt(fail); |
2163 } | 2010 } |
2164 | 2011 |
2165 | 2012 |
2166 void MacroAssembler::CheckFastSmiElements(Register map, | 2013 void MacroAssembler::CheckFastSmiElements(Register map, |
2167 Register scratch, | 2014 Register scratch, |
2168 Label* fail) { | 2015 Label* fail) { |
2169 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 2016 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
2170 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 2017 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
2171 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 2018 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
2172 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 2019 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
2173 b(hi, fail); | 2020 bgt(fail); |
2174 } | 2021 } |
2175 | 2022 |
2176 | 2023 |
| 2024 |
2177 void MacroAssembler::StoreNumberToDoubleElements( | 2025 void MacroAssembler::StoreNumberToDoubleElements( |
2178 Register value_reg, | 2026 Register value_reg, |
2179 Register key_reg, | 2027 Register key_reg, |
2180 Register elements_reg, | 2028 Register elements_reg, |
2181 Register scratch1, | 2029 Register scratch1, |
2182 LowDwVfpRegister double_scratch, | 2030 DoubleRegister double_scratch, |
2183 Label* fail, | 2031 Label* fail, |
2184 int elements_offset) { | 2032 int elements_offset) { |
2185 Label smi_value, store; | 2033 Label smi_value, store; |
2186 | 2034 |
2187 // Handle smi values specially. | 2035 // Handle smi values specially. |
2188 JumpIfSmi(value_reg, &smi_value); | 2036 JumpIfSmi(value_reg, &smi_value); |
2189 | 2037 |
2190 // Ensure that the object is a heap number | 2038 // Ensure that the object is a heap number |
2191 CheckMap(value_reg, | 2039 CheckMap(value_reg, |
2192 scratch1, | 2040 scratch1, |
2193 isolate()->factory()->heap_number_map(), | 2041 isolate()->factory()->heap_number_map(), |
2194 fail, | 2042 fail, |
2195 DONT_DO_SMI_CHECK); | 2043 DONT_DO_SMI_CHECK); |
2196 | 2044 |
2197 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 2045 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
2198 // Force a canonical NaN. | 2046 // Force a canonical NaN. |
2199 if (emit_debug_code()) { | 2047 CanonicalizeNaN(double_scratch); |
2200 vmrs(ip); | |
2201 tst(ip, Operand(kVFPDefaultNaNModeControlBit)); | |
2202 Assert(ne, kDefaultNaNModeNotSet); | |
2203 } | |
2204 VFPCanonicalizeNaN(double_scratch); | |
2205 b(&store); | 2048 b(&store); |
2206 | 2049 |
2207 bind(&smi_value); | 2050 bind(&smi_value); |
2208 SmiToDouble(double_scratch, value_reg); | 2051 SmiToDouble(double_scratch, value_reg); |
2209 | 2052 |
2210 bind(&store); | 2053 bind(&store); |
2211 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); | 2054 SmiToDoubleArrayOffset(scratch1, key_reg); |
2212 vstr(double_scratch, | 2055 add(scratch1, elements_reg, scratch1); |
| 2056 stfd(double_scratch, |
2213 FieldMemOperand(scratch1, | 2057 FieldMemOperand(scratch1, |
2214 FixedDoubleArray::kHeaderSize - elements_offset)); | 2058 FixedDoubleArray::kHeaderSize - elements_offset)); |
2215 } | 2059 } |
2216 | 2060 |
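In the store path above, the ARM version asserted the VFP default-NaN mode and relied on VFPCanonicalizeNaN, while the PPC port calls CanonicalizeNaN directly before storing into the FixedDoubleArray. The intent, sketched in portable C++ (a simplification, not the V8 implementation):

    #include <cmath>
    #include <limits>

    // Replace any NaN payload with the single canonical quiet NaN before storing.
    static double CanonicalizeForStore(double value) {
      return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN() : value;
    }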
2217 | 2061 |
| 2062 void MacroAssembler::AddAndCheckForOverflow(Register dst, |
| 2063 Register left, |
| 2064 Register right, |
| 2065 Register overflow_dst, |
| 2066 Register scratch) { |
| 2067 DCHECK(!dst.is(overflow_dst)); |
| 2068 DCHECK(!dst.is(scratch)); |
| 2069 DCHECK(!overflow_dst.is(scratch)); |
| 2070 DCHECK(!overflow_dst.is(left)); |
| 2071 DCHECK(!overflow_dst.is(right)); |
| 2072 |
| 2073 // C = A+B; C overflows if A/B have same sign and C has diff sign than A |
| 2074 if (dst.is(left)) { |
| 2075 mr(scratch, left); // Preserve left. |
| 2076 add(dst, left, right); // Left is overwritten. |
| 2077 xor_(scratch, dst, scratch); // Original left. |
| 2078 xor_(overflow_dst, dst, right); |
| 2079 and_(overflow_dst, overflow_dst, scratch, SetRC); |
| 2080 } else if (dst.is(right)) { |
| 2081 mr(scratch, right); // Preserve right. |
| 2082 add(dst, left, right); // Right is overwritten. |
| 2083 xor_(scratch, dst, scratch); // Original right. |
| 2084 xor_(overflow_dst, dst, left); |
| 2085 and_(overflow_dst, overflow_dst, scratch, SetRC); |
| 2086 } else { |
| 2087 add(dst, left, right); |
| 2088 xor_(overflow_dst, dst, left); |
| 2089 xor_(scratch, dst, right); |
| 2090 and_(overflow_dst, scratch, overflow_dst, SetRC); |
| 2091 } |
| 2092 } |
| 2093 |
| 2094 void MacroAssembler::SubAndCheckForOverflow(Register dst, |
| 2095 Register left, |
| 2096 Register right, |
| 2097 Register overflow_dst, |
| 2098 Register scratch) { |
| 2099 DCHECK(!dst.is(overflow_dst)); |
| 2100 DCHECK(!dst.is(scratch)); |
| 2101 DCHECK(!overflow_dst.is(scratch)); |
| 2102 DCHECK(!overflow_dst.is(left)); |
| 2103 DCHECK(!overflow_dst.is(right)); |
| 2104 |
| 2105 // C = A-B; C overflows if A/B have diff signs and C has diff sign than A |
| 2106 if (dst.is(left)) { |
| 2107 mr(scratch, left); // Preserve left. |
| 2108 sub(dst, left, right); // Left is overwritten. |
| 2109 xor_(overflow_dst, dst, scratch); |
| 2110 xor_(scratch, scratch, right); |
| 2111 and_(overflow_dst, overflow_dst, scratch, SetRC); |
| 2112 } else if (dst.is(right)) { |
| 2113 mr(scratch, right); // Preserve right. |
| 2114 sub(dst, left, right); // Right is overwritten. |
| 2115 xor_(overflow_dst, dst, left); |
| 2116 xor_(scratch, left, scratch); |
| 2117 and_(overflow_dst, overflow_dst, scratch, SetRC); |
| 2118 } else { |
| 2119 sub(dst, left, right); |
| 2120 xor_(overflow_dst, dst, left); |
| 2121 xor_(scratch, left, right); |
| 2122 and_(overflow_dst, scratch, overflow_dst, SetRC); |
| 2123 } |
| 2124 } |
| 2125 |
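Both overflow helpers above implement the sign-bit test spelled out in their comments ("C overflows if A/B have same sign and C has diff sign than A", and the analogous rule for subtraction): XOR the result against the operands and AND the two, and the sign bit of that value is set exactly when the operation overflowed. A C++ sketch using unsigned arithmetic so the add/sub themselves stay well-defined:

    #include <stdint.h>

    // Signed-add overflow: the result differs in sign from both operands.
    static bool AddOverflows(int32_t a, int32_t b, int32_t* out) {
      int32_t c = (int32_t)((uint32_t)a + (uint32_t)b);
      *out = c;
      return ((c ^ a) & (c ^ b)) < 0;
    }

    // Signed-sub overflow: the operands differ in sign and the result differs from a.
    static bool SubOverflows(int32_t a, int32_t b, int32_t* out) {
      int32_t c = (int32_t)((uint32_t)a - (uint32_t)b);
      *out = c;
      return ((c ^ a) & (a ^ b)) < 0;
    }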
| 2126 |
2218 void MacroAssembler::CompareMap(Register obj, | 2127 void MacroAssembler::CompareMap(Register obj, |
2219 Register scratch, | 2128 Register scratch, |
2220 Handle<Map> map, | 2129 Handle<Map> map, |
2221 Label* early_success) { | 2130 Label* early_success) { |
2222 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2131 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2223 CompareMap(scratch, map, early_success); | 2132 CompareMap(scratch, map, early_success); |
2224 } | 2133 } |
2225 | 2134 |
2226 | 2135 |
2227 void MacroAssembler::CompareMap(Register obj_map, | 2136 void MacroAssembler::CompareMap(Register obj_map, |
2228 Handle<Map> map, | 2137 Handle<Map> map, |
2229 Label* early_success) { | 2138 Label* early_success) { |
2230 cmp(obj_map, Operand(map)); | 2139 mov(r0, Operand(map)); |
| 2140 cmp(obj_map, r0); |
2231 } | 2141 } |
2232 | 2142 |
2233 | 2143 |
2234 void MacroAssembler::CheckMap(Register obj, | 2144 void MacroAssembler::CheckMap(Register obj, |
2235 Register scratch, | 2145 Register scratch, |
2236 Handle<Map> map, | 2146 Handle<Map> map, |
2237 Label* fail, | 2147 Label* fail, |
2238 SmiCheckType smi_check_type) { | 2148 SmiCheckType smi_check_type) { |
2239 if (smi_check_type == DO_SMI_CHECK) { | 2149 if (smi_check_type == DO_SMI_CHECK) { |
2240 JumpIfSmi(obj, fail); | 2150 JumpIfSmi(obj, fail); |
2241 } | 2151 } |
2242 | 2152 |
2243 Label success; | 2153 Label success; |
2244 CompareMap(obj, scratch, map, &success); | 2154 CompareMap(obj, scratch, map, &success); |
2245 b(ne, fail); | 2155 bne(fail); |
2246 bind(&success); | 2156 bind(&success); |
2247 } | 2157 } |
2248 | 2158 |
2249 | 2159 |
2250 void MacroAssembler::CheckMap(Register obj, | 2160 void MacroAssembler::CheckMap(Register obj, |
2251 Register scratch, | 2161 Register scratch, |
2252 Heap::RootListIndex index, | 2162 Heap::RootListIndex index, |
2253 Label* fail, | 2163 Label* fail, |
2254 SmiCheckType smi_check_type) { | 2164 SmiCheckType smi_check_type) { |
2255 if (smi_check_type == DO_SMI_CHECK) { | 2165 if (smi_check_type == DO_SMI_CHECK) { |
2256 JumpIfSmi(obj, fail); | 2166 JumpIfSmi(obj, fail); |
2257 } | 2167 } |
2258 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2168 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2259 LoadRoot(ip, index); | 2169 LoadRoot(ip, index); |
2260 cmp(scratch, ip); | 2170 cmp(scratch, ip); |
2261 b(ne, fail); | 2171 bne(fail); |
2262 } | 2172 } |
2263 | 2173 |
2264 | 2174 |
2265 void MacroAssembler::DispatchMap(Register obj, | 2175 void MacroAssembler::DispatchMap(Register obj, |
2266 Register scratch, | 2176 Register scratch, |
2267 Handle<Map> map, | 2177 Handle<Map> map, |
2268 Handle<Code> success, | 2178 Handle<Code> success, |
2269 SmiCheckType smi_check_type) { | 2179 SmiCheckType smi_check_type) { |
2270 Label fail; | 2180 Label fail; |
2271 if (smi_check_type == DO_SMI_CHECK) { | 2181 if (smi_check_type == DO_SMI_CHECK) { |
2272 JumpIfSmi(obj, &fail); | 2182 JumpIfSmi(obj, &fail); |
2273 } | 2183 } |
2274 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2184 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2275 mov(ip, Operand(map)); | 2185 mov(ip, Operand(map)); |
2276 cmp(scratch, ip); | 2186 cmp(scratch, ip); |
2277 Jump(success, RelocInfo::CODE_TARGET, eq); | 2187 bne(&fail); |
| 2188 Jump(success, RelocInfo::CODE_TARGET, al); |
2278 bind(&fail); | 2189 bind(&fail); |
2279 } | 2190 } |
2280 | 2191 |
2281 | 2192 |
2282 void MacroAssembler::TryGetFunctionPrototype(Register function, | 2193 void MacroAssembler::TryGetFunctionPrototype(Register function, |
2283 Register result, | 2194 Register result, |
2284 Register scratch, | 2195 Register scratch, |
2285 Label* miss, | 2196 Label* miss, |
2286 bool miss_on_bound_function) { | 2197 bool miss_on_bound_function) { |
2287 Label non_instance; | 2198 Label non_instance; |
2288 if (miss_on_bound_function) { | 2199 if (miss_on_bound_function) { |
2289 // Check that the receiver isn't a smi. | 2200 // Check that the receiver isn't a smi. |
2290 JumpIfSmi(function, miss); | 2201 JumpIfSmi(function, miss); |
2291 | 2202 |
2292 // Check that the function really is a function. Load map into result reg. | 2203 // Check that the function really is a function. Load map into result reg. |
2293 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); | 2204 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); |
2294 b(ne, miss); | 2205 bne(miss); |
2295 | 2206 |
2296 ldr(scratch, | 2207 LoadP(scratch, |
2297 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | 2208 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
2298 ldr(scratch, | 2209 lwz(scratch, |
2299 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); | 2210 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); |
2300 tst(scratch, | 2211 TestBit(scratch, |
2301 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); | 2212 #if V8_TARGET_ARCH_PPC64 |
2302 b(ne, miss); | 2213 SharedFunctionInfo::kBoundFunction, |
| 2214 #else |
| 2215 SharedFunctionInfo::kBoundFunction + kSmiTagSize, |
| 2216 #endif |
| 2217 r0); |
| 2218 bne(miss, cr0); |
2303 | 2219 |
2304 // Make sure that the function has an instance prototype. | 2220 // Make sure that the function has an instance prototype. |
2305 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); | 2221 lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); |
2306 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); | 2222 andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype)); |
2307 b(ne, &non_instance); | 2223 bne(&non_instance, cr0); |
2308 } | 2224 } |
2309 | 2225 |
2310 // Get the prototype or initial map from the function. | 2226 // Get the prototype or initial map from the function. |
2311 ldr(result, | 2227 LoadP(result, |
2312 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2228 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
2313 | 2229 |
2314 // If the prototype or initial map is the hole, don't return it and | 2230 // If the prototype or initial map is the hole, don't return it and |
2315 // simply miss the cache instead. This will allow us to allocate a | 2231 // simply miss the cache instead. This will allow us to allocate a |
2316 // prototype object on-demand in the runtime system. | 2232 // prototype object on-demand in the runtime system. |
2317 LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2233 LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
2318 cmp(result, ip); | 2234 cmp(result, ip); |
2319 b(eq, miss); | 2235 beq(miss); |
2320 | 2236 |
2321 // If the function does not have an initial map, we're done. | 2237 // If the function does not have an initial map, we're done. |
2322 Label done; | 2238 Label done; |
2323 CompareObjectType(result, scratch, scratch, MAP_TYPE); | 2239 CompareObjectType(result, scratch, scratch, MAP_TYPE); |
2324 b(ne, &done); | 2240 bne(&done); |
2325 | 2241 |
2326 // Get the prototype from the initial map. | 2242 // Get the prototype from the initial map. |
2327 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 2243 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
2328 | 2244 |
2329 if (miss_on_bound_function) { | 2245 if (miss_on_bound_function) { |
2330 jmp(&done); | 2246 b(&done); |
2331 | 2247 |
2332 // Non-instance prototype: Fetch prototype from constructor field | 2248 // Non-instance prototype: Fetch prototype from constructor field |
2333 // in initial map. | 2249 // in initial map. |
2334 bind(&non_instance); | 2250 bind(&non_instance); |
2335 ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | 2251 LoadP(result, FieldMemOperand(result, Map::kConstructorOffset)); |
2336 } | 2252 } |
2337 | 2253 |
2338 // All done. | 2254 // All done. |
2339 bind(&done); | 2255 bind(&done); |
2340 } | 2256 } |
2341 | 2257 |
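The TestBit call in TryGetFunctionPrototype differs between PPC64 and 32-bit because the compiler-hints word is Smi-encoded on 32-bit targets, so every flag sits kSmiTagSize bits higher. A hedged sketch of that position adjustment (kSmiTagSize assumed to be 1; the helper is illustrative only):

    #include <stdint.h>

    // Test a SharedFunctionInfo hint bit, compensating for the Smi tag on 32-bit.
    static bool HintBitSet(uint32_t hints, int bit, bool field_is_smi_encoded) {
      int position = bit + (field_is_smi_encoded ? 1 : 0);  // + kSmiTagSize
      return (hints >> position) & 1u;
    }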
2342 | 2258 |
2343 void MacroAssembler::CallStub(CodeStub* stub, | 2259 void MacroAssembler::CallStub(CodeStub* stub, |
2344 TypeFeedbackId ast_id, | 2260 TypeFeedbackId ast_id, |
2345 Condition cond) { | 2261 Condition cond) { |
(...skipping 12 matching lines...)
2358 } | 2274 } |
2359 | 2275 |
2360 | 2276 |
2361 void MacroAssembler::CallApiFunctionAndReturn( | 2277 void MacroAssembler::CallApiFunctionAndReturn( |
2362 Register function_address, | 2278 Register function_address, |
2363 ExternalReference thunk_ref, | 2279 ExternalReference thunk_ref, |
2364 int stack_space, | 2280 int stack_space, |
2365 MemOperand return_value_operand, | 2281 MemOperand return_value_operand, |
2366 MemOperand* context_restore_operand) { | 2282 MemOperand* context_restore_operand) { |
2367 ExternalReference next_address = | 2283 ExternalReference next_address = |
2368 ExternalReference::handle_scope_next_address(isolate()); | 2284 ExternalReference::handle_scope_next_address(isolate()); |
2369 const int kNextOffset = 0; | 2285 const int kNextOffset = 0; |
2370 const int kLimitOffset = AddressOffset( | 2286 const int kLimitOffset = AddressOffset( |
2371 ExternalReference::handle_scope_limit_address(isolate()), | 2287 ExternalReference::handle_scope_limit_address(isolate()), |
2372 next_address); | 2288 next_address); |
2373 const int kLevelOffset = AddressOffset( | 2289 const int kLevelOffset = AddressOffset( |
2374 ExternalReference::handle_scope_level_address(isolate()), | 2290 ExternalReference::handle_scope_level_address(isolate()), |
2375 next_address); | 2291 next_address); |
2376 | 2292 |
2377 DCHECK(function_address.is(r1) || function_address.is(r2)); | 2293 DCHECK(function_address.is(r4) || function_address.is(r5)); |
| 2294 Register scratch = r6; |
2378 | 2295 |
2379 Label profiler_disabled; | 2296 Label profiler_disabled; |
2380 Label end_profiler_check; | 2297 Label end_profiler_check; |
2381 mov(r9, Operand(ExternalReference::is_profiling_address(isolate()))); | 2298 mov(scratch, Operand(ExternalReference::is_profiling_address(isolate()))); |
2382 ldrb(r9, MemOperand(r9, 0)); | 2299 lbz(scratch, MemOperand(scratch, 0)); |
2383 cmp(r9, Operand(0)); | 2300 cmpi(scratch, Operand::Zero()); |
2384 b(eq, &profiler_disabled); | 2301 beq(&profiler_disabled); |
2385 | 2302 |
2386 // Additional parameter is the address of the actual callback. | 2303 // Additional parameter is the address of the actual callback. |
2387 mov(r3, Operand(thunk_ref)); | 2304 mov(scratch, Operand(thunk_ref)); |
2388 jmp(&end_profiler_check); | 2305 jmp(&end_profiler_check); |
2389 | 2306 |
2390 bind(&profiler_disabled); | 2307 bind(&profiler_disabled); |
2391 Move(r3, function_address); | 2308 mr(scratch, function_address); |
2392 bind(&end_profiler_check); | 2309 bind(&end_profiler_check); |
2393 | 2310 |
2394 // Allocate HandleScope in callee-save registers. | 2311 // Allocate HandleScope in callee-save registers. |
2395 mov(r9, Operand(next_address)); | 2312 // r17 - next_address |
2396 ldr(r4, MemOperand(r9, kNextOffset)); | 2313 // r14 - next_address->kNextOffset |
2397 ldr(r5, MemOperand(r9, kLimitOffset)); | 2314 // r15 - next_address->kLimitOffset |
2398 ldr(r6, MemOperand(r9, kLevelOffset)); | 2315 // r16 - next_address->kLevelOffset |
2399 add(r6, r6, Operand(1)); | 2316 mov(r17, Operand(next_address)); |
2400 str(r6, MemOperand(r9, kLevelOffset)); | 2317 LoadP(r14, MemOperand(r17, kNextOffset)); |
| 2318 LoadP(r15, MemOperand(r17, kLimitOffset)); |
| 2319 lwz(r16, MemOperand(r17, kLevelOffset)); |
| 2320 addi(r16, r16, Operand(1)); |
| 2321 stw(r16, MemOperand(r17, kLevelOffset)); |
2401 | 2322 |
2402 if (FLAG_log_timer_events) { | 2323 if (FLAG_log_timer_events) { |
2403 FrameScope frame(this, StackFrame::MANUAL); | 2324 FrameScope frame(this, StackFrame::MANUAL); |
2404 PushSafepointRegisters(); | 2325 PushSafepointRegisters(); |
2405 PrepareCallCFunction(1, r0); | 2326 PrepareCallCFunction(1, r3); |
2406 mov(r0, Operand(ExternalReference::isolate_address(isolate()))); | 2327 mov(r3, Operand(ExternalReference::isolate_address(isolate()))); |
2407 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); | 2328 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); |
2408 PopSafepointRegisters(); | 2329 PopSafepointRegisters(); |
2409 } | 2330 } |
2410 | 2331 |
2411 // Native call returns to the DirectCEntry stub which redirects to the | 2332 // Native call returns to the DirectCEntry stub which redirects to the |
2412 // return address pushed on stack (could have moved after GC). | 2333 // return address pushed on stack (could have moved after GC). |
2413 // DirectCEntry stub itself is generated early and never moves. | 2334 // DirectCEntry stub itself is generated early and never moves. |
2414 DirectCEntryStub stub(isolate()); | 2335 DirectCEntryStub stub(isolate()); |
2415 stub.GenerateCall(this, r3); | 2336 stub.GenerateCall(this, scratch); |
2416 | 2337 |
2417 if (FLAG_log_timer_events) { | 2338 if (FLAG_log_timer_events) { |
2418 FrameScope frame(this, StackFrame::MANUAL); | 2339 FrameScope frame(this, StackFrame::MANUAL); |
2419 PushSafepointRegisters(); | 2340 PushSafepointRegisters(); |
2420 PrepareCallCFunction(1, r0); | 2341 PrepareCallCFunction(1, r3); |
2421 mov(r0, Operand(ExternalReference::isolate_address(isolate()))); | 2342 mov(r3, Operand(ExternalReference::isolate_address(isolate()))); |
2422 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); | 2343 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); |
2423 PopSafepointRegisters(); | 2344 PopSafepointRegisters(); |
2424 } | 2345 } |
2425 | 2346 |
2426 Label promote_scheduled_exception; | 2347 Label promote_scheduled_exception; |
2427 Label exception_handled; | 2348 Label exception_handled; |
2428 Label delete_allocated_handles; | 2349 Label delete_allocated_handles; |
2429 Label leave_exit_frame; | 2350 Label leave_exit_frame; |
2430 Label return_value_loaded; | 2351 Label return_value_loaded; |
2431 | 2352 |
2432 // load value from ReturnValue | 2353 // load value from ReturnValue |
2433 ldr(r0, return_value_operand); | 2354 LoadP(r3, return_value_operand); |
2434 bind(&return_value_loaded); | 2355 bind(&return_value_loaded); |
2435 // No more valid handles (the result handle was the last one). Restore | 2356 // No more valid handles (the result handle was the last one). Restore |
2436 // previous handle scope. | 2357 // previous handle scope. |
2437 str(r4, MemOperand(r9, kNextOffset)); | 2358 StoreP(r14, MemOperand(r17, kNextOffset)); |
2438 if (emit_debug_code()) { | 2359 if (emit_debug_code()) { |
2439 ldr(r1, MemOperand(r9, kLevelOffset)); | 2360 lwz(r4, MemOperand(r17, kLevelOffset)); |
2440 cmp(r1, r6); | 2361 cmp(r4, r16); |
2441 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); | 2362 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); |
2442 } | 2363 } |
2443 sub(r6, r6, Operand(1)); | 2364 subi(r16, r16, Operand(1)); |
2444 str(r6, MemOperand(r9, kLevelOffset)); | 2365 stw(r16, MemOperand(r17, kLevelOffset)); |
2445 ldr(ip, MemOperand(r9, kLimitOffset)); | 2366 LoadP(ip, MemOperand(r17, kLimitOffset)); |
2446 cmp(r5, ip); | 2367 cmp(r15, ip); |
2447 b(ne, &delete_allocated_handles); | 2368 bne(&delete_allocated_handles); |
2448 | 2369 |
2449 // Check if the function scheduled an exception. | 2370 // Check if the function scheduled an exception. |
2450 bind(&leave_exit_frame); | 2371 bind(&leave_exit_frame); |
2451 LoadRoot(r4, Heap::kTheHoleValueRootIndex); | 2372 LoadRoot(r14, Heap::kTheHoleValueRootIndex); |
2452 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate()))); | 2373 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate()))); |
2453 ldr(r5, MemOperand(ip)); | 2374 LoadP(r15, MemOperand(ip)); |
2454 cmp(r4, r5); | 2375 cmp(r14, r15); |
2455 b(ne, &promote_scheduled_exception); | 2376 bne(&promote_scheduled_exception); |
2456 bind(&exception_handled); | 2377 bind(&exception_handled); |
2457 | 2378 |
2458 bool restore_context = context_restore_operand != NULL; | 2379 bool restore_context = context_restore_operand != NULL; |
2459 if (restore_context) { | 2380 if (restore_context) { |
2460 ldr(cp, *context_restore_operand); | 2381 LoadP(cp, *context_restore_operand); |
2461 } | 2382 } |
2462 // LeaveExitFrame expects unwind space to be in a register. | 2383 // LeaveExitFrame expects unwind space to be in a register. |
2463 mov(r4, Operand(stack_space)); | 2384 mov(r14, Operand(stack_space)); |
2464 LeaveExitFrame(false, r4, !restore_context); | 2385 LeaveExitFrame(false, r14, !restore_context); |
2465 mov(pc, lr); | 2386 blr(); |
2466 | 2387 |
2467 bind(&promote_scheduled_exception); | 2388 bind(&promote_scheduled_exception); |
2468 { | 2389 { |
2469 FrameScope frame(this, StackFrame::INTERNAL); | 2390 FrameScope frame(this, StackFrame::INTERNAL); |
2470 CallExternalReference( | 2391 CallExternalReference( |
2471 ExternalReference(Runtime::kPromoteScheduledException, isolate()), | 2392 ExternalReference(Runtime::kPromoteScheduledException, isolate()), |
2472 0); | 2393 0); |
2473 } | 2394 } |
2474 jmp(&exception_handled); | 2395 jmp(&exception_handled); |
2475 | 2396 |
2476 // HandleScope limit has changed. Delete allocated extensions. | 2397 // HandleScope limit has changed. Delete allocated extensions. |
2477 bind(&delete_allocated_handles); | 2398 bind(&delete_allocated_handles); |
2478 str(r5, MemOperand(r9, kLimitOffset)); | 2399 StoreP(r15, MemOperand(r17, kLimitOffset)); |
2479 mov(r4, r0); | 2400 mr(r14, r3); |
2480 PrepareCallCFunction(1, r5); | 2401 PrepareCallCFunction(1, r15); |
2481 mov(r0, Operand(ExternalReference::isolate_address(isolate()))); | 2402 mov(r3, Operand(ExternalReference::isolate_address(isolate()))); |
2482 CallCFunction( | 2403 CallCFunction( |
2483 ExternalReference::delete_handle_scope_extensions(isolate()), 1); | 2404 ExternalReference::delete_handle_scope_extensions(isolate()), 1); |
2484 mov(r0, r4); | 2405 mr(r3, r14); |
2485 jmp(&leave_exit_frame); | 2406 b(&leave_exit_frame); |
2486 } | 2407 } |
2487 | 2408 |
2488 | 2409 |
2489 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | 2410 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
2490 return has_frame_ || !stub->SometimesSetsUpAFrame(); | 2411 return has_frame_ || !stub->SometimesSetsUpAFrame(); |
2491 } | 2412 } |
2492 | 2413 |
2493 | 2414 |
2494 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 2415 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
2495 // If the hash field contains an array index pick it out. The assert checks | 2416 // If the hash field contains an array index pick it out. The assert checks |
2496 // that the constants for the maximum number of digits for an array index | 2417 // that the constants for the maximum number of digits for an array index |
2497 // cached in the hash field and the number of bits reserved for it does not | 2418 // cached in the hash field and the number of bits reserved for it does not |
2498 // conflict. | 2419 // conflict. |
2499 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < | 2420 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < |
2500 (1 << String::kArrayIndexValueBits)); | 2421 (1 << String::kArrayIndexValueBits)); |
2501 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); | 2422 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); |
2502 } | 2423 } |
2503 | 2424 |
2504 | 2425 |
2505 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { | 2426 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) { |
2506 if (CpuFeatures::IsSupported(VFP3)) { | 2427 SmiUntag(ip, smi); |
2507 vmov(value.low(), smi); | 2428 ConvertIntToDouble(ip, value); |
2508 vcvt_f64_s32(value, 1); | |
2509 } else { | |
2510 SmiUntag(ip, smi); | |
2511 vmov(value.low(), ip); | |
2512 vcvt_f64_s32(value, value.low()); | |
2513 } | |
2514 } | 2429 } |
2515 | 2430 |
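SmiToDouble now simply untags into ip and hands the integer to ConvertIntToDouble; there is no VFP-style fast path. Conceptually, assuming the platform Smi shift is 1 on 32-bit and 32 on 64-bit builds (an assumption stated here, not taken from this diff):

    #include <stdint.h>

    // Untag a Smi-encoded value and widen it to double.
    static double SmiToDoubleValue(intptr_t raw_smi, int smi_shift) {
      return (double)(raw_smi >> smi_shift);
    }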
2516 | 2431 |
2517 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, | 2432 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input, |
2518 LowDwVfpRegister double_scratch) { | 2433 Register scratch1, |
2519 DCHECK(!double_input.is(double_scratch)); | 2434 Register scratch2, |
2520 vcvt_s32_f64(double_scratch.low(), double_input); | 2435 DoubleRegister double_scratch) { |
2521 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2436 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch); |
2522 VFPCompareAndSetFlags(double_input, double_scratch); | |
2523 } | 2437 } |
2524 | 2438 |
2525 | 2439 |
2526 void MacroAssembler::TryDoubleToInt32Exact(Register result, | 2440 void MacroAssembler::TryDoubleToInt32Exact(Register result, |
2527 DwVfpRegister double_input, | 2441 DoubleRegister double_input, |
2528 LowDwVfpRegister double_scratch) { | 2442 Register scratch, |
| 2443 DoubleRegister double_scratch) { |
| 2444 Label done; |
2529 DCHECK(!double_input.is(double_scratch)); | 2445 DCHECK(!double_input.is(double_scratch)); |
2530 vcvt_s32_f64(double_scratch.low(), double_input); | 2446 |
2531 vmov(result, double_scratch.low()); | 2447 ConvertDoubleToInt64(double_input, |
2532 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2448 #if !V8_TARGET_ARCH_PPC64 |
2533 VFPCompareAndSetFlags(double_input, double_scratch); | 2449 scratch, |
| 2450 #endif |
| 2451 result, double_scratch); |
| 2452 |
| 2453 #if V8_TARGET_ARCH_PPC64 |
| 2454 TestIfInt32(result, scratch, r0); |
| 2455 #else |
| 2456 TestIfInt32(scratch, result, r0); |
| 2457 #endif |
| 2458 bne(&done); |
| 2459 |
| 2460 // Convert back and compare to check exactness. |
| 2461 fcfid(double_scratch, double_scratch); |
| 2462 fcmpu(double_scratch, double_input); |
| 2463 bind(&done); |
2534 } | 2464 } |
2535 | 2465 |
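The PPC version of TryDoubleToInt32Exact converts to a 64-bit integer (ConvertDoubleToInt64), checks that the value fits in 32 bits (TestIfInt32), and only then converts back (fcfid) and compares (fcmpu) to decide exactness. The same idea in portable C++, simplified to a direct range check:

    #include <stdint.h>

    // Try to convert d to an int32 exactly; false on NaN, out-of-range or
    // fractional input.
    static bool DoubleToInt32Exact(double d, int32_t* out) {
      if (!(d > -2147483649.0 && d < 2147483648.0)) return false;  // NaN / range
      int32_t i = (int32_t)d;               // truncate toward zero
      *out = i;
      return (double)i == d;                // exact only if the round trip matches
    }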
2536 | 2466 |
2537 void MacroAssembler::TryInt32Floor(Register result, | 2467 void MacroAssembler::TryInt32Floor(Register result, |
2538 DwVfpRegister double_input, | 2468 DoubleRegister double_input, |
2539 Register input_high, | 2469 Register input_high, |
2540 LowDwVfpRegister double_scratch, | 2470 Register scratch, |
| 2471 DoubleRegister double_scratch, |
2541 Label* done, | 2472 Label* done, |
2542 Label* exact) { | 2473 Label* exact) { |
2543 DCHECK(!result.is(input_high)); | 2474 DCHECK(!result.is(input_high)); |
2544 DCHECK(!double_input.is(double_scratch)); | 2475 DCHECK(!double_input.is(double_scratch)); |
2545 Label negative, exception; | 2476 Label exception; |
2546 | 2477 |
2547 VmovHigh(input_high, double_input); | 2478 MovDoubleHighToInt(input_high, double_input); |
2548 | 2479 |
2549 // Test for NaN and infinities. | 2480 // Test for NaN/Inf |
2550 Sbfx(result, input_high, | 2481 ExtractBitMask(result, input_high, HeapNumber::kExponentMask); |
2551 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 2482 cmpli(result, Operand(0x7ff)); |
2552 cmp(result, Operand(-1)); | 2483 beq(&exception); |
2553 b(eq, &exception); | |
2554 // Test for values that can be exactly represented as a | |
2555 // signed 32-bit integer. | |
2556 TryDoubleToInt32Exact(result, double_input, double_scratch); | |
2557 // If exact, return (result already fetched). | |
2558 b(eq, exact); | |
2559 cmp(input_high, Operand::Zero()); | |
2560 b(mi, &negative); | |
2561 | 2484 |
2562 // Input is in ]+0, +inf[. | 2485 // Convert (rounding to -Inf) |
2563 // If result equals 0x7fffffff input was out of range or | 2486 ConvertDoubleToInt64(double_input, |
2564 // in ]0x7fffffff, 0x80000000[. We ignore this last case which | 2487 #if !V8_TARGET_ARCH_PPC64 |
2565 // could fits into an int32, that means we always think input was | 2488 scratch, |
2566 // out of range and always go to exception. | 2489 #endif |
2567 // If result < 0x7fffffff, go to done, result fetched. | 2490 result, double_scratch, |
2568 cmn(result, Operand(1)); | 2491 kRoundToMinusInf); |
2569 b(mi, &exception); | 2492 |
| 2493 // Test for overflow |
| 2494 #if V8_TARGET_ARCH_PPC64 |
| 2495 TestIfInt32(result, scratch, r0); |
| 2496 #else |
| 2497 TestIfInt32(scratch, result, r0); |
| 2498 #endif |
| 2499 bne(&exception); |
| 2500 |
| 2501 // Test for exactness |
| 2502 fcfid(double_scratch, double_scratch); |
| 2503 fcmpu(double_scratch, double_input); |
| 2504 beq(exact); |
2570 b(done); | 2505 b(done); |
2571 | 2506 |
2572 // Input is in ]-inf, -0[. | |
2573 // If x is a non integer negative number, | |
2574 // floor(x) <=> round_to_zero(x) - 1. | |
2575 bind(&negative); | |
2576 sub(result, result, Operand(1), SetCC); | |
2577 // If result is still negative, go to done, result fetched. | |
2578 // Else, we had an overflow and we fall through exception. | |
2579 b(mi, done); | |
2580 bind(&exception); | 2507 bind(&exception); |
2581 } | 2508 } |
2582 | 2509 |
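TryInt32Floor now rejects NaN and the infinities up front by extracting the exponent field from the high word (ExtractBitMask with HeapNumber::kExponentMask) and comparing it against 0x7ff, then converts with kRoundToMinusInf and verifies exactness via fcfid/fcmpu as above. The exponent test in C++:

    #include <stdint.h>
    #include <string.h>

    // An all-ones exponent (0x7ff) means the double is NaN or +/-Infinity.
    static bool IsNaNOrInfinity(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      return ((bits >> 52) & 0x7ff) == 0x7ff;
    }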
| 2510 |
2583 void MacroAssembler::TryInlineTruncateDoubleToI(Register result, | 2511 void MacroAssembler::TryInlineTruncateDoubleToI(Register result, |
2584 DwVfpRegister double_input, | 2512 DoubleRegister double_input, |
2585 Label* done) { | 2513 Label* done) { |
2586 LowDwVfpRegister double_scratch = kScratchDoubleReg; | 2514 DoubleRegister double_scratch = kScratchDoubleReg; |
2587 vcvt_s32_f64(double_scratch.low(), double_input); | 2515 Register scratch = ip; |
2588 vmov(result, double_scratch.low()); | |
2589 | 2516 |
2590 // If result is not saturated (0x7fffffff or 0x80000000), we are done. | 2517 ConvertDoubleToInt64(double_input, |
2591 sub(ip, result, Operand(1)); | 2518 #if !V8_TARGET_ARCH_PPC64 |
2592 cmp(ip, Operand(0x7ffffffe)); | 2519 scratch, |
2593 b(lt, done); | 2520 #endif |
| 2521 result, double_scratch); |
| 2522 |
| 2523 // Test for overflow |
| 2524 #if V8_TARGET_ARCH_PPC64 |
| 2525 TestIfInt32(result, scratch, r0); |
| 2526 #else |
| 2527 TestIfInt32(scratch, result, r0); |
| 2528 #endif |
| 2529 beq(done); |
2594 } | 2530 } |
2595 | 2531 |
2596 | 2532 |
2597 void MacroAssembler::TruncateDoubleToI(Register result, | 2533 void MacroAssembler::TruncateDoubleToI(Register result, |
2598 DwVfpRegister double_input) { | 2534 DoubleRegister double_input) { |
2599 Label done; | 2535 Label done; |
2600 | 2536 |
2601 TryInlineTruncateDoubleToI(result, double_input, &done); | 2537 TryInlineTruncateDoubleToI(result, double_input, &done); |
2602 | 2538 |
2603 // If we fell through then inline version didn't succeed - call stub instead. | 2539 // If we fell through then inline version didn't succeed - call stub instead. |
2604 push(lr); | 2540 mflr(r0); |
2605 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack. | 2541 push(r0); |
2606 vstr(double_input, MemOperand(sp, 0)); | 2542 // Put input on stack. |
| 2543 stfdu(double_input, MemOperand(sp, -kDoubleSize)); |
2607 | 2544 |
2608 DoubleToIStub stub(isolate(), sp, result, 0, true, true); | 2545 DoubleToIStub stub(isolate(), sp, result, 0, true, true); |
2609 CallStub(&stub); | 2546 CallStub(&stub); |
2610 | 2547 |
2611 add(sp, sp, Operand(kDoubleSize)); | 2548 addi(sp, sp, Operand(kDoubleSize)); |
2612 pop(lr); | 2549 pop(r0); |
| 2550 mtlr(r0); |
2613 | 2551 |
2614 bind(&done); | 2552 bind(&done); |
2615 } | 2553 } |
2616 | 2554 |
2617 | 2555 |
2618 void MacroAssembler::TruncateHeapNumberToI(Register result, | 2556 void MacroAssembler::TruncateHeapNumberToI(Register result, |
2619 Register object) { | 2557 Register object) { |
2620 Label done; | 2558 Label done; |
2621 LowDwVfpRegister double_scratch = kScratchDoubleReg; | 2559 DoubleRegister double_scratch = kScratchDoubleReg; |
2622 DCHECK(!result.is(object)); | 2560 DCHECK(!result.is(object)); |
2623 | 2561 |
2624 vldr(double_scratch, | 2562 lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); |
2625 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); | |
2626 TryInlineTruncateDoubleToI(result, double_scratch, &done); | 2563 TryInlineTruncateDoubleToI(result, double_scratch, &done); |
2627 | 2564 |
2628   // If we fell through then the inline version didn't succeed - call stub instead. | 2565   // If we fell through then the inline version didn't succeed - call stub instead.
2629 push(lr); | 2566 mflr(r0); |
| 2567 push(r0); |
2630 DoubleToIStub stub(isolate(), | 2568 DoubleToIStub stub(isolate(), |
2631 object, | 2569 object, |
2632 result, | 2570 result, |
2633 HeapNumber::kValueOffset - kHeapObjectTag, | 2571 HeapNumber::kValueOffset - kHeapObjectTag, |
2634 true, | 2572 true, |
2635 true); | 2573 true); |
2636 CallStub(&stub); | 2574 CallStub(&stub); |
2637 pop(lr); | 2575 pop(r0); |
| 2576 mtlr(r0); |
2638 | 2577 |
2639 bind(&done); | 2578 bind(&done); |
2640 } | 2579 } |
2641 | 2580 |
2642 | 2581 |
2643 void MacroAssembler::TruncateNumberToI(Register object, | 2582 void MacroAssembler::TruncateNumberToI(Register object, |
2644 Register result, | 2583 Register result, |
2645 Register heap_number_map, | 2584 Register heap_number_map, |
2646 Register scratch1, | 2585 Register scratch1, |
2647 Label* not_number) { | 2586 Label* not_number) { |
2648 Label done; | 2587 Label done; |
2649 DCHECK(!result.is(object)); | 2588 DCHECK(!result.is(object)); |
2650 | 2589 |
2651 UntagAndJumpIfSmi(result, object, &done); | 2590 UntagAndJumpIfSmi(result, object, &done); |
2652 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 2591 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
2653 TruncateHeapNumberToI(result, object); | 2592 TruncateHeapNumberToI(result, object); |
2654 | 2593 |
2655 bind(&done); | 2594 bind(&done); |
2656 } | 2595 } |
2657 | 2596 |
2658 | 2597 |
2659 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2598 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
2660 Register src, | 2599 Register src, |
2661 int num_least_bits) { | 2600 int num_least_bits) { |
2662 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 2601 #if V8_TARGET_ARCH_PPC64 |
2663 ubfx(dst, src, kSmiTagSize, num_least_bits); | 2602 rldicl(dst, src, kBitsPerPointer - kSmiShift, |
2664 } else { | 2603 kBitsPerPointer - num_least_bits); |
2665 SmiUntag(dst, src); | 2604 #else |
2666 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | 2605 rlwinm(dst, src, kBitsPerPointer - kSmiShift, |
2667 } | 2606 kBitsPerPointer - num_least_bits, 31); |
| 2607 #endif |
2668 } | 2608 } |
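The rldicl/rlwinm forms above are the PPC rotate-and-mask idiom: rotating left by kBitsPerPointer - kSmiShift is the same as rotating right by kSmiShift (an untag), and the mask keeps only the low num_least_bits bits. In plain C++ terms, a sketch (the helper name is illustrative; kSmiShift is whatever smi shift width this port uses on the target):

  #include <cstdint>

  intptr_t GetLeastBitsFromSmiModel(intptr_t smi, int kSmiShift,
                                    int num_least_bits) {
    // Untag, then keep only the requested low-order bits.
    intptr_t untagged = smi >> kSmiShift;
    intptr_t mask = (static_cast<intptr_t>(1) << num_least_bits) - 1;
    return untagged & mask;
  }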
2669 | 2609 |
2670 | 2610 |
2671 void MacroAssembler::GetLeastBitsFromInt32(Register dst, | 2611 void MacroAssembler::GetLeastBitsFromInt32(Register dst, |
2672 Register src, | 2612 Register src, |
2673 int num_least_bits) { | 2613 int num_least_bits) { |
2674 and_(dst, src, Operand((1 << num_least_bits) - 1)); | 2614 rlwinm(dst, src, 0, 32 - num_least_bits, 31); |
2675 } | 2615 } |
2676 | 2616 |
2677 | 2617 |
2678 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 2618 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
2679 int num_arguments, | 2619 int num_arguments, |
2680 SaveFPRegsMode save_doubles) { | 2620 SaveFPRegsMode save_doubles) { |
2681 // All parameters are on the stack. r0 has the return value after call. | 2621 // All parameters are on the stack. r3 has the return value after call. |
2682 | 2622 |
2683 // If the expected number of arguments of the runtime function is | 2623 // If the expected number of arguments of the runtime function is |
2684 // constant, we check that the actual number of arguments match the | 2624 // constant, we check that the actual number of arguments match the |
2685 // expectation. | 2625 // expectation. |
2686 CHECK(f->nargs < 0 || f->nargs == num_arguments); | 2626 CHECK(f->nargs < 0 || f->nargs == num_arguments); |
2687 | 2627 |
2688 // TODO(1236192): Most runtime routines don't need the number of | 2628 // TODO(1236192): Most runtime routines don't need the number of |
2689 // arguments passed in because it is constant. At some point we | 2629 // arguments passed in because it is constant. At some point we |
2690 // should remove this need and make the runtime routine entry code | 2630 // should remove this need and make the runtime routine entry code |
2691 // smarter. | 2631 // smarter. |
2692 mov(r0, Operand(num_arguments)); | 2632 mov(r3, Operand(num_arguments)); |
2693 mov(r1, Operand(ExternalReference(f, isolate()))); | 2633 mov(r4, Operand(ExternalReference(f, isolate()))); |
2694 CEntryStub stub(isolate(), 1, save_doubles); | 2634 CEntryStub stub(isolate(), |
| 2635 #if V8_TARGET_ARCH_PPC64 |
| 2636 f->result_size, |
| 2637 #else |
| 2638 1, |
| 2639 #endif |
| 2640 save_doubles); |
2695 CallStub(&stub); | 2641 CallStub(&stub); |
2696 } | 2642 } |
2697 | 2643 |
2698 | 2644 |
2699 void MacroAssembler::CallExternalReference(const ExternalReference& ext, | 2645 void MacroAssembler::CallExternalReference(const ExternalReference& ext, |
2700 int num_arguments) { | 2646 int num_arguments) { |
2701 mov(r0, Operand(num_arguments)); | 2647 mov(r3, Operand(num_arguments)); |
2702 mov(r1, Operand(ext)); | 2648 mov(r4, Operand(ext)); |
2703 | 2649 |
2704 CEntryStub stub(isolate(), 1); | 2650 CEntryStub stub(isolate(), 1); |
2705 CallStub(&stub); | 2651 CallStub(&stub); |
2706 } | 2652 } |
2707 | 2653 |
2708 | 2654 |
2709 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, | 2655 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, |
2710 int num_arguments, | 2656 int num_arguments, |
2711 int result_size) { | 2657 int result_size) { |
2712 // TODO(1236192): Most runtime routines don't need the number of | 2658 // TODO(1236192): Most runtime routines don't need the number of |
2713 // arguments passed in because it is constant. At some point we | 2659 // arguments passed in because it is constant. At some point we |
2714 // should remove this need and make the runtime routine entry code | 2660 // should remove this need and make the runtime routine entry code |
2715 // smarter. | 2661 // smarter. |
2716 mov(r0, Operand(num_arguments)); | 2662 mov(r3, Operand(num_arguments)); |
2717 JumpToExternalReference(ext); | 2663 JumpToExternalReference(ext); |
2718 } | 2664 } |
2719 | 2665 |
2720 | 2666 |
2721 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, | 2667 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, |
2722 int num_arguments, | 2668 int num_arguments, |
2723 int result_size) { | 2669 int result_size) { |
2724 TailCallExternalReference(ExternalReference(fid, isolate()), | 2670 TailCallExternalReference(ExternalReference(fid, isolate()), |
2725 num_arguments, | 2671 num_arguments, |
2726 result_size); | 2672 result_size); |
2727 } | 2673 } |
2728 | 2674 |
2729 | 2675 |
2730 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { | 2676 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { |
2731 #if defined(__thumb__) | 2677 mov(r4, Operand(builtin)); |
2732 // Thumb mode builtin. | |
2733 DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); | |
2734 #endif | |
2735 mov(r1, Operand(builtin)); | |
2736 CEntryStub stub(isolate(), 1); | 2678 CEntryStub stub(isolate(), 1); |
2737 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 2679 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
2738 } | 2680 } |
2739 | 2681 |
2740 | 2682 |
2741 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | 2683 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
2742 InvokeFlag flag, | 2684 InvokeFlag flag, |
2743 const CallWrapper& call_wrapper) { | 2685 const CallWrapper& call_wrapper) { |
2744 // You can't call a builtin without a valid frame. | 2686 // You can't call a builtin without a valid frame. |
2745 DCHECK(flag == JUMP_FUNCTION || has_frame()); | 2687 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
2746 | 2688 |
2747 GetBuiltinEntry(r2, id); | 2689 GetBuiltinEntry(r5, id); |
2748 if (flag == CALL_FUNCTION) { | 2690 if (flag == CALL_FUNCTION) { |
2749 call_wrapper.BeforeCall(CallSize(r2)); | 2691 call_wrapper.BeforeCall(CallSize(r5)); |
2750 Call(r2); | 2692 Call(r5); |
2751 call_wrapper.AfterCall(); | 2693 call_wrapper.AfterCall(); |
2752 } else { | 2694 } else { |
2753 DCHECK(flag == JUMP_FUNCTION); | 2695 DCHECK(flag == JUMP_FUNCTION); |
2754 Jump(r2); | 2696 Jump(r5); |
2755 } | 2697 } |
2756 } | 2698 } |
2757 | 2699 |
2758 | 2700 |
2759 void MacroAssembler::GetBuiltinFunction(Register target, | 2701 void MacroAssembler::GetBuiltinFunction(Register target, |
2760 Builtins::JavaScript id) { | 2702 Builtins::JavaScript id) { |
2761 // Load the builtins object into target register. | 2703 // Load the builtins object into target register. |
2762 ldr(target, | 2704 LoadP(target, |
2763 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2705 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2764 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); | 2706 LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); |
2765 // Load the JavaScript builtin function from the builtins object. | 2707 // Load the JavaScript builtin function from the builtins object. |
2766 ldr(target, FieldMemOperand(target, | 2708 LoadP(target, |
2767 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | 2709 FieldMemOperand(target, |
| 2710 JSBuiltinsObject::OffsetOfFunctionWithId(id)), r0); |
2768 } | 2711 } |
2769 | 2712 |
2770 | 2713 |
2771 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | 2714 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
2772 DCHECK(!target.is(r1)); | 2715 DCHECK(!target.is(r4)); |
2773 GetBuiltinFunction(r1, id); | 2716 GetBuiltinFunction(r4, id); |
2774 // Load the code entry point from the builtins object. | 2717 // Load the code entry point from the builtins object. |
2775 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 2718 LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
2776 } | 2719 } |
2777 | 2720 |
2778 | 2721 |
2779 void MacroAssembler::SetCounter(StatsCounter* counter, int value, | 2722 void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
2780 Register scratch1, Register scratch2) { | 2723 Register scratch1, Register scratch2) { |
2781 if (FLAG_native_code_counters && counter->Enabled()) { | 2724 if (FLAG_native_code_counters && counter->Enabled()) { |
2782 mov(scratch1, Operand(value)); | 2725 mov(scratch1, Operand(value)); |
2783 mov(scratch2, Operand(ExternalReference(counter))); | 2726 mov(scratch2, Operand(ExternalReference(counter))); |
2784 str(scratch1, MemOperand(scratch2)); | 2727 stw(scratch1, MemOperand(scratch2)); |
2785 } | 2728 } |
2786 } | 2729 } |
2787 | 2730 |
2788 | 2731 |
2789 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, | 2732 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, |
2790 Register scratch1, Register scratch2) { | 2733 Register scratch1, Register scratch2) { |
2791 DCHECK(value > 0); | 2734 DCHECK(value > 0); |
2792 if (FLAG_native_code_counters && counter->Enabled()) { | 2735 if (FLAG_native_code_counters && counter->Enabled()) { |
2793 mov(scratch2, Operand(ExternalReference(counter))); | 2736 mov(scratch2, Operand(ExternalReference(counter))); |
2794 ldr(scratch1, MemOperand(scratch2)); | 2737 lwz(scratch1, MemOperand(scratch2)); |
2795 add(scratch1, scratch1, Operand(value)); | 2738 addi(scratch1, scratch1, Operand(value)); |
2796 str(scratch1, MemOperand(scratch2)); | 2739 stw(scratch1, MemOperand(scratch2)); |
2797 } | 2740 } |
2798 } | 2741 } |
2799 | 2742 |
2800 | 2743 |
2801 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, | 2744 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
2802 Register scratch1, Register scratch2) { | 2745 Register scratch1, Register scratch2) { |
2803 DCHECK(value > 0); | 2746 DCHECK(value > 0); |
2804 if (FLAG_native_code_counters && counter->Enabled()) { | 2747 if (FLAG_native_code_counters && counter->Enabled()) { |
2805 mov(scratch2, Operand(ExternalReference(counter))); | 2748 mov(scratch2, Operand(ExternalReference(counter))); |
2806 ldr(scratch1, MemOperand(scratch2)); | 2749 lwz(scratch1, MemOperand(scratch2)); |
2807 sub(scratch1, scratch1, Operand(value)); | 2750 subi(scratch1, scratch1, Operand(value)); |
2808 str(scratch1, MemOperand(scratch2)); | 2751 stw(scratch1, MemOperand(scratch2)); |
2809 } | 2752 } |
2810 } | 2753 } |
2811 | 2754 |
2812 | 2755 |
2813 void MacroAssembler::Assert(Condition cond, BailoutReason reason) { | 2756 void MacroAssembler::Assert(Condition cond, BailoutReason reason, |
| 2757 CRegister cr) { |
2814 if (emit_debug_code()) | 2758 if (emit_debug_code()) |
2815 Check(cond, reason); | 2759 Check(cond, reason, cr); |
2816 } | 2760 } |
2817 | 2761 |
2818 | 2762 |
2819 void MacroAssembler::AssertFastElements(Register elements) { | 2763 void MacroAssembler::AssertFastElements(Register elements) { |
2820 if (emit_debug_code()) { | 2764 if (emit_debug_code()) { |
2821 DCHECK(!elements.is(ip)); | 2765 DCHECK(!elements.is(ip)); |
2822 Label ok; | 2766 Label ok; |
2823 push(elements); | 2767 push(elements); |
2824 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); | 2768 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); |
2825 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 2769 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
2826 cmp(elements, ip); | 2770 cmp(elements, ip); |
2827 b(eq, &ok); | 2771 beq(&ok); |
2828 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); | 2772 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); |
2829 cmp(elements, ip); | 2773 cmp(elements, ip); |
2830 b(eq, &ok); | 2774 beq(&ok); |
2831 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); | 2775 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); |
2832 cmp(elements, ip); | 2776 cmp(elements, ip); |
2833 b(eq, &ok); | 2777 beq(&ok); |
2834 Abort(kJSObjectWithFastElementsMapHasSlowElements); | 2778 Abort(kJSObjectWithFastElementsMapHasSlowElements); |
2835 bind(&ok); | 2779 bind(&ok); |
2836 pop(elements); | 2780 pop(elements); |
2837 } | 2781 } |
2838 } | 2782 } |
2839 | 2783 |
2840 | 2784 |
2841 void MacroAssembler::Check(Condition cond, BailoutReason reason) { | 2785 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) { |
2842 Label L; | 2786 Label L; |
2843 b(cond, &L); | 2787 b(cond, &L, cr); |
2844 Abort(reason); | 2788 Abort(reason); |
2845 // will not return here | 2789 // will not return here |
2846 bind(&L); | 2790 bind(&L); |
2847 } | 2791 } |
2848 | 2792 |
2849 | 2793 |
2850 void MacroAssembler::Abort(BailoutReason reason) { | 2794 void MacroAssembler::Abort(BailoutReason reason) { |
2851 Label abort_start; | 2795 Label abort_start; |
2852 bind(&abort_start); | 2796 bind(&abort_start); |
2853 #ifdef DEBUG | 2797 #ifdef DEBUG |
2854 const char* msg = GetBailoutReason(reason); | 2798 const char* msg = GetBailoutReason(reason); |
2855 if (msg != NULL) { | 2799 if (msg != NULL) { |
2856 RecordComment("Abort message: "); | 2800 RecordComment("Abort message: "); |
2857 RecordComment(msg); | 2801 RecordComment(msg); |
2858 } | 2802 } |
2859 | 2803 |
2860 if (FLAG_trap_on_abort) { | 2804 if (FLAG_trap_on_abort) { |
2861 stop(msg); | 2805 stop(msg); |
2862 return; | 2806 return; |
2863 } | 2807 } |
2864 #endif | 2808 #endif |
2865 | 2809 |
2866 mov(r0, Operand(Smi::FromInt(reason))); | 2810 LoadSmiLiteral(r0, Smi::FromInt(reason)); |
2867 push(r0); | 2811 push(r0); |
2868 | |
2869 // Disable stub call restrictions to always allow calls to abort. | 2812 // Disable stub call restrictions to always allow calls to abort. |
2870 if (!has_frame_) { | 2813 if (!has_frame_) { |
2871 // We don't actually want to generate a pile of code for this, so just | 2814 // We don't actually want to generate a pile of code for this, so just |
2872 // claim there is a stack frame, without generating one. | 2815 // claim there is a stack frame, without generating one. |
2873 FrameScope scope(this, StackFrame::NONE); | 2816 FrameScope scope(this, StackFrame::NONE); |
2874 CallRuntime(Runtime::kAbort, 1); | 2817 CallRuntime(Runtime::kAbort, 1); |
2875 } else { | 2818 } else { |
2876 CallRuntime(Runtime::kAbort, 1); | 2819 CallRuntime(Runtime::kAbort, 1); |
2877 } | 2820 } |
2878 // will not return here | 2821 // will not return here |
2879 if (is_const_pool_blocked()) { | |
2880 // If the calling code cares about the exact number of | |
2881 // instructions generated, we insert padding here to keep the size | |
2882 // of the Abort macro constant. | |
2883 static const int kExpectedAbortInstructions = 7; | |
2884 int abort_instructions = InstructionsGeneratedSince(&abort_start); | |
2885 DCHECK(abort_instructions <= kExpectedAbortInstructions); | |
2886 while (abort_instructions++ < kExpectedAbortInstructions) { | |
2887 nop(); | |
2888 } | |
2889 } | |
2890 } | 2822 } |
2891 | 2823 |
2892 | 2824 |
2893 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 2825 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
2894 if (context_chain_length > 0) { | 2826 if (context_chain_length > 0) { |
2895 // Move up the chain of contexts to the context containing the slot. | 2827 // Move up the chain of contexts to the context containing the slot. |
2896 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 2828 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
2897 for (int i = 1; i < context_chain_length; i++) { | 2829 for (int i = 1; i < context_chain_length; i++) { |
2898 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 2830 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
2899 } | 2831 } |
2900 } else { | 2832 } else { |
2901 // Slot is in the current function context. Move it into the | 2833 // Slot is in the current function context. Move it into the |
2902 // destination register in case we store into it (the write barrier | 2834 // destination register in case we store into it (the write barrier |
2903     // cannot be allowed to destroy the context in cp).                    | 2835     // cannot be allowed to destroy the context in cp).
2904 mov(dst, cp); | 2836 mr(dst, cp); |
2905 } | 2837 } |
2906 } | 2838 } |
2907 | 2839 |
2908 | 2840 |
2909 void MacroAssembler::LoadTransitionedArrayMapConditional( | 2841 void MacroAssembler::LoadTransitionedArrayMapConditional( |
2910 ElementsKind expected_kind, | 2842 ElementsKind expected_kind, |
2911 ElementsKind transitioned_kind, | 2843 ElementsKind transitioned_kind, |
2912 Register map_in_out, | 2844 Register map_in_out, |
2913 Register scratch, | 2845 Register scratch, |
2914 Label* no_map_match) { | 2846 Label* no_map_match) { |
2915 // Load the global or builtins object from the current context. | 2847 // Load the global or builtins object from the current context. |
2916 ldr(scratch, | 2848 LoadP(scratch, |
2917 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2849 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2918 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 2850 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
2919 | 2851 |
2920 // Check that the function's map is the same as the expected cached map. | 2852 // Check that the function's map is the same as the expected cached map. |
2921 ldr(scratch, | 2853 LoadP(scratch, |
2922 MemOperand(scratch, | 2854 MemOperand(scratch, |
2923 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); | 2855 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); |
2924 size_t offset = expected_kind * kPointerSize + | 2856 size_t offset = expected_kind * kPointerSize + |
2925 FixedArrayBase::kHeaderSize; | 2857 FixedArrayBase::kHeaderSize; |
2926 ldr(ip, FieldMemOperand(scratch, offset)); | 2858 LoadP(ip, FieldMemOperand(scratch, offset)); |
2927 cmp(map_in_out, ip); | 2859 cmp(map_in_out, ip); |
2928 b(ne, no_map_match); | 2860 bne(no_map_match); |
2929 | 2861 |
2930 // Use the transitioned cached map. | 2862 // Use the transitioned cached map. |
2931 offset = transitioned_kind * kPointerSize + | 2863 offset = transitioned_kind * kPointerSize + |
2932 FixedArrayBase::kHeaderSize; | 2864 FixedArrayBase::kHeaderSize; |
2933 ldr(map_in_out, FieldMemOperand(scratch, offset)); | 2865 LoadP(map_in_out, FieldMemOperand(scratch, offset)); |
2934 } | 2866 } |
2935 | 2867 |
2936 | 2868 |
2937 void MacroAssembler::LoadGlobalFunction(int index, Register function) { | 2869 void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
2938 // Load the global or builtins object from the current context. | 2870 // Load the global or builtins object from the current context. |
2939 ldr(function, | 2871 LoadP(function, |
2940 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2872 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2941 // Load the native context from the global or builtins object. | 2873 // Load the native context from the global or builtins object. |
2942 ldr(function, FieldMemOperand(function, | 2874 LoadP(function, FieldMemOperand(function, |
2943 GlobalObject::kNativeContextOffset)); | 2875 GlobalObject::kNativeContextOffset)); |
2944 // Load the function from the native context. | 2876 // Load the function from the native context. |
2945 ldr(function, MemOperand(function, Context::SlotOffset(index))); | 2877 LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0); |
2946 } | 2878 } |
2947 | 2879 |
2948 | 2880 |
2949 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, | 2881 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, |
2950 Register map, | 2882 Register map, |
2951 Register scratch) { | 2883 Register scratch) { |
2952 // Load the initial map. The global functions all have initial maps. | 2884 // Load the initial map. The global functions all have initial maps. |
2953 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2885 LoadP(map, |
| 2886 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
2954 if (emit_debug_code()) { | 2887 if (emit_debug_code()) { |
2955 Label ok, fail; | 2888 Label ok, fail; |
2956 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); | 2889 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); |
2957 b(&ok); | 2890 b(&ok); |
2958 bind(&fail); | 2891 bind(&fail); |
2959 Abort(kGlobalFunctionsMustHaveInitialMap); | 2892 Abort(kGlobalFunctionsMustHaveInitialMap); |
2960 bind(&ok); | 2893 bind(&ok); |
2961 } | 2894 } |
2962 } | 2895 } |
2963 | 2896 |
2964 | 2897 |
2965 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( | 2898 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( |
2966 Register reg, | 2899 Register reg, |
2967 Register scratch, | 2900 Register scratch, |
2968 Label* not_power_of_two_or_zero) { | 2901 Label* not_power_of_two_or_zero) { |
2969 sub(scratch, reg, Operand(1), SetCC); | 2902 subi(scratch, reg, Operand(1)); |
2970 b(mi, not_power_of_two_or_zero); | 2903 cmpi(scratch, Operand::Zero()); |
2971 tst(scratch, reg); | 2904 blt(not_power_of_two_or_zero); |
2972 b(ne, not_power_of_two_or_zero); | 2905 and_(r0, scratch, reg, SetRC); |
| 2906 bne(not_power_of_two_or_zero, cr0); |
2973 } | 2907 } |
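The replacement keeps the classic test from the ARM version, just split across cmpi and an and-with-record-bit: reg is a power of two exactly when reg - 1 is non-negative and reg & (reg - 1) is zero. As a sketch:

  #include <cstdint>

  bool IsPowerOfTwo(intptr_t reg) {
    // The generated code computes reg - 1 and branches if it is negative
    // (rejecting zero and negatives), then tests reg & (reg - 1): clearing
    // the lowest set bit yields zero exactly when a single bit was set.
    return reg > 0 && (reg & (reg - 1)) == 0;
  }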
2974 | 2908 |
2975 | 2909 |
2976 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg( | 2910 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg( |
2977 Register reg, | 2911 Register reg, |
2978 Register scratch, | 2912 Register scratch, |
2979 Label* zero_and_neg, | 2913 Label* zero_and_neg, |
2980 Label* not_power_of_two) { | 2914 Label* not_power_of_two) { |
2981 sub(scratch, reg, Operand(1), SetCC); | 2915 subi(scratch, reg, Operand(1)); |
2982 b(mi, zero_and_neg); | 2916 cmpi(scratch, Operand::Zero()); |
2983 tst(scratch, reg); | 2917 blt(zero_and_neg); |
2984 b(ne, not_power_of_two); | 2918 and_(r0, scratch, reg, SetRC); |
| 2919 bne(not_power_of_two, cr0); |
2985 } | 2920 } |
2986 | 2921 |
| 2922 #if !V8_TARGET_ARCH_PPC64 |
| 2923 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { |
| 2924 DCHECK(!reg.is(overflow)); |
| 2925 mr(overflow, reg); // Save original value. |
| 2926 SmiTag(reg); |
| 2927 xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0. |
| 2928 } |
| 2929 |
| 2930 |
| 2931 void MacroAssembler::SmiTagCheckOverflow(Register dst, |
| 2932 Register src, |
| 2933 Register overflow) { |
| 2934 if (dst.is(src)) { |
| 2935 // Fall back to slower case. |
| 2936 SmiTagCheckOverflow(dst, overflow); |
| 2937 } else { |
| 2938 DCHECK(!dst.is(src)); |
| 2939 DCHECK(!dst.is(overflow)); |
| 2940 DCHECK(!src.is(overflow)); |
| 2941 SmiTag(dst, src); |
| 2942 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0. |
| 2943 } |
| 2944 } |
| 2945 #endif |
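The new 32-bit-only helpers above rely on the identity named in their comment: SmiTag is a left shift by one, and that shift overflows exactly when it changes the sign bit, which the xor exposes. A sketch of the check for the 32-bit case (function name is illustrative):

  #include <cstdint>

  bool SmiTagOverflows(int32_t value) {
    // SmiTag(value) == value << 1 on the 32-bit target.
    int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    // Overflow iff value lies outside [-2^30, 2^30 - 1], i.e. the shift
    // flipped the sign bit; (value ^ tagged) < 0 tests exactly that.
    return (value ^ tagged) < 0;
  }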
2987 | 2946 |
2988 void MacroAssembler::JumpIfNotBothSmi(Register reg1, | 2947 void MacroAssembler::JumpIfNotBothSmi(Register reg1, |
2989 Register reg2, | 2948 Register reg2, |
2990 Label* on_not_both_smi) { | 2949 Label* on_not_both_smi) { |
2991 STATIC_ASSERT(kSmiTag == 0); | 2950 STATIC_ASSERT(kSmiTag == 0); |
2992 tst(reg1, Operand(kSmiTagMask)); | 2951 DCHECK_EQ(1, static_cast<int>(kSmiTagMask)); |
2993 tst(reg2, Operand(kSmiTagMask), eq); | 2952 orx(r0, reg1, reg2, LeaveRC); |
2994 b(ne, on_not_both_smi); | 2953 JumpIfNotSmi(r0, on_not_both_smi); |
2995 } | 2954 } |
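The orx trick above depends on kSmiTag == 0 with a one-bit tag mask (hence the DCHECK): both values are smis precisely when the OR of the two still has the tag bit clear. A sketch under that assumption:

  #include <cstdint>

  bool BothAreSmis(intptr_t reg1, intptr_t reg2) {
    const intptr_t kSmiTagMask = 1;  // assumed, per the DCHECK above
    return ((reg1 | reg2) & kSmiTagMask) == 0;
  }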
2996 | 2955 |
2997 | 2956 |
2998 void MacroAssembler::UntagAndJumpIfSmi( | 2957 void MacroAssembler::UntagAndJumpIfSmi( |
2999 Register dst, Register src, Label* smi_case) { | 2958 Register dst, Register src, Label* smi_case) { |
3000 STATIC_ASSERT(kSmiTag == 0); | 2959 STATIC_ASSERT(kSmiTag == 0); |
3001 SmiUntag(dst, src, SetCC); | 2960 STATIC_ASSERT(kSmiTagSize == 1); |
3002 b(cc, smi_case); // Shifter carry is not set for a smi. | 2961 TestBit(src, 0, r0); |
| 2962 SmiUntag(dst, src); |
| 2963 beq(smi_case, cr0); |
3003 } | 2964 } |
3004 | 2965 |
3005 | 2966 |
3006 void MacroAssembler::UntagAndJumpIfNotSmi( | 2967 void MacroAssembler::UntagAndJumpIfNotSmi( |
3007 Register dst, Register src, Label* non_smi_case) { | 2968 Register dst, Register src, Label* non_smi_case) { |
3008 STATIC_ASSERT(kSmiTag == 0); | 2969 STATIC_ASSERT(kSmiTag == 0); |
3009 SmiUntag(dst, src, SetCC); | 2970 STATIC_ASSERT(kSmiTagSize == 1); |
3010 b(cs, non_smi_case); // Shifter carry is set for a non-smi. | 2971 TestBit(src, 0, r0); |
| 2972 SmiUntag(dst, src); |
| 2973 bne(non_smi_case, cr0); |
3011 } | 2974 } |
3012 | 2975 |
3013 | 2976 |
3014 void MacroAssembler::JumpIfEitherSmi(Register reg1, | 2977 void MacroAssembler::JumpIfEitherSmi(Register reg1, |
3015 Register reg2, | 2978 Register reg2, |
3016 Label* on_either_smi) { | 2979 Label* on_either_smi) { |
3017 STATIC_ASSERT(kSmiTag == 0); | 2980 STATIC_ASSERT(kSmiTag == 0); |
3018 tst(reg1, Operand(kSmiTagMask)); | 2981 JumpIfSmi(reg1, on_either_smi); |
3019 tst(reg2, Operand(kSmiTagMask), ne); | 2982 JumpIfSmi(reg2, on_either_smi); |
3020 b(eq, on_either_smi); | |
3021 } | 2983 } |
3022 | 2984 |
3023 | 2985 |
3024 void MacroAssembler::AssertNotSmi(Register object) { | 2986 void MacroAssembler::AssertNotSmi(Register object) { |
3025 if (emit_debug_code()) { | 2987 if (emit_debug_code()) { |
3026 STATIC_ASSERT(kSmiTag == 0); | 2988 STATIC_ASSERT(kSmiTag == 0); |
3027 tst(object, Operand(kSmiTagMask)); | 2989 TestIfSmi(object, r0); |
3028 Check(ne, kOperandIsASmi); | 2990 Check(ne, kOperandIsASmi, cr0); |
3029 } | 2991 } |
3030 } | 2992 } |
3031 | 2993 |
3032 | 2994 |
3033 void MacroAssembler::AssertSmi(Register object) { | 2995 void MacroAssembler::AssertSmi(Register object) { |
3034 if (emit_debug_code()) { | 2996 if (emit_debug_code()) { |
3035 STATIC_ASSERT(kSmiTag == 0); | 2997 STATIC_ASSERT(kSmiTag == 0); |
3036 tst(object, Operand(kSmiTagMask)); | 2998 TestIfSmi(object, r0); |
3037 Check(eq, kOperandIsNotSmi); | 2999 Check(eq, kOperandIsNotSmi, cr0); |
3038 } | 3000 } |
3039 } | 3001 } |
3040 | 3002 |
3041 | 3003 |
3042 void MacroAssembler::AssertString(Register object) { | 3004 void MacroAssembler::AssertString(Register object) { |
3043 if (emit_debug_code()) { | 3005 if (emit_debug_code()) { |
3044 STATIC_ASSERT(kSmiTag == 0); | 3006 STATIC_ASSERT(kSmiTag == 0); |
3045 tst(object, Operand(kSmiTagMask)); | 3007 TestIfSmi(object, r0); |
3046 Check(ne, kOperandIsASmiAndNotAString); | 3008 Check(ne, kOperandIsASmiAndNotAString, cr0); |
3047 push(object); | 3009 push(object); |
3048 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); | 3010 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
3049 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); | 3011 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); |
3050 pop(object); | 3012 pop(object); |
3051 Check(lo, kOperandIsNotAString); | 3013 Check(lt, kOperandIsNotAString); |
3052 } | 3014 } |
3053 } | 3015 } |
3054 | 3016 |
3055 | 3017 |
3056 void MacroAssembler::AssertName(Register object) { | 3018 void MacroAssembler::AssertName(Register object) { |
3057 if (emit_debug_code()) { | 3019 if (emit_debug_code()) { |
3058 STATIC_ASSERT(kSmiTag == 0); | 3020 STATIC_ASSERT(kSmiTag == 0); |
3059 tst(object, Operand(kSmiTagMask)); | 3021 TestIfSmi(object, r0); |
3060 Check(ne, kOperandIsASmiAndNotAName); | 3022 Check(ne, kOperandIsASmiAndNotAName, cr0); |
3061 push(object); | 3023 push(object); |
3062 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); | 3024 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
3063 CompareInstanceType(object, object, LAST_NAME_TYPE); | 3025 CompareInstanceType(object, object, LAST_NAME_TYPE); |
3064 pop(object); | 3026 pop(object); |
3065 Check(le, kOperandIsNotAName); | 3027 Check(le, kOperandIsNotAName); |
3066 } | 3028 } |
3067 } | 3029 } |
3068 | 3030 |
3069 | 3031 |
3070 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, | 3032 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, |
3071 Register scratch) { | 3033 Register scratch) { |
3072 if (emit_debug_code()) { | 3034 if (emit_debug_code()) { |
3073 Label done_checking; | 3035 Label done_checking; |
3074 AssertNotSmi(object); | 3036 AssertNotSmi(object); |
3075 CompareRoot(object, Heap::kUndefinedValueRootIndex); | 3037 CompareRoot(object, Heap::kUndefinedValueRootIndex); |
3076 b(eq, &done_checking); | 3038 beq(&done_checking); |
3077 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 3039 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
3078 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex); | 3040 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex); |
3079 Assert(eq, kExpectedUndefinedOrCell); | 3041 Assert(eq, kExpectedUndefinedOrCell); |
3080 bind(&done_checking); | 3042 bind(&done_checking); |
3081 } | 3043 } |
3082 } | 3044 } |
3083 | 3045 |
3084 | 3046 |
3085 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { | 3047 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { |
3086 if (emit_debug_code()) { | 3048 if (emit_debug_code()) { |
3087 CompareRoot(reg, index); | 3049 CompareRoot(reg, index); |
3088 Check(eq, kHeapNumberMapRegisterClobbered); | 3050 Check(eq, kHeapNumberMapRegisterClobbered); |
3089 } | 3051 } |
3090 } | 3052 } |
3091 | 3053 |
3092 | 3054 |
3093 void MacroAssembler::JumpIfNotHeapNumber(Register object, | 3055 void MacroAssembler::JumpIfNotHeapNumber(Register object, |
3094 Register heap_number_map, | 3056 Register heap_number_map, |
3095 Register scratch, | 3057 Register scratch, |
3096 Label* on_not_heap_number) { | 3058 Label* on_not_heap_number) { |
3097 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 3059 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
3098 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 3060 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
3099 cmp(scratch, heap_number_map); | 3061 cmp(scratch, heap_number_map); |
3100 b(ne, on_not_heap_number); | 3062 bne(on_not_heap_number); |
3101 } | 3063 } |
3102 | 3064 |
3103 | 3065 |
3104 void MacroAssembler::LookupNumberStringCache(Register object, | 3066 void MacroAssembler::LookupNumberStringCache(Register object, |
3105 Register result, | 3067 Register result, |
3106 Register scratch1, | 3068 Register scratch1, |
3107 Register scratch2, | 3069 Register scratch2, |
3108 Register scratch3, | 3070 Register scratch3, |
3109 Label* not_found) { | 3071 Label* not_found) { |
3110 // Use of registers. Register result is used as a temporary. | 3072 // Use of registers. Register result is used as a temporary. |
3111 Register number_string_cache = result; | 3073 Register number_string_cache = result; |
3112 Register mask = scratch3; | 3074 Register mask = scratch3; |
3113 | 3075 |
3114 // Load the number string cache. | 3076 // Load the number string cache. |
3115 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | 3077 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
3116 | 3078 |
3117 // Make the hash mask from the length of the number string cache. It | 3079 // Make the hash mask from the length of the number string cache. It |
3118 // contains two elements (number and string) for each cache entry. | 3080 // contains two elements (number and string) for each cache entry. |
3119 ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); | 3081 LoadP(mask, FieldMemOperand(number_string_cache, |
| 3082 FixedArray::kLengthOffset)); |
3120 // Divide length by two (length is a smi). | 3083 // Divide length by two (length is a smi). |
3121 mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); | 3084 ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1); |
3122 sub(mask, mask, Operand(1)); // Make mask. | 3085 subi(mask, mask, Operand(1)); // Make mask. |
3123 | 3086 |
3124 // Calculate the entry in the number string cache. The hash value in the | 3087 // Calculate the entry in the number string cache. The hash value in the |
3125 // number string cache for smis is just the smi value, and the hash for | 3088 // number string cache for smis is just the smi value, and the hash for |
3126 // doubles is the xor of the upper and lower words. See | 3089 // doubles is the xor of the upper and lower words. See |
3127 // Heap::GetNumberStringCache. | 3090 // Heap::GetNumberStringCache. |
3128 Label is_smi; | 3091 Label is_smi; |
3129 Label load_result_from_cache; | 3092 Label load_result_from_cache; |
3130 JumpIfSmi(object, &is_smi); | 3093 JumpIfSmi(object, &is_smi); |
3131 CheckMap(object, | 3094 CheckMap(object, |
3132 scratch1, | 3095 scratch1, |
3133 Heap::kHeapNumberMapRootIndex, | 3096 Heap::kHeapNumberMapRootIndex, |
3134 not_found, | 3097 not_found, |
3135 DONT_DO_SMI_CHECK); | 3098 DONT_DO_SMI_CHECK); |
3136 | 3099 |
3137 STATIC_ASSERT(8 == kDoubleSize); | 3100 STATIC_ASSERT(8 == kDoubleSize); |
3138 add(scratch1, | 3101 lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
3139 object, | 3102 lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
3140 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 3103 xor_(scratch1, scratch1, scratch2); |
3141 ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); | 3104 and_(scratch1, scratch1, mask); |
3142 eor(scratch1, scratch1, Operand(scratch2)); | |
3143 and_(scratch1, scratch1, Operand(mask)); | |
3144 | 3105 |
3145 // Calculate address of entry in string cache: each entry consists | 3106 // Calculate address of entry in string cache: each entry consists |
3146 // of two pointer sized fields. | 3107 // of two pointer sized fields. |
3147 add(scratch1, | 3108 ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1)); |
3148 number_string_cache, | 3109 add(scratch1, number_string_cache, scratch1); |
3149 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | |
3150 | 3110 |
3151 Register probe = mask; | 3111 Register probe = mask; |
3152 ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 3112 LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
3153 JumpIfSmi(probe, not_found); | 3113 JumpIfSmi(probe, not_found); |
3154 sub(scratch2, object, Operand(kHeapObjectTag)); | 3114 lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset)); |
3155 vldr(d0, scratch2, HeapNumber::kValueOffset); | 3115 lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset)); |
3156 sub(probe, probe, Operand(kHeapObjectTag)); | 3116 fcmpu(d0, d1); |
3157 vldr(d1, probe, HeapNumber::kValueOffset); | 3117 bne(not_found); // The cache did not contain this value. |
3158 VFPCompareAndSetFlags(d0, d1); | |
3159 b(ne, not_found); // The cache did not contain this value. | |
3160 b(&load_result_from_cache); | 3118 b(&load_result_from_cache); |
3161 | 3119 |
3162 bind(&is_smi); | 3120 bind(&is_smi); |
3163 Register scratch = scratch1; | 3121 Register scratch = scratch1; |
3164 and_(scratch, mask, Operand(object, ASR, 1)); | 3122 SmiUntag(scratch, object); |
| 3123 and_(scratch, mask, scratch); |
3165 // Calculate address of entry in string cache: each entry consists | 3124 // Calculate address of entry in string cache: each entry consists |
3166 // of two pointer sized fields. | 3125 // of two pointer sized fields. |
3167 add(scratch, | 3126 ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1)); |
3168 number_string_cache, | 3127 add(scratch, number_string_cache, scratch); |
3169 Operand(scratch, LSL, kPointerSizeLog2 + 1)); | |
3170 | 3128 |
3171 // Check if the entry is the smi we are looking for. | 3129 // Check if the entry is the smi we are looking for. |
3172 ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | 3130 LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
3173 cmp(object, probe); | 3131 cmp(object, probe); |
3174 b(ne, not_found); | 3132 bne(not_found); |
3175 | 3133 |
3176 // Get the result from the cache. | 3134 // Get the result from the cache. |
3177 bind(&load_result_from_cache); | 3135 bind(&load_result_from_cache); |
3178 ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); | 3136 LoadP(result, |
| 3137 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); |
3179 IncrementCounter(isolate()->counters()->number_to_string_native(), | 3138 IncrementCounter(isolate()->counters()->number_to_string_native(), |
3180 1, | 3139 1, |
3181 scratch1, | 3140 scratch1, |
3182 scratch2); | 3141 scratch2); |
3183 } | 3142 } |
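The probe above follows the comments' description of Heap::GetNumberStringCache: each cache entry is a (number, string) pair, the mask is half the backing array length minus one, the smi hash is the untagged value, and the double hash xors the two 32-bit halves of the IEEE bits. A sketch of the double path (names and return convention here are illustrative only):

  #include <cstdint>
  #include <cstring>

  uint32_t NumberStringCacheKeySlot(double number, uint32_t cache_length) {
    uint32_t mask = cache_length / 2 - 1;   // two slots per entry
    uint64_t bits;
    std::memcpy(&bits, &number, sizeof(bits));
    uint32_t hash = static_cast<uint32_t>(bits) ^
                    static_cast<uint32_t>(bits >> 32);
    // Each entry occupies two pointer-sized fields; the key slot comes first
    // and the cached string follows it.
    return (hash & mask) * 2;
  }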
3184 | 3143 |
3185 | 3144 |
3186 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( | 3145 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( |
3187 Register first, | 3146 Register first, |
3188 Register second, | 3147 Register second, |
3189 Register scratch1, | 3148 Register scratch1, |
3190 Register scratch2, | 3149 Register scratch2, |
3191 Label* failure) { | 3150 Label* failure) { |
3192 // Test that both first and second are sequential ASCII strings. | 3151 // Test that both first and second are sequential ASCII strings. |
3193 // Assume that they are non-smis. | 3152 // Assume that they are non-smis. |
3194 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); | 3153 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); |
3195 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); | 3154 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); |
3196 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | 3155 lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
3197 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); | 3156 lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); |
3198 | 3157 |
3199 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, | 3158 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, |
3200 scratch2, | 3159 scratch2, |
3201 scratch1, | 3160 scratch1, |
3202 scratch2, | 3161 scratch2, |
3203 failure); | 3162 failure); |
3204 } | 3163 } |
3205 | 3164 |
3206 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, | 3165 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, |
3207 Register second, | 3166 Register second, |
3208 Register scratch1, | 3167 Register scratch1, |
3209 Register scratch2, | 3168 Register scratch2, |
3210 Label* failure) { | 3169 Label* failure) { |
3211 // Check that neither is a smi. | 3170 // Check that neither is a smi. |
3212 and_(scratch1, first, Operand(second)); | 3171 and_(scratch1, first, second); |
3213 JumpIfSmi(scratch1, failure); | 3172 JumpIfSmi(scratch1, failure); |
3214 JumpIfNonSmisNotBothSequentialAsciiStrings(first, | 3173 JumpIfNonSmisNotBothSequentialAsciiStrings(first, |
3215 second, | 3174 second, |
3216 scratch1, | 3175 scratch1, |
3217 scratch2, | 3176 scratch2, |
3218 failure); | 3177 failure); |
3219 } | 3178 } |
3220 | 3179 |
3221 | 3180 |
3222 void MacroAssembler::JumpIfNotUniqueName(Register reg, | 3181 void MacroAssembler::JumpIfNotUniqueName(Register reg, |
3223 Label* not_unique_name) { | 3182 Label* not_unique_name) { |
3224 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 3183 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
3225 Label succeed; | 3184 Label succeed; |
3226 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 3185 andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
3227 b(eq, &succeed); | 3186 beq(&succeed, cr0); |
3228 cmp(reg, Operand(SYMBOL_TYPE)); | 3187 cmpi(reg, Operand(SYMBOL_TYPE)); |
3229 b(ne, not_unique_name); | 3188 bne(not_unique_name); |
3230 | 3189 |
3231 bind(&succeed); | 3190 bind(&succeed); |
3232 } | 3191 } |
3233 | 3192 |
3234 | 3193 |
3235 // Allocates a heap number or jumps to the need_gc label if the young space | 3194 // Allocates a heap number or jumps to the need_gc label if the young space |
3236 // is full and a scavenge is needed. | 3195 // is full and a scavenge is needed. |
3237 void MacroAssembler::AllocateHeapNumber(Register result, | 3196 void MacroAssembler::AllocateHeapNumber(Register result, |
3238 Register scratch1, | 3197 Register scratch1, |
3239 Register scratch2, | 3198 Register scratch2, |
3240 Register heap_number_map, | 3199 Register heap_number_map, |
3241 Label* gc_required, | 3200 Label* gc_required, |
3242 TaggingMode tagging_mode, | 3201 TaggingMode tagging_mode, |
3243 MutableMode mode) { | 3202 MutableMode mode) { |
3244 // Allocate an object in the heap for the heap number and tag it as a heap | 3203 // Allocate an object in the heap for the heap number and tag it as a heap |
3245 // object. | 3204 // object. |
3246 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, | 3205 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, |
3247 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); | 3206 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); |
3248 | 3207 |
3249 Heap::RootListIndex map_index = mode == MUTABLE | 3208 Heap::RootListIndex map_index = mode == MUTABLE |
3250 ? Heap::kMutableHeapNumberMapRootIndex | 3209 ? Heap::kMutableHeapNumberMapRootIndex |
3251 : Heap::kHeapNumberMapRootIndex; | 3210 : Heap::kHeapNumberMapRootIndex; |
3252 AssertIsRoot(heap_number_map, map_index); | 3211 AssertIsRoot(heap_number_map, map_index); |
3253 | 3212 |
3254 // Store heap number map in the allocated object. | 3213 // Store heap number map in the allocated object. |
3255 if (tagging_mode == TAG_RESULT) { | 3214 if (tagging_mode == TAG_RESULT) { |
3256 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); | 3215 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset), |
| 3216 r0); |
3257 } else { | 3217 } else { |
3258 str(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); | 3218 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); |
3259 } | 3219 } |
3260 } | 3220 } |
3261 | 3221 |
3262 | 3222 |
3263 void MacroAssembler::AllocateHeapNumberWithValue(Register result, | 3223 void MacroAssembler::AllocateHeapNumberWithValue(Register result, |
3264 DwVfpRegister value, | 3224 DoubleRegister value, |
3265 Register scratch1, | 3225 Register scratch1, |
3266 Register scratch2, | 3226 Register scratch2, |
3267 Register heap_number_map, | 3227 Register heap_number_map, |
3268 Label* gc_required) { | 3228 Label* gc_required) { |
3269 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); | 3229 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); |
3270 sub(scratch1, result, Operand(kHeapObjectTag)); | 3230 stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset)); |
3271 vstr(value, scratch1, HeapNumber::kValueOffset); | |
3272 } | 3231 } |
3273 | 3232 |
3274 | 3233 |
3275 // Copies a fixed number of fields of heap objects from src to dst. | 3234 // Copies a fixed number of fields of heap objects from src to dst. |
3276 void MacroAssembler::CopyFields(Register dst, | 3235 void MacroAssembler::CopyFields(Register dst, |
3277 Register src, | 3236 Register src, |
3278 LowDwVfpRegister double_scratch, | 3237 RegList temps, |
3279 int field_count) { | 3238 int field_count) { |
3280 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize); | 3239 // At least one bit set in the first 15 registers. |
3281 for (int i = 0; i < double_count; i++) { | 3240 DCHECK((temps & ((1 << 15) - 1)) != 0); |
3282 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes)); | 3241 DCHECK((temps & dst.bit()) == 0); |
3283 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes)); | 3242 DCHECK((temps & src.bit()) == 0); |
| 3243 // Primitive implementation using only one temporary register. |
| 3244 |
| 3245 Register tmp = no_reg; |
| 3246 // Find a temp register in temps list. |
| 3247 for (int i = 0; i < 15; i++) { |
| 3248 if ((temps & (1 << i)) != 0) { |
| 3249 tmp.set_code(i); |
| 3250 break; |
| 3251 } |
3284 } | 3252 } |
| 3253 DCHECK(!tmp.is(no_reg)); |
3285 | 3254 |
3286 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize); | 3255 for (int i = 0; i < field_count; i++) { |
3287 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes); | 3256 LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0); |
3288 | 3257 StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0); |
3289 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize); | |
3290 if (remain != 0) { | |
3291 vldr(double_scratch.low(), | |
3292 FieldMemOperand(src, (field_count - 1) * kPointerSize)); | |
3293 vstr(double_scratch.low(), | |
3294 FieldMemOperand(dst, (field_count - 1) * kPointerSize)); | |
3295 } | 3258 } |
3296 } | 3259 } |
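CopyFields now takes a RegList bit mask instead of a VFP scratch register and simply scans for the lowest usable bit among the first 15 registers; the equivalent of that scan in plain C++ (a sketch, name illustrative):

  int FirstTempFromRegList(unsigned temps) {
    for (int code = 0; code < 15; code++) {   // first 15 GP registers
      if (temps & (1u << code)) return code;  // lowest set bit wins
    }
    return -1;  // unreachable here: the DCHECK requires at least one bit set
  }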
3297 | 3260 |
3298 | 3261 |
3299 void MacroAssembler::CopyBytes(Register src, | 3262 void MacroAssembler::CopyBytes(Register src, |
3300 Register dst, | 3263 Register dst, |
3301 Register length, | 3264 Register length, |
3302 Register scratch) { | 3265 Register scratch) { |
3303 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done; | 3266 Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done; |
| 3267 |
| 3268 DCHECK(!scratch.is(r0)); |
| 3269 |
| 3270 cmpi(length, Operand::Zero()); |
| 3271 beq(&done); |
| 3272 |
| 3273 // Check src alignment and length to see whether word_loop is possible |
| 3274 andi(scratch, src, Operand(kPointerSize - 1)); |
| 3275 beq(&aligned, cr0); |
| 3276 subfic(scratch, scratch, Operand(kPointerSize * 2)); |
| 3277 cmp(length, scratch); |
| 3278 blt(&byte_loop); |
3304 | 3279 |
3305 // Align src before copying in word size chunks. | 3280 // Align src before copying in word size chunks. |
3306 cmp(length, Operand(kPointerSize)); | 3281 subi(scratch, scratch, Operand(kPointerSize)); |
3307 b(le, &byte_loop); | 3282 mtctr(scratch); |
| 3283 bind(&align_loop); |
| 3284 lbz(scratch, MemOperand(src)); |
| 3285 addi(src, src, Operand(1)); |
| 3286 subi(length, length, Operand(1)); |
| 3287 stb(scratch, MemOperand(dst)); |
| 3288 addi(dst, dst, Operand(1)); |
| 3289 bdnz(&align_loop); |
3308 | 3290 |
3309 bind(&align_loop_1); | 3291 bind(&aligned); |
3310 tst(src, Operand(kPointerSize - 1)); | 3292 |
3311 b(eq, &word_loop); | |
3312 ldrb(scratch, MemOperand(src, 1, PostIndex)); | |
3313 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3314 sub(length, length, Operand(1), SetCC); | |
3315 b(&align_loop_1); | |
3316 // Copy bytes in word size chunks. | 3293 // Copy bytes in word size chunks. |
| 3294 if (emit_debug_code()) { |
| 3295 andi(r0, src, Operand(kPointerSize - 1)); |
| 3296 Assert(eq, kExpectingAlignmentForCopyBytes, cr0); |
| 3297 } |
| 3298 |
| 3299 ShiftRightImm(scratch, length, Operand(kPointerSizeLog2)); |
| 3300 cmpi(scratch, Operand::Zero()); |
| 3301 beq(&byte_loop); |
| 3302 |
| 3303 mtctr(scratch); |
3317 bind(&word_loop); | 3304 bind(&word_loop); |
3318 if (emit_debug_code()) { | 3305 LoadP(scratch, MemOperand(src)); |
3319 tst(src, Operand(kPointerSize - 1)); | 3306 addi(src, src, Operand(kPointerSize)); |
3320 Assert(eq, kExpectingAlignmentForCopyBytes); | 3307 subi(length, length, Operand(kPointerSize)); |
| 3308 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { |
                                                                                | 3309     // Currently false for PPC, but a possible future optimization.
| 3310 StoreP(scratch, MemOperand(dst)); |
| 3311 addi(dst, dst, Operand(kPointerSize)); |
| 3312 } else { |
| 3313 #if V8_TARGET_LITTLE_ENDIAN |
| 3314 stb(scratch, MemOperand(dst, 0)); |
| 3315 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3316 stb(scratch, MemOperand(dst, 1)); |
| 3317 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3318 stb(scratch, MemOperand(dst, 2)); |
| 3319 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3320 stb(scratch, MemOperand(dst, 3)); |
| 3321 #if V8_TARGET_ARCH_PPC64 |
| 3322 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3323 stb(scratch, MemOperand(dst, 4)); |
| 3324 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3325 stb(scratch, MemOperand(dst, 5)); |
| 3326 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3327 stb(scratch, MemOperand(dst, 6)); |
| 3328 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3329 stb(scratch, MemOperand(dst, 7)); |
| 3330 #endif |
| 3331 #else |
| 3332 #if V8_TARGET_ARCH_PPC64 |
| 3333 stb(scratch, MemOperand(dst, 7)); |
| 3334 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3335 stb(scratch, MemOperand(dst, 6)); |
| 3336 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3337 stb(scratch, MemOperand(dst, 5)); |
| 3338 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3339 stb(scratch, MemOperand(dst, 4)); |
| 3340 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3341 #endif |
| 3342 stb(scratch, MemOperand(dst, 3)); |
| 3343 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3344 stb(scratch, MemOperand(dst, 2)); |
| 3345 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3346 stb(scratch, MemOperand(dst, 1)); |
| 3347 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3348 stb(scratch, MemOperand(dst, 0)); |
| 3349 #endif |
| 3350 addi(dst, dst, Operand(kPointerSize)); |
3321 } | 3351 } |
3322 cmp(length, Operand(kPointerSize)); | 3352 bdnz(&word_loop); |
3323 b(lt, &byte_loop); | |
3324 ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); | |
3325 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { | |
3326 str(scratch, MemOperand(dst, kPointerSize, PostIndex)); | |
3327 } else { | |
3328 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3329 mov(scratch, Operand(scratch, LSR, 8)); | |
3330 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3331 mov(scratch, Operand(scratch, LSR, 8)); | |
3332 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3333 mov(scratch, Operand(scratch, LSR, 8)); | |
3334 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3335 } | |
3336 sub(length, length, Operand(kPointerSize)); | |
3337 b(&word_loop); | |
3338 | 3353 |
3339 // Copy the last bytes if any left. | 3354 // Copy the last bytes if any left. |
| 3355 cmpi(length, Operand::Zero()); |
| 3356 beq(&done); |
| 3357 |
3340 bind(&byte_loop); | 3358 bind(&byte_loop); |
3341 cmp(length, Operand::Zero()); | 3359 mtctr(length); |
3342 b(eq, &done); | |
3343 bind(&byte_loop_1); | 3360 bind(&byte_loop_1); |
3344 ldrb(scratch, MemOperand(src, 1, PostIndex)); | 3361 lbz(scratch, MemOperand(src)); |
3345 strb(scratch, MemOperand(dst, 1, PostIndex)); | 3362 addi(src, src, Operand(1)); |
3346 sub(length, length, Operand(1), SetCC); | 3363 stb(scratch, MemOperand(dst)); |
3347 b(ne, &byte_loop_1); | 3364 addi(dst, dst, Operand(1)); |
| 3365 bdnz(&byte_loop_1); |
| 3366 |
3348 bind(&done); | 3367 bind(&done); |
3349 } | 3368 } |
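CopyBytes on PPC splits the copy into an alignment prologue, a counted (CTR-driven) word loop, and a byte epilogue; short copies skip straight to the byte loop, and the stb sequence inside the word loop exists because the destination may be unaligned. A compact C++ model of that flow (a sketch only; the real code also orders the byte stores by target endianness and uses the length threshold rather than always aligning):

  #include <cstdint>
  #include <cstring>

  void CopyBytesModel(const uint8_t* src, uint8_t* dst, size_t length) {
    const size_t kPointerSize = sizeof(void*);
    // Alignment prologue: advance until src is pointer-aligned.
    while (length > 0 &&
           (reinterpret_cast<uintptr_t>(src) & (kPointerSize - 1)) != 0) {
      *dst++ = *src++;
      --length;
    }
    // Word loop: load aligned words, store them (byte-wise in the real code).
    while (length >= kPointerSize) {
      uintptr_t word;
      std::memcpy(&word, src, kPointerSize);
      std::memcpy(dst, &word, kPointerSize);
      src += kPointerSize;
      dst += kPointerSize;
      length -= kPointerSize;
    }
    // Byte epilogue for whatever is left.
    while (length-- > 0) *dst++ = *src++;
  }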
3350 | 3369 |
3351 | 3370 |
| 3371 void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset, |
| 3372 Register count, |
| 3373 Register filler) { |
| 3374 Label loop; |
| 3375 mtctr(count); |
| 3376 bind(&loop); |
| 3377 StoreP(filler, MemOperand(start_offset)); |
| 3378 addi(start_offset, start_offset, Operand(kPointerSize)); |
| 3379 bdnz(&loop); |
| 3380 } |
| 3381 |
3352 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, | 3382 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, |
3353 Register end_offset, | 3383 Register end_offset, |
3354 Register filler) { | 3384 Register filler) { |
3355 Label loop, entry; | 3385 Label done; |
3356 b(&entry); | 3386 sub(r0, end_offset, start_offset, LeaveOE, SetRC); |
3357 bind(&loop); | 3387 beq(&done, cr0); |
3358 str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); | 3388 ShiftRightImm(r0, r0, Operand(kPointerSizeLog2)); |
3359 bind(&entry); | 3389 InitializeNFieldsWithFiller(start_offset, r0, filler); |
3360 cmp(start_offset, end_offset); | 3390 bind(&done); |
3361 b(lt, &loop); | |
3362 } | 3391 } |
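InitializeFieldsWithFiller now just derives a field count from the byte span and defers to the new counted helper, which runs off the CTR register via mtctr/bdnz. The arithmetic, as a sketch (treating the offsets as a pointer range):

  #include <cstdint>

  void InitializeFieldsWithFillerModel(intptr_t* start, intptr_t* end,
                                       intptr_t filler) {
    intptr_t count = end - start;  // (end_offset - start_offset) >> kPointerSizeLog2
    for (intptr_t i = 0; i < count; i++) {  // the bdnz-driven loop
      start[i] = filler;
    }
  }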
3363 | 3392 |
3364 | 3393 |
3365 void MacroAssembler::CheckFor32DRegs(Register scratch) { | 3394 void MacroAssembler::SaveFPRegs(Register location, int first, int count) { |
3366 mov(scratch, Operand(ExternalReference::cpu_features())); | 3395 DCHECK(count > 0); |
3367 ldr(scratch, MemOperand(scratch)); | 3396 int cur = first; |
3368 tst(scratch, Operand(1u << VFP32DREGS)); | 3397 subi(location, location, Operand(count * kDoubleSize)); |
| 3398 for (int i = 0; i < count; i++) { |
| 3399 DoubleRegister reg = DoubleRegister::from_code(cur++); |
| 3400 stfd(reg, MemOperand(location, i * kDoubleSize)); |
| 3401 } |
3369 } | 3402 } |
3370 | 3403 |
3371 | 3404 |
3372 void MacroAssembler::SaveFPRegs(Register location, Register scratch) { | 3405 void MacroAssembler::RestoreFPRegs(Register location, int first, int count) { |
3373 CheckFor32DRegs(scratch); | 3406 DCHECK(count > 0); |
3374 vstm(db_w, location, d16, d31, ne); | 3407 int cur = first + count - 1; |
3375 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); | 3408 for (int i = count - 1; i >= 0; i--) { |
3376 vstm(db_w, location, d0, d15); | 3409 DoubleRegister reg = DoubleRegister::from_code(cur--); |
| 3410 lfd(reg, MemOperand(location, i * kDoubleSize)); |
| 3411 } |
| 3412 addi(location, location, Operand(count * kDoubleSize)); |
3377 } | 3413 } |
3378 | 3414 |
3379 | 3415 |
3380 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { | |
3381 CheckFor32DRegs(scratch); | |
3382 vldm(ia_w, location, d0, d15); | |
3383 vldm(ia_w, location, d16, d31, ne); | |
3384 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); | |
3385 } | |
3386 | |
3387 | |
3388 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( | 3416 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( |
3389 Register first, | 3417 Register first, |
3390 Register second, | 3418 Register second, |
3391 Register scratch1, | 3419 Register scratch1, |
3392 Register scratch2, | 3420 Register scratch2, |
3393 Label* failure) { | 3421 Label* failure) { |
3394 const int kFlatAsciiStringMask = | 3422 const int kFlatAsciiStringMask = |
3395 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | 3423 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
3396 const int kFlatAsciiStringTag = | 3424 const int kFlatAsciiStringTag = |
3397 kStringTag | kOneByteStringTag | kSeqStringTag; | 3425 kStringTag | kOneByteStringTag | kSeqStringTag; |
3398 and_(scratch1, first, Operand(kFlatAsciiStringMask)); | 3426 andi(scratch1, first, Operand(kFlatAsciiStringMask)); |
3399 and_(scratch2, second, Operand(kFlatAsciiStringMask)); | 3427 andi(scratch2, second, Operand(kFlatAsciiStringMask)); |
3400 cmp(scratch1, Operand(kFlatAsciiStringTag)); | 3428 cmpi(scratch1, Operand(kFlatAsciiStringTag)); |
3401 // Ignore second test if first test failed. | 3429 bne(failure); |
3402 cmp(scratch2, Operand(kFlatAsciiStringTag), eq); | 3430 cmpi(scratch2, Operand(kFlatAsciiStringTag)); |
3403 b(ne, failure); | 3431 bne(failure); |
3404 } | 3432 } |
3405 | 3433 |
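// Without ARM-style predicated execution, the "compare the second type only
// if the first matched" idiom becomes two independent cmpi/bne pairs: the
// failure label is taken as soon as either masked instance type misses
// kFlatAsciiStringTag.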
3406 | 3434 |
3407 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, | 3435 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, |
3408 Register scratch, | 3436 Register scratch, |
3409 Label* failure) { | 3437 Label* failure) { |
3410 const int kFlatAsciiStringMask = | 3438 const int kFlatAsciiStringMask = |
3411 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | 3439 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
3412 const int kFlatAsciiStringTag = | 3440 const int kFlatAsciiStringTag = |
3413 kStringTag | kOneByteStringTag | kSeqStringTag; | 3441 kStringTag | kOneByteStringTag | kSeqStringTag; |
3414 and_(scratch, type, Operand(kFlatAsciiStringMask)); | 3442 andi(scratch, type, Operand(kFlatAsciiStringMask)); |
3415 cmp(scratch, Operand(kFlatAsciiStringTag)); | 3443 cmpi(scratch, Operand(kFlatAsciiStringTag)); |
3416 b(ne, failure); | 3444 bne(failure); |
3417 } | 3445 } |
3418 | 3446 |
3419 static const int kRegisterPassedArguments = 4; | 3447 static const int kRegisterPassedArguments = 8; |
3420 | 3448 |
3421 | 3449 |
3422 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, | 3450 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, |
3423 int num_double_arguments) { | 3451 int num_double_arguments) { |
3424 int stack_passed_words = 0; | 3452 int stack_passed_words = 0; |
3425 if (use_eabi_hardfloat()) { | 3453 if (num_double_arguments > DoubleRegister::kNumRegisters) { |
3426 // In the hard floating point calling convention, we can use | |
3427 // all double registers to pass doubles. | |
3428 if (num_double_arguments > DoubleRegister::NumRegisters()) { | |
3429 stack_passed_words += | 3454 stack_passed_words += |
3430 2 * (num_double_arguments - DoubleRegister::NumRegisters()); | 3455 2 * (num_double_arguments - DoubleRegister::kNumRegisters); |
3431 } | |
3432 } else { | |
3433 // In the soft floating point calling convention, every double | |
3434 // argument is passed using two registers. | |
3435 num_reg_arguments += 2 * num_double_arguments; | |
3436 } | 3456 } |
3437 // Up to four simple arguments are passed in registers r0..r3. | 3457 // Up to 8 simple arguments are passed in registers r3..r10. |
3438 if (num_reg_arguments > kRegisterPassedArguments) { | 3458 if (num_reg_arguments > kRegisterPassedArguments) { |
3439 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; | 3459 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; |
3440 } | 3460 } |
3441 return stack_passed_words; | 3461 return stack_passed_words; |
3442 } | 3462 } |
3443 | 3463 |
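// Under the PPC ABI used here, up to 8 integer/pointer arguments travel in
// r3..r10 and doubles travel in FP registers; only the overflow spills to
// the stack (two words per overflowing double). For example, 10 integer
// arguments and no doubles give stack_passed_words == 2.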
3444 | 3464 |
3445 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, | 3465 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, |
3446 Register index, | 3466 Register index, |
3447 Register value, | 3467 Register value, |
3448 uint32_t encoding_mask) { | 3468 uint32_t encoding_mask) { |
3449 Label is_object; | 3469 Label is_object; |
3450 SmiTst(string); | 3470 TestIfSmi(string, r0); |
3451 Check(ne, kNonObject); | 3471 Check(ne, kNonObject, cr0); |
3452 | 3472 |
3453 ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); | 3473 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset)); |
3454 ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); | 3474 lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); |
3455 | 3475 |
3456 and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); | 3476 andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); |
3457 cmp(ip, Operand(encoding_mask)); | 3477 cmpi(ip, Operand(encoding_mask)); |
3458 Check(eq, kUnexpectedStringType); | 3478 Check(eq, kUnexpectedStringType); |
3459 | 3479 |
3460 // The index is assumed to be untagged coming in, tag it to compare with the | 3480 // The index is assumed to be untagged coming in, tag it to compare with the |
3461 // string length without using a temp register, it is restored at the end of | 3481 // string length without using a temp register, it is restored at the end of |
3462 // this function. | 3482 // this function. |
| 3483 #if !V8_TARGET_ARCH_PPC64 |
3463 Label index_tag_ok, index_tag_bad; | 3484 Label index_tag_ok, index_tag_bad; |
3464 TrySmiTag(index, index, &index_tag_bad); | 3485 JumpIfNotSmiCandidate(index, r0, &index_tag_bad); |
| 3486 #endif |
| 3487 SmiTag(index, index); |
| 3488 #if !V8_TARGET_ARCH_PPC64 |
3465 b(&index_tag_ok); | 3489 b(&index_tag_ok); |
3466 bind(&index_tag_bad); | 3490 bind(&index_tag_bad); |
3467 Abort(kIndexIsTooLarge); | 3491 Abort(kIndexIsTooLarge); |
3468 bind(&index_tag_ok); | 3492 bind(&index_tag_ok); |
| 3493 #endif |
3469 | 3494 |
3470 ldr(ip, FieldMemOperand(string, String::kLengthOffset)); | 3495 LoadP(ip, FieldMemOperand(string, String::kLengthOffset)); |
3471 cmp(index, ip); | 3496 cmp(index, ip); |
3472 Check(lt, kIndexIsTooLarge); | 3497 Check(lt, kIndexIsTooLarge); |
3473 | 3498 |
3474 cmp(index, Operand(Smi::FromInt(0))); | 3499 DCHECK(Smi::FromInt(0) == 0); |
| 3500 cmpi(index, Operand::Zero()); |
3475 Check(ge, kIndexIsNegative); | 3501 Check(ge, kIndexIsNegative); |
3476 | 3502 |
3477 SmiUntag(index, index); | 3503 SmiUntag(index, index); |
3478 } | 3504 } |
3479 | 3505 |
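// The index_tag_bad/Abort path is only compiled for 32-bit targets, where
// SmiTag could overflow; on PPC64 every untagged index is a valid Smi
// candidate. Both variants then tag the index, bounds-check it against the
// string length, and untag it again on exit.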
3480 | 3506 |
3481 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3507 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3482 int num_double_arguments, | 3508 int num_double_arguments, |
3483 Register scratch) { | 3509 Register scratch) { |
3484 int frame_alignment = ActivationFrameAlignment(); | 3510 int frame_alignment = ActivationFrameAlignment(); |
3485 int stack_passed_arguments = CalculateStackPassedWords( | 3511 int stack_passed_arguments = CalculateStackPassedWords( |
3486 num_reg_arguments, num_double_arguments); | 3512 num_reg_arguments, num_double_arguments); |
| 3513 int stack_space = kNumRequiredStackFrameSlots; |
| 3514 |
3487 if (frame_alignment > kPointerSize) { | 3515 if (frame_alignment > kPointerSize) { |
3488 // Make stack end at alignment and make room for num_arguments - 4 words | 3516 // Make stack end at alignment and make room for stack arguments |
3489 // and the original value of sp. | 3517 // -- preserving original value of sp. |
3490 mov(scratch, sp); | 3518 mr(scratch, sp); |
3491 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); | 3519 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize)); |
3492 DCHECK(IsPowerOf2(frame_alignment)); | 3520 DCHECK(IsPowerOf2(frame_alignment)); |
3493 and_(sp, sp, Operand(-frame_alignment)); | 3521 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); |
3494 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3522 StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
3495 } else { | 3523 } else { |
3496 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 3524 // Make room for stack arguments |
| 3525 stack_space += stack_passed_arguments; |
3497 } | 3526 } |
| 3527 |
| 3528 // Allocate frame with required slots to make ABI work. |
| 3529 li(r0, Operand::Zero()); |
| 3530 StorePU(r0, MemOperand(sp, -stack_space * kPointerSize)); |
3498 } | 3531 } |
3499 | 3532 |
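// PrepareCallCFunction reserves kNumRequiredStackFrameSlots for the ABI
// frame area (plus any overflow arguments when no extra alignment is
// needed), and StorePU stores r0 (zero here) into the new frame's first
// slot while updating sp in a single store-with-update, roughly:
//   stwu r0, -stack_space*kPointerSize(sp)   // stdu on 64-bit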
3500 | 3533 |
3501 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3534 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3502 Register scratch) { | 3535 Register scratch) { |
3503 PrepareCallCFunction(num_reg_arguments, 0, scratch); | 3536 PrepareCallCFunction(num_reg_arguments, 0, scratch); |
3504 } | 3537 } |
3505 | 3538 |
3506 | 3539 |
3507 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { | 3540 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { |
3508 DCHECK(src.is(d0)); | 3541 Move(d1, src); |
3509 if (!use_eabi_hardfloat()) { | 3542 } |
3510 vmov(r0, r1, src); | 3543 |
| 3544 |
| 3545 void MacroAssembler::MovToFloatResult(DoubleRegister src) { |
| 3546 Move(d1, src); |
| 3547 } |
| 3548 |
| 3549 |
| 3550 void MacroAssembler::MovToFloatParameters(DoubleRegister src1, |
| 3551 DoubleRegister src2) { |
| 3552 if (src2.is(d1)) { |
| 3553 DCHECK(!src1.is(d2)); |
| 3554 Move(d2, src2); |
| 3555 Move(d1, src1); |
| 3556 } else { |
| 3557 Move(d1, src1); |
| 3558 Move(d2, src2); |
3511 } | 3559 } |
3512 } | 3560 } |
3513 | 3561 |
3514 | |
3515 // On ARM this is just a synonym to make the purpose clear. | |
3516 void MacroAssembler::MovToFloatResult(DwVfpRegister src) { | |
3517 MovToFloatParameter(src); | |
3518 } | |
3519 | |
3520 | |
3521 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, | |
3522 DwVfpRegister src2) { | |
3523 DCHECK(src1.is(d0)); | |
3524 DCHECK(src2.is(d1)); | |
3525 if (!use_eabi_hardfloat()) { | |
3526 vmov(r0, r1, src1); | |
3527 vmov(r2, r3, src2); | |
3528 } | |
3529 } | |
3530 | |
3531 | 3562 |
3532 void MacroAssembler::CallCFunction(ExternalReference function, | 3563 void MacroAssembler::CallCFunction(ExternalReference function, |
3533 int num_reg_arguments, | 3564 int num_reg_arguments, |
3534 int num_double_arguments) { | 3565 int num_double_arguments) { |
3535 mov(ip, Operand(function)); | 3566 mov(ip, Operand(function)); |
3536 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); | 3567 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); |
3537 } | 3568 } |
3538 | 3569 |
3539 | 3570 |
3540 void MacroAssembler::CallCFunction(Register function, | 3571 void MacroAssembler::CallCFunction(Register function, |
(...skipping 12 matching lines...) |
3553 void MacroAssembler::CallCFunction(Register function, | 3584 void MacroAssembler::CallCFunction(Register function, |
3554 int num_arguments) { | 3585 int num_arguments) { |
3555 CallCFunction(function, num_arguments, 0); | 3586 CallCFunction(function, num_arguments, 0); |
3556 } | 3587 } |
3557 | 3588 |
3558 | 3589 |
3559 void MacroAssembler::CallCFunctionHelper(Register function, | 3590 void MacroAssembler::CallCFunctionHelper(Register function, |
3560 int num_reg_arguments, | 3591 int num_reg_arguments, |
3561 int num_double_arguments) { | 3592 int num_double_arguments) { |
3562 DCHECK(has_frame()); | 3593 DCHECK(has_frame()); |
3563 // Make sure that the stack is aligned before calling a C function unless | |
3564 // running in the simulator. The simulator has its own alignment check which | |
3565 // provides more information. | |
3566 #if V8_HOST_ARCH_ARM | |
3567 if (emit_debug_code()) { | |
3568 int frame_alignment = base::OS::ActivationFrameAlignment(); | |
3569 int frame_alignment_mask = frame_alignment - 1; | |
3570 if (frame_alignment > kPointerSize) { | |
3571 DCHECK(IsPowerOf2(frame_alignment)); | |
3572 Label alignment_as_expected; | |
3573 tst(sp, Operand(frame_alignment_mask)); | |
3574 b(eq, &alignment_as_expected); | |
3575 // Don't use Check here, as it will call Runtime_Abort possibly | |
3576 // re-entering here. | |
3577 stop("Unexpected alignment"); | |
3578 bind(&alignment_as_expected); | |
3579 } | |
3580 } | |
3581 #endif | |
3582 | |
3583 // Just call directly. The function called cannot cause a GC, or | 3594 // Just call directly. The function called cannot cause a GC, or |
3584 // allow preemption, so the return address in the link register | 3595 // allow preemption, so the return address in the link register |
3585 // stays correct. | 3596 // stays correct. |
3586 Call(function); | 3597 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) |
 | 3598 // AIX uses a function descriptor. When calling C code, be aware of |
 | 3599 // this descriptor and pick up values from it. |
| 3600 LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize)); |
| 3601 LoadP(ip, MemOperand(function, 0)); |
| 3602 Register dest = ip; |
| 3603 #elif ABI_TOC_ADDRESSABILITY_VIA_IP |
| 3604 Move(ip, function); |
| 3605 Register dest = ip; |
| 3606 #else |
| 3607 Register dest = function; |
| 3608 #endif |
| 3609 |
| 3610 Call(dest); |
| 3611 |
 | 3612 // Remove the frame allocated in PrepareCallCFunction |
3587 int stack_passed_arguments = CalculateStackPassedWords( | 3613 int stack_passed_arguments = CalculateStackPassedWords( |
3588 num_reg_arguments, num_double_arguments); | 3614 num_reg_arguments, num_double_arguments); |
| 3615 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments; |
3589 if (ActivationFrameAlignment() > kPointerSize) { | 3616 if (ActivationFrameAlignment() > kPointerSize) { |
3590 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3617 LoadP(sp, MemOperand(sp, stack_space * kPointerSize)); |
3591 } else { | 3618 } else { |
3592 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); | 3619 addi(sp, sp, Operand(stack_space * kPointerSize)); |
3593 } | 3620 } |
3594 } | 3621 } |
3595 | 3622 |
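// On AIX-style ABIs a C function pointer refers to a descriptor rather than
// code: word 0 holds the real entry point and the next word the callee's TOC
// pointer, which is why both are loaded before the branch. A rough picture:
//   struct FunctionDescriptor { void* entry; void* toc; /* env */ };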
3596 | 3623 |
3597 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, | 3624 void MacroAssembler::FlushICache(Register address, size_t size, |
3598 Register result, | 3625 Register scratch) { |
3599 Register scratch) { | 3626 Label done; |
3600 Label small_constant_pool_load, load_result; | 3627 |
3601 ldr(result, MemOperand(ldr_location)); | 3628 dcbf(r0, address); |
3602 | 3629 sync(); |
3603 if (FLAG_enable_ool_constant_pool) { | 3630 icbi(r0, address); |
3604 // Check if this is an extended constant pool load. | 3631 isync(); |
3605 and_(scratch, result, Operand(GetConsantPoolLoadMask())); | 3632 |
3606 teq(scratch, Operand(GetConsantPoolLoadPattern())); | 3633 // This code handles ranges which cross a single cacheline boundary. |
3607 b(eq, &small_constant_pool_load); | 3634 // scratch is last cacheline which intersects range. |
3608 if (emit_debug_code()) { | 3635 const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size()); |
3609 // Check that the instruction sequence is: | 3636 |
3610 // movw reg, #offset_low | 3637 DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2)); |
3611 // movt reg, #offset_high | 3638 addi(scratch, address, Operand(size - 1)); |
3612 // ldr reg, [pp, reg] | 3639 ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2)); |
3613 Instr patterns[] = {GetMovWPattern(), GetMovTPattern(), | 3640 cmpl(scratch, address); |
3614 GetLdrPpRegOffsetPattern()}; | 3641 ble(&done); |
3615 for (int i = 0; i < 3; i++) { | 3642 |
3616 ldr(result, MemOperand(ldr_location, i * kInstrSize)); | 3643 dcbf(r0, scratch); |
3617 and_(result, result, Operand(patterns[i])); | 3644 sync(); |
3618 cmp(result, Operand(patterns[i])); | 3645 icbi(r0, scratch); |
3619 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); | 3646 isync(); |
3620 } | 3647 |
3621 // Result was clobbered. Restore it. | 3648 bind(&done); |
3622 ldr(result, MemOperand(ldr_location)); | 3649 } |
3623 } | 3650 |
3624 | 3651 |
3625 // Get the offset into the constant pool. First extract movw immediate into | 3652 void MacroAssembler::SetRelocatedValue(Register location, |
3626 // result. | 3653 Register scratch, |
3627 and_(scratch, result, Operand(0xfff)); | 3654 Register new_value) { |
3628 mov(ip, Operand(result, LSR, 4)); | 3655 lwz(scratch, MemOperand(location)); |
3629 and_(ip, ip, Operand(0xf000)); | 3656 |
3630 orr(result, scratch, Operand(ip)); | 3657 #if V8_OOL_CONSTANT_POOL |
3631 // Then extract movt immediate and or into result. | 3658 if (emit_debug_code()) { |
3632 ldr(scratch, MemOperand(ldr_location, kInstrSize)); | 3659 // Check that the instruction sequence is a load from the constant pool |
3633 and_(ip, scratch, Operand(0xf0000)); | 3660 #if V8_TARGET_ARCH_PPC64 |
3634 orr(result, result, Operand(ip, LSL, 12)); | 3661 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16))); |
3635 and_(scratch, scratch, Operand(0xfff)); | 3662 Cmpi(scratch, Operand(ADDI), r0); |
3636 orr(result, result, Operand(scratch, LSL, 16)); | 3663 Check(eq, kTheInstructionShouldBeALi); |
3637 | 3664 lwz(scratch, MemOperand(location, kInstrSize)); |
3638 b(&load_result); | 3665 #endif |
3639 } | 3666 ExtractBitMask(scratch, scratch, 0x1f * B16); |
3640 | 3667 cmpi(scratch, Operand(kConstantPoolRegister.code())); |
3641 bind(&small_constant_pool_load); | |
3642 if (emit_debug_code()) { | |
3643 // Check that the instruction is a ldr reg, [<pc or pp> + offset] . | |
3644 and_(result, result, Operand(GetConsantPoolLoadPattern())); | |
3645 cmp(result, Operand(GetConsantPoolLoadPattern())); | |
3646 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); | 3668 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); |
3647 // Result was clobbered. Restore it. | 3669 // Scratch was clobbered. Restore it. |
3648 ldr(result, MemOperand(ldr_location)); | 3670 lwz(scratch, MemOperand(location)); |
3649 } | 3671 } |
3650 | 3672 // Get the address of the constant and patch it. |
3651 // Get the offset into the constant pool. | 3673 andi(scratch, scratch, Operand(kImm16Mask)); |
3652 const uint32_t kLdrOffsetMask = (1 << 12) - 1; | 3674 StorePX(new_value, MemOperand(kConstantPoolRegister, scratch)); |
3653 and_(result, result, Operand(kLdrOffsetMask)); | 3675 #else |
3654 | 3676 // This code assumes a FIXED_SEQUENCE for lis/ori |
3655 bind(&load_result); | 3677 |
3656 // Get the address of the constant. | 3678 // At this point scratch is a lis instruction. |
3657 if (FLAG_enable_ool_constant_pool) { | 3679 if (emit_debug_code()) { |
3658 add(result, pp, Operand(result)); | 3680 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16))); |
3659 } else { | 3681 Cmpi(scratch, Operand(ADDIS), r0); |
3660 add(result, ldr_location, Operand(result)); | 3682 Check(eq, kTheInstructionToPatchShouldBeALis); |
3661 add(result, result, Operand(Instruction::kPCReadOffset)); | 3683 lwz(scratch, MemOperand(location)); |
3662 } | 3684 } |
| 3685 |
| 3686 // insert new high word into lis instruction |
| 3687 #if V8_TARGET_ARCH_PPC64 |
| 3688 srdi(ip, new_value, Operand(32)); |
| 3689 rlwimi(scratch, ip, 16, 16, 31); |
| 3690 #else |
| 3691 rlwimi(scratch, new_value, 16, 16, 31); |
| 3692 #endif |
| 3693 |
| 3694 stw(scratch, MemOperand(location)); |
| 3695 |
| 3696 lwz(scratch, MemOperand(location, kInstrSize)); |
| 3697 // scratch is now ori. |
| 3698 if (emit_debug_code()) { |
| 3699 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3700 Cmpi(scratch, Operand(ORI), r0); |
| 3701 Check(eq, kTheInstructionShouldBeAnOri); |
| 3702 lwz(scratch, MemOperand(location, kInstrSize)); |
| 3703 } |
| 3704 |
| 3705 // insert new low word into ori instruction |
| 3706 #if V8_TARGET_ARCH_PPC64 |
| 3707 rlwimi(scratch, ip, 0, 16, 31); |
| 3708 #else |
| 3709 rlwimi(scratch, new_value, 0, 16, 31); |
| 3710 #endif |
| 3711 stw(scratch, MemOperand(location, kInstrSize)); |
| 3712 |
| 3713 #if V8_TARGET_ARCH_PPC64 |
| 3714 if (emit_debug_code()) { |
| 3715 lwz(scratch, MemOperand(location, 2*kInstrSize)); |
| 3716 // scratch is now sldi. |
| 3717 And(scratch, scratch, Operand(kOpcodeMask|kExt5OpcodeMask)); |
| 3718 Cmpi(scratch, Operand(EXT5|RLDICR), r0); |
| 3719 Check(eq, kTheInstructionShouldBeASldi); |
| 3720 } |
| 3721 |
| 3722 lwz(scratch, MemOperand(location, 3*kInstrSize)); |
 | 3723 // scratch is now oris. |
| 3724 if (emit_debug_code()) { |
| 3725 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3726 Cmpi(scratch, Operand(ORIS), r0); |
| 3727 Check(eq, kTheInstructionShouldBeAnOris); |
| 3728 lwz(scratch, MemOperand(location, 3*kInstrSize)); |
| 3729 } |
| 3730 |
| 3731 rlwimi(scratch, new_value, 16, 16, 31); |
| 3732 stw(scratch, MemOperand(location, 3*kInstrSize)); |
| 3733 |
| 3734 lwz(scratch, MemOperand(location, 4*kInstrSize)); |
| 3735 // scratch is now ori. |
| 3736 if (emit_debug_code()) { |
| 3737 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3738 Cmpi(scratch, Operand(ORI), r0); |
| 3739 Check(eq, kTheInstructionShouldBeAnOri); |
| 3740 lwz(scratch, MemOperand(location, 4*kInstrSize)); |
| 3741 } |
| 3742 rlwimi(scratch, new_value, 0, 16, 31); |
| 3743 stw(scratch, MemOperand(location, 4*kInstrSize)); |
| 3744 #endif |
| 3745 |
 | 3746 // Update the I-cache so the patched lis/ori sequence can be executed. |
| 3747 #if V8_TARGET_ARCH_PPC64 |
| 3748 FlushICache(location, 5 * kInstrSize, scratch); |
| 3749 #else |
| 3750 FlushICache(location, 2 * kInstrSize, scratch); |
| 3751 #endif |
| 3752 #endif |
| 3753 } |
| 3754 |
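// SetRelocatedValue patches the fixed mov sequence in place: rlwimi inserts
// the new 16-bit halves into the immediate fields of the existing lis/ori
// (plus oris/ori on 64-bit) without touching their opcodes. For example,
// with new_value == 0x12345678 the lis immediate becomes 0x1234 and the ori
// immediate 0x5678; FlushICache then makes the patched code visible.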
| 3755 |
| 3756 void MacroAssembler::GetRelocatedValue(Register location, |
| 3757 Register result, |
| 3758 Register scratch) { |
| 3759 lwz(result, MemOperand(location)); |
| 3760 |
| 3761 #if V8_OOL_CONSTANT_POOL |
| 3762 if (emit_debug_code()) { |
| 3763 // Check that the instruction sequence is a load from the constant pool |
| 3764 #if V8_TARGET_ARCH_PPC64 |
| 3765 And(result, result, Operand(kOpcodeMask | (0x1f * B16))); |
| 3766 Cmpi(result, Operand(ADDI), r0); |
| 3767 Check(eq, kTheInstructionShouldBeALi); |
| 3768 lwz(result, MemOperand(location, kInstrSize)); |
| 3769 #endif |
| 3770 ExtractBitMask(result, result, 0x1f * B16); |
| 3771 cmpi(result, Operand(kConstantPoolRegister.code())); |
| 3772 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); |
| 3773 lwz(result, MemOperand(location)); |
| 3774 } |
| 3775 // Get the address of the constant and retrieve it. |
| 3776 andi(result, result, Operand(kImm16Mask)); |
| 3777 LoadPX(result, MemOperand(kConstantPoolRegister, result)); |
| 3778 #else |
| 3779 // This code assumes a FIXED_SEQUENCE for lis/ori |
| 3780 if (emit_debug_code()) { |
| 3781 And(result, result, Operand(kOpcodeMask | (0x1f * B16))); |
| 3782 Cmpi(result, Operand(ADDIS), r0); |
| 3783 Check(eq, kTheInstructionShouldBeALis); |
| 3784 lwz(result, MemOperand(location)); |
| 3785 } |
| 3786 |
| 3787 // result now holds a lis instruction. Extract the immediate. |
| 3788 slwi(result, result, Operand(16)); |
| 3789 |
| 3790 lwz(scratch, MemOperand(location, kInstrSize)); |
| 3791 if (emit_debug_code()) { |
| 3792 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3793 Cmpi(scratch, Operand(ORI), r0); |
| 3794 Check(eq, kTheInstructionShouldBeAnOri); |
| 3795 lwz(scratch, MemOperand(location, kInstrSize)); |
| 3796 } |
 | 3797 // Copy the low 16 bits from the ori instruction into result |
| 3798 rlwimi(result, scratch, 0, 16, 31); |
| 3799 |
| 3800 #if V8_TARGET_ARCH_PPC64 |
| 3801 if (emit_debug_code()) { |
| 3802 lwz(scratch, MemOperand(location, 2*kInstrSize)); |
| 3803 // scratch is now sldi. |
| 3804 And(scratch, scratch, Operand(kOpcodeMask|kExt5OpcodeMask)); |
| 3805 Cmpi(scratch, Operand(EXT5|RLDICR), r0); |
| 3806 Check(eq, kTheInstructionShouldBeASldi); |
| 3807 } |
| 3808 |
| 3809 lwz(scratch, MemOperand(location, 3*kInstrSize)); |
 | 3810 // scratch is now oris. |
| 3811 if (emit_debug_code()) { |
| 3812 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3813 Cmpi(scratch, Operand(ORIS), r0); |
| 3814 Check(eq, kTheInstructionShouldBeAnOris); |
| 3815 lwz(scratch, MemOperand(location, 3*kInstrSize)); |
| 3816 } |
| 3817 sldi(result, result, Operand(16)); |
| 3818 rldimi(result, scratch, 0, 48); |
| 3819 |
| 3820 lwz(scratch, MemOperand(location, 4*kInstrSize)); |
| 3821 // scratch is now ori. |
| 3822 if (emit_debug_code()) { |
| 3823 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3824 Cmpi(scratch, Operand(ORI), r0); |
| 3825 Check(eq, kTheInstructionShouldBeAnOri); |
| 3826 lwz(scratch, MemOperand(location, 4*kInstrSize)); |
| 3827 } |
| 3828 sldi(result, result, Operand(16)); |
| 3829 rldimi(result, scratch, 0, 48); |
| 3830 #endif |
| 3831 #endif |
3663 } | 3832 } |
3664 | 3833 |
3665 | 3834 |
3666 void MacroAssembler::CheckPageFlag( | 3835 void MacroAssembler::CheckPageFlag( |
3667 Register object, | 3836 Register object, |
3668 Register scratch, | 3837 Register scratch, // scratch may be same register as object |
3669 int mask, | 3838 int mask, |
3670 Condition cc, | 3839 Condition cc, |
3671 Label* condition_met) { | 3840 Label* condition_met) { |
3672 Bfc(scratch, object, 0, kPageSizeBits); | 3841 DCHECK(cc == ne || cc == eq); |
3673 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | 3842 ClearRightImm(scratch, object, Operand(kPageSizeBits)); |
3674 tst(scratch, Operand(mask)); | 3843 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
3675 b(cc, condition_met); | 3844 |
3676 } | 3845 And(r0, scratch, Operand(mask), SetRC); |
3677 | 3846 |
3678 | 3847 if (cc == ne) { |
| 3848 bne(condition_met, cr0); |
| 3849 } |
| 3850 if (cc == eq) { |
| 3851 beq(condition_met, cr0); |
| 3852 } |
| 3853 } |
| 3854 |
| 3855 |
3679 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, | 3856 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, |
3680 Register scratch, | 3857 Register scratch, |
3681 Label* if_deprecated) { | 3858 Label* if_deprecated) { |
3682 if (map->CanBeDeprecated()) { | 3859 if (map->CanBeDeprecated()) { |
3683 mov(scratch, Operand(map)); | 3860 mov(scratch, Operand(map)); |
3684 ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); | 3861 lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); |
3685 tst(scratch, Operand(Map::Deprecated::kMask)); | 3862 ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC); |
3686 b(ne, if_deprecated); | 3863 bne(if_deprecated, cr0); |
3687 } | 3864 } |
3688 } | 3865 } |
3689 | 3866 |
3690 | 3867 |
3691 void MacroAssembler::JumpIfBlack(Register object, | 3868 void MacroAssembler::JumpIfBlack(Register object, |
3692 Register scratch0, | 3869 Register scratch0, |
3693 Register scratch1, | 3870 Register scratch1, |
3694 Label* on_black) { | 3871 Label* on_black) { |
3695 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. | 3872 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. |
3696 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); | 3873 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
3697 } | 3874 } |
3698 | 3875 |
3699 | 3876 |
3700 void MacroAssembler::HasColor(Register object, | 3877 void MacroAssembler::HasColor(Register object, |
3701 Register bitmap_scratch, | 3878 Register bitmap_scratch, |
3702 Register mask_scratch, | 3879 Register mask_scratch, |
3703 Label* has_color, | 3880 Label* has_color, |
3704 int first_bit, | 3881 int first_bit, |
3705 int second_bit) { | 3882 int second_bit) { |
3706 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); | 3883 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); |
3707 | 3884 |
3708 GetMarkBits(object, bitmap_scratch, mask_scratch); | 3885 GetMarkBits(object, bitmap_scratch, mask_scratch); |
3709 | 3886 |
3710 Label other_color, word_boundary; | 3887 Label other_color, word_boundary; |
3711 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3888 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3712 tst(ip, Operand(mask_scratch)); | 3889 // Test the first bit |
3713 b(first_bit == 1 ? eq : ne, &other_color); | 3890 and_(r0, ip, mask_scratch, SetRC); |
3714 // Shift left 1 by adding. | 3891 b(first_bit == 1 ? eq : ne, &other_color, cr0); |
3715 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); | 3892 // Shift left 1 |
3716 b(eq, &word_boundary); | 3893 // May need to load the next cell |
3717 tst(ip, Operand(mask_scratch)); | 3894 slwi(mask_scratch, mask_scratch, Operand(1), SetRC); |
3718 b(second_bit == 1 ? ne : eq, has_color); | 3895 beq(&word_boundary, cr0); |
3719 jmp(&other_color); | 3896 // Test the second bit |
| 3897 and_(r0, ip, mask_scratch, SetRC); |
| 3898 b(second_bit == 1 ? ne : eq, has_color, cr0); |
| 3899 b(&other_color); |
3720 | 3900 |
3721 bind(&word_boundary); | 3901 bind(&word_boundary); |
3722 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); | 3902 lwz(ip, MemOperand(bitmap_scratch, |
3723 tst(ip, Operand(1)); | 3903 MemoryChunk::kHeaderSize + kIntSize)); |
3724 b(second_bit == 1 ? ne : eq, has_color); | 3904 andi(r0, ip, Operand(1)); |
| 3905 b(second_bit == 1 ? ne : eq, has_color, cr0); |
3725 bind(&other_color); | 3906 bind(&other_color); |
3726 } | 3907 } |
3727 | 3908 |
3728 | 3909 |
3729 // Detect some, but not all, common pointer-free objects. This is used by the | 3910 // Detect some, but not all, common pointer-free objects. This is used by the |
3730 // incremental write barrier which doesn't care about oddballs (they are always | 3911 // incremental write barrier which doesn't care about oddballs (they are always |
3731 // marked black immediately so this code is not hit). | 3912 // marked black immediately so this code is not hit). |
3732 void MacroAssembler::JumpIfDataObject(Register value, | 3913 void MacroAssembler::JumpIfDataObject(Register value, |
3733 Register scratch, | 3914 Register scratch, |
3734 Label* not_data_object) { | 3915 Label* not_data_object) { |
3735 Label is_data_object; | 3916 Label is_data_object; |
3736 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); | 3917 LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); |
3737 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); | 3918 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); |
3738 b(eq, &is_data_object); | 3919 beq(&is_data_object); |
3739 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 3920 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
3740 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 3921 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
3741 // If it's a string and it's not a cons string then it's an object containing | 3922 // If it's a string and it's not a cons string then it's an object containing |
3742 // no GC pointers. | 3923 // no GC pointers. |
3743 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 3924 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
3744 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); | 3925 STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81); |
3745 b(ne, not_data_object); | 3926 andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
| 3927 bne(not_data_object, cr0); |
3746 bind(&is_data_object); | 3928 bind(&is_data_object); |
3747 } | 3929 } |
3748 | 3930 |
3749 | 3931 |
3750 void MacroAssembler::GetMarkBits(Register addr_reg, | 3932 void MacroAssembler::GetMarkBits(Register addr_reg, |
3751 Register bitmap_reg, | 3933 Register bitmap_reg, |
3752 Register mask_reg) { | 3934 Register mask_reg) { |
3753 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); | 3935 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); |
3754 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); | 3936 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0); |
3755 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | 3937 lis(r0, Operand((~Page::kPageAlignmentMask >> 16))); |
| 3938 and_(bitmap_reg, addr_reg, r0); |
3756 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | 3939 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
3757 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); | 3940 ExtractBitRange(mask_reg, addr_reg, |
3758 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); | 3941 kLowBits - 1, |
3759 mov(ip, Operand(1)); | 3942 kPointerSizeLog2); |
3760 mov(mask_reg, Operand(ip, LSL, mask_reg)); | 3943 ExtractBitRange(ip, addr_reg, |
| 3944 kPageSizeBits - 1, |
| 3945 kLowBits); |
| 3946 ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2)); |
| 3947 add(bitmap_reg, bitmap_reg, ip); |
| 3948 li(ip, Operand(1)); |
| 3949 slw(mask_reg, ip, mask_reg); |
3761 } | 3950 } |
3762 | 3951 |
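// GetMarkBits splits an address into marking-bitmap coordinates: the page
// base (roughly, the low kPageSizeBits cleared), the byte offset of the
// bitmap cell taken from the bits above kPointerSizeLog2 + kBitsPerCellLog2,
// and a one-bit mask, roughly:
//   mask_reg = 1 << ((addr >> kPointerSizeLog2) & (Bitmap::kBitsPerCell - 1))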
3763 | 3952 |
3764 void MacroAssembler::EnsureNotWhite( | 3953 void MacroAssembler::EnsureNotWhite( |
3765 Register value, | 3954 Register value, |
3766 Register bitmap_scratch, | 3955 Register bitmap_scratch, |
3767 Register mask_scratch, | 3956 Register mask_scratch, |
3768 Register load_scratch, | 3957 Register load_scratch, |
3769 Label* value_is_white_and_not_data) { | 3958 Label* value_is_white_and_not_data) { |
3770 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); | 3959 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); |
3771 GetMarkBits(value, bitmap_scratch, mask_scratch); | 3960 GetMarkBits(value, bitmap_scratch, mask_scratch); |
3772 | 3961 |
3773 // If the value is black or grey we don't need to do anything. | 3962 // If the value is black or grey we don't need to do anything. |
3774 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); | 3963 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
3775 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); | 3964 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
3776 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); | 3965 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
3777 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | 3966 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
3778 | 3967 |
3779 Label done; | 3968 Label done; |
3780 | 3969 |
3781 // Since both black and grey have a 1 in the first position and white does | 3970 // Since both black and grey have a 1 in the first position and white does |
3782 // not have a 1 there we only need to check one bit. | 3971 // not have a 1 there we only need to check one bit. |
3783 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3972 lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3784 tst(mask_scratch, load_scratch); | 3973 and_(r0, mask_scratch, load_scratch, SetRC); |
3785 b(ne, &done); | 3974 bne(&done, cr0); |
3786 | 3975 |
3787 if (emit_debug_code()) { | 3976 if (emit_debug_code()) { |
3788 // Check for impossible bit pattern. | 3977 // Check for impossible bit pattern. |
3789 Label ok; | 3978 Label ok; |
3790 // LSL may overflow, making the check conservative. | 3979 // The shift left may overflow, making the check conservative. |
3791 tst(load_scratch, Operand(mask_scratch, LSL, 1)); | 3980 slwi(r0, mask_scratch, Operand(1)); |
3792 b(eq, &ok); | 3981 and_(r0, load_scratch, r0, SetRC); |
| 3982 beq(&ok, cr0); |
3793 stop("Impossible marking bit pattern"); | 3983 stop("Impossible marking bit pattern"); |
3794 bind(&ok); | 3984 bind(&ok); |
3795 } | 3985 } |
3796 | 3986 |
3797 // Value is white. We check whether it is data that doesn't need scanning. | 3987 // Value is white. We check whether it is data that doesn't need scanning. |
3798 // Currently only checks for HeapNumber and non-cons strings. | 3988 // Currently only checks for HeapNumber and non-cons strings. |
3799 Register map = load_scratch; // Holds map while checking type. | 3989 Register map = load_scratch; // Holds map while checking type. |
3800 Register length = load_scratch; // Holds length of object after testing type. | 3990 Register length = load_scratch; // Holds length of object after testing type. |
3801 Label is_data_object; | 3991 Label is_data_object, maybe_string_object, is_string_object, is_encoded; |
| 3992 #if V8_TARGET_ARCH_PPC64 |
| 3993 Label length_computed; |
| 3994 #endif |
| 3995 |
3802 | 3996 |
3803 // Check for heap-number | 3997 // Check for heap-number |
3804 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); | 3998 LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
3805 CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 3999 CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
3806 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); | 4000 bne(&maybe_string_object); |
3807 b(eq, &is_data_object); | 4001 li(length, Operand(HeapNumber::kSize)); |
| 4002 b(&is_data_object); |
| 4003 bind(&maybe_string_object); |
3808 | 4004 |
3809 // Check for strings. | 4005 // Check for strings. |
3810 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 4006 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
3811 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 4007 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
3812 // If it's a string and it's not a cons string then it's an object containing | 4008 // If it's a string and it's not a cons string then it's an object containing |
3813 // no GC pointers. | 4009 // no GC pointers. |
3814 Register instance_type = load_scratch; | 4010 Register instance_type = load_scratch; |
3815 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 4011 lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
3816 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); | 4012 andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
3817 b(ne, value_is_white_and_not_data); | 4013 bne(value_is_white_and_not_data, cr0); |
3818 // It's a non-indirect (non-cons and non-slice) string. | 4014 // It's a non-indirect (non-cons and non-slice) string. |
3819 // If it's external, the length is just ExternalString::kSize. | 4015 // If it's external, the length is just ExternalString::kSize. |
3820 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). | 4016 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). |
3821 // External strings are the only ones with the kExternalStringTag bit | 4017 // External strings are the only ones with the kExternalStringTag bit |
3822 // set. | 4018 // set. |
3823 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); | 4019 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); |
3824 DCHECK_EQ(0, kConsStringTag & kExternalStringTag); | 4020 DCHECK_EQ(0, kConsStringTag & kExternalStringTag); |
3825 tst(instance_type, Operand(kExternalStringTag)); | 4021 andi(r0, instance_type, Operand(kExternalStringTag)); |
3826 mov(length, Operand(ExternalString::kSize), LeaveCC, ne); | 4022 beq(&is_string_object, cr0); |
3827 b(ne, &is_data_object); | 4023 li(length, Operand(ExternalString::kSize)); |
| 4024 b(&is_data_object); |
| 4025 bind(&is_string_object); |
3828 | 4026 |
3829 // Sequential string, either ASCII or UC16. | 4027 // Sequential string, either ASCII or UC16. |
3830 // For ASCII (char-size of 1) we shift the smi tag away to get the length. | 4028 // For ASCII (char-size of 1) we untag the smi to get the length. |
3831 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby | 4029 // For UC16 (char-size of 2): |
3832 // getting the length multiplied by 2. | 4030 // - (32-bit) we just leave the smi tag in place, thereby getting |
| 4031 // the length multiplied by 2. |
| 4032 // - (64-bit) we compute the offset in the 2-byte array |
3833 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); | 4033 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); |
3834 DCHECK(kSmiTag == 0 && kSmiTagSize == 1); | 4034 LoadP(ip, FieldMemOperand(value, String::kLengthOffset)); |
3835 ldr(ip, FieldMemOperand(value, String::kLengthOffset)); | 4035 andi(r0, instance_type, Operand(kStringEncodingMask)); |
3836 tst(instance_type, Operand(kStringEncodingMask)); | 4036 beq(&is_encoded, cr0); |
3837 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); | 4037 SmiUntag(ip); |
3838 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); | 4038 #if V8_TARGET_ARCH_PPC64 |
3839 and_(length, length, Operand(~kObjectAlignmentMask)); | 4039 b(&length_computed); |
| 4040 #endif |
| 4041 bind(&is_encoded); |
| 4042 #if V8_TARGET_ARCH_PPC64 |
| 4043 SmiToShortArrayOffset(ip, ip); |
| 4044 bind(&length_computed); |
| 4045 #else |
| 4046 DCHECK(kSmiShift == 1); |
| 4047 #endif |
| 4048 addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); |
| 4049 li(r0, Operand(~kObjectAlignmentMask)); |
| 4050 and_(length, length, r0); |
3840 | 4051 |
3841 bind(&is_data_object); | 4052 bind(&is_data_object); |
3842 // Value is a data object, and it is white. Mark it black. Since we know | 4053 // Value is a data object, and it is white. Mark it black. Since we know |
3843 // that the object is white we can make it black by flipping one bit. | 4054 // that the object is white we can make it black by flipping one bit. |
3844 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 4055 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3845 orr(ip, ip, Operand(mask_scratch)); | 4056 orx(ip, ip, mask_scratch); |
3846 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 4057 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3847 | 4058 |
3848 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); | 4059 mov(ip, Operand(~Page::kPageAlignmentMask)); |
3849 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); | 4060 and_(bitmap_scratch, bitmap_scratch, ip); |
3850 add(ip, ip, Operand(length)); | 4061 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
3851 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); | 4062 add(ip, ip, length); |
| 4063 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
3852 | 4064 |
3853 bind(&done); | 4065 bind(&done); |
3854 } | 4066 } |
3855 | 4067 |
3856 | 4068 |
 | 4069 // Saturate a value into an 8-bit unsigned integer: |
 | 4070 // if input_value < 0, output_value is 0; |
 | 4071 // if input_value > 255, output_value is 255; |
 | 4072 // otherwise output_value is the input_value. |
3857 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { | 4073 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { |
3858 Usat(output_reg, 8, Operand(input_reg)); | 4074 Label done, negative_label, overflow_label; |
| 4075 int satval = (1 << 8) - 1; |
| 4076 |
| 4077 cmpi(input_reg, Operand::Zero()); |
| 4078 blt(&negative_label); |
| 4079 |
| 4080 cmpi(input_reg, Operand(satval)); |
| 4081 bgt(&overflow_label); |
| 4082 if (!output_reg.is(input_reg)) { |
| 4083 mr(output_reg, input_reg); |
| 4084 } |
| 4085 b(&done); |
| 4086 |
| 4087 bind(&negative_label); |
| 4088 li(output_reg, Operand::Zero()); // set to 0 if negative |
| 4089 b(&done); |
| 4090 |
| 4091 |
| 4092 bind(&overflow_label); // set to satval if > satval |
| 4093 li(output_reg, Operand(satval)); |
| 4094 |
| 4095 bind(&done); |
| 4096 } |
| 4097 |
| 4098 |
| 4099 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { |
| 4100 mtfsfi(7, RN); |
| 4101 } |
| 4102 |
| 4103 |
| 4104 void MacroAssembler::ResetRoundingMode() { |
| 4105 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest) |
3859 } | 4106 } |
3860 | 4107 |
3861 | 4108 |
3862 void MacroAssembler::ClampDoubleToUint8(Register result_reg, | 4109 void MacroAssembler::ClampDoubleToUint8(Register result_reg, |
3863 DwVfpRegister input_reg, | 4110 DoubleRegister input_reg, |
3864 LowDwVfpRegister double_scratch) { | 4111 DoubleRegister double_scratch) { |
| 4112 Label above_zero; |
3865 Label done; | 4113 Label done; |
3866 | 4114 Label in_bounds; |
3867 // Handle inputs >= 255 (including +infinity). | 4115 |
3868 Vmov(double_scratch, 255.0, result_reg); | 4116 LoadDoubleLiteral(double_scratch, 0.0, result_reg); |
3869 mov(result_reg, Operand(255)); | 4117 fcmpu(input_reg, double_scratch); |
3870 VFPCompareAndSetFlags(input_reg, double_scratch); | 4118 bgt(&above_zero); |
3871 b(ge, &done); | 4119 |
3872 | 4120 // Double value is less than zero, NaN or Inf, return 0. |
3873 // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest | 4121 LoadIntLiteral(result_reg, 0); |
3874 // rounding mode will provide the correct result. | 4122 b(&done); |
3875 vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding); | 4123 |
3876 vmov(result_reg, double_scratch.low()); | 4124 // Double value is >= 255, return 255. |
3877 | 4125 bind(&above_zero); |
| 4126 LoadDoubleLiteral(double_scratch, 255.0, result_reg); |
| 4127 fcmpu(input_reg, double_scratch); |
| 4128 ble(&in_bounds); |
| 4129 LoadIntLiteral(result_reg, 255); |
| 4130 b(&done); |
| 4131 |
| 4132 // In 0-255 range, round and truncate. |
| 4133 bind(&in_bounds); |
| 4134 |
| 4135 // round to nearest (default rounding mode) |
| 4136 fctiw(double_scratch, input_reg); |
| 4137 MovDoubleLowToInt(result_reg, double_scratch); |
3878 bind(&done); | 4138 bind(&done); |
3879 } | 4139 } |
3880 | 4140 |
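// ClampDoubleToUint8 leans on fcmpu ordering: NaN compares unordered, so
// bgt(&above_zero) is not taken and NaN joins the negative/zero path that
// returns 0. Values in (0, 255] reach fctiw, which converts using the
// current rounding mode (round-to-nearest unless SetRoundingMode changed it)
// before the low word is moved back to a GPR.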
3881 | 4141 |
3882 void MacroAssembler::LoadInstanceDescriptors(Register map, | 4142 void MacroAssembler::LoadInstanceDescriptors(Register map, |
3883 Register descriptors) { | 4143 Register descriptors) { |
3884 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); | 4144 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); |
3885 } | 4145 } |
3886 | 4146 |
3887 | 4147 |
3888 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { | 4148 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { |
3889 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 4149 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
3890 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); | 4150 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); |
3891 } | 4151 } |
3892 | 4152 |
3893 | 4153 |
3894 void MacroAssembler::EnumLength(Register dst, Register map) { | 4154 void MacroAssembler::EnumLength(Register dst, Register map) { |
3895 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); | 4155 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); |
3896 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 4156 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
3897 and_(dst, dst, Operand(Map::EnumLengthBits::kMask)); | 4157 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask); |
3898 SmiTag(dst); | 4158 SmiTag(dst); |
3899 } | 4159 } |
3900 | 4160 |
3901 | 4161 |
3902 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { | 4162 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { |
3903 Register empty_fixed_array_value = r6; | 4163 Register empty_fixed_array_value = r9; |
3904 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); | 4164 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); |
3905 Label next, start; | 4165 Label next, start; |
3906 mov(r2, r0); | 4166 mr(r5, r3); |
3907 | 4167 |
3908 // Check if the enum length field is properly initialized, indicating that | 4168 // Check if the enum length field is properly initialized, indicating that |
3909 // there is an enum cache. | 4169 // there is an enum cache. |
3910 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); | 4170 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); |
3911 | 4171 |
3912 EnumLength(r3, r1); | 4172 EnumLength(r6, r4); |
3913 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel))); | 4173 CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0); |
3914 b(eq, call_runtime); | 4174 beq(call_runtime); |
3915 | 4175 |
3916 jmp(&start); | 4176 b(&start); |
3917 | 4177 |
3918 bind(&next); | 4178 bind(&next); |
3919 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); | 4179 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); |
3920 | 4180 |
3921 // For all objects but the receiver, check that the cache is empty. | 4181 // For all objects but the receiver, check that the cache is empty. |
3922 EnumLength(r3, r1); | 4182 EnumLength(r6, r4); |
3923 cmp(r3, Operand(Smi::FromInt(0))); | 4183 CmpSmiLiteral(r6, Smi::FromInt(0), r0); |
3924 b(ne, call_runtime); | 4184 bne(call_runtime); |
3925 | 4185 |
3926 bind(&start); | 4186 bind(&start); |
3927 | 4187 |
3928 // Check that there are no elements. Register r2 contains the current JS | 4188 // Check that there are no elements. Register r5 contains the current JS |
3929 // object we've reached through the prototype chain. | 4189 // object we've reached through the prototype chain. |
3930 Label no_elements; | 4190 Label no_elements; |
3931 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset)); | 4191 LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset)); |
3932 cmp(r2, empty_fixed_array_value); | 4192 cmp(r5, empty_fixed_array_value); |
3933 b(eq, &no_elements); | 4193 beq(&no_elements); |
3934 | 4194 |
3935 // Second chance, the object may be using the empty slow element dictionary. | 4195 // Second chance, the object may be using the empty slow element dictionary. |
3936 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex); | 4196 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex); |
3937 b(ne, call_runtime); | 4197 bne(call_runtime); |
3938 | 4198 |
3939 bind(&no_elements); | 4199 bind(&no_elements); |
3940 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset)); | 4200 LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset)); |
3941 cmp(r2, null_value); | 4201 cmp(r5, null_value); |
3942 b(ne, &next); | 4202 bne(&next); |
3943 } | 4203 } |
3944 | 4204 |
3945 | 4205 |
| 4206 //////////////////////////////////////////////////////////////////////////////// |
| 4207 // |
| 4208 // New MacroAssembler Interfaces added for PPC |
| 4209 // |
| 4210 //////////////////////////////////////////////////////////////////////////////// |
| 4211 void MacroAssembler::LoadIntLiteral(Register dst, int value) { |
| 4212 mov(dst, Operand(value)); |
| 4213 } |
| 4214 |
| 4215 |
| 4216 void MacroAssembler::LoadSmiLiteral(Register dst, Smi *smi) { |
| 4217 mov(dst, Operand(smi)); |
| 4218 } |
| 4219 |
| 4220 |
| 4221 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, |
| 4222 double value, |
| 4223 Register scratch) { |
| 4224 #if V8_OOL_CONSTANT_POOL |
| 4225 // TODO(mbrandy): enable extended constant pool usage for doubles. |
| 4226 // See ARM commit e27ab337 for a reference. |
| 4227 if (is_constant_pool_available() && !is_constant_pool_full()) { |
| 4228 RelocInfo rinfo(pc_, value); |
| 4229 ConstantPoolAddEntry(rinfo); |
| 4230 #if V8_TARGET_ARCH_PPC64 |
 | 4231 // We use a 2-instruction sequence here for consistency with mov. |
| 4232 li(scratch, Operand::Zero()); |
| 4233 lfdx(result, MemOperand(kConstantPoolRegister, scratch)); |
| 4234 #else |
| 4235 lfd(result, MemOperand(kConstantPoolRegister, 0)); |
| 4236 #endif |
| 4237 return; |
| 4238 } |
| 4239 #endif |
| 4240 |
| 4241 // avoid gcc strict aliasing error using union cast |
| 4242 union { |
| 4243 double dval; |
| 4244 #if V8_TARGET_ARCH_PPC64 |
| 4245 intptr_t ival; |
| 4246 #else |
| 4247 intptr_t ival[2]; |
| 4248 #endif |
| 4249 } litVal; |
| 4250 |
| 4251 litVal.dval = value; |
| 4252 |
| 4253 #if V8_TARGET_ARCH_PPC64 |
| 4254 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { |
| 4255 mov(scratch, Operand(litVal.ival)); |
| 4256 mtfprd(result, scratch); |
| 4257 return; |
| 4258 } |
| 4259 #endif |
| 4260 |
| 4261 addi(sp, sp, Operand(-kDoubleSize)); |
| 4262 #if V8_TARGET_ARCH_PPC64 |
| 4263 mov(scratch, Operand(litVal.ival)); |
| 4264 std(scratch, MemOperand(sp)); |
| 4265 #else |
| 4266 LoadIntLiteral(scratch, litVal.ival[0]); |
| 4267 stw(scratch, MemOperand(sp, 0)); |
| 4268 LoadIntLiteral(scratch, litVal.ival[1]); |
| 4269 stw(scratch, MemOperand(sp, 4)); |
| 4270 #endif |
| 4271 nop(); // LHS/RAW optimization |
| 4272 lfd(result, MemOperand(sp, 0)); |
| 4273 addi(sp, sp, Operand(kDoubleSize)); |
| 4274 } |
| 4275 |
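// When no direct FPR<->GPR move is available, the literal takes a round trip
// through the stack: its bit pattern is written as one or two GPR words and
// reloaded with lfd. The union avoids type-punning through pointer casts;
// e.g. value == 1.0 is stored as the word(s) of 0x3FF0000000000000.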
| 4276 |
| 4277 void MacroAssembler::MovIntToDouble(DoubleRegister dst, |
| 4278 Register src, |
| 4279 Register scratch) { |
| 4280 // sign-extend src to 64-bit |
| 4281 #if V8_TARGET_ARCH_PPC64 |
| 4282 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { |
| 4283 mtfprwa(dst, src); |
| 4284 return; |
| 4285 } |
| 4286 #endif |
| 4287 |
| 4288 DCHECK(!src.is(scratch)); |
| 4289 subi(sp, sp, Operand(kDoubleSize)); |
| 4290 #if V8_TARGET_ARCH_PPC64 |
| 4291 extsw(scratch, src); |
| 4292 std(scratch, MemOperand(sp, 0)); |
| 4293 #else |
| 4294 srawi(scratch, src, 31); |
| 4295 stw(scratch, MemOperand(sp, Register::kExponentOffset)); |
| 4296 stw(src, MemOperand(sp, Register::kMantissaOffset)); |
| 4297 #endif |
| 4298 nop(); // LHS/RAW optimization |
| 4299 lfd(dst, MemOperand(sp, 0)); |
| 4300 addi(sp, sp, Operand(kDoubleSize)); |
| 4301 } |
| 4302 |
| 4303 |
| 4304 void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, |
| 4305 Register src, |
| 4306 Register scratch) { |
| 4307 // zero-extend src to 64-bit |
| 4308 #if V8_TARGET_ARCH_PPC64 |
| 4309 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { |
| 4310 mtfprwz(dst, src); |
| 4311 return; |
| 4312 } |
| 4313 #endif |
| 4314 |
| 4315 DCHECK(!src.is(scratch)); |
| 4316 subi(sp, sp, Operand(kDoubleSize)); |
| 4317 #if V8_TARGET_ARCH_PPC64 |
| 4318 clrldi(scratch, src, Operand(32)); |
| 4319 std(scratch, MemOperand(sp, 0)); |
| 4320 #else |
| 4321 li(scratch, Operand::Zero()); |
| 4322 stw(scratch, MemOperand(sp, Register::kExponentOffset)); |
| 4323 stw(src, MemOperand(sp, Register::kMantissaOffset)); |
| 4324 #endif |
| 4325 nop(); // LHS/RAW optimization |
| 4326 lfd(dst, MemOperand(sp, 0)); |
| 4327 addi(sp, sp, Operand(kDoubleSize)); |
| 4328 } |
| 4329 |
| 4330 |
| 4331 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, |
| 4332 #if !V8_TARGET_ARCH_PPC64 |
| 4333 Register src_hi, |
| 4334 #endif |
| 4335 Register src) { |
| 4336 #if V8_TARGET_ARCH_PPC64 |
| 4337 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { |
| 4338 mtfprd(dst, src); |
| 4339 return; |
| 4340 } |
| 4341 #endif |
| 4342 |
| 4343 subi(sp, sp, Operand(kDoubleSize)); |
| 4344 #if V8_TARGET_ARCH_PPC64 |
| 4345 std(src, MemOperand(sp, 0)); |
| 4346 #else |
| 4347 stw(src_hi, MemOperand(sp, Register::kExponentOffset)); |
| 4348 stw(src, MemOperand(sp, Register::kMantissaOffset)); |
| 4349 #endif |
| 4350 nop(); // LHS/RAW optimization |
| 4351 lfd(dst, MemOperand(sp, 0)); |
| 4352 addi(sp, sp, Operand(kDoubleSize)); |
| 4353 } |
| 4354 |
| 4355 |
| 4356 #if V8_TARGET_ARCH_PPC64 |
| 4357 void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, |
| 4358 Register src_hi, |
| 4359 Register src_lo, |
| 4360 Register scratch) { |
| 4361 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { |
| 4362 sldi(scratch, src_hi, Operand(32)); |
| 4363 rldimi(scratch, src_lo, 0, 32); |
| 4364 mtfprd(dst, scratch); |
| 4365 return; |
| 4366 } |
| 4367 |
| 4368 subi(sp, sp, Operand(kDoubleSize)); |
| 4369 stw(src_hi, MemOperand(sp, Register::kExponentOffset)); |
| 4370 stw(src_lo, MemOperand(sp, Register::kMantissaOffset)); |
| 4371 nop(); // LHS/RAW optimization |
| 4372 lfd(dst, MemOperand(sp)); |
| 4373 addi(sp, sp, Operand(kDoubleSize)); |
| 4374 } |
| 4375 #endif |
| 4376 |
| 4377 |
| 4378 void MacroAssembler::MovDoubleLowToInt(Register dst, |
| 4379 DoubleRegister src) { |
| 4380 #if V8_TARGET_ARCH_PPC64 |
| 4381 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { |
| 4382 mffprwz(dst, src); |
| 4383 return; |
| 4384 } |
| 4385 #endif |
| 4386 |
| 4387 subi(sp, sp, Operand(kDoubleSize)); |
| 4388 stfd(src, MemOperand(sp)); |
| 4389 nop(); // LHS/RAW optimization |
| 4390 lwz(dst, MemOperand(sp, Register::kMantissaOffset)); |
| 4391 addi(sp, sp, Operand(kDoubleSize)); |
| 4392 } |
| 4393 |
| 4394 |
| 4395 void MacroAssembler::MovDoubleHighToInt(Register dst, |
| 4396 DoubleRegister src) { |
| 4397 #if V8_TARGET_ARCH_PPC64 |
| 4398 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { |
| 4399 mffprd(dst, src); |
| 4400 srdi(dst, dst, Operand(32)); |
| 4401 return; |
| 4402 } |
| 4403 #endif |
| 4404 |
| 4405 subi(sp, sp, Operand(kDoubleSize)); |
| 4406 stfd(src, MemOperand(sp)); |
| 4407 nop(); // LHS/RAW optimization |
| 4408 lwz(dst, MemOperand(sp, Register::kExponentOffset)); |
| 4409 addi(sp, sp, Operand(kDoubleSize)); |
| 4410 } |
| 4411 |
| 4412 |
| 4413 void MacroAssembler::MovDoubleToInt64( |
| 4414 #if !V8_TARGET_ARCH_PPC64 |
| 4415 Register dst_hi, |
| 4416 #endif |
| 4417 Register dst, |
| 4418 DoubleRegister src) { |
| 4419 #if V8_TARGET_ARCH_PPC64 |
| 4420 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { |
| 4421 mffprd(dst, src); |
| 4422 return; |
| 4423 } |
| 4424 #endif |
| 4425 |
| 4426 subi(sp, sp, Operand(kDoubleSize)); |
| 4427 stfd(src, MemOperand(sp)); |
| 4428 nop(); // LHS/RAW optimization |
| 4429 #if V8_TARGET_ARCH_PPC64 |
| 4430 ld(dst, MemOperand(sp, 0)); |
| 4431 #else |
| 4432 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset)); |
| 4433 lwz(dst, MemOperand(sp, Register::kMantissaOffset)); |
| 4434 #endif |
| 4435 addi(sp, sp, Operand(kDoubleSize)); |
| 4436 } |
| 4437 |
| 4438 |
| 4439 void MacroAssembler::Add(Register dst, Register src, |
| 4440 intptr_t value, Register scratch) { |
| 4441 if (is_int16(value)) { |
| 4442 addi(dst, src, Operand(value)); |
| 4443 } else { |
| 4444 mov(scratch, Operand(value)); |
| 4445 add(dst, src, scratch); |
| 4446 } |
| 4447 } |
| 4448 |
| 4449 |
| 4450 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch, |
| 4451 CRegister cr) { |
| 4452 intptr_t value = src2.immediate(); |
| 4453 if (is_int16(value)) { |
| 4454 cmpi(src1, src2, cr); |
| 4455 } else { |
| 4456 mov(scratch, src2); |
| 4457 cmp(src1, scratch, cr); |
| 4458 } |
| 4459 } |
| 4460 |
| 4461 |
| 4462 void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch, |
| 4463 CRegister cr) { |
| 4464 intptr_t value = src2.immediate(); |
| 4465 if (is_uint16(value)) { |
| 4466 cmpli(src1, src2, cr); |
| 4467 } else { |
| 4468 mov(scratch, src2); |
| 4469 cmpl(src1, scratch, cr); |
| 4470 } |
| 4471 } |
| 4472 |
| 4473 |
| 4474 void MacroAssembler::Cmpwi(Register src1, const Operand& src2, |
| 4475 Register scratch, CRegister cr) { |
| 4476 intptr_t value = src2.immediate(); |
| 4477 if (is_int16(value)) { |
| 4478 cmpwi(src1, src2, cr); |
| 4479 } else { |
| 4480 mov(scratch, src2); |
| 4481 cmpw(src1, scratch, cr); |
| 4482 } |
| 4483 } |
| 4484 |
| 4485 |
| 4486 void MacroAssembler::Cmplwi(Register src1, const Operand& src2, |
| 4487 Register scratch, CRegister cr) { |
| 4488 intptr_t value = src2.immediate(); |
| 4489 if (is_uint16(value)) { |
| 4490 cmplwi(src1, src2, cr); |
| 4491 } else { |
| 4492 mov(scratch, src2); |
| 4493 cmplw(src1, scratch, cr); |
| 4494 } |
| 4495 } |
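// Editor's note (illustrative sketch, not part of the change under review):
// Add/Cmpi/Cmpwi take the single-instruction immediate form only when the
// value fits the signed 16-bit D-form field, while the logical compares
// Cmpli/Cmplwi use the unsigned 16-bit range.  Roughly:
#if 0
#include <cstdint>

bool FitsSignedImm16(intptr_t v) { return v >= -32768 && v <= 32767; }  // is_int16
bool FitsUnsignedImm16(intptr_t v) { return v >= 0 && v <= 65535; }     // is_uint16
#endif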
| 4496 |
| 4497 |
| 4498 void MacroAssembler::And(Register ra, Register rs, const Operand& rb, |
| 4499 RCBit rc) { |
| 4500 if (rb.is_reg()) { |
| 4501 and_(ra, rs, rb.rm(), rc); |
| 4502 } else { |
| 4503 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) { |
| 4504 andi(ra, rs, rb); |
| 4505 } else { |
| 4506 // mov handles the relocation. |
| 4507 DCHECK(!rs.is(r0)); |
| 4508 mov(r0, rb); |
| 4509 and_(ra, rs, r0, rc); |
| 4510 } |
| 4511 } |
| 4512 } |
| 4513 |
| 4514 |
| 4515 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) { |
| 4516 if (rb.is_reg()) { |
| 4517 orx(ra, rs, rb.rm(), rc); |
| 4518 } else { |
| 4519 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) { |
| 4520 ori(ra, rs, rb); |
| 4521 } else { |
| 4522 // mov handles the relocation. |
| 4523 DCHECK(!rs.is(r0)); |
| 4524 mov(r0, rb); |
| 4525 orx(ra, rs, r0, rc); |
| 4526 } |
| 4527 } |
| 4528 } |
| 4529 |
| 4530 |
| 4531 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb, |
| 4532 RCBit rc) { |
| 4533 if (rb.is_reg()) { |
| 4534 xor_(ra, rs, rb.rm(), rc); |
| 4535 } else { |
| 4536 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) { |
| 4537 xori(ra, rs, rb); |
| 4538 } else { |
| 4539 // mov handles the relocation. |
| 4540 DCHECK(!rs.is(r0)); |
| 4541 mov(r0, rb); |
| 4542 xor_(ra, rs, r0, rc); |
| 4543 } |
| 4544 } |
| 4545 } |
| 4546 |
| 4547 |
| 4548 void MacroAssembler::CmpSmiLiteral(Register src1, Smi *smi, Register scratch, |
| 4549 CRegister cr) { |
| 4550 #if V8_TARGET_ARCH_PPC64 |
| 4551 LoadSmiLiteral(scratch, smi); |
| 4552 cmp(src1, scratch, cr); |
| 4553 #else |
| 4554 Cmpi(src1, Operand(smi), scratch, cr); |
| 4555 #endif |
| 4556 } |
| 4557 |
| 4558 |
| 4559 void MacroAssembler::CmplSmiLiteral(Register src1, Smi *smi, Register scratch, |
| 4560 CRegister cr) { |
| 4561 #if V8_TARGET_ARCH_PPC64 |
| 4562 LoadSmiLiteral(scratch, smi); |
| 4563 cmpl(src1, scratch, cr); |
| 4564 #else |
| 4565 Cmpli(src1, Operand(smi), scratch, cr); |
| 4566 #endif |
| 4567 } |
| 4568 |
| 4569 |
| 4570 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi *smi, |
| 4571 Register scratch) { |
| 4572 #if V8_TARGET_ARCH_PPC64 |
| 4573 LoadSmiLiteral(scratch, smi); |
| 4574 add(dst, src, scratch); |
| 4575 #else |
| 4576 Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch); |
| 4577 #endif |
| 4578 } |
| 4579 |
| 4580 |
| 4581 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi *smi, |
| 4582 Register scratch) { |
| 4583 #if V8_TARGET_ARCH_PPC64 |
| 4584 LoadSmiLiteral(scratch, smi); |
| 4585 sub(dst, src, scratch); |
| 4586 #else |
| 4587 Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch); |
| 4588 #endif |
| 4589 } |
| 4590 |
| 4591 |
| 4592 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi *smi, |
| 4593 Register scratch, RCBit rc) { |
| 4594 #if V8_TARGET_ARCH_PPC64 |
| 4595 LoadSmiLiteral(scratch, smi); |
| 4596 and_(dst, src, scratch, rc); |
| 4597 #else |
| 4598 And(dst, src, Operand(smi), rc); |
| 4599 #endif |
| 4600 } |
| 4601 |
| 4602 |
| 4603 // Load a "pointer" sized value from the memory location |
| 4604 void MacroAssembler::LoadP(Register dst, const MemOperand& mem, |
| 4605 Register scratch) { |
| 4606 int offset = mem.offset(); |
| 4607 |
| 4608 if (!scratch.is(no_reg) && !is_int16(offset)) { |
| 4609 /* cannot use d-form */ |
| 4610 LoadIntLiteral(scratch, offset); |
| 4611 #if V8_TARGET_ARCH_PPC64 |
| 4612 ldx(dst, MemOperand(mem.ra(), scratch)); |
| 4613 #else |
| 4614 lwzx(dst, MemOperand(mem.ra(), scratch)); |
| 4615 #endif |
| 4616 } else { |
| 4617 #if V8_TARGET_ARCH_PPC64 |
| 4618 int misaligned = (offset & 3); |
| 4619 if (misaligned) { |
| 4620 // adjust base to conform to offset alignment requirements |
| 4621 // TODO: enhance to use scratch if dst is unsuitable |
| 4622 DCHECK(!dst.is(r0)); |
| 4623 addi(dst, mem.ra(), Operand((offset & 3) - 4)); |
| 4624 ld(dst, MemOperand(dst, (offset & ~3) + 4)); |
| 4625 } else { |
| 4626 ld(dst, mem); |
| 4627 } |
| 4628 #else |
| 4629 lwz(dst, mem); |
| 4630 #endif |
| 4631 } |
| 4632 } |
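// Editor's note (illustrative sketch, not part of the change under review):
// on PPC64, ld is a DS-form instruction and requires a displacement that is a
// multiple of 4.  The rebasing above keeps the effective address unchanged
// while realigning the displacement, e.g. offset 7 becomes base-1 with
// displacement 8.  A quick check of that identity:
#if 0
#include <cassert>
#include <cstdint>

void CheckMisalignedSplit(intptr_t base, int offset) {
  int adjust = (offset & 3) - 4;                  // added to the base register
  int disp = (offset & ~3) + 4;                   // new DS-form displacement
  assert((disp & 3) == 0);                        // displacement is 4-aligned
  assert(base + adjust + disp == base + offset);  // same effective address
}
#endif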
| 4633 |
| 4634 |
| 4635 // Store a "pointer" sized value to the memory location |
| 4636 void MacroAssembler::StoreP(Register src, const MemOperand& mem, |
| 4637 Register scratch) { |
| 4638 int offset = mem.offset(); |
| 4639 |
| 4640 if (!scratch.is(no_reg) && !is_int16(offset)) { |
| 4641 /* cannot use d-form */ |
| 4642 LoadIntLiteral(scratch, offset); |
| 4643 #if V8_TARGET_ARCH_PPC64 |
| 4644 stdx(src, MemOperand(mem.ra(), scratch)); |
| 4645 #else |
| 4646 stwx(src, MemOperand(mem.ra(), scratch)); |
| 4647 #endif |
| 4648 } else { |
| 4649 #if V8_TARGET_ARCH_PPC64 |
| 4650 int misaligned = (offset & 3); |
| 4651 if (misaligned) { |
| 4652 // adjust base to conform to offset alignment requirements |
| 4653 // a suitable scratch is required here |
| 4654 DCHECK(!scratch.is(no_reg)); |
| 4655 if (scratch.is(r0)) { |
| 4656 LoadIntLiteral(scratch, offset); |
| 4657 stdx(src, MemOperand(mem.ra(), scratch)); |
| 4658 } else { |
| 4659 addi(scratch, mem.ra(), Operand((offset & 3) - 4)); |
| 4660 std(src, MemOperand(scratch, (offset & ~3) + 4)); |
| 4661 } |
| 4662 } else { |
| 4663 std(src, mem); |
| 4664 } |
| 4665 #else |
| 4666 stw(src, mem); |
| 4667 #endif |
| 4668 } |
| 4669 } |
| 4670 |
| 4671 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem, |
| 4672 Register scratch) { |
| 4673 int offset = mem.offset(); |
| 4674 |
| 4675 if (!scratch.is(no_reg) && !is_int16(offset)) { |
| 4676 /* cannot use d-form */ |
| 4677 LoadIntLiteral(scratch, offset); |
| 4678 #if V8_TARGET_ARCH_PPC64 |
| 4679 // lwax(dst, MemOperand(mem.ra(), scratch)); |
| 4680 DCHECK(0); // lwax not yet implemented |
| 4681 #else |
| 4682 lwzx(dst, MemOperand(mem.ra(), scratch)); |
| 4683 #endif |
| 4684 } else { |
| 4685 #if V8_TARGET_ARCH_PPC64 |
| 4686 int misaligned = (offset & 3); |
| 4687 if (misaligned) { |
| 4688 // adjust base to conform to offset alignment requirements |
| 4689 // TODO: enhance to use scratch if dst is unsuitable |
| 4690 DCHECK(!dst.is(r0)); |
| 4691 addi(dst, mem.ra(), Operand((offset & 3) - 4)); |
| 4692 lwa(dst, MemOperand(dst, (offset & ~3) + 4)); |
| 4693 } else { |
| 4694 lwa(dst, mem); |
| 4695 } |
| 4696 #else |
| 4697 lwz(dst, mem); |
| 4698 #endif |
| 4699 } |
| 4700 } |
| 4701 |
| 4702 |
| 4703 // Variable length depending on whether offset fits into immediate field |
| 4704 // MemOperand currently only supports d-form |
| 4705 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem, |
| 4706 Register scratch, bool updateForm) { |
| 4707 Register base = mem.ra(); |
| 4708 int offset = mem.offset(); |
| 4709 |
| 4710 bool use_dform = true; |
| 4711 if (!is_int16(offset)) { |
| 4712 use_dform = false; |
| 4713 LoadIntLiteral(scratch, offset); |
| 4714 } |
| 4715 |
| 4716 if (!updateForm) { |
| 4717 if (use_dform) { |
| 4718 lwz(dst, mem); |
| 4719 } else { |
| 4720 lwzx(dst, MemOperand(base, scratch)); |
| 4721 } |
| 4722 } else { |
| 4723 if (use_dform) { |
| 4724 lwzu(dst, mem); |
| 4725 } else { |
| 4726 lwzux(dst, MemOperand(base, scratch)); |
| 4727 } |
| 4728 } |
| 4729 } |
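// Editor's note (illustrative sketch, not part of the change under review):
// with updateForm set, the PPC "update" variants (lwzu/lwzux) are selected;
// besides loading, they write the effective address back into the base
// register.  Roughly, in host terms (word-granular for brevity):
#if 0
#include <cstdint>

uint32_t LoadWordWithUpdate(uint32_t** base, int offset_words) {
  *base += offset_words;  // base register is advanced to the effective address
  return **base;          // and the load is performed from that address
}
#endif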
| 4730 |
| 4731 |
| 4732 // Variable length depending on whether offset fits into immediate field |
| 4733 // MemOperand currently only supports d-form |
| 4734 void MacroAssembler::StoreWord(Register src, const MemOperand& mem, |
| 4735 Register scratch, bool updateForm) { |
| 4736 Register base = mem.ra(); |
| 4737 int offset = mem.offset(); |
| 4738 |
| 4739 bool use_dform = true; |
| 4740 if (!is_int16(offset)) { |
| 4741 use_dform = false; |
| 4742 LoadIntLiteral(scratch, offset); |
| 4743 } |
| 4744 |
| 4745 if (!updateForm) { |
| 4746 if (use_dform) { |
| 4747 stw(src, mem); |
| 4748 } else { |
| 4749 stwx(src, MemOperand(base, scratch)); |
| 4750 } |
| 4751 } else { |
| 4752 if (use_dform) { |
| 4753 stwu(src, mem); |
| 4754 } else { |
| 4755 stwux(src, MemOperand(base, scratch)); |
| 4756 } |
| 4757 } |
| 4758 } |
| 4759 |
| 4760 |
| 4761 // Variable length depending on whether offset fits into immediate field |
| 4762 // MemOperand currently only supports d-form |
| 4763 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem, |
| 4764 Register scratch, bool updateForm) { |
| 4765 Register base = mem.ra(); |
| 4766 int offset = mem.offset(); |
| 4767 |
| 4768 bool use_dform = true; |
| 4769 if (!is_int16(offset)) { |
| 4770 use_dform = false; |
| 4771 LoadIntLiteral(scratch, offset); |
| 4772 } |
| 4773 |
| 4774 if (!updateForm) { |
| 4775 if (use_dform) { |
| 4776 lhz(dst, mem); |
| 4777 } else { |
| 4778 lhzx(dst, MemOperand(base, scratch)); |
| 4779 } |
| 4780 } else { |
| 4781 // If updateForm is ever true, then lhzu will |
| 4782 // need to be implemented |
| 4783 assert(0); |
| 4784 #if 0 // LoadHalfWord w/ update not yet needed |
| 4785 if (use_dform) { |
| 4786 lhzu(dst, mem); |
| 4787 } else { |
| 4788 lhzux(dst, MemOperand(base, scratch)); |
| 4789 } |
| 4790 #endif |
| 4791 } |
| 4792 } |
| 4793 |
| 4794 |
| 4795 // Variable length depending on whether offset fits into immediate field |
| 4796 // MemOperand currently only supports d-form |
| 4797 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem, |
| 4798 Register scratch, bool updateForm) { |
| 4799 Register base = mem.ra(); |
| 4800 int offset = mem.offset(); |
| 4801 |
| 4802 bool use_dform = true; |
| 4803 if (!is_int16(offset)) { |
| 4804 use_dform = false; |
| 4805 LoadIntLiteral(scratch, offset); |
| 4806 } |
| 4807 |
| 4808 if (!updateForm) { |
| 4809 if (use_dform) { |
| 4810 sth(src, mem); |
| 4811 } else { |
| 4812 sthx(src, MemOperand(base, scratch)); |
| 4813 } |
| 4814 } else { |
| 4815 // If updateForm is ever true, then sthu will |
| 4816 // need to be implemented |
| 4817 assert(0); |
| 4818 #if 0 // StoreHalfWord w/ update not yet needed |
| 4819 if (use_dform) { |
| 4820 sthu(src, mem); |
| 4821 } else { |
| 4822 sthux(src, MemOperand(base, scratch)); |
| 4823 } |
| 4824 #endif |
| 4825 } |
| 4826 } |
| 4827 |
| 4828 |
| 4829 // Variable length depending on whether offset fits into immediate field |
| 4830 // MemOperand currently only supports d-form |
| 4831 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem, |
| 4832 Register scratch, bool updateForm) { |
| 4833 Register base = mem.ra(); |
| 4834 int offset = mem.offset(); |
| 4835 |
| 4836 bool use_dform = true; |
| 4837 if (!is_int16(offset)) { |
| 4838 use_dform = false; |
| 4839 LoadIntLiteral(scratch, offset); |
| 4840 } |
| 4841 |
| 4842 if (!updateForm) { |
| 4843 if (use_dform) { |
| 4844 lbz(dst, mem); |
| 4845 } else { |
| 4846 lbzx(dst, MemOperand(base, scratch)); |
| 4847 } |
| 4848 } else { |
| 4849 // If updateForm is ever true, then lbzu will |
| 4850 // need to be implemented |
| 4851 assert(0); |
| 4852 #if 0 // LoadByte w/ update not yet needed |
| 4853 if (use_dform) { |
| 4854 lbzu(dst, mem); |
| 4855 } else { |
| 4856 lbzux(dst, MemOperand(base, scratch)); |
| 4857 } |
| 4858 #endif |
| 4859 } |
| 4860 } |
| 4861 |
| 4862 |
| 4863 // Variable length depending on whether offset fits into immediate field |
| 4864 // MemOperand currently only supports d-form |
| 4865 void MacroAssembler::StoreByte(Register src, const MemOperand& mem, |
| 4866 Register scratch, bool updateForm) { |
| 4867 Register base = mem.ra(); |
| 4868 int offset = mem.offset(); |
| 4869 |
| 4870 bool use_dform = true; |
| 4871 if (!is_int16(offset)) { |
| 4872 use_dform = false; |
| 4873 LoadIntLiteral(scratch, offset); |
| 4874 } |
| 4875 |
| 4876 if (!updateForm) { |
| 4877 if (use_dform) { |
| 4878 stb(src, mem); |
| 4879 } else { |
| 4880 stbx(src, MemOperand(base, scratch)); |
| 4881 } |
| 4882 } else { |
| 4883 // If updateForm is ever true, then stbu will |
| 4884 // need to be implemented |
| 4885 assert(0); |
| 4886 #if 0 // StoreByte w/ update not yet needed |
| 4887 if (use_dform) { |
| 4888 stbu(src, mem); |
| 4889 } else { |
| 4890 stbux(src, MemOperand(base, scratch)); |
| 4891 } |
| 4892 #endif |
| 4893 } |
| 4894 } |
| 4895 |
| 4896 |
| 4897 void MacroAssembler::LoadRepresentation(Register dst, |
| 4898 const MemOperand& mem, |
| 4899 Representation r, |
| 4900 Register scratch) { |
| 4901 DCHECK(!r.IsDouble()); |
| 4902 if (r.IsInteger8()) { |
| 4903 LoadByte(dst, mem, scratch); |
| 4904 extsb(dst, dst); |
| 4905 } else if (r.IsUInteger8()) { |
| 4906 LoadByte(dst, mem, scratch); |
| 4907 } else if (r.IsInteger16()) { |
| 4908 LoadHalfWord(dst, mem, scratch); |
| 4909 extsh(dst, dst); |
| 4910 } else if (r.IsUInteger16()) { |
| 4911 LoadHalfWord(dst, mem, scratch); |
| 4912 #if V8_TARGET_ARCH_PPC64 |
| 4913 } else if (r.IsInteger32()) { |
| 4914 LoadWord(dst, mem, scratch); |
| 4915 #endif |
| 4916 } else { |
| 4917 LoadP(dst, mem, scratch); |
| 4918 } |
| 4919 } |
| 4920 |
| 4921 |
| 4922 void MacroAssembler::StoreRepresentation(Register src, |
| 4923 const MemOperand& mem, |
| 4924 Representation r, |
| 4925 Register scratch) { |
| 4926 DCHECK(!r.IsDouble()); |
| 4927 if (r.IsInteger8() || r.IsUInteger8()) { |
| 4928 StoreByte(src, mem, scratch); |
| 4929 } else if (r.IsInteger16() || r.IsUInteger16()) { |
| 4930 StoreHalfWord(src, mem, scratch); |
| 4931 #if V8_TARGET_ARCH_PPC64 |
| 4932 } else if (r.IsInteger32()) { |
| 4933 StoreWord(src, mem, scratch); |
| 4934 #endif |
| 4935 } else { |
| 4936 if (r.IsHeapObject()) { |
| 4937 AssertNotSmi(src); |
| 4938 } else if (r.IsSmi()) { |
| 4939 AssertSmi(src); |
| 4940 } |
| 4941 StoreP(src, mem, scratch); |
| 4942 } |
| 4943 } |
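// Editor's note (illustrative sketch, not part of the change under review):
// on the load side, LoadRepresentation sign-extends the signed sub-word
// representations (extsb/extsh) and leaves the unsigned ones zero-extended,
// matching the natural behaviour of lbz/lhz.  In host terms:
#if 0
#include <cstdint>

int32_t LoadSubWord(const void* p, int bits, bool is_signed) {
  if (bits == 8) {
    return is_signed ? *static_cast<const int8_t*>(p)     // lbz + extsb
                     : *static_cast<const uint8_t*>(p);   // lbz
  }
  return is_signed ? *static_cast<const int16_t*>(p)      // lhz + extsh
                   : *static_cast<const uint16_t*>(p);    // lhz
}
#endif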
| 4944 |
| 4945 |
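// Editor's note (descriptive summary of the function below): an
// AllocationMemento, when present, sits immediately after the JSArray in new
// space.  The code computes the address just past that would-be memento,
// branches to no_memento_found if it lies outside
// [new_space_start, new_space_allocation_top], and otherwise compares the
// candidate memento's map against allocation_memento_map so the caller can
// test the resulting condition.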
3946 void MacroAssembler::TestJSArrayForAllocationMemento( | 4946 void MacroAssembler::TestJSArrayForAllocationMemento( |
3947 Register receiver_reg, | 4947 Register receiver_reg, |
3948 Register scratch_reg, | 4948 Register scratch_reg, |
3949 Label* no_memento_found) { | 4949 Label* no_memento_found) { |
3950 ExternalReference new_space_start = | 4950 ExternalReference new_space_start = |
3951 ExternalReference::new_space_start(isolate()); | 4951 ExternalReference::new_space_start(isolate()); |
3952 ExternalReference new_space_allocation_top = | 4952 ExternalReference new_space_allocation_top = |
3953 ExternalReference::new_space_allocation_top_address(isolate()); | 4953 ExternalReference::new_space_allocation_top_address(isolate()); |
3954 add(scratch_reg, receiver_reg, | 4954 addi(scratch_reg, receiver_reg, |
3955 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); | 4955 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); |
3956 cmp(scratch_reg, Operand(new_space_start)); | 4956 Cmpi(scratch_reg, Operand(new_space_start), r0); |
3957 b(lt, no_memento_found); | 4957 blt(no_memento_found); |
3958 mov(ip, Operand(new_space_allocation_top)); | 4958 mov(ip, Operand(new_space_allocation_top)); |
3959 ldr(ip, MemOperand(ip)); | 4959 LoadP(ip, MemOperand(ip)); |
3960 cmp(scratch_reg, ip); | 4960 cmp(scratch_reg, ip); |
3961 b(gt, no_memento_found); | 4961 bgt(no_memento_found); |
3962 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); | 4962 LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); |
3963 cmp(scratch_reg, | 4963 Cmpi(scratch_reg, |
3964 Operand(isolate()->factory()->allocation_memento_map())); | 4964 Operand(isolate()->factory()->allocation_memento_map()), r0); |
3965 } | 4965 } |
3966 | 4966 |
3967 | 4967 |
3968 Register GetRegisterThatIsNotOneOf(Register reg1, | 4968 Register GetRegisterThatIsNotOneOf(Register reg1, |
3969 Register reg2, | 4969 Register reg2, |
3970 Register reg3, | 4970 Register reg3, |
3971 Register reg4, | 4971 Register reg4, |
3972 Register reg5, | 4972 Register reg5, |
3973 Register reg6) { | 4973 Register reg6) { |
3974 RegList regs = 0; | 4974 RegList regs = 0; |
(...skipping 18 matching lines...) |
3993 Register object, | 4993 Register object, |
3994 Register scratch0, | 4994 Register scratch0, |
3995 Register scratch1, | 4995 Register scratch1, |
3996 Label* found) { | 4996 Label* found) { |
3997 DCHECK(!scratch1.is(scratch0)); | 4997 DCHECK(!scratch1.is(scratch0)); |
3998 Factory* factory = isolate()->factory(); | 4998 Factory* factory = isolate()->factory(); |
3999 Register current = scratch0; | 4999 Register current = scratch0; |
4000 Label loop_again; | 5000 Label loop_again; |
4001 | 5001 |
4002 // scratch contained elements pointer. | 5002 // scratch contained elements pointer. |
4003 mov(current, object); | 5003 mr(current, object); |
4004 | 5004 |
4005 // Loop based on the map going up the prototype chain. | 5005 // Loop based on the map going up the prototype chain. |
4006 bind(&loop_again); | 5006 bind(&loop_again); |
4007 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); | 5007 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset)); |
4008 ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); | 5008 lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); |
4009 DecodeField<Map::ElementsKindBits>(scratch1); | 5009 DecodeField<Map::ElementsKindBits>(scratch1); |
4010 cmp(scratch1, Operand(DICTIONARY_ELEMENTS)); | 5010 cmpi(scratch1, Operand(DICTIONARY_ELEMENTS)); |
4011 b(eq, found); | 5011 beq(found); |
4012 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); | 5012 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset)); |
4013 cmp(current, Operand(factory->null_value())); | 5013 Cmpi(current, Operand(factory->null_value()), r0); |
4014 b(ne, &loop_again); | 5014 bne(&loop_again); |
4015 } | 5015 } |
4016 | 5016 |
4017 | 5017 |
4018 #ifdef DEBUG | 5018 #ifdef DEBUG |
4019 bool AreAliased(Register reg1, | 5019 bool AreAliased(Register reg1, |
4020 Register reg2, | 5020 Register reg2, |
4021 Register reg3, | 5021 Register reg3, |
4022 Register reg4, | 5022 Register reg4, |
4023 Register reg5, | 5023 Register reg5, |
4024 Register reg6, | 5024 Register reg6, |
(...skipping 43 matching lines...) |
4068 DCHECK(masm_.pc_ == address_ + size_); | 5068 DCHECK(masm_.pc_ == address_ + size_); |
4069 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 5069 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
4070 } | 5070 } |
4071 | 5071 |
4072 | 5072 |
4073 void CodePatcher::Emit(Instr instr) { | 5073 void CodePatcher::Emit(Instr instr) { |
4074 masm()->emit(instr); | 5074 masm()->emit(instr); |
4075 } | 5075 } |
4076 | 5076 |
4077 | 5077 |
4078 void CodePatcher::Emit(Address addr) { | |
4079 masm()->emit(reinterpret_cast<Instr>(addr)); | |
4080 } | |
4081 | |
4082 | |
4083 void CodePatcher::EmitCondition(Condition cond) { | 5078 void CodePatcher::EmitCondition(Condition cond) { |
4084 Instr instr = Assembler::instr_at(masm_.pc_); | 5079 Instr instr = Assembler::instr_at(masm_.pc_); |
4085 instr = (instr & ~kCondMask) | cond; | 5080 switch (cond) { |
| 5081 case eq: |
| 5082 instr = (instr & ~kCondMask) | BT; |
| 5083 break; |
| 5084 case ne: |
| 5085 instr = (instr & ~kCondMask) | BF; |
| 5086 break; |
| 5087 default: |
| 5088 UNIMPLEMENTED(); |
| 5089 } |
4086 masm_.emit(instr); | 5090 masm_.emit(instr); |
4087 } | 5091 } |
4088 | 5092 |
4089 | 5093 |
4090 void MacroAssembler::TruncatingDiv(Register result, | 5094 void MacroAssembler::TruncatingDiv(Register result, |
4091 Register dividend, | 5095 Register dividend, |
4092 int32_t divisor) { | 5096 int32_t divisor) { |
4093 DCHECK(!dividend.is(result)); | 5097 DCHECK(!dividend.is(result)); |
4094 DCHECK(!dividend.is(ip)); | 5098 DCHECK(!dividend.is(r0)); |
4095 DCHECK(!result.is(ip)); | 5099 DCHECK(!result.is(r0)); |
4096 MultiplierAndShift ms(divisor); | 5100 MultiplierAndShift ms(divisor); |
4097 mov(ip, Operand(ms.multiplier())); | 5101 mov(r0, Operand(ms.multiplier())); |
4098 smull(ip, result, dividend, ip); | 5102 mulhw(result, dividend, r0); |
4099 if (divisor > 0 && ms.multiplier() < 0) { | 5103 if (divisor > 0 && ms.multiplier() < 0) { |
4100 add(result, result, Operand(dividend)); | 5104 add(result, result, dividend); |
4101 } | 5105 } |
4102 if (divisor < 0 && ms.multiplier() > 0) { | 5106 if (divisor < 0 && ms.multiplier() > 0) { |
4103 sub(result, result, Operand(dividend)); | 5107 sub(result, result, dividend); |
4104 } | 5108 } |
4105 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); | 5109 if (ms.shift() > 0) srawi(result, result, ms.shift()); |
4106 add(result, result, Operand(dividend, LSR, 31)); | 5110 ExtractBit(r0, dividend, 31); |
| 5111 add(result, result, r0); |
4107 } | 5112 } |
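// Editor's note (illustrative sketch, not part of the change under review):
// TruncatingDiv divides by a compile-time constant via a precomputed
// multiplier and shift ("magic number" division).  With m and s standing for
// ms.multiplier() and ms.shift(), the sequence above computes roughly:
#if 0
#include <cstdint>

int32_t TruncatingDivByConstant(int32_t dividend, int32_t divisor,
                                int32_t m, int s) {
  int32_t q = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * m) >> 32);  // mulhw: high 32 bits
  if (divisor > 0 && m < 0) q += dividend;          // correction, as above
  if (divisor < 0 && m > 0) q -= dividend;
  q >>= s;                                          // srawi: arithmetic shift
  q += static_cast<uint32_t>(dividend) >> 31;       // +1 when dividend < 0,
  return q;                                         // rounding toward zero
}
#endif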
4108 | 5113 |
4109 | 5114 |
4110 } } // namespace v8::internal | 5115 } } // namespace v8::internal |
4111 | 5116 |
4112 #endif // V8_TARGET_ARCH_ARM | 5117 #endif // V8_TARGET_ARCH_PPC |