OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // |
| 3 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 4 // |
2 // Use of this source code is governed by a BSD-style license that can be | 5 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 6 // found in the LICENSE file. |
4 | 7 |
| 8 #include <assert.h> // For assert |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 9 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 10 |
7 #include "src/v8.h" | 11 #include "src/v8.h" |
8 | 12 |
9 #if V8_TARGET_ARCH_ARM | 13 #if V8_TARGET_ARCH_PPC |
10 | 14 |
11 #include "src/bootstrapper.h" | 15 #include "src/bootstrapper.h" |
12 #include "src/codegen.h" | 16 #include "src/codegen.h" |
13 #include "src/cpu-profiler.h" | 17 #include "src/cpu-profiler.h" |
14 #include "src/debug.h" | 18 #include "src/debug.h" |
15 #include "src/isolate-inl.h" | 19 #include "src/isolate-inl.h" |
16 #include "src/runtime.h" | 20 #include "src/runtime.h" |
17 | 21 |
18 namespace v8 { | 22 namespace v8 { |
19 namespace internal { | 23 namespace internal { |
20 | 24 |
21 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) | 25 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) |
22 : Assembler(arg_isolate, buffer, size), | 26 : Assembler(arg_isolate, buffer, size), |
23 generating_stub_(false), | 27 generating_stub_(false), |
24 has_frame_(false) { | 28 has_frame_(false) { |
25 if (isolate() != NULL) { | 29 if (isolate() != NULL) { |
26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 30 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
27 isolate()); | 31 isolate()); |
28 } | 32 } |
29 } | 33 } |
30 | 34 |
31 | 35 |
32 void MacroAssembler::Jump(Register target, Condition cond) { | 36 void MacroAssembler::Jump(Register target, Condition cond) { |
33 bx(target, cond); | 37 ASSERT(cond == al); |
| 38 mtctr(target); |
| 39 bctr(); |
34 } | 40 } |
35 | 41 |
36 | 42 |
37 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, | 43 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, |
38 Condition cond) { | 44 Condition cond, CRegister cr) { |
39 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 45 Label skip; |
40 mov(pc, Operand(target, rmode), LeaveCC, cond); | 46 |
| 47 if (cond != al) b(NegateCondition(cond), &skip, cr); |
| 48 |
| 49 ASSERT(rmode == RelocInfo::CODE_TARGET || |
| 50 rmode == RelocInfo::RUNTIME_ENTRY); |
| 51 |
| 52 mov(r0, Operand(target, rmode)); |
| 53 mtctr(r0); |
| 54 bctr(); |
| 55 |
| 56 bind(&skip); |
| 57 // mov(pc, Operand(target, rmode), LeaveCC, cond); |
41 } | 58 } |
42 | 59 |
43 | 60 |
44 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, | 61 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, |
45 Condition cond) { | 62 Condition cond, CRegister cr) { |
46 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | 63 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
47 Jump(reinterpret_cast<intptr_t>(target), rmode, cond); | 64 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr); |
48 } | 65 } |
49 | 66 |
50 | 67 |
51 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, | 68 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, |
52 Condition cond) { | 69 Condition cond) { |
53 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 70 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
54 // 'code' is always generated ARM code, never THUMB code | 71 // 'code' is always generated PPC code |
55 AllowDeferredHandleDereference embedding_raw_address; | 72 AllowDeferredHandleDereference embedding_raw_address; |
56 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); | 73 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); |
57 } | 74 } |
58 | 75 |
59 | 76 |
60 int MacroAssembler::CallSize(Register target, Condition cond) { | 77 int MacroAssembler::CallSize(Register target, Condition cond) { |
61 return kInstrSize; | 78 return 2 * kInstrSize; |
62 } | 79 } |
63 | 80 |
64 | 81 |
65 void MacroAssembler::Call(Register target, Condition cond) { | 82 void MacroAssembler::Call(Register target, Condition cond) { |
66 // Block constant pool for the call instruction sequence. | 83 BlockTrampolinePoolScope block_trampoline_pool(this); |
67 BlockConstPoolScope block_const_pool(this); | |
68 Label start; | 84 Label start; |
69 bind(&start); | 85 bind(&start); |
70 blx(target, cond); | 86 ASSERT(cond == al); // in preparation for removing the condition argument |
| 87 |
| 88 // Statement positions are expected to be recorded when the target |
| 89 // address is loaded. |
| 90 positions_recorder()->WriteRecordedPositions(); |
| 91 |
| 92 // Branch via the link register, setting the LK bit to record the return point. |
| 93 mtlr(target); |
| 94 bclr(BA, SetLK); |
| 95 |
71 ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); | 96 ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); |
72 } | 97 } |
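PPC can branch indirectly only through the special-purpose registers CTR and LR, which is why Jump above moves the target into CTR (mtctr/bctr) while Call goes through the link register. Annotated shape of the call pair (a sketch; BA and SetLK are this port's names for the branch-always condition and the LK bit):

    mtlr(target);     // LR <- target
    bclr(BA, SetLK);  // branch to LR; LK=1 writes the address of the following
                      // instruction back into LR, i.e. the callee's return point

These two fixed instructions are also why CallSize(Register, ...) above reports 2 * kInstrSize.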
73 | 98 |
74 | 99 |
75 int MacroAssembler::CallSize( | 100 int MacroAssembler::CallSize( |
76 Address target, RelocInfo::Mode rmode, Condition cond) { | 101 Address target, RelocInfo::Mode rmode, Condition cond) { |
77 Instr mov_instr = cond | MOV | LeaveCC; | |
78 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); | 102 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); |
79 return kInstrSize + | 103 return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize; |
80 mov_operand.instructions_required(this, mov_instr) * kInstrSize; | |
81 } | 104 } |
82 | 105 |
83 | 106 |
84 int MacroAssembler::CallStubSize( | 107 int MacroAssembler::CallSizeNotPredictableCodeSize( |
85 CodeStub* stub, TypeFeedbackId ast_id, Condition cond) { | 108 Address target, RelocInfo::Mode rmode, Condition cond) { |
86 return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); | 109 return (2 + kMovInstructionsNoConstantPool) * kInstrSize; |
87 } | |
88 | |
89 | |
90 int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate, | |
91 Address target, | |
92 RelocInfo::Mode rmode, | |
93 Condition cond) { | |
94 Instr mov_instr = cond | MOV | LeaveCC; | |
95 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); | |
96 return kInstrSize + | |
97 mov_operand.instructions_required(NULL, mov_instr) * kInstrSize; | |
98 } | 110 } |
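For scale: PPC instructions are a fixed 4 bytes (kInstrSize), so a call whose target immediate can be materialized with a two-instruction lis/ori pair occupies (2 + 2) * kInstrSize = 16 bytes, while CallSizeNotPredictableCodeSize always budgets the worst-case mov expansion (kMovInstructionsNoConstantPool), so the size it reports never depends on the particular address.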
99 | 111 |
100 | 112 |
101 void MacroAssembler::Call(Address target, | 113 void MacroAssembler::Call(Address target, |
102 RelocInfo::Mode rmode, | 114 RelocInfo::Mode rmode, |
103 Condition cond, | 115 Condition cond) { |
104 TargetAddressStorageMode mode) { | 116 BlockTrampolinePoolScope block_trampoline_pool(this); |
105 // Block constant pool for the call instruction sequence. | 117 ASSERT(cond == al); |
106 BlockConstPoolScope block_const_pool(this); | |
107 Label start; | |
108 bind(&start); | |
109 | |
110 bool old_predictable_code_size = predictable_code_size(); | |
111 if (mode == NEVER_INLINE_TARGET_ADDRESS) { | |
112 set_predictable_code_size(true); | |
113 } | |
114 | 118 |
115 #ifdef DEBUG | 119 #ifdef DEBUG |
116 // Check the expected size before generating code to ensure we assume the same | 120 // Check the expected size before generating code to ensure we assume the same |
117 // constant pool availability (e.g., whether constant pool is full or not). | 121 // constant pool availability (e.g., whether constant pool is full or not). |
118 int expected_size = CallSize(target, rmode, cond); | 122 int expected_size = CallSize(target, rmode, cond); |
| 123 Label start; |
| 124 bind(&start); |
119 #endif | 125 #endif |
120 | 126 |
121 // Call sequence on V7 or later may be : | |
122 // movw ip, #... @ call address low 16 | |
123 // movt ip, #... @ call address high 16 | |
124 // blx ip | |
125 // @ return address | |
126 // Or for pre-V7 or values that may be back-patched | |
127 // to avoid ICache flushes: | |
128 // ldr ip, [pc, #...] @ call address | |
129 // blx ip | |
130 // @ return address | |
131 | |
132 // Statement positions are expected to be recorded when the target | 127 // Statement positions are expected to be recorded when the target |
133 // address is loaded. The mov method will automatically record | 128 // address is loaded. |
134 // positions when pc is the target, since this is not the case here | |
135 // we have to do it explicitly. | |
136 positions_recorder()->WriteRecordedPositions(); | 129 positions_recorder()->WriteRecordedPositions(); |
137 | 130 |
138 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode)); | 131 // This can likely be optimized to make use of bc() with a 24-bit relative offset |
139 blx(ip, cond); | 132 // |
| 133 // RecordRelocInfo(x.rmode_, x.imm_); |
| 134 // bc( BA, .... offset, LKset); |
| 135 // |
| 136 |
| 137 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode)); |
| 138 mtlr(ip); |
| 139 bclr(BA, SetLK); |
140 | 140 |
141 ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); | 141 ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); |
142 if (mode == NEVER_INLINE_TARGET_ADDRESS) { | |
143 set_predictable_code_size(old_predictable_code_size); | |
144 } | |
145 } | 142 } |
146 | 143 |
147 | 144 |
148 int MacroAssembler::CallSize(Handle<Code> code, | 145 int MacroAssembler::CallSize(Handle<Code> code, |
149 RelocInfo::Mode rmode, | 146 RelocInfo::Mode rmode, |
150 TypeFeedbackId ast_id, | 147 TypeFeedbackId ast_id, |
151 Condition cond) { | 148 Condition cond) { |
152 AllowDeferredHandleDereference using_raw_address; | 149 AllowDeferredHandleDereference using_raw_address; |
153 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); | 150 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); |
154 } | 151 } |
155 | 152 |
156 | 153 |
157 void MacroAssembler::Call(Handle<Code> code, | 154 void MacroAssembler::Call(Handle<Code> code, |
158 RelocInfo::Mode rmode, | 155 RelocInfo::Mode rmode, |
159 TypeFeedbackId ast_id, | 156 TypeFeedbackId ast_id, |
160 Condition cond, | 157 Condition cond) { |
161 TargetAddressStorageMode mode) { | 158 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 159 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 160 |
| 161 #ifdef DEBUG |
| 162 // Check the expected size before generating code to ensure we assume the same |
| 163 // constant pool availability (e.g., whether constant pool is full or not). |
| 164 int expected_size = CallSize(code, rmode, ast_id, cond); |
162 Label start; | 165 Label start; |
163 bind(&start); | 166 bind(&start); |
164 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 167 #endif |
| 168 |
165 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { | 169 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { |
166 SetRecordedAstId(ast_id); | 170 SetRecordedAstId(ast_id); |
167 rmode = RelocInfo::CODE_TARGET_WITH_ID; | 171 rmode = RelocInfo::CODE_TARGET_WITH_ID; |
168 } | 172 } |
169 // 'code' is always generated ARM code, never THUMB code | 173 AllowDeferredHandleDereference using_raw_address; |
170 AllowDeferredHandleDereference embedding_raw_address; | 174 Call(reinterpret_cast<Address>(code.location()), rmode, cond); |
171 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode); | 175 ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); |
172 } | 176 } |
173 | 177 |
174 | 178 |
175 void MacroAssembler::Ret(Condition cond) { | 179 void MacroAssembler::Ret(Condition cond) { |
176 bx(lr, cond); | 180 ASSERT(cond == al); |
| 181 blr(); |
177 } | 182 } |
178 | 183 |
179 | 184 |
180 void MacroAssembler::Drop(int count, Condition cond) { | 185 void MacroAssembler::Drop(int count, Condition cond) { |
| 186 ASSERT(cond == al); |
181 if (count > 0) { | 187 if (count > 0) { |
182 add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond); | 188 Add(sp, sp, count * kPointerSize, r0); |
183 } | 189 } |
184 } | 190 } |
185 | 191 |
186 | 192 |
187 void MacroAssembler::Ret(int drop, Condition cond) { | 193 void MacroAssembler::Ret(int drop, Condition cond) { |
188 Drop(drop, cond); | 194 Drop(drop, cond); |
189 Ret(cond); | 195 Ret(cond); |
190 } | 196 } |
191 | 197 |
192 | 198 |
193 void MacroAssembler::Swap(Register reg1, | |
194 Register reg2, | |
195 Register scratch, | |
196 Condition cond) { | |
197 if (scratch.is(no_reg)) { | |
198 eor(reg1, reg1, Operand(reg2), LeaveCC, cond); | |
199 eor(reg2, reg2, Operand(reg1), LeaveCC, cond); | |
200 eor(reg1, reg1, Operand(reg2), LeaveCC, cond); | |
201 } else { | |
202 mov(scratch, reg1, LeaveCC, cond); | |
203 mov(reg1, reg2, LeaveCC, cond); | |
204 mov(reg2, scratch, LeaveCC, cond); | |
205 } | |
206 } | |
207 | |
208 | |
209 void MacroAssembler::Call(Label* target) { | 199 void MacroAssembler::Call(Label* target) { |
210 bl(target); | 200 b(target, SetLK); |
211 } | 201 } |
212 | 202 |
213 | 203 |
214 void MacroAssembler::Push(Handle<Object> handle) { | 204 void MacroAssembler::Push(Handle<Object> handle) { |
215 mov(ip, Operand(handle)); | 205 mov(ip, Operand(handle)); |
216 push(ip); | 206 push(ip); |
217 } | 207 } |
218 | 208 |
219 | 209 |
220 void MacroAssembler::Move(Register dst, Handle<Object> value) { | 210 void MacroAssembler::Move(Register dst, Handle<Object> value) { |
221 AllowDeferredHandleDereference smi_check; | 211 AllowDeferredHandleDereference smi_check; |
222 if (value->IsSmi()) { | 212 if (value->IsSmi()) { |
223 mov(dst, Operand(value)); | 213 LoadSmiLiteral(dst, reinterpret_cast<Smi *>(*value)); |
224 } else { | 214 } else { |
225 ASSERT(value->IsHeapObject()); | 215 ASSERT(value->IsHeapObject()); |
226 if (isolate()->heap()->InNewSpace(*value)) { | 216 if (isolate()->heap()->InNewSpace(*value)) { |
227 Handle<Cell> cell = isolate()->factory()->NewCell(value); | 217 Handle<Cell> cell = isolate()->factory()->NewCell(value); |
228 mov(dst, Operand(cell)); | 218 mov(dst, Operand(cell)); |
229 ldr(dst, FieldMemOperand(dst, Cell::kValueOffset)); | 219 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset)); |
230 } else { | 220 } else { |
231 mov(dst, Operand(value)); | 221 mov(dst, Operand(value)); |
232 } | 222 } |
233 } | 223 } |
234 } | 224 } |
235 | 225 |
236 | 226 |
237 void MacroAssembler::Move(Register dst, Register src, Condition cond) { | 227 void MacroAssembler::Move(Register dst, Register src, Condition cond) { |
| 228 ASSERT(cond == al); |
238 if (!dst.is(src)) { | 229 if (!dst.is(src)) { |
239 mov(dst, src, LeaveCC, cond); | 230 mr(dst, src); |
240 } | 231 } |
241 } | 232 } |
242 | 233 |
243 | 234 |
244 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { | 235 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { |
245 if (!dst.is(src)) { | 236 if (!dst.is(src)) { |
246 vmov(dst, src); | 237 fmr(dst, src); |
247 } | 238 } |
248 } | 239 } |
249 | 240 |
250 | 241 |
251 void MacroAssembler::Mls(Register dst, Register src1, Register src2, | 242 void MacroAssembler::MultiPush(RegList regs) { |
252 Register srcA, Condition cond) { | 243 int16_t num_to_push = NumberOfBitsSet(regs); |
253 if (CpuFeatures::IsSupported(MLS)) { | 244 int16_t stack_offset = num_to_push * kPointerSize; |
254 CpuFeatureScope scope(this, MLS); | 245 |
255 mls(dst, src1, src2, srcA, cond); | 246 subi(sp, sp, Operand(stack_offset)); |
256 } else { | 247 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
257 ASSERT(!dst.is(srcA)); | 248 if ((regs & (1 << i)) != 0) { |
258 mul(ip, src1, src2, LeaveCC, cond); | 249 stack_offset -= kPointerSize; |
259 sub(dst, srcA, ip, LeaveCC, cond); | 250 StoreP(ToRegister(i), MemOperand(sp, stack_offset)); |
| 251 } |
260 } | 252 } |
261 } | 253 } |
262 | 254 |
263 | 255 |
264 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, | 256 void MacroAssembler::MultiPop(RegList regs) { |
265 Condition cond) { | 257 int16_t stack_offset = 0; |
266 if (!src2.is_reg() && | 258 |
267 !src2.must_output_reloc_info(this) && | 259 for (int16_t i = 0; i < kNumRegisters; i++) { |
268 src2.immediate() == 0) { | 260 if ((regs & (1 << i)) != 0) { |
269 mov(dst, Operand::Zero(), LeaveCC, cond); | 261 LoadP(ToRegister(i), MemOperand(sp, stack_offset)); |
270 } else if (!(src2.instructions_required(this) == 1) && | 262 stack_offset += kPointerSize; |
271 !src2.must_output_reloc_info(this) && | 263 } |
272 CpuFeatures::IsSupported(ARMv7) && | |
273 IsPowerOf2(src2.immediate() + 1)) { | |
274 ubfx(dst, src1, 0, | |
275 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); | |
276 } else { | |
277 and_(dst, src1, src2, LeaveCC, cond); | |
278 } | 264 } |
279 } | 265 addi(sp, sp, Operand(stack_offset)); |
280 | |
281 | |
282 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, | |
283 Condition cond) { | |
284 ASSERT(lsb < 32); | |
285 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
286 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
287 and_(dst, src1, Operand(mask), LeaveCC, cond); | |
288 if (lsb != 0) { | |
289 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); | |
290 } | |
291 } else { | |
292 ubfx(dst, src1, lsb, width, cond); | |
293 } | |
294 } | |
295 | |
296 | |
297 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, | |
298 Condition cond) { | |
299 ASSERT(lsb < 32); | |
300 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
301 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
302 and_(dst, src1, Operand(mask), LeaveCC, cond); | |
303 int shift_up = 32 - lsb - width; | |
304 int shift_down = lsb + shift_up; | |
305 if (shift_up != 0) { | |
306 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond); | |
307 } | |
308 if (shift_down != 0) { | |
309 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); | |
310 } | |
311 } else { | |
312 sbfx(dst, src1, lsb, width, cond); | |
313 } | |
314 } | |
315 | |
316 | |
317 void MacroAssembler::Bfi(Register dst, | |
318 Register src, | |
319 Register scratch, | |
320 int lsb, | |
321 int width, | |
322 Condition cond) { | |
323 ASSERT(0 <= lsb && lsb < 32); | |
324 ASSERT(0 <= width && width < 32); | |
325 ASSERT(lsb + width < 32); | |
326 ASSERT(!scratch.is(dst)); | |
327 if (width == 0) return; | |
328 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
329 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
330 bic(dst, dst, Operand(mask)); | |
331 and_(scratch, src, Operand((1 << width) - 1)); | |
332 mov(scratch, Operand(scratch, LSL, lsb)); | |
333 orr(dst, dst, scratch); | |
334 } else { | |
335 bfi(dst, src, lsb, width, cond); | |
336 } | |
337 } | |
338 | |
339 | |
340 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, | |
341 Condition cond) { | |
342 ASSERT(lsb < 32); | |
343 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
344 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | |
345 bic(dst, src, Operand(mask)); | |
346 } else { | |
347 Move(dst, src, cond); | |
348 bfc(dst, lsb, width, cond); | |
349 } | |
350 } | |
351 | |
352 | |
353 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, | |
354 Condition cond) { | |
355 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | |
356 ASSERT(!dst.is(pc) && !src.rm().is(pc)); | |
357 ASSERT((satpos >= 0) && (satpos <= 31)); | |
358 | |
359 // These asserts are required to ensure compatibility with the ARMv7 | |
360 // implementation. | |
361 ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL)); | |
362 ASSERT(src.rs().is(no_reg)); | |
363 | |
364 Label done; | |
365 int satval = (1 << satpos) - 1; | |
366 | |
367 if (cond != al) { | |
368 b(NegateCondition(cond), &done); // Skip saturate if !condition. | |
369 } | |
370 if (!(src.is_reg() && dst.is(src.rm()))) { | |
371 mov(dst, src); | |
372 } | |
373 tst(dst, Operand(~satval)); | |
374 b(eq, &done); | |
375 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative. | |
376 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive. | |
377 bind(&done); | |
378 } else { | |
379 usat(dst, satpos, src, cond); | |
380 } | |
381 } | |
382 | |
383 | |
384 void MacroAssembler::Load(Register dst, | |
385 const MemOperand& src, | |
386 Representation r) { | |
387 ASSERT(!r.IsDouble()); | |
388 if (r.IsInteger8()) { | |
389 ldrsb(dst, src); | |
390 } else if (r.IsUInteger8()) { | |
391 ldrb(dst, src); | |
392 } else if (r.IsInteger16()) { | |
393 ldrsh(dst, src); | |
394 } else if (r.IsUInteger16()) { | |
395 ldrh(dst, src); | |
396 } else { | |
397 ldr(dst, src); | |
398 } | |
399 } | |
400 | |
401 | |
402 void MacroAssembler::Store(Register src, | |
403 const MemOperand& dst, | |
404 Representation r) { | |
405 ASSERT(!r.IsDouble()); | |
406 if (r.IsInteger8() || r.IsUInteger8()) { | |
407 strb(src, dst); | |
408 } else if (r.IsInteger16() || r.IsUInteger16()) { | |
409 strh(src, dst); | |
410 } else { | |
411 if (r.IsHeapObject()) { | |
412 AssertNotSmi(src); | |
413 } else if (r.IsSmi()) { | |
414 AssertSmi(src); | |
415 } | |
416 str(src, dst); | |
417 } | |
418 } | 266 } |
419 | 267 |
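Worked example of the MultiPush/MultiPop layout above, assuming a hypothetical RegList with only r3, r4 and r5 set: num_to_push is 3, sp drops by 3 * kPointerSize, and the store loop walks register codes from high to low, giving

    sp + 2 * kPointerSize  <- r5
    sp + 1 * kPointerSize  <- r4
    sp + 0                 <- r3   (lowest code ends up closest to sp)

MultiPop walks the bits in the opposite direction and reads the same slots back; this lowest-code-nearest-sp property is what SafepointRegisterStackIndex further down relies on.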
420 | 268 |
421 void MacroAssembler::LoadRoot(Register destination, | 269 void MacroAssembler::LoadRoot(Register destination, |
422 Heap::RootListIndex index, | 270 Heap::RootListIndex index, |
423 Condition cond) { | 271 Condition cond) { |
424 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && | 272 ASSERT(cond == al); |
425 isolate()->heap()->RootCanBeTreatedAsConstant(index) && | 273 LoadP(destination, MemOperand(kRootRegister, |
426 !predictable_code_size()) { | 274 index << kPointerSizeLog2), r0); |
427 // The CPU supports fast immediate values, and this root will never | |
428 // change. We will load it as a relocatable immediate value. | |
429 Handle<Object> root(&isolate()->heap()->roots_array_start()[index]); | |
430 mov(destination, Operand(root), LeaveCC, cond); | |
431 return; | |
432 } | |
433 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); | |
434 } | 275 } |
435 | 276 |
436 | 277 |
437 void MacroAssembler::StoreRoot(Register source, | 278 void MacroAssembler::StoreRoot(Register source, |
438 Heap::RootListIndex index, | 279 Heap::RootListIndex index, |
439 Condition cond) { | 280 Condition cond) { |
440 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); | 281 ASSERT(cond == al); |
| 282 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0); |
441 } | 283 } |
442 | 284 |
443 | 285 |
444 void MacroAssembler::InNewSpace(Register object, | 286 void MacroAssembler::InNewSpace(Register object, |
445 Register scratch, | 287 Register scratch, |
446 Condition cond, | 288 Condition cond, |
447 Label* branch) { | 289 Label* branch) { |
| 290 // N.B. scratch may be the same register as object. |
448 ASSERT(cond == eq || cond == ne); | 291 ASSERT(cond == eq || cond == ne); |
449 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); | 292 mov(r0, Operand(ExternalReference::new_space_mask(isolate()))); |
450 cmp(scratch, Operand(ExternalReference::new_space_start(isolate()))); | 293 and_(scratch, object, r0); |
| 294 mov(r0, Operand(ExternalReference::new_space_start(isolate()))); |
| 295 cmp(scratch, r0); |
451 b(cond, branch); | 296 b(cond, branch); |
452 } | 297 } |
453 | 298 |
454 | 299 |
455 void MacroAssembler::RecordWriteField( | 300 void MacroAssembler::RecordWriteField( |
456 Register object, | 301 Register object, |
457 int offset, | 302 int offset, |
458 Register value, | 303 Register value, |
459 Register dst, | 304 Register dst, |
460 LinkRegisterStatus lr_status, | 305 LinkRegisterStatus lr_status, |
461 SaveFPRegsMode save_fp, | 306 SaveFPRegsMode save_fp, |
462 RememberedSetAction remembered_set_action, | 307 RememberedSetAction remembered_set_action, |
463 SmiCheck smi_check, | 308 SmiCheck smi_check, |
464 PointersToHereCheck pointers_to_here_check_for_value) { | 309 PointersToHereCheck pointers_to_here_check_for_value) { |
465 // First, check if a write barrier is even needed. The tests below | 310 // First, check if a write barrier is even needed. The tests below |
466 // catch stores of Smis. | 311 // catch stores of Smis. |
467 Label done; | 312 Label done; |
468 | 313 |
469 // Skip barrier if writing a smi. | 314 // Skip barrier if writing a smi. |
470 if (smi_check == INLINE_SMI_CHECK) { | 315 if (smi_check == INLINE_SMI_CHECK) { |
471 JumpIfSmi(value, &done); | 316 JumpIfSmi(value, &done); |
472 } | 317 } |
473 | 318 |
474 // Although the object register is tagged, the offset is relative to the start | 319 // Although the object register is tagged, the offset is relative to the start |
475 // of the object, so the offset must be a multiple of kPointerSize. | 320 // of the object, so the offset must be a multiple of kPointerSize. |
476 ASSERT(IsAligned(offset, kPointerSize)); | 321 ASSERT(IsAligned(offset, kPointerSize)); |
477 | 322 |
478 add(dst, object, Operand(offset - kHeapObjectTag)); | 323 Add(dst, object, offset - kHeapObjectTag, r0); |
479 if (emit_debug_code()) { | 324 if (emit_debug_code()) { |
480 Label ok; | 325 Label ok; |
481 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); | 326 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1)); |
482 b(eq, &ok); | 327 beq(&ok, cr0); |
483 stop("Unaligned cell in write barrier"); | 328 stop("Unaligned cell in write barrier"); |
484 bind(&ok); | 329 bind(&ok); |
485 } | 330 } |
486 | 331 |
487 RecordWrite(object, | 332 RecordWrite(object, |
488 dst, | 333 dst, |
489 value, | 334 value, |
490 lr_status, | 335 lr_status, |
491 save_fp, | 336 save_fp, |
492 remembered_set_action, | 337 remembered_set_action, |
493 OMIT_SMI_CHECK, | 338 OMIT_SMI_CHECK, |
494 pointers_to_here_check_for_value); | 339 pointers_to_here_check_for_value); |
495 | 340 |
496 bind(&done); | 341 bind(&done); |
497 | 342 |
498 // Clobber clobbered input registers when running with the debug-code flag | 343 // Clobber clobbered input registers when running with the debug-code flag |
499 // turned on to provoke errors. | 344 // turned on to provoke errors. |
500 if (emit_debug_code()) { | 345 if (emit_debug_code()) { |
501 mov(value, Operand(BitCast<int32_t>(kZapValue + 4))); | 346 mov(value, Operand(BitCast<intptr_t>(kZapValue + 4))); |
502 mov(dst, Operand(BitCast<int32_t>(kZapValue + 8))); | 347 mov(dst, Operand(BitCast<intptr_t>(kZapValue + 8))); |
503 } | 348 } |
504 } | 349 } |
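A pattern that recurs throughout this port: ARM's tst + b(eq, ...) becomes a record-form AND. andi() (PPC's andi., which always records) and and_(..., SetRC) update condition-register field cr0 as a side effect, so the branch that follows must name cr0 explicitly. Sketch of the correspondence:

    // ARM                          // PPC
    tst(dst, Operand(mask));        andi(r0, dst, Operand(mask));  // andi. sets cr0
    b(eq, &ok);                     beq(&ok, cr0);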
505 | 350 |
506 | 351 |
507 // Will clobber 4 registers: object, map, dst, ip. The | 352 // Will clobber 4 registers: object, map, dst, ip. The |
508 // register 'object' contains a heap object pointer. | 353 // register 'object' contains a heap object pointer. |
509 void MacroAssembler::RecordWriteForMap(Register object, | 354 void MacroAssembler::RecordWriteForMap(Register object, |
510 Register map, | 355 Register map, |
511 Register dst, | 356 Register dst, |
512 LinkRegisterStatus lr_status, | 357 LinkRegisterStatus lr_status, |
513 SaveFPRegsMode fp_mode) { | 358 SaveFPRegsMode fp_mode) { |
514 if (emit_debug_code()) { | 359 if (emit_debug_code()) { |
515 ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset)); | 360 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset)); |
516 cmp(dst, Operand(isolate()->factory()->meta_map())); | 361 Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0); |
517 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 362 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
518 } | 363 } |
519 | 364 |
520 if (!FLAG_incremental_marking) { | 365 if (!FLAG_incremental_marking) { |
521 return; | 366 return; |
522 } | 367 } |
523 | 368 |
524 // Count number of write barriers in generated code. | 369 // Count number of write barriers in generated code. |
525 isolate()->counters()->write_barriers_static()->Increment(); | 370 isolate()->counters()->write_barriers_static()->Increment(); |
526 // TODO(mstarzinger): Dynamic counter missing. | 371 // TODO(mstarzinger): Dynamic counter missing. |
527 | 372 |
528 if (emit_debug_code()) { | 373 if (emit_debug_code()) { |
529 ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset)); | 374 LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset)); |
530 cmp(ip, map); | 375 cmp(ip, map); |
531 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 376 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
532 } | 377 } |
533 | 378 |
534 Label done; | 379 Label done; |
535 | 380 |
536 // A single check of the map's pages interesting flag suffices, since it is | 381 // A single check of the map's pages interesting flag suffices, since it is |
537 // only set during incremental collection, and then it's also guaranteed that | 382 // only set during incremental collection, and then it's also guaranteed that |
538 // the from object's page's interesting flag is also set. This optimization | 383 // the from object's page's interesting flag is also set. This optimization |
539 // relies on the fact that maps can never be in new space. | 384 // relies on the fact that maps can never be in new space. |
540 CheckPageFlag(map, | 385 CheckPageFlag(map, |
541 map, // Used as scratch. | 386 map, // Used as scratch. |
542 MemoryChunk::kPointersToHereAreInterestingMask, | 387 MemoryChunk::kPointersToHereAreInterestingMask, |
543 eq, | 388 eq, |
544 &done); | 389 &done); |
545 | 390 |
546 add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); | 391 addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); |
547 if (emit_debug_code()) { | 392 if (emit_debug_code()) { |
548 Label ok; | 393 Label ok; |
549 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); | 394 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1)); |
550 b(eq, &ok); | 395 beq(&ok, cr0); |
551 stop("Unaligned cell in write barrier"); | 396 stop("Unaligned cell in write barrier"); |
552 bind(&ok); | 397 bind(&ok); |
553 } | 398 } |
554 | 399 |
555 // Record the actual write. | 400 // Record the actual write. |
556 if (lr_status == kLRHasNotBeenSaved) { | 401 if (lr_status == kLRHasNotBeenSaved) { |
557 push(lr); | 402 mflr(r0); |
| 403 push(r0); |
558 } | 404 } |
559 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, | 405 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, |
560 fp_mode); | 406 fp_mode); |
561 CallStub(&stub); | 407 CallStub(&stub); |
562 if (lr_status == kLRHasNotBeenSaved) { | 408 if (lr_status == kLRHasNotBeenSaved) { |
563 pop(lr); | 409 pop(r0); |
| 410 mtlr(r0); |
564 } | 411 } |
565 | 412 |
566 bind(&done); | 413 bind(&done); |
567 | 414 |
568 // Clobber clobbered registers when running with the debug-code flag | 415 // Clobber clobbered registers when running with the debug-code flag |
569 // turned on to provoke errors. | 416 // turned on to provoke errors. |
570 if (emit_debug_code()) { | 417 if (emit_debug_code()) { |
571 mov(dst, Operand(BitCast<int32_t>(kZapValue + 12))); | 418 mov(dst, Operand(BitCast<intptr_t>(kZapValue + 12))); |
572 mov(map, Operand(BitCast<int32_t>(kZapValue + 16))); | 419 mov(map, Operand(BitCast<intptr_t>(kZapValue + 16))); |
573 } | 420 } |
574 } | 421 } |
575 | 422 |
576 | 423 |
577 // Will clobber 4 registers: object, address, scratch, ip. The | 424 // Will clobber 4 registers: object, address, scratch, ip. The |
578 // register 'object' contains a heap object pointer. The heap object | 425 // register 'object' contains a heap object pointer. The heap object |
579 // tag is shifted away. | 426 // tag is shifted away. |
580 void MacroAssembler::RecordWrite( | 427 void MacroAssembler::RecordWrite( |
581 Register object, | 428 Register object, |
582 Register address, | 429 Register address, |
583 Register value, | 430 Register value, |
584 LinkRegisterStatus lr_status, | 431 LinkRegisterStatus lr_status, |
585 SaveFPRegsMode fp_mode, | 432 SaveFPRegsMode fp_mode, |
586 RememberedSetAction remembered_set_action, | 433 RememberedSetAction remembered_set_action, |
587 SmiCheck smi_check, | 434 SmiCheck smi_check, |
588 PointersToHereCheck pointers_to_here_check_for_value) { | 435 PointersToHereCheck pointers_to_here_check_for_value) { |
589 ASSERT(!object.is(value)); | 436 ASSERT(!object.is(value)); |
590 if (emit_debug_code()) { | 437 if (emit_debug_code()) { |
591 ldr(ip, MemOperand(address)); | 438 LoadP(ip, MemOperand(address)); |
592 cmp(ip, value); | 439 cmp(ip, value); |
593 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 440 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
594 } | 441 } |
595 | 442 |
596 if (remembered_set_action == OMIT_REMEMBERED_SET && | 443 if (remembered_set_action == OMIT_REMEMBERED_SET && |
597 !FLAG_incremental_marking) { | 444 !FLAG_incremental_marking) { |
598 return; | 445 return; |
599 } | 446 } |
600 | 447 |
601 // Count number of write barriers in generated code. | 448 // Count number of write barriers in generated code. |
(...skipping 16 matching lines...) |
618 &done); | 465 &done); |
619 } | 466 } |
620 CheckPageFlag(object, | 467 CheckPageFlag(object, |
621 value, // Used as scratch. | 468 value, // Used as scratch. |
622 MemoryChunk::kPointersFromHereAreInterestingMask, | 469 MemoryChunk::kPointersFromHereAreInterestingMask, |
623 eq, | 470 eq, |
624 &done); | 471 &done); |
625 | 472 |
626 // Record the actual write. | 473 // Record the actual write. |
627 if (lr_status == kLRHasNotBeenSaved) { | 474 if (lr_status == kLRHasNotBeenSaved) { |
628 push(lr); | 475 mflr(r0); |
| 476 push(r0); |
629 } | 477 } |
630 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, | 478 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, |
631 fp_mode); | 479 fp_mode); |
632 CallStub(&stub); | 480 CallStub(&stub); |
633 if (lr_status == kLRHasNotBeenSaved) { | 481 if (lr_status == kLRHasNotBeenSaved) { |
634 pop(lr); | 482 pop(r0); |
| 483 mtlr(r0); |
635 } | 484 } |
636 | 485 |
637 bind(&done); | 486 bind(&done); |
638 | 487 |
639 // Clobber clobbered registers when running with the debug-code flag | 488 // Clobber clobbered registers when running with the debug-code flag |
640 // turned on to provoke errors. | 489 // turned on to provoke errors. |
641 if (emit_debug_code()) { | 490 if (emit_debug_code()) { |
642 mov(address, Operand(BitCast<int32_t>(kZapValue + 12))); | 491 mov(address, Operand(BitCast<intptr_t>(kZapValue + 12))); |
643 mov(value, Operand(BitCast<int32_t>(kZapValue + 16))); | 492 mov(value, Operand(BitCast<intptr_t>(kZapValue + 16))); |
644 } | 493 } |
645 } | 494 } |
646 | 495 |
647 | 496 |
648 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | 497 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. |
649 Register address, | 498 Register address, |
650 Register scratch, | 499 Register scratch, |
651 SaveFPRegsMode fp_mode, | 500 SaveFPRegsMode fp_mode, |
652 RememberedSetFinalAction and_then) { | 501 RememberedSetFinalAction and_then) { |
653 Label done; | 502 Label done; |
654 if (emit_debug_code()) { | 503 if (emit_debug_code()) { |
655 Label ok; | 504 Label ok; |
656 JumpIfNotInNewSpace(object, scratch, &ok); | 505 JumpIfNotInNewSpace(object, scratch, &ok); |
657 stop("Remembered set pointer is in new space"); | 506 stop("Remembered set pointer is in new space"); |
658 bind(&ok); | 507 bind(&ok); |
659 } | 508 } |
660 // Load store buffer top. | 509 // Load store buffer top. |
661 ExternalReference store_buffer = | 510 ExternalReference store_buffer = |
662 ExternalReference::store_buffer_top(isolate()); | 511 ExternalReference::store_buffer_top(isolate()); |
663 mov(ip, Operand(store_buffer)); | 512 mov(ip, Operand(store_buffer)); |
664 ldr(scratch, MemOperand(ip)); | 513 LoadP(scratch, MemOperand(ip)); |
665 // Store pointer to buffer and increment buffer top. | 514 // Store pointer to buffer and increment buffer top. |
666 str(address, MemOperand(scratch, kPointerSize, PostIndex)); | 515 StoreP(address, MemOperand(scratch)); |
| 516 addi(scratch, scratch, Operand(kPointerSize)); |
667 // Write back new top of buffer. | 517 // Write back new top of buffer. |
668 str(scratch, MemOperand(ip)); | 518 StoreP(scratch, MemOperand(ip)); |
669 // Call stub on end of buffer. | 519 // Call stub on end of buffer. |
670 // Check for end of buffer. | 520 // Check for end of buffer. |
671 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); | 521 mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit)); |
| 522 and_(r0, scratch, r0, SetRC); |
| 523 |
672 if (and_then == kFallThroughAtEnd) { | 524 if (and_then == kFallThroughAtEnd) { |
673 b(eq, &done); | 525 beq(&done, cr0); |
674 } else { | 526 } else { |
675 ASSERT(and_then == kReturnAtEnd); | 527 ASSERT(and_then == kReturnAtEnd); |
676 Ret(eq); | 528 beq(&done, cr0); |
677 } | 529 } |
678 push(lr); | 530 mflr(r0); |
| 531 push(r0); |
679 StoreBufferOverflowStub store_buffer_overflow = | 532 StoreBufferOverflowStub store_buffer_overflow = |
680 StoreBufferOverflowStub(isolate(), fp_mode); | 533 StoreBufferOverflowStub(isolate(), fp_mode); |
681 CallStub(&store_buffer_overflow); | 534 CallStub(&store_buffer_overflow); |
682 pop(lr); | 535 pop(r0); |
| 536 mtlr(r0); |
683 bind(&done); | 537 bind(&done); |
684 if (and_then == kReturnAtEnd) { | 538 if (and_then == kReturnAtEnd) { |
685 Ret(); | 539 Ret(); |
686 } | 540 } |
687 } | 541 } |
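The body above is the PPC rendering of the store-buffer fast path: store the slot address at the current top, bump the top, write it back, and take the slow path only when the new top has the overflow bit set. A host-side C++ sketch of the same logic (the bit value is assumed for illustration; the real constant lives in the store-buffer code):

    #include <cstdint>

    constexpr uintptr_t kStoreBufferOverflowBit = uintptr_t{1} << 18;  // assumed value

    // Returns true when the overflow stub would need to run.
    bool AppendToStoreBuffer(uintptr_t** top_slot, uintptr_t slot_address) {
      uintptr_t* top = *top_slot;
      *top = slot_address;                 // StoreP(address, MemOperand(scratch))
      top += 1;                            // addi(scratch, scratch, kPointerSize)
      *top_slot = top;                     // StoreP(scratch, MemOperand(ip))
      // and_(r0, scratch, r0, SetRC); beq(&done, cr0)
      return (reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0;
    }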
688 | 542 |
689 | 543 |
690 void MacroAssembler::PushFixedFrame(Register marker_reg) { | 544 void MacroAssembler::PushFixedFrame(Register marker_reg) { |
691 ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code()); | 545 mflr(r0); |
692 stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | | 546 #if V8_OOL_CONSTANT_POOL |
693 cp.bit() | | 547 if (marker_reg.is_valid()) { |
694 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | | 548 Push(r0, fp, kConstantPoolRegister, cp, marker_reg); |
695 fp.bit() | | 549 } else { |
696 lr.bit()); | 550 Push(r0, fp, kConstantPoolRegister, cp); |
| 551 } |
| 552 #else |
| 553 if (marker_reg.is_valid()) { |
| 554 Push(r0, fp, cp, marker_reg); |
| 555 } else { |
| 556 Push(r0, fp, cp); |
| 557 } |
| 558 #endif |
697 } | 559 } |
698 | 560 |
699 | 561 |
700 void MacroAssembler::PopFixedFrame(Register marker_reg) { | 562 void MacroAssembler::PopFixedFrame(Register marker_reg) { |
701 ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code()); | 563 #if V8_OOL_CONSTANT_POOL |
702 ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | | 564 if (marker_reg.is_valid()) { |
703 cp.bit() | | 565 Pop(r0, fp, kConstantPoolRegister, cp, marker_reg); |
704 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | | 566 } else { |
705 fp.bit() | | 567 Pop(r0, fp, kConstantPoolRegister, cp); |
706 lr.bit()); | 568 } |
| 569 #else |
| 570 if (marker_reg.is_valid()) { |
| 571 Pop(r0, fp, cp, marker_reg); |
| 572 } else { |
| 573 Pop(r0, fp, cp); |
| 574 } |
| 575 #endif |
| 576 mtlr(r0); |
707 } | 577 } |
708 | 578 |
709 | 579 |
710 // Push and pop all registers that can hold pointers. | 580 // Push and pop all registers that can hold pointers. |
711 void MacroAssembler::PushSafepointRegisters() { | 581 void MacroAssembler::PushSafepointRegisters() { |
712 // Safepoints expect a block of contiguous register values starting with r0: | |
713 ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters); | |
714 // Safepoints expect a block of kNumSafepointRegisters values on the | 582 // Safepoints expect a block of kNumSafepointRegisters values on the |
715 // stack, so adjust the stack for unsaved registers. | 583 // stack, so adjust the stack for unsaved registers. |
716 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 584 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
717 ASSERT(num_unsaved >= 0); | 585 ASSERT(num_unsaved >= 0); |
718 sub(sp, sp, Operand(num_unsaved * kPointerSize)); | 586 if (num_unsaved > 0) { |
719 stm(db_w, sp, kSafepointSavedRegisters); | 587 subi(sp, sp, Operand(num_unsaved * kPointerSize)); |
| 588 } |
| 589 MultiPush(kSafepointSavedRegisters); |
720 } | 590 } |
721 | 591 |
722 | 592 |
723 void MacroAssembler::PopSafepointRegisters() { | 593 void MacroAssembler::PopSafepointRegisters() { |
724 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 594 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
725 ldm(ia_w, sp, kSafepointSavedRegisters); | 595 MultiPop(kSafepointSavedRegisters); |
726 add(sp, sp, Operand(num_unsaved * kPointerSize)); | 596 if (num_unsaved > 0) { |
| 597 addi(sp, sp, Operand(num_unsaved * kPointerSize)); |
| 598 } |
727 } | 599 } |
728 | 600 |
729 | 601 |
730 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { | 602 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { |
731 str(src, SafepointRegisterSlot(dst)); | 603 StoreP(src, SafepointRegisterSlot(dst)); |
732 } | 604 } |
733 | 605 |
734 | 606 |
735 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { | 607 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { |
736 ldr(dst, SafepointRegisterSlot(src)); | 608 LoadP(dst, SafepointRegisterSlot(src)); |
737 } | 609 } |
738 | 610 |
739 | 611 |
740 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 612 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
741 // The registers are pushed starting with the highest encoding, | 613 // The registers are pushed starting with the highest encoding, |
742 // which means that lowest encodings are closest to the stack pointer. | 614 // which means that lowest encodings are closest to the stack pointer. |
743 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); | 615 RegList regs = kSafepointSavedRegisters; |
744 return reg_code; | 616 int index = 0; |
| 617 |
| 618 ASSERT(reg_code >= 0 && reg_code < kNumRegisters); |
| 619 |
| 620 for (int16_t i = 0; i < reg_code; i++) { |
| 621 if ((regs & (1 << i)) != 0) { |
| 622 index++; |
| 623 } |
| 624 } |
| 625 |
| 626 return index; |
745 } | 627 } |
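Unlike the ARM version, where the safepoint-saved registers form a dense block starting at r0 and the register code is its own slot index, the PPC save set is sparse, so the slot index is the count of saved registers with a smaller code. Standalone sketch with a hypothetical RegList:

    #include <cstdint>

    int SafepointIndex(uint32_t saved_regs, int reg_code) {
      int index = 0;
      for (int i = 0; i < reg_code; i++) {
        if (saved_regs & (1u << i)) index++;   // count saved registers below reg_code
      }
      return index;
    }

    // With saved_regs = 0b111000 (r3..r5, as in the MultiPush example above),
    // SafepointIndex(saved_regs, 4) == 1, i.e. r4 lives at sp + 1 * kPointerSize.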
746 | 628 |
747 | 629 |
748 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { | 630 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { |
749 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); | 631 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); |
750 } | 632 } |
751 | 633 |
752 | 634 |
753 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { | 635 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { |
754 // Number of d-regs not known at snapshot time. | |
755 ASSERT(!serializer_enabled()); | |
756 // General purpose registers are pushed last on the stack. | 636 // General purpose registers are pushed last on the stack. |
757 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; | 637 int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize; |
758 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; | 638 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; |
759 return MemOperand(sp, doubles_size + register_offset); | 639 return MemOperand(sp, doubles_size + register_offset); |
760 } | 640 } |
761 | 641 |
762 | 642 |
763 void MacroAssembler::Ldrd(Register dst1, Register dst2, | 643 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst, |
764 const MemOperand& src, Condition cond) { | 644 const DoubleRegister src) { |
765 ASSERT(src.rm().is(no_reg)); | 645 Label done; |
766 ASSERT(!dst1.is(lr)); // r14. | 646 |
767 | 647 // Test for NaN |
768 // V8 does not use this addressing mode, so the fallback code | 648 fcmpu(src, src); |
769 // below doesn't support it yet. | 649 |
770 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); | 650 if (dst.is(src)) { |
771 | 651 bordered(&done); |
772 // Generate two ldr instructions if ldrd is not available. | |
773 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && | |
774 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) { | |
775 CpuFeatureScope scope(this, ARMv7); | |
776 ldrd(dst1, dst2, src, cond); | |
777 } else { | 652 } else { |
778 if ((src.am() == Offset) || (src.am() == NegOffset)) { | 653 Label is_nan; |
779 MemOperand src2(src); | 654 bunordered(&is_nan); |
780 src2.set_offset(src2.offset() + 4); | 655 fmr(dst, src); |
781 if (dst1.is(src.rn())) { | 656 b(&done); |
782 ldr(dst2, src2, cond); | 657 bind(&is_nan); |
783 ldr(dst1, src, cond); | 658 } |
784 } else { | 659 |
785 ldr(dst1, src, cond); | 660 // Replace with canonical NaN. |
786 ldr(dst2, src2, cond); | 661 double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double(); |
787 } | 662 LoadDoubleLiteral(dst, nan_value, r0); |
788 } else { // PostIndex or NegPostIndex. | 663 |
789 ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex)); | 664 bind(&done); |
790 if (dst1.is(src.rn())) { | 665 } |
791 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond); | 666 |
792 ldr(dst1, src, cond); | 667 |
793 } else { | 668 void MacroAssembler::ConvertIntToDouble(Register src, |
794 MemOperand src2(src); | 669 DoubleRegister double_dst) { |
795 src2.set_offset(src2.offset() - 4); | 670 ASSERT(!src.is(r0)); |
796 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond); | 671 |
797 ldr(dst2, src2, cond); | 672 subi(sp, sp, Operand(8)); // reserve one temporary double on the stack |
798 } | 673 |
799 } | 674 // sign-extend src to 64-bit and store it to temp double on the stack |
800 } | 675 #if V8_TARGET_ARCH_PPC64 |
801 } | 676 extsw(r0, src); |
802 | 677 std(r0, MemOperand(sp, 0)); |
803 | 678 #else |
804 void MacroAssembler::Strd(Register src1, Register src2, | 679 srawi(r0, src, 31); |
805 const MemOperand& dst, Condition cond) { | 680 stw(r0, MemOperand(sp, Register::kExponentOffset)); |
806 ASSERT(dst.rm().is(no_reg)); | 681 stw(src, MemOperand(sp, Register::kMantissaOffset)); |
807 ASSERT(!src1.is(lr)); // r14. | 682 #endif |
808 | 683 |
809 // V8 does not use this addressing mode, so the fallback code | 684 // load into FPR |
810 // below doesn't support it yet. | 685 nop(); // LHS/RAW optimization |
811 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); | 686 lfd(double_dst, MemOperand(sp, 0)); |
812 | 687 |
813 // Generate two str instructions if strd is not available. | 688 addi(sp, sp, Operand(8)); // restore stack |
814 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && | 689 |
815 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) { | 690 // convert to double |
816 CpuFeatureScope scope(this, ARMv7); | 691 fcfid(double_dst, double_dst); |
817 strd(src1, src2, dst, cond); | 692 } |
| 693 |
| 694 |
| 695 void MacroAssembler::ConvertUnsignedIntToDouble(Register src, |
| 696 DoubleRegister double_dst) { |
| 697 ASSERT(!src.is(r0)); |
| 698 |
| 699 subi(sp, sp, Operand(8)); // reserve one temporary double on the stack |
| 700 |
| 701 // zero-extend src to 64-bit and store it to temp double on the stack |
| 702 #if V8_TARGET_ARCH_PPC64 |
| 703 clrldi(r0, src, Operand(32)); |
| 704 std(r0, MemOperand(sp, 0)); |
| 705 #else |
| 706 li(r0, Operand::Zero()); |
| 707 stw(r0, MemOperand(sp, Register::kExponentOffset)); |
| 708 stw(src, MemOperand(sp, Register::kMantissaOffset)); |
| 709 #endif |
| 710 |
| 711 // load into FPR |
| 712 nop(); // LHS/RAW optimization |
| 713 lfd(double_dst, MemOperand(sp, 0)); |
| 714 |
| 715 addi(sp, sp, Operand(8)); // restore stack |
| 716 |
| 717 // convert to double |
| 718 fcfid(double_dst, double_dst); |
| 719 } |
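Both conversion helpers move the integer from a GPR to an FPR the portable way, by round-tripping through a stack slot: widen the value to 64 bits in memory, reload it into an FPR with lfd, and let fcfid convert the 64-bit integer to a double (the nop() flagged "LHS/RAW optimization" presumably just separates the store from the dependent load to soften the load-hit-store stall). Host-side C++ equivalent of what the two helpers compute (a sketch, exact because every 32-bit integer is representable as a double):

    #include <cstdint>

    double ConvertIntToDoubleRef(int32_t src) {
      return static_cast<double>(static_cast<int64_t>(src));    // extsw / srawi+stw, then fcfid
    }

    double ConvertUnsignedIntToDoubleRef(uint32_t src) {
      return static_cast<double>(static_cast<uint64_t>(src));   // zero-extend, then fcfid
    }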
| 720 |
| 721 |
| 722 void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst, |
| 723 const Register src, |
| 724 const Register int_scratch) { |
| 725 subi(sp, sp, Operand(8)); // reserve one temporary double on the stack |
| 726 |
| 727 // sign-extend src to 64-bit and store it to temp double on the stack |
| 728 #if V8_TARGET_ARCH_PPC64 |
| 729 extsw(int_scratch, src); |
| 730 std(int_scratch, MemOperand(sp, 0)); |
| 731 #else |
| 732 srawi(int_scratch, src, 31); |
| 733 stw(int_scratch, MemOperand(sp, Register::kExponentOffset)); |
| 734 stw(src, MemOperand(sp, Register::kMantissaOffset)); |
| 735 #endif |
| 736 |
| 737 // load sign-extended src into FPR |
| 738 nop(); // LHS/RAW optimization |
| 739 lfd(dst, MemOperand(sp, 0)); |
| 740 |
| 741 addi(sp, sp, Operand(8)); // restore stack |
| 742 |
| 743 fcfid(dst, dst); |
| 744 frsp(dst, dst); |
| 745 } |
| 746 |
| 747 |
| 748 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, |
| 749 const Register dst, |
| 750 #if !V8_TARGET_ARCH_PPC64 |
| 751 const Register dst_hi, |
| 752 #endif |
| 753 const DoubleRegister double_dst, |
| 754 FPRoundingMode rounding_mode) { |
| 755 if (rounding_mode == kRoundToZero) { |
| 756 fctidz(double_dst, double_input); |
818 } else { | 757 } else { |
819 MemOperand dst2(dst); | 758 SetRoundingMode(rounding_mode); |
820 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { | 759 fctid(double_dst, double_input); |
821 dst2.set_offset(dst2.offset() + 4); | 760 ResetRoundingMode(); |
822 str(src1, dst, cond); | 761 } |
823 str(src2, dst2, cond); | 762 |
824 } else { // PostIndex or NegPostIndex. | 763 stfdu(double_dst, MemOperand(sp, -kDoubleSize)); |
825 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); | 764 nop(); // LHS/RAW optimization |
826 dst2.set_offset(dst2.offset() - 4); | 765 #if V8_TARGET_ARCH_PPC64 |
827 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); | 766 ld(dst, MemOperand(sp, 0)); |
828 str(src2, dst2, cond); | 767 #else |
829 } | 768 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset)); |
830 } | 769 lwz(dst, MemOperand(sp, Register::kMantissaOffset)); |
831 } | 770 #endif |
832 | 771 addi(sp, sp, Operand(kDoubleSize)); |
833 | 772 } |
834 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { | 773 |
835 // If needed, restore wanted bits of FPSCR. | 774 |
836 Label fpscr_done; | 775 #if V8_OOL_CONSTANT_POOL |
837 vmrs(scratch); | |
838 if (emit_debug_code()) { | |
839 Label rounding_mode_correct; | |
840 tst(scratch, Operand(kVFPRoundingModeMask)); | |
841 b(eq, &rounding_mode_correct); | |
842 // Don't call Assert here, since Runtime_Abort could re-enter here. | |
843 stop("Default rounding mode not set"); | |
844 bind(&rounding_mode_correct); | |
845 } | |
846 tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); | |
847 b(ne, &fpscr_done); | |
848 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); | |
849 vmsr(scratch); | |
850 bind(&fpscr_done); | |
851 } | |
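The ARM code being replaced canonicalizes NaNs via vsub against kDoubleRegZero with the FPSCR default-NaN bit set; PPC has no such mode, so the new helper tests explicitly (fcmpu reports "unordered" only when an operand is NaN, which bordered/bunordered then branch on) and overwrites NaNs with the canonical constant. Host-side C++ behaviour (a sketch):

    #include <cmath>

    double CanonicalizeNaNRef(double value, double canonical_nan) {
      return std::isnan(value) ? canonical_nan : value;   // fcmpu + bunordered path
    }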
852 | |
853 | |
854 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, | |
855 const DwVfpRegister src, | |
856 const Condition cond) { | |
857 vsub(dst, src, kDoubleRegZero, cond); | |
858 } | |
859 | |
860 | |
861 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, | |
862 const DwVfpRegister src2, | |
863 const Condition cond) { | |
864 // Compare and move FPSCR flags to the normal condition flags. | |
865 VFPCompareAndLoadFlags(src1, src2, pc, cond); | |
866 } | |
867 | |
868 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, | |
869 const double src2, | |
870 const Condition cond) { | |
871 // Compare and move FPSCR flags to the normal condition flags. | |
872 VFPCompareAndLoadFlags(src1, src2, pc, cond); | |
873 } | |
874 | |
875 | |
876 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, | |
877 const DwVfpRegister src2, | |
878 const Register fpscr_flags, | |
879 const Condition cond) { | |
880 // Compare and load FPSCR. | |
881 vcmp(src1, src2, cond); | |
882 vmrs(fpscr_flags, cond); | |
883 } | |
884 | |
885 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, | |
886 const double src2, | |
887 const Register fpscr_flags, | |
888 const Condition cond) { | |
889 // Compare and load FPSCR. | |
890 vcmp(src1, src2, cond); | |
891 vmrs(fpscr_flags, cond); | |
892 } | |
893 | |
894 void MacroAssembler::Vmov(const DwVfpRegister dst, | |
895 const double imm, | |
896 const Register scratch) { | |
897 static const DoubleRepresentation minus_zero(-0.0); | |
898 static const DoubleRepresentation zero(0.0); | |
899 DoubleRepresentation value_rep(imm); | |
900 // Handle special values first. | |
901 if (value_rep == zero) { | |
902 vmov(dst, kDoubleRegZero); | |
903 } else if (value_rep == minus_zero) { | |
904 vneg(dst, kDoubleRegZero); | |
905 } else { | |
906 vmov(dst, imm, scratch); | |
907 } | |
908 } | |
909 | |
910 | |
911 void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) { | |
912 if (src.code() < 16) { | |
913 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); | |
914 vmov(dst, loc.high()); | |
915 } else { | |
916 vmov(dst, VmovIndexHi, src); | |
917 } | |
918 } | |
919 | |
920 | |
921 void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) { | |
922 if (dst.code() < 16) { | |
923 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); | |
924 vmov(loc.high(), src); | |
925 } else { | |
926 vmov(dst, VmovIndexHi, src); | |
927 } | |
928 } | |
929 | |
930 | |
931 void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) { | |
932 if (src.code() < 16) { | |
933 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); | |
934 vmov(dst, loc.low()); | |
935 } else { | |
936 vmov(dst, VmovIndexLo, src); | |
937 } | |
938 } | |
939 | |
940 | |
941 void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { | |
942 if (dst.code() < 16) { | |
943 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); | |
944 vmov(loc.low(), src); | |
945 } else { | |
946 vmov(dst, VmovIndexLo, src); | |
947 } | |
948 } | |
949 | |
950 | |
951 void MacroAssembler::LoadConstantPoolPointerRegister() { | 776 void MacroAssembler::LoadConstantPoolPointerRegister() { |
952 if (FLAG_enable_ool_constant_pool) { | 777 ConstantPoolUnavailableScope constant_pool_unavailable(this); |
953 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize - | 778 uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset(); |
954 pc_offset() - Instruction::kPCReadOffset; | 779 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize; |
955 ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset)); | 780 mov(kConstantPoolRegister, |
956 ldr(pp, MemOperand(pc, constant_pool_offset)); | 781 Operand(code_start, RelocInfo::INTERNAL_REFERENCE)); |
957 } | 782 LoadP(kConstantPoolRegister, |
958 } | 783 MemOperand(kConstantPoolRegister, constant_pool_offset)); |
| 784 } |
| 785 #endif |
959 | 786 |
960 | 787 |
961 void MacroAssembler::StubPrologue() { | 788 void MacroAssembler::StubPrologue() { |
962 PushFixedFrame(); | 789 PushFixedFrame(); |
963 Push(Smi::FromInt(StackFrame::STUB)); | 790 Push(Smi::FromInt(StackFrame::STUB)); |
964 // Adjust FP to point to saved FP. | 791 // Adjust FP to point to saved FP. |
965 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 792 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
966 if (FLAG_enable_ool_constant_pool) { | 793 #if V8_OOL_CONSTANT_POOL |
967 LoadConstantPoolPointerRegister(); | 794 LoadConstantPoolPointerRegister(); |
968 set_constant_pool_available(true); | 795 set_constant_pool_available(true); |
969 } | 796 #endif |
970 } | 797 } |
971 | 798 |
972 | 799 |
973 void MacroAssembler::Prologue(bool code_pre_aging) { | 800 void MacroAssembler::Prologue(bool code_pre_aging) { |
974 { PredictableCodeSizeScope predictible_code_size_scope( | 801 { PredictableCodeSizeScope predictible_code_size_scope( |
975 this, kNoCodeAgeSequenceLength); | 802 this, kNoCodeAgeSequenceLength); |
976 // The following three instructions must remain together and unmodified | 803 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this); |
| 804 // The following instructions must remain together and unmodified |
977 // for code aging to work properly. | 805 // for code aging to work properly. |
978 if (code_pre_aging) { | 806 if (code_pre_aging) { |
979 // Pre-age the code. | 807 // Pre-age the code. |
| 808 // This matches the code found in PatchPlatformCodeAge() |
980 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); | 809 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); |
981 add(r0, pc, Operand(-8)); | 810 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start()); |
982 ldr(pc, MemOperand(pc, -4)); | 811 mflr(ip); |
983 emit_code_stub_address(stub); | 812 mov(r3, Operand(target)); |
| 813 Call(r3); |
| 814 for (int i = 0; i < kCodeAgingSequenceNops; i++) { |
| 815 nop(); |
| 816 } |
984 } else { | 817 } else { |
985 PushFixedFrame(r1); | 818 // This matches the code found in GetNoCodeAgeSequence() |
986 nop(ip.code()); | 819 PushFixedFrame(r4); |
987 // Adjust FP to point to saved FP. | 820 // Adjust fp to point to saved fp. |
988 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 821 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
| 822 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) { |
| 823 nop(); |
| 824 } |
989 } | 825 } |
990 } | 826 } |
991 if (FLAG_enable_ool_constant_pool) { | 827 #if V8_OOL_CONSTANT_POOL |
992 LoadConstantPoolPointerRegister(); | 828 LoadConstantPoolPointerRegister(); |
993 set_constant_pool_available(true); | 829 set_constant_pool_available(true); |
994 } | 830 #endif |
995 } | 831 } |
996 | 832 |
997 | 833 |
998 void MacroAssembler::EnterFrame(StackFrame::Type type, | 834 void MacroAssembler::EnterFrame(StackFrame::Type type, |
999 bool load_constant_pool) { | 835 bool load_constant_pool) { |
1000 // r0-r3: preserved | |
1001 PushFixedFrame(); | 836 PushFixedFrame(); |
1002 if (FLAG_enable_ool_constant_pool && load_constant_pool) { | 837 #if V8_OOL_CONSTANT_POOL |
| 838 if (load_constant_pool) { |
1003 LoadConstantPoolPointerRegister(); | 839 LoadConstantPoolPointerRegister(); |
1004 } | 840 } |
1005 mov(ip, Operand(Smi::FromInt(type))); | 841 #endif |
1006 push(ip); | 842 LoadSmiLiteral(r0, Smi::FromInt(type)); |
1007 mov(ip, Operand(CodeObject())); | 843 push(r0); |
1008 push(ip); | 844 mov(r0, Operand(CodeObject())); |
| 845 push(r0); |
1009 // Adjust FP to point to saved FP. | 846 // Adjust FP to point to saved FP. |
1010 add(fp, sp, | 847 addi(fp, sp, |
1011 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); | 848 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); |
1012 } | 849 } |
1013 | 850 |
1014 | 851 |
1015 int MacroAssembler::LeaveFrame(StackFrame::Type type) { | 852 int MacroAssembler::LeaveFrame(StackFrame::Type type) { |
1016 // r0: preserved | 853 // r3: preserved |
1017 // r1: preserved | 854 // r4: preserved |
1018 // r2: preserved | 855 // r5: preserved |
1019 | 856 |
1020 // Drop the execution stack down to the frame pointer and restore | 857 // Drop the execution stack down to the frame pointer and restore |
1021 // the caller frame pointer, return address and constant pool pointer | 858 // the caller frame pointer, return address and constant pool pointer. |
1022 // (if FLAG_enable_ool_constant_pool). | |
1023 int frame_ends; | 859 int frame_ends; |
1024 if (FLAG_enable_ool_constant_pool) { | 860 #if V8_OOL_CONSTANT_POOL |
1025 add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset)); | 861 addi(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset)); |
1026 frame_ends = pc_offset(); | 862 frame_ends = pc_offset(); |
1027 ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit()); | 863 Pop(r0, fp, kConstantPoolRegister); |
1028 } else { | 864 #else |
1029 mov(sp, fp); | 865 mr(sp, fp); |
1030 frame_ends = pc_offset(); | 866 frame_ends = pc_offset(); |
1031 ldm(ia_w, sp, fp.bit() | lr.bit()); | 867 Pop(r0, fp); |
1032 } | 868 #endif |
| 869 mtlr(r0); |
1033 return frame_ends; | 870 return frame_ends; |
1034 } | 871 } |
1035 | 872 |
1036 | 873 |
 | 874 // ExitFrame layout (possibly inaccurate; needs updating) |
| 875 // |
| 876 // SP -> previousSP |
| 877 // LK reserved |
| 878 // code |
| 879 // sp_on_exit (for debug?) |
| 880 // oldSP->prev SP |
| 881 // LK |
| 882 // <parameters on stack> |
| 883 |
 | 884 // Prior to calling EnterExitFrame, we have a number of parameters |
 | 885 // on the stack that need to be wrapped in a real frame. First |
 | 886 // we reserve a slot for LK and push the previous SP, which is captured |
 | 887 // in the fp register (r31). |
 | 888 // Then a new frame is allocated. |
| 889 |
1037 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { | 890 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { |
1038 // Set up the frame structure on the stack. | 891 // Set up the frame structure on the stack. |
1039 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); | 892 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); |
1040 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); | 893 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); |
1041 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); | 894 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); |
1042 Push(lr, fp); | 895 ASSERT(stack_space > 0); |
1043 mov(fp, Operand(sp)); // Set up new frame pointer. | 896 |
| 897 // This is an opportunity to build a frame to wrap |
| 898 // all of the pushes that have happened inside of V8 |
| 899 // since we were called from C code |
| 900 |
 | 901 // Replicate the ARM frame - TODO: make this follow the PPC ABI more closely |
| 902 mflr(r0); |
| 903 Push(r0, fp); |
| 904 mr(fp, sp); |
1044 // Reserve room for saved entry sp and code object. | 905 // Reserve room for saved entry sp and code object. |
1045 sub(sp, sp, Operand(ExitFrameConstants::kFrameSize)); | 906 subi(sp, sp, Operand(ExitFrameConstants::kFrameSize)); |
| 907 |
1046 if (emit_debug_code()) { | 908 if (emit_debug_code()) { |
1047 mov(ip, Operand::Zero()); | 909 li(r8, Operand::Zero()); |
1048 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 910 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
1049 } | 911 } |
1050 if (FLAG_enable_ool_constant_pool) { | 912 #if V8_OOL_CONSTANT_POOL |
1051 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); | 913 StoreP(kConstantPoolRegister, |
1052 } | 914 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); |
1053 mov(ip, Operand(CodeObject())); | 915 #endif |
1054 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); | 916 mov(r8, Operand(CodeObject())); |
| 917 StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); |
1055 | 918 |
1056 // Save the frame pointer and the context in top. | 919 // Save the frame pointer and the context in top. |
1057 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 920 mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
1058 str(fp, MemOperand(ip)); | 921 StoreP(fp, MemOperand(r8)); |
1059 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 922 mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
1060 str(cp, MemOperand(ip)); | 923 StoreP(cp, MemOperand(r8)); |
1061 | 924 |
1062 // Optionally save all double registers. | 925 // Optionally save all volatile double registers. |
1063 if (save_doubles) { | 926 if (save_doubles) { |
1064 SaveFPRegs(sp, ip); | 927 SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters); |
1065 // Note that d0 will be accessible at | 928 // Note that d0 will be accessible at |
1066 // fp - ExitFrameConstants::kFrameSize - | 929 // fp - ExitFrameConstants::kFrameSize - |
1067 // DwVfpRegister::kMaxNumRegisters * kDoubleSize, | 930 // kNumVolatileRegisters * kDoubleSize, |
1068 // since the sp slot, code slot and constant pool slot (if | 931 // since the sp slot and code slot were pushed after the fp. |
1069 // FLAG_enable_ool_constant_pool) were pushed after the fp. | 932 } |
1070 } | 933 |
1071 | 934 addi(sp, sp, Operand(-stack_space * kPointerSize)); |
1072 // Reserve place for the return address and stack space and align the frame | 935 |
1073 // preparing for calling the runtime function. | 936 // Allocate and align the frame preparing for calling the runtime |
1074 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 937 // function. |
1075 sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); | 938 const int frame_alignment = ActivationFrameAlignment(); |
1076 if (frame_alignment > 0) { | 939 if (frame_alignment > kPointerSize) { |
1077 ASSERT(IsPowerOf2(frame_alignment)); | 940 ASSERT(IsPowerOf2(frame_alignment)); |
1078 and_(sp, sp, Operand(-frame_alignment)); | 941 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); |
1079 } | 942 } |
| 943 li(r0, Operand::Zero()); |
| 944 StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize)); |
1080 | 945 |
1081 // Set the exit frame sp value to point just before the return address | 946 // Set the exit frame sp value to point just before the return address |
1082 // location. | 947 // location. |
1083 add(ip, sp, Operand(kPointerSize)); | 948 addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize)); |
1084 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 949 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
1085 } | 950 } |
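
The alignment step above (ClearRightImm on PPC, the and_ with -frame_alignment on ARM) simply rounds sp down to the nearest multiple of the activation frame alignment. A minimal stand-alone sketch of that rounding, with an illustrative AlignDown helper (not a V8 function):

  #include <cassert>
  #include <cstdint>

  // Round an address down to a power-of-two alignment, as the exit frame
  // setup above does for sp.
  uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
    assert((alignment & (alignment - 1)) == 0);  // must be a power of two
    return sp & ~(alignment - 1);                // clear the low bits
  }

  // e.g. AlignDown(0x7fff1234, 16) == 0x7fff1230
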
1086 | 951 |
1087 | 952 |
1088 void MacroAssembler::InitializeNewString(Register string, | 953 void MacroAssembler::InitializeNewString(Register string, |
1089 Register length, | 954 Register length, |
1090 Heap::RootListIndex map_index, | 955 Heap::RootListIndex map_index, |
1091 Register scratch1, | 956 Register scratch1, |
1092 Register scratch2) { | 957 Register scratch2) { |
1093 SmiTag(scratch1, length); | 958 SmiTag(scratch1, length); |
1094 LoadRoot(scratch2, map_index); | 959 LoadRoot(scratch2, map_index); |
1095 str(scratch1, FieldMemOperand(string, String::kLengthOffset)); | 960 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0); |
1096 mov(scratch1, Operand(String::kEmptyHashField)); | 961 li(scratch1, Operand(String::kEmptyHashField)); |
1097 str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); | 962 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0); |
1098 str(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); | 963 StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0); |
1099 } | 964 } |
1100 | 965 |
1101 | 966 |
1102 int MacroAssembler::ActivationFrameAlignment() { | 967 int MacroAssembler::ActivationFrameAlignment() { |
1103 #if V8_HOST_ARCH_ARM | 968 #if !defined(USE_SIMULATOR) |
1104 // Running on the real platform. Use the alignment as mandated by the local | 969 // Running on the real platform. Use the alignment as mandated by the local |
1105 // environment. | 970 // environment. |
1106 // Note: This will break if we ever start generating snapshots on one ARM | 971 // Note: This will break if we ever start generating snapshots on one PPC |
1107 // platform for another ARM platform with a different alignment. | 972 // platform for another PPC platform with a different alignment. |
1108 return base::OS::ActivationFrameAlignment(); | 973 return base::OS::ActivationFrameAlignment(); |
1109 #else // V8_HOST_ARCH_ARM | 974 #else // Simulated |
1110 // If we are using the simulator then we should always align to the expected | 975 // If we are using the simulator then we should always align to the expected |
1111 // alignment. As the simulator is used to generate snapshots we do not know | 976 // alignment. As the simulator is used to generate snapshots we do not know |
1112 // if the target platform will need alignment, so this is controlled from a | 977 // if the target platform will need alignment, so this is controlled from a |
1113 // flag. | 978 // flag. |
1114 return FLAG_sim_stack_alignment; | 979 return FLAG_sim_stack_alignment; |
1115 #endif // V8_HOST_ARCH_ARM | 980 #endif |
1116 } | 981 } |
1117 | 982 |
1118 | 983 |
1119 void MacroAssembler::LeaveExitFrame(bool save_doubles, | 984 void MacroAssembler::LeaveExitFrame(bool save_doubles, |
1120 Register argument_count, | 985 Register argument_count, |
1121 bool restore_context) { | 986 bool restore_context) { |
| 987 #if V8_OOL_CONSTANT_POOL |
1122 ConstantPoolUnavailableScope constant_pool_unavailable(this); | 988 ConstantPoolUnavailableScope constant_pool_unavailable(this); |
1123 | 989 #endif |
1124 // Optionally restore all double registers. | 990 // Optionally restore all double registers. |
1125 if (save_doubles) { | 991 if (save_doubles) { |
1126 // Calculate the stack location of the saved doubles and restore them. | 992 // Calculate the stack location of the saved doubles and restore them. |
1127 const int offset = ExitFrameConstants::kFrameSize; | 993 const int kNumRegs = DoubleRegister::kNumVolatileRegisters; |
1128 sub(r3, fp, | 994 const int offset = (ExitFrameConstants::kFrameSize + |
1129 Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); | 995 kNumRegs * kDoubleSize); |
1130 RestoreFPRegs(r3, ip); | 996 addi(r6, fp, Operand(-offset)); |
| 997 RestoreFPRegs(r6, 0, kNumRegs); |
1131 } | 998 } |
1132 | 999 |
1133 // Clear top frame. | 1000 // Clear top frame. |
1134 mov(r3, Operand::Zero()); | 1001 li(r6, Operand::Zero()); |
1135 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 1002 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
1136 str(r3, MemOperand(ip)); | 1003 StoreP(r6, MemOperand(ip)); |
1137 | 1004 |
1138 // Restore current context from top and clear it in debug mode. | 1005 // Restore current context from top and clear it in debug mode. |
1139 if (restore_context) { | 1006 if (restore_context) { |
1140 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 1007 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
1141 ldr(cp, MemOperand(ip)); | 1008 LoadP(cp, MemOperand(ip)); |
1142 } | 1009 } |
1143 #ifdef DEBUG | 1010 #ifdef DEBUG |
1144 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 1011 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
1145 str(r3, MemOperand(ip)); | 1012 StoreP(r6, MemOperand(ip)); |
1146 #endif | 1013 #endif |
1147 | 1014 |
1148 // Tear down the exit frame, pop the arguments, and return. | 1015 // Tear down the exit frame, pop the arguments, and return. |
1149 if (FLAG_enable_ool_constant_pool) { | 1016 #if V8_OOL_CONSTANT_POOL |
1150 ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); | 1017 LoadP(kConstantPoolRegister, |
1151 } | 1018 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); |
1152 mov(sp, Operand(fp)); | 1019 #endif |
1153 ldm(ia_w, sp, fp.bit() | lr.bit()); | 1020 mr(sp, fp); |
| 1021 pop(fp); |
| 1022 pop(r0); |
| 1023 mtlr(r0); |
| 1024 |
1154 if (argument_count.is_valid()) { | 1025 if (argument_count.is_valid()) { |
1155 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2)); | 1026 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2)); |
| 1027 add(sp, sp, argument_count); |
1156 } | 1028 } |
1157 } | 1029 } |
1158 | 1030 |
1159 | 1031 |
1160 void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) { | 1032 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { |
1161 if (use_eabi_hardfloat()) { | 1033 Move(dst, d1); |
1162 Move(dst, d0); | |
1163 } else { | |
1164 vmov(dst, r0, r1); | |
1165 } | |
1166 } | 1034 } |
1167 | 1035 |
1168 | 1036 |
1169 // On ARM this is just a synonym to make the purpose clear. | 1037 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { |
1170 void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) { | 1038 Move(dst, d1); |
1171 MovFromFloatResult(dst); | |
1172 } | 1039 } |
1173 | 1040 |
1174 | 1041 |
1175 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | 1042 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
1176 const ParameterCount& actual, | 1043 const ParameterCount& actual, |
1177 Handle<Code> code_constant, | 1044 Handle<Code> code_constant, |
1178 Register code_reg, | 1045 Register code_reg, |
1179 Label* done, | 1046 Label* done, |
1180 bool* definitely_mismatches, | 1047 bool* definitely_mismatches, |
1181 InvokeFlag flag, | 1048 InvokeFlag flag, |
1182 const CallWrapper& call_wrapper) { | 1049 const CallWrapper& call_wrapper) { |
1183 bool definitely_matches = false; | 1050 bool definitely_matches = false; |
1184 *definitely_mismatches = false; | 1051 *definitely_mismatches = false; |
1185 Label regular_invoke; | 1052 Label regular_invoke; |
1186 | 1053 |
1187 // Check whether the expected and actual arguments count match. If not, | 1054 // Check whether the expected and actual arguments count match. If not, |
1188 // setup registers according to contract with ArgumentsAdaptorTrampoline: | 1055 // setup registers according to contract with ArgumentsAdaptorTrampoline: |
1189 // r0: actual arguments count | 1056 // r3: actual arguments count |
1190 // r1: function (passed through to callee) | 1057 // r4: function (passed through to callee) |
1191 // r2: expected arguments count | 1058 // r5: expected arguments count |
1192 | 1059 |
1193 // The code below is made a lot easier because the calling code already sets | 1060 // The code below is made a lot easier because the calling code already sets |
1194 // up actual and expected registers according to the contract if values are | 1061 // up actual and expected registers according to the contract if values are |
1195 // passed in registers. | 1062 // passed in registers. |
1196 ASSERT(actual.is_immediate() || actual.reg().is(r0)); | 1063 |
1197 ASSERT(expected.is_immediate() || expected.reg().is(r2)); | 1064 // roohack - remove these 3 checks temporarily |
1198 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3)); | 1065 // ASSERT(actual.is_immediate() || actual.reg().is(r3)); |
| 1066 // ASSERT(expected.is_immediate() || expected.reg().is(r5)); |
| 1067 // ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) |
| 1068 // || code_reg.is(r6)); |
1199 | 1069 |
1200 if (expected.is_immediate()) { | 1070 if (expected.is_immediate()) { |
1201 ASSERT(actual.is_immediate()); | 1071 ASSERT(actual.is_immediate()); |
1202 if (expected.immediate() == actual.immediate()) { | 1072 if (expected.immediate() == actual.immediate()) { |
1203 definitely_matches = true; | 1073 definitely_matches = true; |
1204 } else { | 1074 } else { |
1205 mov(r0, Operand(actual.immediate())); | 1075 mov(r3, Operand(actual.immediate())); |
1206 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; | 1076 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
1207 if (expected.immediate() == sentinel) { | 1077 if (expected.immediate() == sentinel) { |
1208 // Don't worry about adapting arguments for builtins that | 1078 // Don't worry about adapting arguments for builtins that |
1209 // don't want that done. Skip adaption code by making it look | 1079 // don't want that done. Skip adaption code by making it look |
1210 // like we have a match between expected and actual number of | 1080 // like we have a match between expected and actual number of |
1211 // arguments. | 1081 // arguments. |
1212 definitely_matches = true; | 1082 definitely_matches = true; |
1213 } else { | 1083 } else { |
1214 *definitely_mismatches = true; | 1084 *definitely_mismatches = true; |
1215 mov(r2, Operand(expected.immediate())); | 1085 mov(r5, Operand(expected.immediate())); |
1216 } | 1086 } |
1217 } | 1087 } |
1218 } else { | 1088 } else { |
1219 if (actual.is_immediate()) { | 1089 if (actual.is_immediate()) { |
1220 cmp(expected.reg(), Operand(actual.immediate())); | 1090 cmpi(expected.reg(), Operand(actual.immediate())); |
1221 b(eq, ®ular_invoke); | 1091 beq(®ular_invoke); |
1222 mov(r0, Operand(actual.immediate())); | 1092 mov(r3, Operand(actual.immediate())); |
1223 } else { | 1093 } else { |
1224 cmp(expected.reg(), Operand(actual.reg())); | 1094 cmp(expected.reg(), actual.reg()); |
1225 b(eq, ®ular_invoke); | 1095 beq(®ular_invoke); |
1226 } | 1096 } |
1227 } | 1097 } |
1228 | 1098 |
1229 if (!definitely_matches) { | 1099 if (!definitely_matches) { |
1230 if (!code_constant.is_null()) { | 1100 if (!code_constant.is_null()) { |
1231 mov(r3, Operand(code_constant)); | 1101 mov(r6, Operand(code_constant)); |
1232 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1102 addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); |
1233 } | 1103 } |
1234 | 1104 |
1235 Handle<Code> adaptor = | 1105 Handle<Code> adaptor = |
1236 isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 1106 isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
1237 if (flag == CALL_FUNCTION) { | 1107 if (flag == CALL_FUNCTION) { |
1238 call_wrapper.BeforeCall(CallSize(adaptor)); | 1108 call_wrapper.BeforeCall(CallSize(adaptor)); |
1239 Call(adaptor); | 1109 Call(adaptor); |
1240 call_wrapper.AfterCall(); | 1110 call_wrapper.AfterCall(); |
1241 if (!*definitely_mismatches) { | 1111 if (!*definitely_mismatches) { |
1242 b(done); | 1112 b(done); |
(...skipping 36 matching lines...)
1279 } | 1149 } |
1280 | 1150 |
1281 | 1151 |
1282 void MacroAssembler::InvokeFunction(Register fun, | 1152 void MacroAssembler::InvokeFunction(Register fun, |
1283 const ParameterCount& actual, | 1153 const ParameterCount& actual, |
1284 InvokeFlag flag, | 1154 InvokeFlag flag, |
1285 const CallWrapper& call_wrapper) { | 1155 const CallWrapper& call_wrapper) { |
1286 // You can't call a function without a valid frame. | 1156 // You can't call a function without a valid frame. |
1287 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 1157 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
1288 | 1158 |
1289 // Contract with called JS functions requires that function is passed in r1. | 1159 // Contract with called JS functions requires that function is passed in r4. |
1290 ASSERT(fun.is(r1)); | 1160 ASSERT(fun.is(r4)); |
1291 | 1161 |
1292 Register expected_reg = r2; | 1162 Register expected_reg = r5; |
1293 Register code_reg = r3; | 1163 Register code_reg = r6; |
1294 | 1164 |
1295 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 1165 LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); |
1296 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 1166 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); |
1297 ldr(expected_reg, | 1167 LoadWordArith(expected_reg, |
1298 FieldMemOperand(code_reg, | 1168 FieldMemOperand(code_reg, |
1299 SharedFunctionInfo::kFormalParameterCountOffset)); | 1169 SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1170 #if !defined(V8_TARGET_ARCH_PPC64) |
1300 SmiUntag(expected_reg); | 1171 SmiUntag(expected_reg); |
1301 ldr(code_reg, | 1172 #endif |
1302 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 1173 LoadP(code_reg, |
| 1174 FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
1303 | 1175 |
1304 ParameterCount expected(expected_reg); | 1176 ParameterCount expected(expected_reg); |
1305 InvokeCode(code_reg, expected, actual, flag, call_wrapper); | 1177 InvokeCode(code_reg, expected, actual, flag, call_wrapper); |
1306 } | 1178 } |
1307 | 1179 |
1308 | 1180 |
1309 void MacroAssembler::InvokeFunction(Register function, | 1181 void MacroAssembler::InvokeFunction(Register function, |
1310 const ParameterCount& expected, | 1182 const ParameterCount& expected, |
1311 const ParameterCount& actual, | 1183 const ParameterCount& actual, |
1312 InvokeFlag flag, | 1184 InvokeFlag flag, |
1313 const CallWrapper& call_wrapper) { | 1185 const CallWrapper& call_wrapper) { |
1314 // You can't call a function without a valid frame. | 1186 // You can't call a function without a valid frame. |
1315 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 1187 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
1316 | 1188 |
1317 // Contract with called JS functions requires that function is passed in r1. | 1189 // Contract with called JS functions requires that function is passed in r4. |
1318 ASSERT(function.is(r1)); | 1190 ASSERT(function.is(r4)); |
1319 | 1191 |
1320 // Get the function and setup the context. | 1192 // Get the function and setup the context. |
1321 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 1193 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); |
1322 | 1194 |
1323 // We call indirectly through the code field in the function to | 1195 // We call indirectly through the code field in the function to |
1324 // allow recompilation to take effect without changing any of the | 1196 // allow recompilation to take effect without changing any of the |
1325 // call sites. | 1197 // call sites. |
1326 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 1198 LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
1327 InvokeCode(r3, expected, actual, flag, call_wrapper); | 1199 InvokeCode(r6, expected, actual, flag, call_wrapper); |
1328 } | 1200 } |
1329 | 1201 |
1330 | 1202 |
1331 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 1203 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
1332 const ParameterCount& expected, | 1204 const ParameterCount& expected, |
1333 const ParameterCount& actual, | 1205 const ParameterCount& actual, |
1334 InvokeFlag flag, | 1206 InvokeFlag flag, |
1335 const CallWrapper& call_wrapper) { | 1207 const CallWrapper& call_wrapper) { |
1336 Move(r1, function); | 1208 Move(r4, function); |
1337 InvokeFunction(r1, expected, actual, flag, call_wrapper); | 1209 InvokeFunction(r4, expected, actual, flag, call_wrapper); |
1338 } | 1210 } |
1339 | 1211 |
1340 | 1212 |
1341 void MacroAssembler::IsObjectJSObjectType(Register heap_object, | 1213 void MacroAssembler::IsObjectJSObjectType(Register heap_object, |
1342 Register map, | 1214 Register map, |
1343 Register scratch, | 1215 Register scratch, |
1344 Label* fail) { | 1216 Label* fail) { |
1345 ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); | 1217 LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); |
1346 IsInstanceJSObjectType(map, scratch, fail); | 1218 IsInstanceJSObjectType(map, scratch, fail); |
1347 } | 1219 } |
1348 | 1220 |
1349 | 1221 |
1350 void MacroAssembler::IsInstanceJSObjectType(Register map, | 1222 void MacroAssembler::IsInstanceJSObjectType(Register map, |
1351 Register scratch, | 1223 Register scratch, |
1352 Label* fail) { | 1224 Label* fail) { |
1353 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 1225 lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
1354 cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 1226 cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
1355 b(lt, fail); | 1227 blt(fail); |
1356 cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 1228 cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
1357 b(gt, fail); | 1229 bgt(fail); |
1358 } | 1230 } |
1359 | 1231 |
1360 | 1232 |
1361 void MacroAssembler::IsObjectJSStringType(Register object, | 1233 void MacroAssembler::IsObjectJSStringType(Register object, |
1362 Register scratch, | 1234 Register scratch, |
1363 Label* fail) { | 1235 Label* fail) { |
1364 ASSERT(kNotStringTag != 0); | 1236 ASSERT(kNotStringTag != 0); |
1365 | 1237 |
1366 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 1238 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
1367 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 1239 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
1368 tst(scratch, Operand(kIsNotStringMask)); | 1240 andi(r0, scratch, Operand(kIsNotStringMask)); |
1369 b(ne, fail); | 1241 bne(fail, cr0); |
1370 } | 1242 } |
1371 | 1243 |
1372 | 1244 |
1373 void MacroAssembler::IsObjectNameType(Register object, | 1245 void MacroAssembler::IsObjectNameType(Register object, |
1374 Register scratch, | 1246 Register scratch, |
1375 Label* fail) { | 1247 Label* fail) { |
1376 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 1248 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
1377 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 1249 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
1378 cmp(scratch, Operand(LAST_NAME_TYPE)); | 1250 cmpi(scratch, Operand(LAST_NAME_TYPE)); |
1379 b(hi, fail); | 1251 bgt(fail); |
1380 } | 1252 } |
1381 | 1253 |
1382 | 1254 |
1383 void MacroAssembler::DebugBreak() { | 1255 void MacroAssembler::DebugBreak() { |
1384 mov(r0, Operand::Zero()); | 1256 li(r3, Operand::Zero()); |
1385 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); | 1257 mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); |
1386 CEntryStub ces(isolate(), 1); | 1258 CEntryStub ces(isolate(), 1); |
1387 ASSERT(AllowThisStubCall(&ces)); | 1259 ASSERT(AllowThisStubCall(&ces)); |
1388 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); | 1260 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); |
1389 } | 1261 } |
1390 | 1262 |
1391 | 1263 |
1392 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, | 1264 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, |
1393 int handler_index) { | 1265 int handler_index) { |
1394 // Adjust this code if not the case. | 1266 // Adjust this code if not the case. |
1395 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 1267 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
1396 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 1268 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
1397 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 1269 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
1398 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 1270 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
1399 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 1271 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
1400 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 1272 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
1401 | 1273 |
1402 // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available. | 1274 // For the JSEntry handler, we must preserve r1-r7; r0 and r8-r15 are available. |
1403 // We will build up the handler from the bottom by pushing on the stack. | 1275 // We want the stack to look like |
1404 // Set up the code object (r5) and the state (r6) for pushing. | 1276 // sp -> NextOffset |
| 1277 // CodeObject |
| 1278 // state |
| 1279 // context |
| 1280 // frame pointer |
| 1281 |
| 1282 // Link the current handler as the next handler. |
| 1283 mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
| 1284 LoadP(r0, MemOperand(r8)); |
| 1285 StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize)); |
| 1286 // Set this new handler as the current one. |
| 1287 StoreP(sp, MemOperand(r8)); |
| 1288 |
| 1289 if (kind == StackHandler::JS_ENTRY) { |
| 1290 li(r8, Operand::Zero()); // NULL frame pointer. |
| 1291 StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset)); |
| 1292 LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context. |
| 1293 StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset)); |
| 1294 } else { |
 | 1295 // TODO: verify that fp is the correct value to store here |
| 1296 StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset)); |
| 1297 StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset)); |
| 1298 } |
1405 unsigned state = | 1299 unsigned state = |
1406 StackHandler::IndexField::encode(handler_index) | | 1300 StackHandler::IndexField::encode(handler_index) | |
1407 StackHandler::KindField::encode(kind); | 1301 StackHandler::KindField::encode(kind); |
1408 mov(r5, Operand(CodeObject())); | 1302 LoadIntLiteral(r8, state); |
1409 mov(r6, Operand(state)); | 1303 StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset)); |
1410 | 1304 mov(r8, Operand(CodeObject())); |
1411 // Push the frame pointer, context, state, and code object. | 1305 StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset)); |
1412 if (kind == StackHandler::JS_ENTRY) { | |
1413 mov(cp, Operand(Smi::FromInt(0))); // Indicates no context. | |
1414 mov(ip, Operand::Zero()); // NULL frame pointer. | |
1415 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit()); | |
1416 } else { | |
1417 stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit()); | |
1418 } | |
1419 | |
1420 // Link the current handler as the next handler. | |
1421 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | |
1422 ldr(r5, MemOperand(r6)); | |
1423 push(r5); | |
1424 // Set this new handler as the current one. | |
1425 str(sp, MemOperand(r6)); | |
1426 } | 1306 } |
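
The STATIC_ASSERTs at the top of PushTryHandler pin down the layout of the five-word handler record built here. As an illustration only (not the actual V8 declaration), the record pushed on the stack can be pictured as:

  #include <cstdint>

  // Offsets mirror the STATIC_ASSERTs above; illustrative struct only.
  struct StackHandlerRecord {
    void*    next;     // kNextOffset    == 0 * kPointerSize
    void*    code;     // kCodeOffset    == 1 * kPointerSize
    intptr_t state;    // kStateOffset   == 2 * kPointerSize
    void*    context;  // kContextOffset == 3 * kPointerSize
    void*    fp;       // kFPOffset      == 4 * kPointerSize
  };
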
1427 | 1307 |
1428 | 1308 |
1429 void MacroAssembler::PopTryHandler() { | 1309 void MacroAssembler::PopTryHandler() { |
1430 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 1310 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
1431 pop(r1); | 1311 pop(r4); |
1432 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 1312 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
1433 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); | 1313 addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); |
1434 str(r1, MemOperand(ip)); | 1314 StoreP(r4, MemOperand(ip)); |
1435 } | 1315 } |
1436 | 1316 |
1437 | 1317 |
| 1318 // PPC - make use of ip as a temporary register |
1438 void MacroAssembler::JumpToHandlerEntry() { | 1319 void MacroAssembler::JumpToHandlerEntry() { |
1439 // Compute the handler entry address and jump to it. The handler table is | 1320 // Compute the handler entry address and jump to it. The handler table is |
1440 // a fixed array of (smi-tagged) code offsets. | 1321 // a fixed array of (smi-tagged) code offsets. |
1441 // r0 = exception, r1 = code object, r2 = state. | 1322 // r3 = exception, r4 = code object, r5 = state. |
1442 | 1323 #if V8_OOL_CONSTANT_POOL |
1443 ConstantPoolUnavailableScope constant_pool_unavailable(this); | 1324 ConstantPoolUnavailableScope constant_pool_unavailable(this); |
1444 if (FLAG_enable_ool_constant_pool) { | 1325 LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset)); |
1445 ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool. | 1326 #endif |
1446 } | 1327 LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table. |
1447 ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table. | 1328 addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
1448 add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 1329 srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index. |
1449 mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index. | 1330 slwi(ip, r5, Operand(kPointerSizeLog2)); |
1450 ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset. | 1331 add(ip, r6, ip); |
1451 add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. | 1332 LoadP(r5, MemOperand(ip)); // Smi-tagged offset. |
1452 add(pc, r1, Operand::SmiUntag(r2)); // Jump | 1333 addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. |
| 1334 SmiUntag(ip, r5); |
| 1335 add(r0, r4, ip); |
| 1336 mtctr(r0); |
| 1337 bctr(); |
1453 } | 1338 } |
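
The dispatch above reduces to indexing a table of smi-tagged offsets and adding the untagged offset to the start of the code object's instructions. A hedged sketch of that arithmetic (the function and parameter names are illustrative, not V8 helpers; smi_shift is 1 for 32-bit smis):

  #include <cstdint>

  uintptr_t HandlerEntry(uintptr_t code_object, const intptr_t* handler_table,
                         unsigned state, int kind_width, int smi_shift,
                         int code_header_size, int heap_object_tag) {
    unsigned index = state >> kind_width;                  // handler index
    intptr_t offset = handler_table[index] >> smi_shift;   // SmiUntag
    uintptr_t code_start = code_object + code_header_size - heap_object_tag;
    return code_start + offset;                            // jump target
  }
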
1454 | 1339 |
1455 | 1340 |
1456 void MacroAssembler::Throw(Register value) { | 1341 void MacroAssembler::Throw(Register value) { |
1457 // Adjust this code if not the case. | 1342 // Adjust this code if not the case. |
1458 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 1343 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
1459 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 1344 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
1460 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 1345 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
1461 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 1346 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
1462 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 1347 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
1463 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 1348 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
| 1349 Label skip; |
1464 | 1350 |
1465 // The exception is expected in r0. | 1351 // The exception is expected in r3. |
1466 if (!value.is(r0)) { | 1352 if (!value.is(r3)) { |
1467 mov(r0, value); | 1353 mr(r3, value); |
1468 } | 1354 } |
1469 // Drop the stack pointer to the top of the top handler. | 1355 // Drop the stack pointer to the top of the top handler. |
1470 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 1356 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
1471 ldr(sp, MemOperand(r3)); | 1357 LoadP(sp, MemOperand(r6)); |
1472 // Restore the next handler. | 1358 // Restore the next handler. |
1473 pop(r2); | 1359 pop(r5); |
1474 str(r2, MemOperand(r3)); | 1360 StoreP(r5, MemOperand(r6)); |
1475 | 1361 |
1476 // Get the code object (r1) and state (r2). Restore the context and frame | 1362 // Get the code object (r4) and state (r5). Restore the context and frame |
1477 // pointer. | 1363 // pointer. |
1478 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit()); | 1364 pop(r4); |
| 1365 pop(r5); |
| 1366 pop(cp); |
| 1367 pop(fp); |
1479 | 1368 |
1480 // If the handler is a JS frame, restore the context to the frame. | 1369 // If the handler is a JS frame, restore the context to the frame. |
1481 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp | 1370 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp |
1482 // or cp. | 1371 // or cp. |
1483 tst(cp, cp); | 1372 cmpi(cp, Operand::Zero()); |
1484 str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | 1373 beq(&skip); |
| 1374 StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 1375 bind(&skip); |
1485 | 1376 |
1486 JumpToHandlerEntry(); | 1377 JumpToHandlerEntry(); |
1487 } | 1378 } |
1488 | 1379 |
1489 | 1380 |
1490 void MacroAssembler::ThrowUncatchable(Register value) { | 1381 void MacroAssembler::ThrowUncatchable(Register value) { |
1491 // Adjust this code if not the case. | 1382 // Adjust this code if not the case. |
1492 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 1383 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
1493 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 1384 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
1494 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 1385 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
1495 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 1386 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
1496 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 1387 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
1497 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 1388 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
1498 | 1389 |
1499 // The exception is expected in r0. | 1390 // The exception is expected in r3. |
1500 if (!value.is(r0)) { | 1391 if (!value.is(r3)) { |
1501 mov(r0, value); | 1392 mr(r3, value); |
1502 } | 1393 } |
1503 // Drop the stack pointer to the top of the top stack handler. | 1394 // Drop the stack pointer to the top of the top stack handler. |
1504 mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 1395 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
1505 ldr(sp, MemOperand(r3)); | 1396 LoadP(sp, MemOperand(r6)); |
1506 | 1397 |
1507 // Unwind the handlers until the ENTRY handler is found. | 1398 // Unwind the handlers until the ENTRY handler is found. |
1508 Label fetch_next, check_kind; | 1399 Label fetch_next, check_kind; |
1509 jmp(&check_kind); | 1400 b(&check_kind); |
1510 bind(&fetch_next); | 1401 bind(&fetch_next); |
1511 ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); | 1402 LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); |
1512 | 1403 |
1513 bind(&check_kind); | 1404 bind(&check_kind); |
1514 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); | 1405 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); |
1515 ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset)); | 1406 LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset)); |
1516 tst(r2, Operand(StackHandler::KindField::kMask)); | 1407 andi(r0, r5, Operand(StackHandler::KindField::kMask)); |
1517 b(ne, &fetch_next); | 1408 bne(&fetch_next, cr0); |
1518 | 1409 |
1519 // Set the top handler address to next handler past the top ENTRY handler. | 1410 // Set the top handler address to next handler past the top ENTRY handler. |
1520 pop(r2); | 1411 pop(r5); |
1521 str(r2, MemOperand(r3)); | 1412 StoreP(r5, MemOperand(r6)); |
1522 // Get the code object (r1) and state (r2). Clear the context and frame | 1413 // Get the code object (r4) and state (r5). Clear the context and frame |
1523 // pointer (0 was saved in the handler). | 1414 // pointer (0 was saved in the handler). |
1524 ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit()); | 1415 pop(r4); |
| 1416 pop(r5); |
| 1417 pop(cp); |
| 1418 pop(fp); |
1525 | 1419 |
1526 JumpToHandlerEntry(); | 1420 JumpToHandlerEntry(); |
1527 } | 1421 } |
1528 | 1422 |
1529 | 1423 |
1530 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 1424 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
1531 Register scratch, | 1425 Register scratch, |
1532 Label* miss) { | 1426 Label* miss) { |
1533 Label same_contexts; | 1427 Label same_contexts; |
1534 | 1428 |
1535 ASSERT(!holder_reg.is(scratch)); | 1429 ASSERT(!holder_reg.is(scratch)); |
1536 ASSERT(!holder_reg.is(ip)); | 1430 ASSERT(!holder_reg.is(ip)); |
1537 ASSERT(!scratch.is(ip)); | 1431 ASSERT(!scratch.is(ip)); |
1538 | 1432 |
1539 // Load current lexical context from the stack frame. | 1433 // Load current lexical context from the stack frame. |
1540 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1434 LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
1541 // In debug mode, make sure the lexical context is set. | 1435 // In debug mode, make sure the lexical context is set. |
1542 #ifdef DEBUG | 1436 #ifdef DEBUG |
1543 cmp(scratch, Operand::Zero()); | 1437 cmpi(scratch, Operand::Zero()); |
1544 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | 1438 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); |
1545 #endif | 1439 #endif |
1546 | 1440 |
1547 // Load the native context of the current context. | 1441 // Load the native context of the current context. |
1548 int offset = | 1442 int offset = |
1549 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; | 1443 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
1550 ldr(scratch, FieldMemOperand(scratch, offset)); | 1444 LoadP(scratch, FieldMemOperand(scratch, offset)); |
1551 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 1445 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
1552 | 1446 |
1553 // Check the context is a native context. | 1447 // Check the context is a native context. |
1554 if (emit_debug_code()) { | 1448 if (emit_debug_code()) { |
1555 // Cannot use ip as a temporary in this verification code. Due to the fact | 1449 // Cannot use ip as a temporary in this verification code. Due to the fact |
1556 // that ip is clobbered as part of cmp with an object Operand. | 1450 // that ip is clobbered as part of cmp with an object Operand. |
1557 push(holder_reg); // Temporarily save holder on the stack. | 1451 push(holder_reg); // Temporarily save holder on the stack. |
1558 // Read the first word and compare to the native_context_map. | 1452 // Read the first word and compare to the native_context_map. |
1559 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 1453 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
1560 LoadRoot(ip, Heap::kNativeContextMapRootIndex); | 1454 LoadRoot(ip, Heap::kNativeContextMapRootIndex); |
1561 cmp(holder_reg, ip); | 1455 cmp(holder_reg, ip); |
1562 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); | 1456 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); |
1563 pop(holder_reg); // Restore holder. | 1457 pop(holder_reg); // Restore holder. |
1564 } | 1458 } |
1565 | 1459 |
1566 // Check if both contexts are the same. | 1460 // Check if both contexts are the same. |
1567 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 1461 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); |
1568 cmp(scratch, Operand(ip)); | 1462 cmp(scratch, ip); |
1569 b(eq, &same_contexts); | 1463 beq(&same_contexts); |
1570 | 1464 |
1571 // Check the context is a native context. | 1465 // Check the context is a native context. |
1572 if (emit_debug_code()) { | 1466 if (emit_debug_code()) { |
1573 // Cannot use ip as a temporary in this verification code. Due to the fact | 1467 // Cannot use ip as a temporary in this verification code. Due to the fact |
1574 // that ip is clobbered as part of cmp with an object Operand. | 1468 // that ip is clobbered as part of cmp with an object Operand. |
1575 push(holder_reg); // Temporarily save holder on the stack. | 1469 push(holder_reg); // Temporarily save holder on the stack. |
1576 mov(holder_reg, ip); // Move ip to its holding place. | 1470 mr(holder_reg, ip); // Move ip to its holding place. |
1577 LoadRoot(ip, Heap::kNullValueRootIndex); | 1471 LoadRoot(ip, Heap::kNullValueRootIndex); |
1578 cmp(holder_reg, ip); | 1472 cmp(holder_reg, ip); |
1579 Check(ne, kJSGlobalProxyContextShouldNotBeNull); | 1473 Check(ne, kJSGlobalProxyContextShouldNotBeNull); |
1580 | 1474 |
1581 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); | 1475 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); |
1582 LoadRoot(ip, Heap::kNativeContextMapRootIndex); | 1476 LoadRoot(ip, Heap::kNativeContextMapRootIndex); |
1583 cmp(holder_reg, ip); | 1477 cmp(holder_reg, ip); |
1584 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); | 1478 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); |
1585 // Restore ip is not needed. ip is reloaded below. | 1479 // Restore ip is not needed. ip is reloaded below. |
1586 pop(holder_reg); // Restore holder. | 1480 pop(holder_reg); // Restore holder. |
1587 // Restore ip to holder's context. | 1481 // Restore ip to holder's context. |
1588 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 1482 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); |
1589 } | 1483 } |
1590 | 1484 |
1591 // Check that the security token in the calling global object is | 1485 // Check that the security token in the calling global object is |
1592 // compatible with the security token in the receiving global | 1486 // compatible with the security token in the receiving global |
1593 // object. | 1487 // object. |
1594 int token_offset = Context::kHeaderSize + | 1488 int token_offset = Context::kHeaderSize + |
1595 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 1489 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
1596 | 1490 |
1597 ldr(scratch, FieldMemOperand(scratch, token_offset)); | 1491 LoadP(scratch, FieldMemOperand(scratch, token_offset)); |
1598 ldr(ip, FieldMemOperand(ip, token_offset)); | 1492 LoadP(ip, FieldMemOperand(ip, token_offset)); |
1599 cmp(scratch, Operand(ip)); | 1493 cmp(scratch, ip); |
1600 b(ne, miss); | 1494 bne(miss); |
1601 | 1495 |
1602 bind(&same_contexts); | 1496 bind(&same_contexts); |
1603 } | 1497 } |
1604 | 1498 |
1605 | 1499 |
1606 // Compute the hash code from the untagged key. This must be kept in sync with | 1500 // Compute the hash code from the untagged key. This must be kept in sync with |
1607 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in | 1501 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in |
1608 // code-stub-hydrogen.cc | 1502 // code-stub-hydrogen.cc |
1609 void MacroAssembler::GetNumberHash(Register t0, Register scratch) { | 1503 void MacroAssembler::GetNumberHash(Register t0, Register scratch) { |
1610 // First of all we assign the hash seed to scratch. | 1504 // First of all we assign the hash seed to scratch. |
1611 LoadRoot(scratch, Heap::kHashSeedRootIndex); | 1505 LoadRoot(scratch, Heap::kHashSeedRootIndex); |
1612 SmiUntag(scratch); | 1506 SmiUntag(scratch); |
1613 | 1507 |
1614 // Xor original key with a seed. | 1508 // Xor original key with a seed. |
1615 eor(t0, t0, Operand(scratch)); | 1509 xor_(t0, t0, scratch); |
1616 | 1510 |
1617 // Compute the hash code from the untagged key. This must be kept in sync | 1511 // Compute the hash code from the untagged key. This must be kept in sync |
1618 // with ComputeIntegerHash in utils.h. | 1512 // with ComputeIntegerHash in utils.h. |
1619 // | 1513 // |
1620 // hash = ~hash + (hash << 15); | 1514 // hash = ~hash + (hash << 15); |
1621 mvn(scratch, Operand(t0)); | 1515 notx(scratch, t0); |
1622 add(t0, scratch, Operand(t0, LSL, 15)); | 1516 slwi(t0, t0, Operand(15)); |
| 1517 add(t0, scratch, t0); |
1623 // hash = hash ^ (hash >> 12); | 1518 // hash = hash ^ (hash >> 12); |
1624 eor(t0, t0, Operand(t0, LSR, 12)); | 1519 srwi(scratch, t0, Operand(12)); |
| 1520 xor_(t0, t0, scratch); |
1625 // hash = hash + (hash << 2); | 1521 // hash = hash + (hash << 2); |
1626 add(t0, t0, Operand(t0, LSL, 2)); | 1522 slwi(scratch, t0, Operand(2)); |
| 1523 add(t0, t0, scratch); |
1627 // hash = hash ^ (hash >> 4); | 1524 // hash = hash ^ (hash >> 4); |
1628 eor(t0, t0, Operand(t0, LSR, 4)); | 1525 srwi(scratch, t0, Operand(4)); |
| 1526 xor_(t0, t0, scratch); |
1629 // hash = hash * 2057; | 1527 // hash = hash * 2057; |
1630 mov(scratch, Operand(t0, LSL, 11)); | 1528 mr(r0, t0); |
1631 add(t0, t0, Operand(t0, LSL, 3)); | 1529 slwi(scratch, t0, Operand(3)); |
| 1530 add(t0, t0, scratch); |
| 1531 slwi(scratch, r0, Operand(11)); |
1632 add(t0, t0, scratch); | 1532 add(t0, t0, scratch); |
1633 // hash = hash ^ (hash >> 16); | 1533 // hash = hash ^ (hash >> 16); |
1634 eor(t0, t0, Operand(t0, LSR, 16)); | 1534 srwi(scratch, t0, Operand(16)); |
| 1535 xor_(t0, t0, scratch); |
1635 } | 1536 } |
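
The emitted sequence implements the integer hash spelled out in the comments (kept in sync with ComputeIntegerHash in utils.h). A plain C++ rendering of the same steps, for reference only:

  #include <cstdint>

  uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
    uint32_t hash = key ^ seed;         // xor original key with the seed
    hash = ~hash + (hash << 15);
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;                 // emitted above as hash*9 + hash*2048
    hash = hash ^ (hash >> 16);
    return hash;
  }
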
1636 | 1537 |
1637 | 1538 |
1638 void MacroAssembler::LoadFromNumberDictionary(Label* miss, | 1539 void MacroAssembler::LoadFromNumberDictionary(Label* miss, |
1639 Register elements, | 1540 Register elements, |
1640 Register key, | 1541 Register key, |
1641 Register result, | 1542 Register result, |
1642 Register t0, | 1543 Register t0, |
1643 Register t1, | 1544 Register t1, |
1644 Register t2) { | 1545 Register t2) { |
(...skipping 15 matching lines...)
(...skipping 15 matching lines...)
1660 // t0 - holds the untagged key on entry and holds the hash once computed. | 1561 // t0 - holds the untagged key on entry and holds the hash once computed. |
1661 // | 1562 // |
1662 // t1 - used to hold the capacity mask of the dictionary | 1563 // t1 - used to hold the capacity mask of the dictionary |
1663 // | 1564 // |
1664 // t2 - used for the index into the dictionary. | 1565 // t2 - used for the index into the dictionary. |
1665 Label done; | 1566 Label done; |
1666 | 1567 |
1667 GetNumberHash(t0, t1); | 1568 GetNumberHash(t0, t1); |
1668 | 1569 |
1669 // Compute the capacity mask. | 1570 // Compute the capacity mask. |
1670 ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); | 1571 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); |
1671 SmiUntag(t1); | 1572 SmiUntag(t1); |
1672 sub(t1, t1, Operand(1)); | 1573 subi(t1, t1, Operand(1)); |
1673 | 1574 |
1674 // Generate an unrolled loop that performs a few probes before giving up. | 1575 // Generate an unrolled loop that performs a few probes before giving up. |
1675 for (int i = 0; i < kNumberDictionaryProbes; i++) { | 1576 for (int i = 0; i < kNumberDictionaryProbes; i++) { |
1676 // Use t2 for index calculations and keep the hash intact in t0. | 1577 // Use t2 for index calculations and keep the hash intact in t0. |
1677 mov(t2, t0); | 1578 mr(t2, t0); |
1678 // Compute the masked index: (hash + i + i * i) & mask. | 1579 // Compute the masked index: (hash + i + i * i) & mask. |
1679 if (i > 0) { | 1580 if (i > 0) { |
1680 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); | 1581 addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); |
1681 } | 1582 } |
1682 and_(t2, t2, Operand(t1)); | 1583 and_(t2, t2, t1); |
1683 | 1584 |
1684 // Scale the index by multiplying by the element size. | 1585 // Scale the index by multiplying by the element size. |
1685 ASSERT(SeededNumberDictionary::kEntrySize == 3); | 1586 ASSERT(SeededNumberDictionary::kEntrySize == 3); |
1686 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 | 1587 slwi(ip, t2, Operand(1)); |
| 1588 add(t2, t2, ip); // t2 = t2 * 3 |
1687 | 1589 |
1688 // Check if the key is identical to the name. | 1590 // Check if the key is identical to the name. |
1689 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2)); | 1591 slwi(t2, t2, Operand(kPointerSizeLog2)); |
1690 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); | 1592 add(t2, elements, t2); |
1691 cmp(key, Operand(ip)); | 1593 LoadP(ip, |
| 1594 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); |
| 1595 cmp(key, ip); |
1692 if (i != kNumberDictionaryProbes - 1) { | 1596 if (i != kNumberDictionaryProbes - 1) { |
1693 b(eq, &done); | 1597 beq(&done); |
1694 } else { | 1598 } else { |
1695 b(ne, miss); | 1599 bne(miss); |
1696 } | 1600 } |
1697 } | 1601 } |
1698 | 1602 |
1699 bind(&done); | 1603 bind(&done); |
1700 // Check that the value is a normal property. | 1604 // Check that the value is a normal property. |
1701 // t2: elements + (index * kPointerSize) | 1605 // t2: elements + (index * kPointerSize) |
1702 const int kDetailsOffset = | 1606 const int kDetailsOffset = |
1703 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; | 1607 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; |
1704 ldr(t1, FieldMemOperand(t2, kDetailsOffset)); | 1608 LoadP(t1, FieldMemOperand(t2, kDetailsOffset)); |
1705 tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask))); | 1609 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask)); |
1706 b(ne, miss); | 1610 and_(r0, t1, ip, SetRC); |
| 1611 bne(miss, cr0); |
1707 | 1612 |
1708 // Get the value at the masked, scaled index and return. | 1613 // Get the value at the masked, scaled index and return. |
1709 const int kValueOffset = | 1614 const int kValueOffset = |
1710 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 1615 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
1711 ldr(result, FieldMemOperand(t2, kValueOffset)); | 1616 LoadP(result, FieldMemOperand(t2, kValueOffset)); |
1712 } | 1617 } |
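
For reference, the unrolled loop above walks the first kNumberDictionaryProbes slots of the probe sequence, and each dictionary entry spans three pointers, as asserted. A minimal sketch of the index computation (ProbeSlot is illustrative, not a V8 helper):

  #include <cstdint>

  // The comment above describes the sequence as (hash + i + i*i) & mask; the
  // generated code adds SeededNumberDictionary::GetProbeOffset(i) for probe i.
  uint32_t ProbeSlot(uint32_t hash, uint32_t probe_offset, uint32_t capacity) {
    uint32_t mask = capacity - 1;                  // capacity is a power of two
    uint32_t index = (hash + probe_offset) & mask;
    return index * 3;                              // kEntrySize == 3 words
  }
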
1713 | 1618 |
1714 | 1619 |
1715 void MacroAssembler::Allocate(int object_size, | 1620 void MacroAssembler::Allocate(int object_size, |
1716 Register result, | 1621 Register result, |
1717 Register scratch1, | 1622 Register scratch1, |
1718 Register scratch2, | 1623 Register scratch2, |
1719 Label* gc_required, | 1624 Label* gc_required, |
1720 AllocationFlags flags) { | 1625 AllocationFlags flags) { |
1721 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | 1626 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); |
1722 if (!FLAG_inline_new) { | 1627 if (!FLAG_inline_new) { |
1723 if (emit_debug_code()) { | 1628 if (emit_debug_code()) { |
1724 // Trash the registers to simulate an allocation failure. | 1629 // Trash the registers to simulate an allocation failure. |
1725 mov(result, Operand(0x7091)); | 1630 li(result, Operand(0x7091)); |
1726 mov(scratch1, Operand(0x7191)); | 1631 li(scratch1, Operand(0x7191)); |
1727 mov(scratch2, Operand(0x7291)); | 1632 li(scratch2, Operand(0x7291)); |
1728 } | 1633 } |
1729 jmp(gc_required); | 1634 b(gc_required); |
1730 return; | 1635 return; |
1731 } | 1636 } |
1732 | 1637 |
1733 ASSERT(!result.is(scratch1)); | 1638 ASSERT(!result.is(scratch1)); |
1734 ASSERT(!result.is(scratch2)); | 1639 ASSERT(!result.is(scratch2)); |
1735 ASSERT(!scratch1.is(scratch2)); | 1640 ASSERT(!scratch1.is(scratch2)); |
1736 ASSERT(!scratch1.is(ip)); | 1641 ASSERT(!scratch1.is(ip)); |
1737 ASSERT(!scratch2.is(ip)); | 1642 ASSERT(!scratch2.is(ip)); |
1738 | 1643 |
1739 // Make object size into bytes. | 1644 // Make object size into bytes. |
1740 if ((flags & SIZE_IN_WORDS) != 0) { | 1645 if ((flags & SIZE_IN_WORDS) != 0) { |
1741 object_size *= kPointerSize; | 1646 object_size *= kPointerSize; |
1742 } | 1647 } |
1743 ASSERT_EQ(0, object_size & kObjectAlignmentMask); | 1648 ASSERT_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask)); |
1744 | 1649 |
1745 // Check relative positions of allocation top and limit addresses. | 1650 // Check relative positions of allocation top and limit addresses. |
1746 // The values must be adjacent in memory to allow the use of LDM. | |
1747 // Also, assert that the registers are numbered such that the values | |
1748 // are loaded in the correct order. | |
1749 ExternalReference allocation_top = | 1651 ExternalReference allocation_top = |
1750 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 1652 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
1751 ExternalReference allocation_limit = | 1653 ExternalReference allocation_limit = |
1752 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1654 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1753 | 1655 |
1754 intptr_t top = | 1656 intptr_t top = |
1755 reinterpret_cast<intptr_t>(allocation_top.address()); | 1657 reinterpret_cast<intptr_t>(allocation_top.address()); |
1756 intptr_t limit = | 1658 intptr_t limit = |
1757 reinterpret_cast<intptr_t>(allocation_limit.address()); | 1659 reinterpret_cast<intptr_t>(allocation_limit.address()); |
1758 ASSERT((limit - top) == kPointerSize); | 1660 ASSERT((limit - top) == kPointerSize); |
1759 ASSERT(result.code() < ip.code()); | |
1760 | 1661 |
1761 // Set up allocation top address register. | 1662 // Set up allocation top address register. |
1762 Register topaddr = scratch1; | 1663 Register topaddr = scratch1; |
1763 mov(topaddr, Operand(allocation_top)); | 1664 mov(topaddr, Operand(allocation_top)); |
1764 | 1665 |
1765 // This code stores a temporary value in ip. This is OK, as the code below | 1666 // This code stores a temporary value in ip. This is OK, as the code below |
1766 // does not need ip for implicit literal generation. | 1667 // does not need ip for implicit literal generation. |
1767 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1668 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
1768 // Load allocation top into result and allocation limit into ip. | 1669 // Load allocation top into result and allocation limit into ip. |
1769 ldm(ia, topaddr, result.bit() | ip.bit()); | 1670 LoadP(result, MemOperand(topaddr)); |
| 1671 LoadP(ip, MemOperand(topaddr, kPointerSize)); |
1770 } else { | 1672 } else { |
1771 if (emit_debug_code()) { | 1673 if (emit_debug_code()) { |
1772 // Assert that result actually contains top on entry. ip is used | 1674 // Assert that result actually contains top on entry. ip is used |
1773 // immediately below so this use of ip does not cause difference with | 1675 // immediately below so this use of ip does not cause difference with |
1774 // respect to register content between debug and release mode. | 1676 // respect to register content between debug and release mode. |
1775 ldr(ip, MemOperand(topaddr)); | 1677 LoadP(ip, MemOperand(topaddr)); |
1776 cmp(result, ip); | 1678 cmp(result, ip); |
1777 Check(eq, kUnexpectedAllocationTop); | 1679 Check(eq, kUnexpectedAllocationTop); |
1778 } | 1680 } |
1779 // Load allocation limit into ip. Result already contains allocation top. | 1681 // Load allocation limit into ip. Result already contains allocation top. |
1780 ldr(ip, MemOperand(topaddr, limit - top)); | 1682 LoadP(ip, MemOperand(topaddr, limit - top), r0); |
1781 } | 1683 } |
1782 | 1684 |
1783 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1685 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1784 // Align the next allocation. Storing the filler map without checking top is | 1686 // Align the next allocation. Storing the filler map without checking top is |
1785 // safe in new-space because the limit of the heap is aligned there. | 1687 // safe in new-space because the limit of the heap is aligned there. |
1786 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1688 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
| 1689 #if V8_TARGET_ARCH_PPC64 |
| 1690 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 1691 #else |
1787 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1692 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
1788 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); | 1693 andi(scratch2, result, Operand(kDoubleAlignmentMask)); |
1789 Label aligned; | 1694 Label aligned; |
1790 b(eq, &aligned); | 1695 beq(&aligned, cr0); |
1791 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1696 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1792 cmp(result, Operand(ip)); | 1697 cmpl(result, ip); |
1793 b(hs, gc_required); | 1698 bge(gc_required); |
1794 } | 1699 } |
1795 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); | 1700 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); |
1796 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); | 1701 stw(scratch2, MemOperand(result)); |
| 1702 addi(result, result, Operand(kDoubleSize / 2)); |
1797 bind(&aligned); | 1703 bind(&aligned); |
| 1704 #endif |
1798 } | 1705 } |
1799 | 1706 |
1800 // Calculate new top and bail out if new space is exhausted. Use result | 1707 // Calculate new top and bail out if new space is exhausted. Use result |
1801 // to calculate the new top. We must preserve the ip register at this | 1708 // to calculate the new top. |
1802 // point, so we cannot just use add(). | 1709 li(r0, Operand(-1)); |
1803 ASSERT(object_size > 0); | 1710 if (is_int16(object_size)) { |
1804 Register source = result; | 1711 addic(scratch2, result, Operand(object_size)); |
1805 Condition cond = al; | 1712 } else { |
1806 int shift = 0; | 1713 mov(scratch2, Operand(object_size)); |
1807 while (object_size != 0) { | 1714 addc(scratch2, result, scratch2); |
1808 if (((object_size >> shift) & 0x03) == 0) { | |
1809 shift += 2; | |
1810 } else { | |
1811 int bits = object_size & (0xff << shift); | |
1812 object_size -= bits; | |
1813 shift += 8; | |
1814 Operand bits_operand(bits); | |
1815 ASSERT(bits_operand.instructions_required(this) == 1); | |
1816 add(scratch2, source, bits_operand, SetCC, cond); | |
1817 source = scratch2; | |
1818 cond = cc; | |
1819 } | |
1820 } | 1715 } |
1821 b(cs, gc_required); | 1716 addze(r0, r0, LeaveOE, SetRC); |
1822 cmp(scratch2, Operand(ip)); | 1717 beq(gc_required, cr0); |
1823 b(hi, gc_required); | 1718 cmpl(scratch2, ip); |
1824 str(scratch2, MemOperand(topaddr)); | 1719 bgt(gc_required); |
| 1720 StoreP(scratch2, MemOperand(topaddr)); |
1825 | 1721 |
1826 // Tag object if requested. | 1722 // Tag object if requested. |
1827 if ((flags & TAG_OBJECT) != 0) { | 1723 if ((flags & TAG_OBJECT) != 0) { |
1828 add(result, result, Operand(kHeapObjectTag)); | 1724 addi(result, result, Operand(kHeapObjectTag)); |
1829 } | 1725 } |
1830 } | 1726 } |
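For reference, the new-top computation above replaces ARM's shift-and-add loop with a carry test: r0 is preloaded with -1, the new top is formed with a carrying add (addic/addc), and addze folds the carry bit into r0, so r0 becomes zero exactly when the unsigned addition wrapped and the branch on cr0 takes the gc_required path. A rough self-contained C++ sketch of the same condition (function name is illustrative):

    #include <cstdint>

    // Equivalent of addic/addc + addze(r0, r0) + beq(gc_required, cr0):
    // allocation must bail out if computing the new top wraps around zero,
    // otherwise the following unsigned compare against the limit is meaningless.
    bool NewTopWraps(uintptr_t top, uintptr_t object_size) {
      uintptr_t new_top = top + object_size;  // carrying add
      return new_top < top;                   // carry out == unsigned wrap
    }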
1831 | 1727 |
1832 | 1728 |
1833 void MacroAssembler::Allocate(Register object_size, | 1729 void MacroAssembler::Allocate(Register object_size, |
1834 Register result, | 1730 Register result, |
1835 Register scratch1, | 1731 Register scratch1, |
1836 Register scratch2, | 1732 Register scratch2, |
1837 Label* gc_required, | 1733 Label* gc_required, |
1838 AllocationFlags flags) { | 1734 AllocationFlags flags) { |
1839 if (!FLAG_inline_new) { | 1735 if (!FLAG_inline_new) { |
1840 if (emit_debug_code()) { | 1736 if (emit_debug_code()) { |
1841 // Trash the registers to simulate an allocation failure. | 1737 // Trash the registers to simulate an allocation failure. |
1842 mov(result, Operand(0x7091)); | 1738 li(result, Operand(0x7091)); |
1843 mov(scratch1, Operand(0x7191)); | 1739 li(scratch1, Operand(0x7191)); |
1844 mov(scratch2, Operand(0x7291)); | 1740 li(scratch2, Operand(0x7291)); |
1845 } | 1741 } |
1846 jmp(gc_required); | 1742 b(gc_required); |
1847 return; | 1743 return; |
1848 } | 1744 } |
1849 | 1745 |
1850 // Assert that the register arguments are different and that none of | 1746 // Assert that the register arguments are different and that none of |
1851 // them are ip. ip is used explicitly in the code generated below. | 1747 // them are ip. ip is used explicitly in the code generated below. |
1852 ASSERT(!result.is(scratch1)); | 1748 ASSERT(!result.is(scratch1)); |
1853 ASSERT(!result.is(scratch2)); | 1749 ASSERT(!result.is(scratch2)); |
1854 ASSERT(!scratch1.is(scratch2)); | 1750 ASSERT(!scratch1.is(scratch2)); |
1855 ASSERT(!object_size.is(ip)); | 1751 ASSERT(!object_size.is(ip)); |
1856 ASSERT(!result.is(ip)); | 1752 ASSERT(!result.is(ip)); |
1857 ASSERT(!scratch1.is(ip)); | 1753 ASSERT(!scratch1.is(ip)); |
1858 ASSERT(!scratch2.is(ip)); | 1754 ASSERT(!scratch2.is(ip)); |
1859 | 1755 |
1860 // Check relative positions of allocation top and limit addresses. | 1756 // Check relative positions of allocation top and limit addresses. |
1861 // The values must be adjacent in memory to allow the use of LDM. | |
1862 // Also, assert that the registers are numbered such that the values | |
1863 // are loaded in the correct order. | |
1864 ExternalReference allocation_top = | 1757 ExternalReference allocation_top = |
1865 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 1758 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
1866 ExternalReference allocation_limit = | 1759 ExternalReference allocation_limit = |
1867 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1760 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1868 intptr_t top = | 1761 intptr_t top = |
1869 reinterpret_cast<intptr_t>(allocation_top.address()); | 1762 reinterpret_cast<intptr_t>(allocation_top.address()); |
1870 intptr_t limit = | 1763 intptr_t limit = |
1871 reinterpret_cast<intptr_t>(allocation_limit.address()); | 1764 reinterpret_cast<intptr_t>(allocation_limit.address()); |
1872 ASSERT((limit - top) == kPointerSize); | 1765 ASSERT((limit - top) == kPointerSize); |
1873 ASSERT(result.code() < ip.code()); | |
1874 | 1766 |
1875 // Set up allocation top address. | 1767 // Set up allocation top address. |
1876 Register topaddr = scratch1; | 1768 Register topaddr = scratch1; |
1877 mov(topaddr, Operand(allocation_top)); | 1769 mov(topaddr, Operand(allocation_top)); |
1878 | 1770 |
1879 // This code stores a temporary value in ip. This is OK, as the code below | 1771 // This code stores a temporary value in ip. This is OK, as the code below |
1880 // does not need ip for implicit literal generation. | 1772 // does not need ip for implicit literal generation. |
1881 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1773 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
1882 // Load allocation top into result and allocation limit into ip. | 1774 // Load allocation top into result and allocation limit into ip. |
1883 ldm(ia, topaddr, result.bit() | ip.bit()); | 1775 LoadP(result, MemOperand(topaddr)); |
| 1776 LoadP(ip, MemOperand(topaddr, kPointerSize)); |
1884 } else { | 1777 } else { |
1885 if (emit_debug_code()) { | 1778 if (emit_debug_code()) { |
1886 // Assert that result actually contains top on entry. ip is used | 1779 // Assert that result actually contains top on entry. ip is used |
1887 // immediately below so this use of ip does not cause difference with | 1780 // immediately below so this use of ip does not cause difference with |
1888 // respect to register content between debug and release mode. | 1781 // respect to register content between debug and release mode. |
1889 ldr(ip, MemOperand(topaddr)); | 1782 LoadP(ip, MemOperand(topaddr)); |
1890 cmp(result, ip); | 1783 cmp(result, ip); |
1891 Check(eq, kUnexpectedAllocationTop); | 1784 Check(eq, kUnexpectedAllocationTop); |
1892 } | 1785 } |
1893 // Load allocation limit into ip. Result already contains allocation top. | 1786 // Load allocation limit into ip. Result already contains allocation top. |
1894 ldr(ip, MemOperand(topaddr, limit - top)); | 1787 LoadP(ip, MemOperand(topaddr, limit - top)); |
1895 } | 1788 } |
1896 | 1789 |
1897 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1790 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1898 // Align the next allocation. Storing the filler map without checking top is | 1791 // Align the next allocation. Storing the filler map without checking top is |
1899 // safe in new-space because the limit of the heap is aligned there. | 1792 // safe in new-space because the limit of the heap is aligned there. |
1900 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1793 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
1901 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1794 #if V8_TARGET_ARCH_PPC64 |
1902 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); | 1795 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
| 1796 #else |
| 1797 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
| 1798 andi(scratch2, result, Operand(kDoubleAlignmentMask)); |
1903 Label aligned; | 1799 Label aligned; |
1904 b(eq, &aligned); | 1800 beq(&aligned, cr0); |
1905 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1801 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1906 cmp(result, Operand(ip)); | 1802 cmpl(result, ip); |
1907 b(hs, gc_required); | 1803 bge(gc_required); |
1908 } | 1804 } |
1909 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); | 1805 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); |
1910 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); | 1806 stw(scratch2, MemOperand(result)); |
| 1807 addi(result, result, Operand(kDoubleSize / 2)); |
1911 bind(&aligned); | 1808 bind(&aligned); |
| 1809 #endif |
1912 } | 1810 } |
1913 | 1811 |
1914 // Calculate new top and bail out if new space is exhausted. Use result | 1812 // Calculate new top and bail out if new space is exhausted. Use result |
1915 // to calculate the new top. Object size may be in words so a shift is | 1813 // to calculate the new top. Object size may be in words so a shift is |
1916 // required to get the number of bytes. | 1814 // required to get the number of bytes. |
| 1815 li(r0, Operand(-1)); |
1917 if ((flags & SIZE_IN_WORDS) != 0) { | 1816 if ((flags & SIZE_IN_WORDS) != 0) { |
1918 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC); | 1817 ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2)); |
| 1818 addc(scratch2, result, scratch2); |
1919 } else { | 1819 } else { |
1920 add(scratch2, result, Operand(object_size), SetCC); | 1820 addc(scratch2, result, object_size); |
1921 } | 1821 } |
1922 b(cs, gc_required); | 1822 addze(r0, r0, LeaveOE, SetRC); |
1923 cmp(scratch2, Operand(ip)); | 1823 beq(gc_required, cr0); |
1924 b(hi, gc_required); | 1824 cmpl(scratch2, ip); |
| 1825 bgt(gc_required); |
1925 | 1826 |
1926 // Update allocation top. result temporarily holds the new top. | 1827 // Update allocation top. result temporarily holds the new top. |
1927 if (emit_debug_code()) { | 1828 if (emit_debug_code()) { |
1928 tst(scratch2, Operand(kObjectAlignmentMask)); | 1829 andi(r0, scratch2, Operand(kObjectAlignmentMask)); |
1929 Check(eq, kUnalignedAllocationInNewSpace); | 1830 Check(eq, kUnalignedAllocationInNewSpace, cr0); |
1930 } | 1831 } |
1931 str(scratch2, MemOperand(topaddr)); | 1832 StoreP(scratch2, MemOperand(topaddr)); |
1932 | 1833 |
1933 // Tag object if requested. | 1834 // Tag object if requested. |
1934 if ((flags & TAG_OBJECT) != 0) { | 1835 if ((flags & TAG_OBJECT) != 0) { |
1935 add(result, result, Operand(kHeapObjectTag)); | 1836 addi(result, result, Operand(kHeapObjectTag)); |
1936 } | 1837 } |
1937 } | 1838 } |
1938 | 1839 |
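On 32-bit targets the DOUBLE_ALIGNMENT path in both Allocate variants pads an unaligned allocation top by one pointer: if result is not 8-byte aligned, the one-pointer filler map is stored at the current top and result is advanced by kDoubleSize / 2 (the stw + addi pair that replaces ARM's post-indexed str). A hedged sketch of that padding step, assuming a 32-bit pointer size (helper name and constants are illustrative):

    #include <cstdint>

    // Pads 'top' to 8-byte alignment by planting a one-word filler object,
    // mirroring stw(one_pointer_filler_map) + addi(result, kDoubleSize / 2).
    uintptr_t AlignForDouble(uintptr_t top, uintptr_t one_pointer_filler_map) {
      const uintptr_t kDoubleAlignmentMask = 7;
      if (top & kDoubleAlignmentMask) {
        // The filler keeps the heap iterable over the padding word.
        *reinterpret_cast<uintptr_t*>(top) = one_pointer_filler_map;
        top += 4;  // kDoubleSize / 2 on a 32-bit target
      }
      return top;
    }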
1939 | 1840 |
1940 void MacroAssembler::UndoAllocationInNewSpace(Register object, | 1841 void MacroAssembler::UndoAllocationInNewSpace(Register object, |
1941 Register scratch) { | 1842 Register scratch) { |
1942 ExternalReference new_space_allocation_top = | 1843 ExternalReference new_space_allocation_top = |
1943 ExternalReference::new_space_allocation_top_address(isolate()); | 1844 ExternalReference::new_space_allocation_top_address(isolate()); |
1944 | 1845 |
1945 // Make sure the object has no tag before resetting top. | 1846 // Make sure the object has no tag before resetting top. |
1946 and_(object, object, Operand(~kHeapObjectTagMask)); | 1847 mov(r0, Operand(~kHeapObjectTagMask)); |
| 1848 and_(object, object, r0); |
 | 1849 // ~kHeapObjectTagMask does not fit andi's 16-bit immediate, so use r0. |

1947 #ifdef DEBUG | 1850 #ifdef DEBUG |
1948 // Check that the object un-allocated is below the current top. | 1851 // Check that the object un-allocated is below the current top. |
1949 mov(scratch, Operand(new_space_allocation_top)); | 1852 mov(scratch, Operand(new_space_allocation_top)); |
1950 ldr(scratch, MemOperand(scratch)); | 1853 LoadP(scratch, MemOperand(scratch)); |
1951 cmp(object, scratch); | 1854 cmp(object, scratch); |
1952 Check(lt, kUndoAllocationOfNonAllocatedMemory); | 1855 Check(lt, kUndoAllocationOfNonAllocatedMemory); |
1953 #endif | 1856 #endif |
1954 // Write the address of the object to un-allocate as the current top. | 1857 // Write the address of the object to un-allocate as the current top. |
1955 mov(scratch, Operand(new_space_allocation_top)); | 1858 mov(scratch, Operand(new_space_allocation_top)); |
1956 str(object, MemOperand(scratch)); | 1859 StoreP(object, MemOperand(scratch)); |
1957 } | 1860 } |
1958 | 1861 |
1959 | 1862 |
1960 void MacroAssembler::AllocateTwoByteString(Register result, | 1863 void MacroAssembler::AllocateTwoByteString(Register result, |
1961 Register length, | 1864 Register length, |
1962 Register scratch1, | 1865 Register scratch1, |
1963 Register scratch2, | 1866 Register scratch2, |
1964 Register scratch3, | 1867 Register scratch3, |
1965 Label* gc_required) { | 1868 Label* gc_required) { |
1966 // Calculate the number of bytes needed for the characters in the string while | 1869 // Calculate the number of bytes needed for the characters in the string while |
1967 // observing object alignment. | 1870 // observing object alignment. |
1968 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1871 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
1969 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars. | 1872 slwi(scratch1, length, Operand(1)); // Length in bytes, not chars. |
1970 add(scratch1, scratch1, | 1873 addi(scratch1, scratch1, |
1971 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); | 1874 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); |
1972 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); | 1875 mov(r0, Operand(~kObjectAlignmentMask)); |
| 1876 and_(scratch1, scratch1, r0); |
1973 | 1877 |
1974 // Allocate two-byte string in new space. | 1878 // Allocate two-byte string in new space. |
1975 Allocate(scratch1, | 1879 Allocate(scratch1, |
1976 result, | 1880 result, |
1977 scratch2, | 1881 scratch2, |
1978 scratch3, | 1882 scratch3, |
1979 gc_required, | 1883 gc_required, |
1980 TAG_OBJECT); | 1884 TAG_OBJECT); |
1981 | 1885 |
1982 // Set the map, length and hash field. | 1886 // Set the map, length and hash field. |
1983 InitializeNewString(result, | 1887 InitializeNewString(result, |
1984 length, | 1888 length, |
1985 Heap::kStringMapRootIndex, | 1889 Heap::kStringMapRootIndex, |
1986 scratch1, | 1890 scratch1, |
1987 scratch2); | 1891 scratch2); |
1988 } | 1892 } |
1989 | 1893 |
1990 | 1894 |
1991 void MacroAssembler::AllocateAsciiString(Register result, | 1895 void MacroAssembler::AllocateAsciiString(Register result, |
1992 Register length, | 1896 Register length, |
1993 Register scratch1, | 1897 Register scratch1, |
1994 Register scratch2, | 1898 Register scratch2, |
1995 Register scratch3, | 1899 Register scratch3, |
1996 Label* gc_required) { | 1900 Label* gc_required) { |
1997 // Calculate the number of bytes needed for the characters in the string while | 1901 // Calculate the number of bytes needed for the characters in the string while |
1998 // observing object alignment. | 1902 // observing object alignment. |
1999 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1903 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
2000 ASSERT(kCharSize == 1); | 1904 ASSERT(kCharSize == 1); |
2001 add(scratch1, length, | 1905 addi(scratch1, length, |
2002 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); | 1906 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); |
2003 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); | 1907 li(r0, Operand(~kObjectAlignmentMask)); |
| 1908 and_(scratch1, scratch1, r0); |
2004 | 1909 |
2005 // Allocate ASCII string in new space. | 1910 // Allocate ASCII string in new space. |
2006 Allocate(scratch1, | 1911 Allocate(scratch1, |
2007 result, | 1912 result, |
2008 scratch2, | 1913 scratch2, |
2009 scratch3, | 1914 scratch3, |
2010 gc_required, | 1915 gc_required, |
2011 TAG_OBJECT); | 1916 TAG_OBJECT); |
2012 | 1917 |
2013 // Set the map, length and hash field. | 1918 // Set the map, length and hash field. |
(...skipping 72 matching lines...)
2086 scratch2); | 1991 scratch2); |
2087 } | 1992 } |
2088 | 1993 |
2089 | 1994 |
2090 void MacroAssembler::CompareObjectType(Register object, | 1995 void MacroAssembler::CompareObjectType(Register object, |
2091 Register map, | 1996 Register map, |
2092 Register type_reg, | 1997 Register type_reg, |
2093 InstanceType type) { | 1998 InstanceType type) { |
2094 const Register temp = type_reg.is(no_reg) ? ip : type_reg; | 1999 const Register temp = type_reg.is(no_reg) ? ip : type_reg; |
2095 | 2000 |
2096 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 2001 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
2097 CompareInstanceType(map, temp, type); | 2002 CompareInstanceType(map, temp, type); |
2098 } | 2003 } |
2099 | 2004 |
2100 | 2005 |
2101 void MacroAssembler::CheckObjectTypeRange(Register object, | 2006 void MacroAssembler::CheckObjectTypeRange(Register object, |
2102 Register map, | 2007 Register map, |
2103 InstanceType min_type, | 2008 InstanceType min_type, |
2104 InstanceType max_type, | 2009 InstanceType max_type, |
2105 Label* false_label) { | 2010 Label* false_label) { |
2106 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); | 2011 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); |
2107 STATIC_ASSERT(LAST_TYPE < 256); | 2012 STATIC_ASSERT(LAST_TYPE < 256); |
2108 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 2013 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
2109 ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 2014 lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
2110 sub(ip, ip, Operand(min_type)); | 2015 subi(ip, ip, Operand(min_type)); |
2111 cmp(ip, Operand(max_type - min_type)); | 2016 cmpli(ip, Operand(max_type - min_type)); |
2112 b(hi, false_label); | 2017 bgt(false_label); |
2113 } | 2018 } |
2114 | 2019 |
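CheckObjectTypeRange above uses the standard unsigned range trick: subtract min_type, then do a single unsigned compare against (max_type - min_type). Types below the minimum wrap around to large unsigned values, so the one cmpli/bgt pair rejects both ends of the range. A small self-contained sketch of the same test (function name is illustrative):

    #include <cstdint>

    // True when type lies in [min_type, max_type]; values below min_type
    // wrap to large unsigned numbers and fail the single comparison.
    bool InstanceTypeInRange(uint32_t type, uint32_t min_type, uint32_t max_type) {
      return (type - min_type) <= (max_type - min_type);
    }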
2115 | 2020 |
2116 void MacroAssembler::CompareInstanceType(Register map, | 2021 void MacroAssembler::CompareInstanceType(Register map, |
2117 Register type_reg, | 2022 Register type_reg, |
2118 InstanceType type) { | 2023 InstanceType type) { |
2119 // Registers map and type_reg can be ip. These two lines assert | |
2120 // that ip can be used with the two instructions (the constants | |
2121 // will never need ip). | |
2122 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); | 2024 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); |
2123 STATIC_ASSERT(LAST_TYPE < 256); | 2025 STATIC_ASSERT(LAST_TYPE < 256); |
2124 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 2026 lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
2125 cmp(type_reg, Operand(type)); | 2027 cmpi(type_reg, Operand(type)); |
2126 } | 2028 } |
2127 | 2029 |
2128 | 2030 |
2129 void MacroAssembler::CompareRoot(Register obj, | 2031 void MacroAssembler::CompareRoot(Register obj, |
2130 Heap::RootListIndex index) { | 2032 Heap::RootListIndex index) { |
2131 ASSERT(!obj.is(ip)); | 2033 ASSERT(!obj.is(ip)); |
2132 LoadRoot(ip, index); | 2034 LoadRoot(ip, index); |
2133 cmp(obj, ip); | 2035 cmp(obj, ip); |
2134 } | 2036 } |
2135 | 2037 |
2136 | 2038 |
2137 void MacroAssembler::CheckFastElements(Register map, | 2039 void MacroAssembler::CheckFastElements(Register map, |
2138 Register scratch, | 2040 Register scratch, |
2139 Label* fail) { | 2041 Label* fail) { |
2140 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 2042 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
2141 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 2043 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
2142 STATIC_ASSERT(FAST_ELEMENTS == 2); | 2044 STATIC_ASSERT(FAST_ELEMENTS == 2); |
2143 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 2045 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
2144 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 2046 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
2145 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); | 2047 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000); |
2146 b(hi, fail); | 2048 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); |
| 2049 bgt(fail); |
2147 } | 2050 } |
2148 | 2051 |
2149 | 2052 |
2150 void MacroAssembler::CheckFastObjectElements(Register map, | 2053 void MacroAssembler::CheckFastObjectElements(Register map, |
2151 Register scratch, | 2054 Register scratch, |
2152 Label* fail) { | 2055 Label* fail) { |
2153 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 2056 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
2154 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 2057 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
2155 STATIC_ASSERT(FAST_ELEMENTS == 2); | 2058 STATIC_ASSERT(FAST_ELEMENTS == 2); |
2156 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 2059 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
2157 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 2060 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
2158 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 2061 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
2159 b(ls, fail); | 2062 ble(fail); |
2160 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); | 2063 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); |
2161 b(hi, fail); | 2064 bgt(fail); |
2162 } | 2065 } |
2163 | 2066 |
2164 | 2067 |
2165 void MacroAssembler::CheckFastSmiElements(Register map, | 2068 void MacroAssembler::CheckFastSmiElements(Register map, |
2166 Register scratch, | 2069 Register scratch, |
2167 Label* fail) { | 2070 Label* fail) { |
2168 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 2071 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
2169 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 2072 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
2170 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 2073 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
2171 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 2074 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
2172 b(hi, fail); | 2075 bgt(fail); |
2173 } | 2076 } |
2174 | 2077 |
2175 | 2078 |
| 2079 |
2176 void MacroAssembler::StoreNumberToDoubleElements( | 2080 void MacroAssembler::StoreNumberToDoubleElements( |
2177 Register value_reg, | 2081 Register value_reg, |
2178 Register key_reg, | 2082 Register key_reg, |
2179 Register elements_reg, | 2083 Register elements_reg, |
2180 Register scratch1, | 2084 Register scratch1, |
2181 LowDwVfpRegister double_scratch, | 2085 DoubleRegister double_scratch, |
2182 Label* fail, | 2086 Label* fail, |
2183 int elements_offset) { | 2087 int elements_offset) { |
2184 Label smi_value, store; | 2088 Label smi_value, store; |
2185 | 2089 |
2186 // Handle smi values specially. | 2090 // Handle smi values specially. |
2187 JumpIfSmi(value_reg, &smi_value); | 2091 JumpIfSmi(value_reg, &smi_value); |
2188 | 2092 |
2189 // Ensure that the object is a heap number | 2093 // Ensure that the object is a heap number |
2190 CheckMap(value_reg, | 2094 CheckMap(value_reg, |
2191 scratch1, | 2095 scratch1, |
2192 isolate()->factory()->heap_number_map(), | 2096 isolate()->factory()->heap_number_map(), |
2193 fail, | 2097 fail, |
2194 DONT_DO_SMI_CHECK); | 2098 DONT_DO_SMI_CHECK); |
2195 | 2099 |
2196 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 2100 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
2197 // Force a canonical NaN. | 2101 // Force a canonical NaN. |
2198 if (emit_debug_code()) { | 2102 CanonicalizeNaN(double_scratch); |
2199 vmrs(ip); | |
2200 tst(ip, Operand(kVFPDefaultNaNModeControlBit)); | |
2201 Assert(ne, kDefaultNaNModeNotSet); | |
2202 } | |
2203 VFPCanonicalizeNaN(double_scratch); | |
2204 b(&store); | 2103 b(&store); |
2205 | 2104 |
2206 bind(&smi_value); | 2105 bind(&smi_value); |
2207 SmiToDouble(double_scratch, value_reg); | 2106 SmiToDouble(double_scratch, value_reg); |
2208 | 2107 |
2209 bind(&store); | 2108 bind(&store); |
2210 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); | 2109 SmiToDoubleArrayOffset(scratch1, key_reg); |
2211 vstr(double_scratch, | 2110 add(scratch1, elements_reg, scratch1); |
| 2111 stfd(double_scratch, |
2212 FieldMemOperand(scratch1, | 2112 FieldMemOperand(scratch1, |
2213 FixedDoubleArray::kHeaderSize - elements_offset)); | 2113 FixedDoubleArray::kHeaderSize - elements_offset)); |
2214 } | 2114 } |
2215 | 2115 |
2216 | 2116 |
| 2117 void MacroAssembler::AddAndCheckForOverflow(Register dst, |
| 2118 Register left, |
| 2119 Register right, |
| 2120 Register overflow_dst, |
| 2121 Register scratch) { |
| 2122 ASSERT(!dst.is(overflow_dst)); |
| 2123 ASSERT(!dst.is(scratch)); |
| 2124 ASSERT(!overflow_dst.is(scratch)); |
| 2125 ASSERT(!overflow_dst.is(left)); |
| 2126 ASSERT(!overflow_dst.is(right)); |
| 2127 |
 | 2128 // C = A + B; C overflows iff A and B have the same sign and C's sign differs from A's. |
| 2129 if (dst.is(left)) { |
| 2130 mr(scratch, left); // Preserve left. |
| 2131 add(dst, left, right); // Left is overwritten. |
| 2132 xor_(scratch, dst, scratch); // Original left. |
| 2133 xor_(overflow_dst, dst, right); |
| 2134 and_(overflow_dst, overflow_dst, scratch, SetRC); |
| 2135 } else if (dst.is(right)) { |
| 2136 mr(scratch, right); // Preserve right. |
| 2137 add(dst, left, right); // Right is overwritten. |
| 2138 xor_(scratch, dst, scratch); // Original right. |
| 2139 xor_(overflow_dst, dst, left); |
| 2140 and_(overflow_dst, overflow_dst, scratch, SetRC); |
| 2141 } else { |
| 2142 add(dst, left, right); |
| 2143 xor_(overflow_dst, dst, left); |
| 2144 xor_(scratch, dst, right); |
| 2145 and_(overflow_dst, scratch, overflow_dst, SetRC); |
| 2146 } |
| 2147 } |
| 2148 |
| 2149 void MacroAssembler::SubAndCheckForOverflow(Register dst, |
| 2150 Register left, |
| 2151 Register right, |
| 2152 Register overflow_dst, |
| 2153 Register scratch) { |
| 2154 ASSERT(!dst.is(overflow_dst)); |
| 2155 ASSERT(!dst.is(scratch)); |
| 2156 ASSERT(!overflow_dst.is(scratch)); |
| 2157 ASSERT(!overflow_dst.is(left)); |
| 2158 ASSERT(!overflow_dst.is(right)); |
| 2159 |
 | 2160 // C = A - B; C overflows iff A and B have different signs and C's sign differs from A's. |
| 2161 if (dst.is(left)) { |
| 2162 mr(scratch, left); // Preserve left. |
| 2163 sub(dst, left, right); // Left is overwritten. |
| 2164 xor_(overflow_dst, dst, scratch); |
| 2165 xor_(scratch, scratch, right); |
| 2166 and_(overflow_dst, overflow_dst, scratch, SetRC); |
| 2167 } else if (dst.is(right)) { |
| 2168 mr(scratch, right); // Preserve right. |
| 2169 sub(dst, left, right); // Right is overwritten. |
| 2170 xor_(overflow_dst, dst, left); |
| 2171 xor_(scratch, left, scratch); |
| 2172 and_(overflow_dst, overflow_dst, scratch, SetRC); |
| 2173 } else { |
| 2174 sub(dst, left, right); |
| 2175 xor_(overflow_dst, dst, left); |
| 2176 xor_(scratch, left, right); |
| 2177 and_(overflow_dst, scratch, overflow_dst, SetRC); |
| 2178 } |
| 2179 } |
| 2180 |
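Both overflow helpers above rely on the XOR sign trick spelled out in their comments: an add overflows exactly when both operands agree in sign and the sum's sign differs, a subtract when the operands disagree in sign and the difference's sign differs from the minuend. The final and_ with SetRC leaves that sign bit in cr0 for the caller. A self-contained C++ sketch of the same tests on 32-bit values (function names are illustrative):

    #include <cstdint>

    // Matches the add path: (dst ^ left) & (dst ^ right) is negative iff overflow.
    bool AddOverflows(int32_t left, int32_t right) {
      int32_t dst = static_cast<int32_t>(
          static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
      return ((dst ^ left) & (dst ^ right)) < 0;
    }

    // Matches the subtract path: (dst ^ left) & (left ^ right) is negative iff overflow.
    bool SubOverflows(int32_t left, int32_t right) {
      int32_t dst = static_cast<int32_t>(
          static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
      return ((dst ^ left) & (left ^ right)) < 0;
    }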
| 2181 |
2217 void MacroAssembler::CompareMap(Register obj, | 2182 void MacroAssembler::CompareMap(Register obj, |
2218 Register scratch, | 2183 Register scratch, |
2219 Handle<Map> map, | 2184 Handle<Map> map, |
2220 Label* early_success) { | 2185 Label* early_success) { |
2221 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2186 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2222 CompareMap(scratch, map, early_success); | 2187 CompareMap(scratch, map, early_success); |
2223 } | 2188 } |
2224 | 2189 |
2225 | 2190 |
2226 void MacroAssembler::CompareMap(Register obj_map, | 2191 void MacroAssembler::CompareMap(Register obj_map, |
2227 Handle<Map> map, | 2192 Handle<Map> map, |
2228 Label* early_success) { | 2193 Label* early_success) { |
2229 cmp(obj_map, Operand(map)); | 2194 mov(r0, Operand(map)); |
| 2195 cmp(obj_map, r0); |
2230 } | 2196 } |
2231 | 2197 |
2232 | 2198 |
2233 void MacroAssembler::CheckMap(Register obj, | 2199 void MacroAssembler::CheckMap(Register obj, |
2234 Register scratch, | 2200 Register scratch, |
2235 Handle<Map> map, | 2201 Handle<Map> map, |
2236 Label* fail, | 2202 Label* fail, |
2237 SmiCheckType smi_check_type) { | 2203 SmiCheckType smi_check_type) { |
2238 if (smi_check_type == DO_SMI_CHECK) { | 2204 if (smi_check_type == DO_SMI_CHECK) { |
2239 JumpIfSmi(obj, fail); | 2205 JumpIfSmi(obj, fail); |
2240 } | 2206 } |
2241 | 2207 |
2242 Label success; | 2208 Label success; |
2243 CompareMap(obj, scratch, map, &success); | 2209 CompareMap(obj, scratch, map, &success); |
2244 b(ne, fail); | 2210 bne(fail); |
2245 bind(&success); | 2211 bind(&success); |
2246 } | 2212 } |
2247 | 2213 |
2248 | 2214 |
2249 void MacroAssembler::CheckMap(Register obj, | 2215 void MacroAssembler::CheckMap(Register obj, |
2250 Register scratch, | 2216 Register scratch, |
2251 Heap::RootListIndex index, | 2217 Heap::RootListIndex index, |
2252 Label* fail, | 2218 Label* fail, |
2253 SmiCheckType smi_check_type) { | 2219 SmiCheckType smi_check_type) { |
2254 if (smi_check_type == DO_SMI_CHECK) { | 2220 if (smi_check_type == DO_SMI_CHECK) { |
2255 JumpIfSmi(obj, fail); | 2221 JumpIfSmi(obj, fail); |
2256 } | 2222 } |
2257 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2223 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2258 LoadRoot(ip, index); | 2224 LoadRoot(ip, index); |
2259 cmp(scratch, ip); | 2225 cmp(scratch, ip); |
2260 b(ne, fail); | 2226 bne(fail); |
2261 } | 2227 } |
2262 | 2228 |
2263 | 2229 |
2264 void MacroAssembler::DispatchMap(Register obj, | 2230 void MacroAssembler::DispatchMap(Register obj, |
2265 Register scratch, | 2231 Register scratch, |
2266 Handle<Map> map, | 2232 Handle<Map> map, |
2267 Handle<Code> success, | 2233 Handle<Code> success, |
2268 SmiCheckType smi_check_type) { | 2234 SmiCheckType smi_check_type) { |
2269 Label fail; | 2235 Label fail; |
2270 if (smi_check_type == DO_SMI_CHECK) { | 2236 if (smi_check_type == DO_SMI_CHECK) { |
2271 JumpIfSmi(obj, &fail); | 2237 JumpIfSmi(obj, &fail); |
2272 } | 2238 } |
2273 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2239 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2274 mov(ip, Operand(map)); | 2240 mov(ip, Operand(map)); |
2275 cmp(scratch, ip); | 2241 cmp(scratch, ip); |
2276 Jump(success, RelocInfo::CODE_TARGET, eq); | 2242 bne(&fail); |
| 2243 Jump(success, RelocInfo::CODE_TARGET, al); |
2277 bind(&fail); | 2244 bind(&fail); |
2278 } | 2245 } |
2279 | 2246 |
2280 | 2247 |
2281 void MacroAssembler::TryGetFunctionPrototype(Register function, | 2248 void MacroAssembler::TryGetFunctionPrototype(Register function, |
2282 Register result, | 2249 Register result, |
2283 Register scratch, | 2250 Register scratch, |
2284 Label* miss, | 2251 Label* miss, |
2285 bool miss_on_bound_function) { | 2252 bool miss_on_bound_function) { |
2286 Label non_instance; | 2253 Label non_instance; |
2287 if (miss_on_bound_function) { | 2254 if (miss_on_bound_function) { |
2288 // Check that the receiver isn't a smi. | 2255 // Check that the receiver isn't a smi. |
2289 JumpIfSmi(function, miss); | 2256 JumpIfSmi(function, miss); |
2290 | 2257 |
2291 // Check that the function really is a function. Load map into result reg. | 2258 // Check that the function really is a function. Load map into result reg. |
2292 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); | 2259 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); |
2293 b(ne, miss); | 2260 bne(miss); |
2294 | 2261 |
2295 ldr(scratch, | 2262 LoadP(scratch, |
2296 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | 2263 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
2297 ldr(scratch, | 2264 lwz(scratch, |
2298 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); | 2265 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); |
2299 tst(scratch, | 2266 TestBit(scratch, |
2300 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); | 2267 #if V8_TARGET_ARCH_PPC64 |
2301 b(ne, miss); | 2268 SharedFunctionInfo::kBoundFunction, |
| 2269 #else |
| 2270 SharedFunctionInfo::kBoundFunction + kSmiTagSize, |
| 2271 #endif |
| 2272 r0); |
| 2273 bne(miss, cr0); |
2302 | 2274 |
2303 // Make sure that the function has an instance prototype. | 2275 // Make sure that the function has an instance prototype. |
2304 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); | 2276 lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); |
2305 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); | 2277 andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype)); |
2306 b(ne, &non_instance); | 2278 bne(&non_instance, cr0); |
2307 } | 2279 } |
2308 | 2280 |
2309 // Get the prototype or initial map from the function. | 2281 // Get the prototype or initial map from the function. |
2310 ldr(result, | 2282 LoadP(result, |
2311 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2283 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
2312 | 2284 |
2313 // If the prototype or initial map is the hole, don't return it and | 2285 // If the prototype or initial map is the hole, don't return it and |
2314 // simply miss the cache instead. This will allow us to allocate a | 2286 // simply miss the cache instead. This will allow us to allocate a |
2315 // prototype object on-demand in the runtime system. | 2287 // prototype object on-demand in the runtime system. |
2316 LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2288 LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
2317 cmp(result, ip); | 2289 cmp(result, ip); |
2318 b(eq, miss); | 2290 beq(miss); |
2319 | 2291 |
2320 // If the function does not have an initial map, we're done. | 2292 // If the function does not have an initial map, we're done. |
2321 Label done; | 2293 Label done; |
2322 CompareObjectType(result, scratch, scratch, MAP_TYPE); | 2294 CompareObjectType(result, scratch, scratch, MAP_TYPE); |
2323 b(ne, &done); | 2295 bne(&done); |
2324 | 2296 |
2325 // Get the prototype from the initial map. | 2297 // Get the prototype from the initial map. |
2326 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 2298 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
2327 | 2299 |
2328 if (miss_on_bound_function) { | 2300 if (miss_on_bound_function) { |
2329 jmp(&done); | 2301 b(&done); |
2330 | 2302 |
2331 // Non-instance prototype: Fetch prototype from constructor field | 2303 // Non-instance prototype: Fetch prototype from constructor field |
2332 // in initial map. | 2304 // in initial map. |
2333 bind(&non_instance); | 2305 bind(&non_instance); |
2334 ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | 2306 LoadP(result, FieldMemOperand(result, Map::kConstructorOffset)); |
2335 } | 2307 } |
2336 | 2308 |
2337 // All done. | 2309 // All done. |
2338 bind(&done); | 2310 bind(&done); |
2339 } | 2311 } |
2340 | 2312 |
2341 | 2313 |
2342 void MacroAssembler::CallStub(CodeStub* stub, | 2314 void MacroAssembler::CallStub(CodeStub* stub, |
2343 TypeFeedbackId ast_id, | 2315 TypeFeedbackId ast_id, |
2344 Condition cond) { | 2316 Condition cond) { |
(...skipping 12 matching lines...)
2357 } | 2329 } |
2358 | 2330 |
2359 | 2331 |
2360 void MacroAssembler::CallApiFunctionAndReturn( | 2332 void MacroAssembler::CallApiFunctionAndReturn( |
2361 Register function_address, | 2333 Register function_address, |
2362 ExternalReference thunk_ref, | 2334 ExternalReference thunk_ref, |
2363 int stack_space, | 2335 int stack_space, |
2364 MemOperand return_value_operand, | 2336 MemOperand return_value_operand, |
2365 MemOperand* context_restore_operand) { | 2337 MemOperand* context_restore_operand) { |
2366 ExternalReference next_address = | 2338 ExternalReference next_address = |
2367 ExternalReference::handle_scope_next_address(isolate()); | 2339 ExternalReference::handle_scope_next_address(isolate()); |
2368 const int kNextOffset = 0; | 2340 const int kNextOffset = 0; |
2369 const int kLimitOffset = AddressOffset( | 2341 const int kLimitOffset = AddressOffset( |
2370 ExternalReference::handle_scope_limit_address(isolate()), | 2342 ExternalReference::handle_scope_limit_address(isolate()), |
2371 next_address); | 2343 next_address); |
2372 const int kLevelOffset = AddressOffset( | 2344 const int kLevelOffset = AddressOffset( |
2373 ExternalReference::handle_scope_level_address(isolate()), | 2345 ExternalReference::handle_scope_level_address(isolate()), |
2374 next_address); | 2346 next_address); |
2375 | 2347 |
2376 ASSERT(function_address.is(r1) || function_address.is(r2)); | 2348 ASSERT(function_address.is(r4) || function_address.is(r5)); |
| 2349 Register scratch = r6; |
2377 | 2350 |
2378 Label profiler_disabled; | 2351 Label profiler_disabled; |
2379 Label end_profiler_check; | 2352 Label end_profiler_check; |
2380 mov(r9, Operand(ExternalReference::is_profiling_address(isolate()))); | 2353 mov(scratch, Operand(ExternalReference::is_profiling_address(isolate()))); |
2381 ldrb(r9, MemOperand(r9, 0)); | 2354 lbz(scratch, MemOperand(scratch, 0)); |
2382 cmp(r9, Operand(0)); | 2355 cmpi(scratch, Operand::Zero()); |
2383 b(eq, &profiler_disabled); | 2356 beq(&profiler_disabled); |
2384 | 2357 |
2385 // Additional parameter is the address of the actual callback. | 2358 // Additional parameter is the address of the actual callback. |
2386 mov(r3, Operand(thunk_ref)); | 2359 mov(scratch, Operand(thunk_ref)); |
2387 jmp(&end_profiler_check); | 2360 jmp(&end_profiler_check); |
2388 | 2361 |
2389 bind(&profiler_disabled); | 2362 bind(&profiler_disabled); |
2390 Move(r3, function_address); | 2363 mr(scratch, function_address); |
2391 bind(&end_profiler_check); | 2364 bind(&end_profiler_check); |
2392 | 2365 |
2393 // Allocate HandleScope in callee-save registers. | 2366 // Allocate HandleScope in callee-save registers. |
2394 mov(r9, Operand(next_address)); | 2367 // r17 - next_address |
2395 ldr(r4, MemOperand(r9, kNextOffset)); | 2368 // r14 - next_address->kNextOffset |
2396 ldr(r5, MemOperand(r9, kLimitOffset)); | 2369 // r15 - next_address->kLimitOffset |
2397 ldr(r6, MemOperand(r9, kLevelOffset)); | 2370 // r16 - next_address->kLevelOffset |
2398 add(r6, r6, Operand(1)); | 2371 mov(r17, Operand(next_address)); |
2399 str(r6, MemOperand(r9, kLevelOffset)); | 2372 LoadP(r14, MemOperand(r17, kNextOffset)); |
| 2373 LoadP(r15, MemOperand(r17, kLimitOffset)); |
| 2374 lwz(r16, MemOperand(r17, kLevelOffset)); |
| 2375 addi(r16, r16, Operand(1)); |
| 2376 stw(r16, MemOperand(r17, kLevelOffset)); |
2400 | 2377 |
2401 if (FLAG_log_timer_events) { | 2378 if (FLAG_log_timer_events) { |
2402 FrameScope frame(this, StackFrame::MANUAL); | 2379 FrameScope frame(this, StackFrame::MANUAL); |
2403 PushSafepointRegisters(); | 2380 PushSafepointRegisters(); |
2404 PrepareCallCFunction(1, r0); | 2381 PrepareCallCFunction(1, r3); |
2405 mov(r0, Operand(ExternalReference::isolate_address(isolate()))); | 2382 mov(r3, Operand(ExternalReference::isolate_address(isolate()))); |
2406 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); | 2383 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); |
2407 PopSafepointRegisters(); | 2384 PopSafepointRegisters(); |
2408 } | 2385 } |
2409 | 2386 |
2410 // Native call returns to the DirectCEntry stub which redirects to the | 2387 // Native call returns to the DirectCEntry stub which redirects to the |
2411 // return address pushed on stack (could have moved after GC). | 2388 // return address pushed on stack (could have moved after GC). |
2412 // DirectCEntry stub itself is generated early and never moves. | 2389 // DirectCEntry stub itself is generated early and never moves. |
2413 DirectCEntryStub stub(isolate()); | 2390 DirectCEntryStub stub(isolate()); |
2414 stub.GenerateCall(this, r3); | 2391 stub.GenerateCall(this, scratch); |
2415 | 2392 |
2416 if (FLAG_log_timer_events) { | 2393 if (FLAG_log_timer_events) { |
2417 FrameScope frame(this, StackFrame::MANUAL); | 2394 FrameScope frame(this, StackFrame::MANUAL); |
2418 PushSafepointRegisters(); | 2395 PushSafepointRegisters(); |
2419 PrepareCallCFunction(1, r0); | 2396 PrepareCallCFunction(1, r3); |
2420 mov(r0, Operand(ExternalReference::isolate_address(isolate()))); | 2397 mov(r3, Operand(ExternalReference::isolate_address(isolate()))); |
2421 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); | 2398 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); |
2422 PopSafepointRegisters(); | 2399 PopSafepointRegisters(); |
2423 } | 2400 } |
2424 | 2401 |
2425 Label promote_scheduled_exception; | 2402 Label promote_scheduled_exception; |
2426 Label exception_handled; | 2403 Label exception_handled; |
2427 Label delete_allocated_handles; | 2404 Label delete_allocated_handles; |
2428 Label leave_exit_frame; | 2405 Label leave_exit_frame; |
2429 Label return_value_loaded; | 2406 Label return_value_loaded; |
2430 | 2407 |
2431 // load value from ReturnValue | 2408 // load value from ReturnValue |
2432 ldr(r0, return_value_operand); | 2409 LoadP(r3, return_value_operand); |
2433 bind(&return_value_loaded); | 2410 bind(&return_value_loaded); |
2434 // No more valid handles (the result handle was the last one). Restore | 2411 // No more valid handles (the result handle was the last one). Restore |
2435 // previous handle scope. | 2412 // previous handle scope. |
2436 str(r4, MemOperand(r9, kNextOffset)); | 2413 StoreP(r14, MemOperand(r17, kNextOffset)); |
2437 if (emit_debug_code()) { | 2414 if (emit_debug_code()) { |
2438 ldr(r1, MemOperand(r9, kLevelOffset)); | 2415 lwz(r4, MemOperand(r17, kLevelOffset)); |
2439 cmp(r1, r6); | 2416 cmp(r4, r16); |
2440 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); | 2417 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); |
2441 } | 2418 } |
2442 sub(r6, r6, Operand(1)); | 2419 subi(r16, r16, Operand(1)); |
2443 str(r6, MemOperand(r9, kLevelOffset)); | 2420 stw(r16, MemOperand(r17, kLevelOffset)); |
2444 ldr(ip, MemOperand(r9, kLimitOffset)); | 2421 LoadP(ip, MemOperand(r17, kLimitOffset)); |
2445 cmp(r5, ip); | 2422 cmp(r15, ip); |
2446 b(ne, &delete_allocated_handles); | 2423 bne(&delete_allocated_handles); |
2447 | 2424 |
2448 // Check if the function scheduled an exception. | 2425 // Check if the function scheduled an exception. |
2449 bind(&leave_exit_frame); | 2426 bind(&leave_exit_frame); |
2450 LoadRoot(r4, Heap::kTheHoleValueRootIndex); | 2427 LoadRoot(r14, Heap::kTheHoleValueRootIndex); |
2451 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate()))); | 2428 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate()))); |
2452 ldr(r5, MemOperand(ip)); | 2429 LoadP(r15, MemOperand(ip)); |
2453 cmp(r4, r5); | 2430 cmp(r14, r15); |
2454 b(ne, &promote_scheduled_exception); | 2431 bne(&promote_scheduled_exception); |
2455 bind(&exception_handled); | 2432 bind(&exception_handled); |
2456 | 2433 |
2457 bool restore_context = context_restore_operand != NULL; | 2434 bool restore_context = context_restore_operand != NULL; |
2458 if (restore_context) { | 2435 if (restore_context) { |
2459 ldr(cp, *context_restore_operand); | 2436 LoadP(cp, *context_restore_operand); |
2460 } | 2437 } |
2461 // LeaveExitFrame expects unwind space to be in a register. | 2438 // LeaveExitFrame expects unwind space to be in a register. |
2462 mov(r4, Operand(stack_space)); | 2439 mov(r14, Operand(stack_space)); |
2463 LeaveExitFrame(false, r4, !restore_context); | 2440 LeaveExitFrame(false, r14, !restore_context); |
2464 mov(pc, lr); | 2441 blr(); |
2465 | 2442 |
2466 bind(&promote_scheduled_exception); | 2443 bind(&promote_scheduled_exception); |
2467 { | 2444 { |
2468 FrameScope frame(this, StackFrame::INTERNAL); | 2445 FrameScope frame(this, StackFrame::INTERNAL); |
2469 CallExternalReference( | 2446 CallExternalReference( |
2470 ExternalReference(Runtime::kPromoteScheduledException, isolate()), | 2447 ExternalReference(Runtime::kPromoteScheduledException, isolate()), |
2471 0); | 2448 0); |
2472 } | 2449 } |
2473 jmp(&exception_handled); | 2450 jmp(&exception_handled); |
2474 | 2451 |
2475 // HandleScope limit has changed. Delete allocated extensions. | 2452 // HandleScope limit has changed. Delete allocated extensions. |
2476 bind(&delete_allocated_handles); | 2453 bind(&delete_allocated_handles); |
2477 str(r5, MemOperand(r9, kLimitOffset)); | 2454 StoreP(r15, MemOperand(r17, kLimitOffset)); |
2478 mov(r4, r0); | 2455 mr(r14, r3); |
2479 PrepareCallCFunction(1, r5); | 2456 PrepareCallCFunction(1, r15); |
2480 mov(r0, Operand(ExternalReference::isolate_address(isolate()))); | 2457 mov(r3, Operand(ExternalReference::isolate_address(isolate()))); |
2481 CallCFunction( | 2458 CallCFunction( |
2482 ExternalReference::delete_handle_scope_extensions(isolate()), 1); | 2459 ExternalReference::delete_handle_scope_extensions(isolate()), 1); |
2483 mov(r0, r4); | 2460 mr(r3, r14); |
2484 jmp(&leave_exit_frame); | 2461 b(&leave_exit_frame); |
2485 } | 2462 } |
2486 | 2463 |
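The register comments near the top of CallApiFunctionAndReturn describe ordinary HandleScope bookkeeping around the native call: save the scope's next and limit pointers, bump the level, call out through the DirectCEntry stub, then restore next, decrement the level, and only take the slow path when the limit moved (extensions were allocated). A hedged pseudo-C++ sketch of that flow, omitting the exception and logging paths; HandleScopeData here is a stand-in mirroring the kNextOffset/kLimitOffset/kLevelOffset comments, not the real declaration:

    // Illustrative only.
    struct HandleScopeData { void** next; void** limit; int level; };

    void CallApiFunctionSketch(HandleScopeData* data) {
      void** prev_next  = data->next;    // r14 above
      void** prev_limit = data->limit;   // r15 above
      data->level++;                     // r16 above

      // ... native callback runs here (via the DirectCEntry stub) ...

      data->next = prev_next;            // drop handles created by the callback
      data->level--;
      if (data->limit != prev_limit) {
        data->limit = prev_limit;        // the &delete_allocated_handles path,
        // DeleteExtensions();           // which then frees the extra blocks
      }
    }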
2487 | 2464 |
2488 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | 2465 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
2489 return has_frame_ || !stub->SometimesSetsUpAFrame(); | 2466 return has_frame_ || !stub->SometimesSetsUpAFrame(); |
2490 } | 2467 } |
2491 | 2468 |
2492 | 2469 |
2493 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 2470 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
2494 // If the hash field contains an array index pick it out. The assert checks | 2471 // If the hash field contains an array index pick it out. The assert checks |
2495 // that the constants for the maximum number of digits for an array index | 2472 // that the constants for the maximum number of digits for an array index |
2496 // cached in the hash field and the number of bits reserved for it does not | 2473 // cached in the hash field and the number of bits reserved for it does not |
2497 // conflict. | 2474 // conflict. |
2498 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 2475 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < |
2499 (1 << String::kArrayIndexValueBits)); | 2476 (1 << String::kArrayIndexValueBits)); |
2500 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); | 2477 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); |
2501 } | 2478 } |
2502 | 2479 |
2503 | 2480 |
2504 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { | 2481 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) { |
2505 if (CpuFeatures::IsSupported(VFP3)) { | 2482 SmiUntag(ip, smi); |
2506 vmov(value.low(), smi); | 2483 ConvertIntToDouble(ip, value); |
2507 vcvt_f64_s32(value, 1); | |
2508 } else { | |
2509 SmiUntag(ip, smi); | |
2510 vmov(value.low(), ip); | |
2511 vcvt_f64_s32(value, value.low()); | |
2512 } | |
2513 } | 2484 } |
2514 | 2485 |
2515 | 2486 |
2516 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, | 2487 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input, |
2517 LowDwVfpRegister double_scratch) { | 2488 Register scratch1, |
2518 ASSERT(!double_input.is(double_scratch)); | 2489 Register scratch2, |
2519 vcvt_s32_f64(double_scratch.low(), double_input); | 2490 DoubleRegister double_scratch) { |
2520 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2491 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch); |
2521 VFPCompareAndSetFlags(double_input, double_scratch); | |
2522 } | 2492 } |
2523 | 2493 |
2524 | 2494 |
2525 void MacroAssembler::TryDoubleToInt32Exact(Register result, | 2495 void MacroAssembler::TryDoubleToInt32Exact(Register result, |
2526 DwVfpRegister double_input, | 2496 DoubleRegister double_input, |
2527 LowDwVfpRegister double_scratch) { | 2497 Register scratch, |
| 2498 DoubleRegister double_scratch) { |
| 2499 Label done; |
2528 ASSERT(!double_input.is(double_scratch)); | 2500 ASSERT(!double_input.is(double_scratch)); |
2529 vcvt_s32_f64(double_scratch.low(), double_input); | 2501 |
2530 vmov(result, double_scratch.low()); | 2502 ConvertDoubleToInt64(double_input, result, |
2531 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2503 #if !V8_TARGET_ARCH_PPC64 |
2532 VFPCompareAndSetFlags(double_input, double_scratch); | 2504 scratch, |
| 2505 #endif |
| 2506 double_scratch); |
| 2507 |
| 2508 #if V8_TARGET_ARCH_PPC64 |
| 2509 TestIfInt32(result, scratch, r0); |
| 2510 #else |
| 2511 TestIfInt32(scratch, result, r0); |
| 2512 #endif |
| 2513 bne(&done); |
| 2514 |
 | 2515 // Convert back and compare; equality means the value was an exact int32. |
| 2516 fcfid(double_scratch, double_scratch); |
| 2517 fcmpu(double_scratch, double_input); |
| 2518 bind(&done); |
2533 } | 2519 } |
2534 | 2520 |
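Without ARM's vcvt round trip, the PPC version of TryDoubleToInt32Exact converts to a 64-bit integer, checks that it fits in 32 bits, then converts back (fcfid) and compares (fcmpu); equality is only possible when the original double was already an exact 32-bit integer. A self-contained sketch of the same round trip (name is illustrative; NaN/Inf are ignored here, whereas the hardware conversion saturates):

    #include <cstdint>
    #include <limits>

    // Mirrors ConvertDoubleToInt64 + TestIfInt32 + fcfid/fcmpu above.
    bool DoubleIsExactInt32(double value, int32_t* result) {
      int64_t as_int = static_cast<int64_t>(value);  // truncating conversion
      if (as_int < std::numeric_limits<int32_t>::min() ||
          as_int > std::numeric_limits<int32_t>::max()) {
        return false;
      }
      *result = static_cast<int32_t>(as_int);
      return static_cast<double>(as_int) == value;   // lossless round trip?
    }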
2535 | 2521 |
2536 void MacroAssembler::TryInt32Floor(Register result, | 2522 void MacroAssembler::TryInt32Floor(Register result, |
2537 DwVfpRegister double_input, | 2523 DoubleRegister double_input, |
2538 Register input_high, | 2524 Register input_high, |
2539 LowDwVfpRegister double_scratch, | 2525 Register scratch, |
| 2526 DoubleRegister double_scratch, |
2540 Label* done, | 2527 Label* done, |
2541 Label* exact) { | 2528 Label* exact) { |
2542 ASSERT(!result.is(input_high)); | 2529 ASSERT(!result.is(input_high)); |
2543 ASSERT(!double_input.is(double_scratch)); | 2530 ASSERT(!double_input.is(double_scratch)); |
2544 Label negative, exception; | 2531 Label exception; |
2545 | 2532 |
2546 VmovHigh(input_high, double_input); | 2533 // Move high word into input_high |
| 2534 stfdu(double_input, MemOperand(sp, -kDoubleSize)); |
 | 2535 nop(); // Avoid a load-hit-store stall between the stfdu above and the lwz below. |
| 2536 lwz(input_high, MemOperand(sp, Register::kExponentOffset)); |
| 2537 addi(sp, sp, Operand(kDoubleSize)); |
2547 | 2538 |
2548 // Test for NaN and infinities. | 2539 // Test for NaN/Inf |
2549 Sbfx(result, input_high, | 2540 ExtractBitMask(result, input_high, HeapNumber::kExponentMask); |
2550 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 2541 cmpli(result, Operand(0x7ff)); |
2551 cmp(result, Operand(-1)); | 2542 beq(&exception); |
2552 b(eq, &exception); | |
2553 // Test for values that can be exactly represented as a | |
2554 // signed 32-bit integer. | |
2555 TryDoubleToInt32Exact(result, double_input, double_scratch); | |
2556 // If exact, return (result already fetched). | |
2557 b(eq, exact); | |
2558 cmp(input_high, Operand::Zero()); | |
2559 b(mi, &negative); | |
2560 | 2543 |
2561 // Input is in ]+0, +inf[. | 2544 // Convert (rounding to -Inf) |
2562 // If result equals 0x7fffffff input was out of range or | 2545 ConvertDoubleToInt64(double_input, result, |
2563 // in ]0x7fffffff, 0x80000000[. We ignore this last case which | 2546 #if !V8_TARGET_ARCH_PPC64 |
2564 // could fits into an int32, that means we always think input was | 2547 scratch, |
2565 // out of range and always go to exception. | 2548 #endif |
2566 // If result < 0x7fffffff, go to done, result fetched. | 2549 double_scratch, |
2567 cmn(result, Operand(1)); | 2550 kRoundToMinusInf); |
2568 b(mi, &exception); | 2551 |
| 2552 // Test for overflow |
| 2553 #if V8_TARGET_ARCH_PPC64 |
| 2554 TestIfInt32(result, scratch, r0); |
| 2555 #else |
| 2556 TestIfInt32(scratch, result, r0); |
| 2557 #endif |
| 2558 bne(&exception); |
| 2559 |
| 2560 // Test for exactness |
| 2561 fcfid(double_scratch, double_scratch); |
| 2562 fcmpu(double_scratch, double_input); |
| 2563 beq(exact); |
2569 b(done); | 2564 b(done); |
2570 | 2565 |
2571 // Input is in ]-inf, -0[. | |
2572 // If x is a non integer negative number, | |
2573 // floor(x) <=> round_to_zero(x) - 1. | |
2574 bind(&negative); | |
2575 sub(result, result, Operand(1), SetCC); | |
2576 // If result is still negative, go to done, result fetched. | |
2577 // Else, we had an overflow and we fall through exception. | |
2578 b(mi, done); | |
2579 bind(&exception); | 2566 bind(&exception); |
2580 } | 2567 } |
2581 | 2568 |
| 2569 |
2582 void MacroAssembler::TryInlineTruncateDoubleToI(Register result, | 2570 void MacroAssembler::TryInlineTruncateDoubleToI(Register result, |
2583 DwVfpRegister double_input, | 2571 DoubleRegister double_input, |
2584 Label* done) { | 2572 Label* done) { |
2585 LowDwVfpRegister double_scratch = kScratchDoubleReg; | 2573 DoubleRegister double_scratch = kScratchDoubleReg; |
2586 vcvt_s32_f64(double_scratch.low(), double_input); | 2574 Register scratch = ip; |
2587 vmov(result, double_scratch.low()); | |
2588 | 2575 |
2589 // If result is not saturated (0x7fffffff or 0x80000000), we are done. | 2576 ConvertDoubleToInt64(double_input, result, |
2590 sub(ip, result, Operand(1)); | 2577 #if !V8_TARGET_ARCH_PPC64 |
2591 cmp(ip, Operand(0x7ffffffe)); | 2578 scratch, |
2592 b(lt, done); | 2579 #endif |
| 2580 double_scratch); |
| 2581 |
| 2582 // Test for overflow |
| 2583 #if V8_TARGET_ARCH_PPC64 |
| 2584 TestIfInt32(result, scratch, r0); |
| 2585 #else |
| 2586 TestIfInt32(scratch, result, r0); |
| 2587 #endif |
| 2588 beq(done); |
2593 } | 2589 } |
2594 | 2590 |
2595 | 2591 |
2596 void MacroAssembler::TruncateDoubleToI(Register result, | 2592 void MacroAssembler::TruncateDoubleToI(Register result, |
2597 DwVfpRegister double_input) { | 2593 DoubleRegister double_input) { |
2598 Label done; | 2594 Label done; |
2599 | 2595 |
2600 TryInlineTruncateDoubleToI(result, double_input, &done); | 2596 TryInlineTruncateDoubleToI(result, double_input, &done); |
2601 | 2597 |
 2602 // If we fell through, the inline version didn't succeed - call the stub instead. | 2598 // If we fell through, the inline version didn't succeed - call the stub instead. |
2603 push(lr); | 2599 mflr(r0); |
2604 sub(sp, sp, Operand(kDoubleSize)); // Put input on stack. | 2600 push(r0); |
2605 vstr(double_input, MemOperand(sp, 0)); | 2601 // Put input on stack. |
| 2602 stfdu(double_input, MemOperand(sp, -kDoubleSize)); |
2606 | 2603 |
2607 DoubleToIStub stub(isolate(), sp, result, 0, true, true); | 2604 DoubleToIStub stub(isolate(), sp, result, 0, true, true); |
2608 CallStub(&stub); | 2605 CallStub(&stub); |
2609 | 2606 |
2610 add(sp, sp, Operand(kDoubleSize)); | 2607 addi(sp, sp, Operand(kDoubleSize)); |
2611 pop(lr); | 2608 pop(r0); |
| 2609 mtlr(r0); |
2612 | 2610 |
2613 bind(&done); | 2611 bind(&done); |
2614 } | 2612 } |
2615 | 2613 |
2616 | 2614 |
2617 void MacroAssembler::TruncateHeapNumberToI(Register result, | 2615 void MacroAssembler::TruncateHeapNumberToI(Register result, |
2618 Register object) { | 2616 Register object) { |
2619 Label done; | 2617 Label done; |
2620 LowDwVfpRegister double_scratch = kScratchDoubleReg; | 2618 DoubleRegister double_scratch = kScratchDoubleReg; |
2621 ASSERT(!result.is(object)); | 2619 ASSERT(!result.is(object)); |
2622 | 2620 |
2623 vldr(double_scratch, | 2621 lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); |
2624 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); | |
2625 TryInlineTruncateDoubleToI(result, double_scratch, &done); | 2622 TryInlineTruncateDoubleToI(result, double_scratch, &done); |
2626 | 2623 |
 2627 // If we fell through, the inline version didn't succeed - call the stub instead. | 2624 // If we fell through, the inline version didn't succeed - call the stub instead. |
2628 push(lr); | 2625 mflr(r0); |
| 2626 push(r0); |
2629 DoubleToIStub stub(isolate(), | 2627 DoubleToIStub stub(isolate(), |
2630 object, | 2628 object, |
2631 result, | 2629 result, |
2632 HeapNumber::kValueOffset - kHeapObjectTag, | 2630 HeapNumber::kValueOffset - kHeapObjectTag, |
2633 true, | 2631 true, |
2634 true); | 2632 true); |
2635 CallStub(&stub); | 2633 CallStub(&stub); |
2636 pop(lr); | 2634 pop(r0); |
| 2635 mtlr(r0); |
2637 | 2636 |
2638 bind(&done); | 2637 bind(&done); |
2639 } | 2638 } |
2640 | 2639 |
2641 | 2640 |
2642 void MacroAssembler::TruncateNumberToI(Register object, | 2641 void MacroAssembler::TruncateNumberToI(Register object, |
2643 Register result, | 2642 Register result, |
2644 Register heap_number_map, | 2643 Register heap_number_map, |
2645 Register scratch1, | 2644 Register scratch1, |
2646 Label* not_number) { | 2645 Label* not_number) { |
2647 Label done; | 2646 Label done; |
2648 ASSERT(!result.is(object)); | 2647 ASSERT(!result.is(object)); |
2649 | 2648 |
2650 UntagAndJumpIfSmi(result, object, &done); | 2649 UntagAndJumpIfSmi(result, object, &done); |
2651 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 2650 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
2652 TruncateHeapNumberToI(result, object); | 2651 TruncateHeapNumberToI(result, object); |
2653 | 2652 |
2654 bind(&done); | 2653 bind(&done); |
2655 } | 2654 } |
2656 | 2655 |
2657 | 2656 |
2658 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2657 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
2659 Register src, | 2658 Register src, |
2660 int num_least_bits) { | 2659 int num_least_bits) { |
2661 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 2660 #if V8_TARGET_ARCH_PPC64 |
2662 ubfx(dst, src, kSmiTagSize, num_least_bits); | 2661 rldicl(dst, src, kBitsPerPointer - kSmiShift, |
2663 } else { | 2662 kBitsPerPointer - num_least_bits); |
2664 SmiUntag(dst, src); | 2663 #else |
2665 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | 2664 rlwinm(dst, src, kBitsPerPointer - kSmiShift, |
2666 } | 2665 kBitsPerPointer - num_least_bits, 31); |
| 2666 #endif |
2667 } | 2667 } |
2668 | 2668 |
2669 | 2669 |
2670 void MacroAssembler::GetLeastBitsFromInt32(Register dst, | 2670 void MacroAssembler::GetLeastBitsFromInt32(Register dst, |
2671 Register src, | 2671 Register src, |
2672 int num_least_bits) { | 2672 int num_least_bits) { |
2673 and_(dst, src, Operand((1 << num_least_bits) - 1)); | 2673 rlwinm(dst, src, 0, 32 - num_least_bits, 31); |
2674 } | 2674 } |
2675 | 2675 |
2676 | 2676 |
2677 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 2677 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
2678 int num_arguments, | 2678 int num_arguments, |
2679 SaveFPRegsMode save_doubles) { | 2679 SaveFPRegsMode save_doubles) { |
2680 // All parameters are on the stack. r0 has the return value after call. | 2680 // All parameters are on the stack. r3 has the return value after call. |
2681 | 2681 |
2682 // If the expected number of arguments of the runtime function is | 2682 // If the expected number of arguments of the runtime function is |
2683 // constant, we check that the actual number of arguments match the | 2683 // constant, we check that the actual number of arguments match the |
2684 // expectation. | 2684 // expectation. |
2685 CHECK(f->nargs < 0 || f->nargs == num_arguments); | 2685 CHECK(f->nargs < 0 || f->nargs == num_arguments); |
2686 | 2686 |
2687 // TODO(1236192): Most runtime routines don't need the number of | 2687 // TODO(1236192): Most runtime routines don't need the number of |
2688 // arguments passed in because it is constant. At some point we | 2688 // arguments passed in because it is constant. At some point we |
2689 // should remove this need and make the runtime routine entry code | 2689 // should remove this need and make the runtime routine entry code |
2690 // smarter. | 2690 // smarter. |
2691 mov(r0, Operand(num_arguments)); | 2691 mov(r3, Operand(num_arguments)); |
2692 mov(r1, Operand(ExternalReference(f, isolate()))); | 2692 mov(r4, Operand(ExternalReference(f, isolate()))); |
2693 CEntryStub stub(isolate(), 1, save_doubles); | 2693 CEntryStub stub(isolate(), |
| 2694 #if V8_TARGET_ARCH_PPC64 |
| 2695 f->result_size, |
| 2696 #else |
| 2697 1, |
| 2698 #endif |
| 2699 save_doubles); |
2694 CallStub(&stub); | 2700 CallStub(&stub); |
2695 } | 2701 } |
2696 | 2702 |
2697 | 2703 |
2698 void MacroAssembler::CallExternalReference(const ExternalReference& ext, | 2704 void MacroAssembler::CallExternalReference(const ExternalReference& ext, |
2699 int num_arguments) { | 2705 int num_arguments) { |
2700 mov(r0, Operand(num_arguments)); | 2706 mov(r3, Operand(num_arguments)); |
2701 mov(r1, Operand(ext)); | 2707 mov(r4, Operand(ext)); |
2702 | 2708 |
2703 CEntryStub stub(isolate(), 1); | 2709 CEntryStub stub(isolate(), 1); |
2704 CallStub(&stub); | 2710 CallStub(&stub); |
2705 } | 2711 } |
2706 | 2712 |
2707 | 2713 |
2708 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, | 2714 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, |
2709 int num_arguments, | 2715 int num_arguments, |
2710 int result_size) { | 2716 int result_size) { |
2711 // TODO(1236192): Most runtime routines don't need the number of | 2717 // TODO(1236192): Most runtime routines don't need the number of |
2712 // arguments passed in because it is constant. At some point we | 2718 // arguments passed in because it is constant. At some point we |
2713 // should remove this need and make the runtime routine entry code | 2719 // should remove this need and make the runtime routine entry code |
2714 // smarter. | 2720 // smarter. |
2715 mov(r0, Operand(num_arguments)); | 2721 mov(r3, Operand(num_arguments)); |
2716 JumpToExternalReference(ext); | 2722 JumpToExternalReference(ext); |
2717 } | 2723 } |
2718 | 2724 |
2719 | 2725 |
2720 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, | 2726 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, |
2721 int num_arguments, | 2727 int num_arguments, |
2722 int result_size) { | 2728 int result_size) { |
2723 TailCallExternalReference(ExternalReference(fid, isolate()), | 2729 TailCallExternalReference(ExternalReference(fid, isolate()), |
2724 num_arguments, | 2730 num_arguments, |
2725 result_size); | 2731 result_size); |
2726 } | 2732 } |
2727 | 2733 |
2728 | 2734 |
2729 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { | 2735 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { |
2730 #if defined(__thumb__) | 2736 mov(r4, Operand(builtin)); |
2731 // Thumb mode builtin. | |
2732 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); | |
2733 #endif | |
2734 mov(r1, Operand(builtin)); | |
2735 CEntryStub stub(isolate(), 1); | 2737 CEntryStub stub(isolate(), 1); |
2736 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 2738 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
2737 } | 2739 } |
2738 | 2740 |
2739 | 2741 |
2740 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | 2742 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
2741 InvokeFlag flag, | 2743 InvokeFlag flag, |
2742 const CallWrapper& call_wrapper) { | 2744 const CallWrapper& call_wrapper) { |
2743 // You can't call a builtin without a valid frame. | 2745 // You can't call a builtin without a valid frame. |
2744 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 2746 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
2745 | 2747 |
2746 GetBuiltinEntry(r2, id); | 2748 GetBuiltinEntry(r5, id); |
2747 if (flag == CALL_FUNCTION) { | 2749 if (flag == CALL_FUNCTION) { |
2748 call_wrapper.BeforeCall(CallSize(r2)); | 2750 call_wrapper.BeforeCall(CallSize(r5)); |
2749 Call(r2); | 2751 Call(r5); |
2750 call_wrapper.AfterCall(); | 2752 call_wrapper.AfterCall(); |
2751 } else { | 2753 } else { |
2752 ASSERT(flag == JUMP_FUNCTION); | 2754 ASSERT(flag == JUMP_FUNCTION); |
2753 Jump(r2); | 2755 Jump(r5); |
2754 } | 2756 } |
2755 } | 2757 } |
2756 | 2758 |
2757 | 2759 |
2758 void MacroAssembler::GetBuiltinFunction(Register target, | 2760 void MacroAssembler::GetBuiltinFunction(Register target, |
2759 Builtins::JavaScript id) { | 2761 Builtins::JavaScript id) { |
2760 // Load the builtins object into target register. | 2762 // Load the builtins object into target register. |
2761 ldr(target, | 2763 LoadP(target, |
2762 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2764 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2763 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); | 2765 LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); |
2764 // Load the JavaScript builtin function from the builtins object. | 2766 // Load the JavaScript builtin function from the builtins object. |
2765 ldr(target, FieldMemOperand(target, | 2767 LoadP(target, |
2766 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | 2768 FieldMemOperand(target, |
| 2769 JSBuiltinsObject::OffsetOfFunctionWithId(id)), r0); |
2767 } | 2770 } |
2768 | 2771 |
2769 | 2772 |
2770 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | 2773 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
2771 ASSERT(!target.is(r1)); | 2774 ASSERT(!target.is(r4)); |
2772 GetBuiltinFunction(r1, id); | 2775 GetBuiltinFunction(r4, id); |
2773 // Load the code entry point from the builtins object. | 2776 // Load the code entry point from the builtins object. |
2774 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 2777 LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
2775 } | 2778 } |
2776 | 2779 |
2777 | 2780 |
2778 void MacroAssembler::SetCounter(StatsCounter* counter, int value, | 2781 void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
2779 Register scratch1, Register scratch2) { | 2782 Register scratch1, Register scratch2) { |
2780 if (FLAG_native_code_counters && counter->Enabled()) { | 2783 if (FLAG_native_code_counters && counter->Enabled()) { |
2781 mov(scratch1, Operand(value)); | 2784 mov(scratch1, Operand(value)); |
2782 mov(scratch2, Operand(ExternalReference(counter))); | 2785 mov(scratch2, Operand(ExternalReference(counter))); |
2783 str(scratch1, MemOperand(scratch2)); | 2786 stw(scratch1, MemOperand(scratch2)); |
2784 } | 2787 } |
2785 } | 2788 } |
2786 | 2789 |
2787 | 2790 |
2788 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, | 2791 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, |
2789 Register scratch1, Register scratch2) { | 2792 Register scratch1, Register scratch2) { |
2790 ASSERT(value > 0); | 2793 ASSERT(value > 0); |
2791 if (FLAG_native_code_counters && counter->Enabled()) { | 2794 if (FLAG_native_code_counters && counter->Enabled()) { |
2792 mov(scratch2, Operand(ExternalReference(counter))); | 2795 mov(scratch2, Operand(ExternalReference(counter))); |
2793 ldr(scratch1, MemOperand(scratch2)); | 2796 lwz(scratch1, MemOperand(scratch2)); |
2794 add(scratch1, scratch1, Operand(value)); | 2797 addi(scratch1, scratch1, Operand(value)); |
2795 str(scratch1, MemOperand(scratch2)); | 2798 stw(scratch1, MemOperand(scratch2)); |
2796 } | 2799 } |
2797 } | 2800 } |
2798 | 2801 |
2799 | 2802 |
2800 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, | 2803 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
2801 Register scratch1, Register scratch2) { | 2804 Register scratch1, Register scratch2) { |
2802 ASSERT(value > 0); | 2805 ASSERT(value > 0); |
2803 if (FLAG_native_code_counters && counter->Enabled()) { | 2806 if (FLAG_native_code_counters && counter->Enabled()) { |
2804 mov(scratch2, Operand(ExternalReference(counter))); | 2807 mov(scratch2, Operand(ExternalReference(counter))); |
2805 ldr(scratch1, MemOperand(scratch2)); | 2808 lwz(scratch1, MemOperand(scratch2)); |
2806 sub(scratch1, scratch1, Operand(value)); | 2809 subi(scratch1, scratch1, Operand(value)); |
2807 str(scratch1, MemOperand(scratch2)); | 2810 stw(scratch1, MemOperand(scratch2)); |
2808 } | 2811 } |
2809 } | 2812 } |
2810 | 2813 |
2811 | 2814 |
2812 void MacroAssembler::Assert(Condition cond, BailoutReason reason) { | 2815 void MacroAssembler::Assert(Condition cond, BailoutReason reason, |
| 2816 CRegister cr) { |
2813 if (emit_debug_code()) | 2817 if (emit_debug_code()) |
2814 Check(cond, reason); | 2818 Check(cond, reason, cr); |
2815 } | 2819 } |
2816 | 2820 |
2817 | 2821 |
2818 void MacroAssembler::AssertFastElements(Register elements) { | 2822 void MacroAssembler::AssertFastElements(Register elements) { |
2819 if (emit_debug_code()) { | 2823 if (emit_debug_code()) { |
2820 ASSERT(!elements.is(ip)); | 2824 ASSERT(!elements.is(ip)); |
2821 Label ok; | 2825 Label ok; |
2822 push(elements); | 2826 push(elements); |
2823 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); | 2827 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); |
2824 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 2828 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
2825 cmp(elements, ip); | 2829 cmp(elements, ip); |
2826 b(eq, &ok); | 2830 beq(&ok); |
2827 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); | 2831 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); |
2828 cmp(elements, ip); | 2832 cmp(elements, ip); |
2829 b(eq, &ok); | 2833 beq(&ok); |
2830 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); | 2834 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); |
2831 cmp(elements, ip); | 2835 cmp(elements, ip); |
2832 b(eq, &ok); | 2836 beq(&ok); |
2833 Abort(kJSObjectWithFastElementsMapHasSlowElements); | 2837 Abort(kJSObjectWithFastElementsMapHasSlowElements); |
2834 bind(&ok); | 2838 bind(&ok); |
2835 pop(elements); | 2839 pop(elements); |
2836 } | 2840 } |
2837 } | 2841 } |
2838 | 2842 |
2839 | 2843 |
2840 void MacroAssembler::Check(Condition cond, BailoutReason reason) { | 2844 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) { |
2841 Label L; | 2845 Label L; |
2842 b(cond, &L); | 2846 b(cond, &L, cr); |
2843 Abort(reason); | 2847 Abort(reason); |
2844 // will not return here | 2848 // will not return here |
2845 bind(&L); | 2849 bind(&L); |
2846 } | 2850 } |
2847 | 2851 |
2848 | 2852 |
2849 void MacroAssembler::Abort(BailoutReason reason) { | 2853 void MacroAssembler::Abort(BailoutReason reason) { |
2850 Label abort_start; | 2854 Label abort_start; |
2851 bind(&abort_start); | 2855 bind(&abort_start); |
2852 #ifdef DEBUG | 2856 #ifdef DEBUG |
2853 const char* msg = GetBailoutReason(reason); | 2857 const char* msg = GetBailoutReason(reason); |
2854 if (msg != NULL) { | 2858 if (msg != NULL) { |
2855 RecordComment("Abort message: "); | 2859 RecordComment("Abort message: "); |
2856 RecordComment(msg); | 2860 RecordComment(msg); |
2857 } | 2861 } |
2858 | 2862 |
2859 if (FLAG_trap_on_abort) { | 2863 if (FLAG_trap_on_abort) { |
2860 stop(msg); | 2864 stop(msg); |
2861 return; | 2865 return; |
2862 } | 2866 } |
2863 #endif | 2867 #endif |
2864 | 2868 |
2865 mov(r0, Operand(Smi::FromInt(reason))); | 2869 LoadSmiLiteral(r0, Smi::FromInt(reason)); |
2866 push(r0); | 2870 push(r0); |
2867 | |
2868 // Disable stub call restrictions to always allow calls to abort. | 2871 // Disable stub call restrictions to always allow calls to abort. |
2869 if (!has_frame_) { | 2872 if (!has_frame_) { |
2870 // We don't actually want to generate a pile of code for this, so just | 2873 // We don't actually want to generate a pile of code for this, so just |
2871 // claim there is a stack frame, without generating one. | 2874 // claim there is a stack frame, without generating one. |
2872 FrameScope scope(this, StackFrame::NONE); | 2875 FrameScope scope(this, StackFrame::NONE); |
2873 CallRuntime(Runtime::kAbort, 1); | 2876 CallRuntime(Runtime::kAbort, 1); |
2874 } else { | 2877 } else { |
2875 CallRuntime(Runtime::kAbort, 1); | 2878 CallRuntime(Runtime::kAbort, 1); |
2876 } | 2879 } |
2877 // will not return here | 2880 // will not return here |
2878 if (is_const_pool_blocked()) { | |
2879 // If the calling code cares about the exact number of | |
2880 // instructions generated, we insert padding here to keep the size | |
2881 // of the Abort macro constant. | |
2882 static const int kExpectedAbortInstructions = 7; | |
2883 int abort_instructions = InstructionsGeneratedSince(&abort_start); | |
2884 ASSERT(abort_instructions <= kExpectedAbortInstructions); | |
2885 while (abort_instructions++ < kExpectedAbortInstructions) { | |
2886 nop(); | |
2887 } | |
2888 } | |
2889 } | 2881 } |
2890 | 2882 |
2891 | 2883 |
2892 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 2884 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
2893 if (context_chain_length > 0) { | 2885 if (context_chain_length > 0) { |
2894 // Move up the chain of contexts to the context containing the slot. | 2886 // Move up the chain of contexts to the context containing the slot. |
2895 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 2887 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
2896 for (int i = 1; i < context_chain_length; i++) { | 2888 for (int i = 1; i < context_chain_length; i++) { |
2897 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 2889 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
2898 } | 2890 } |
2899 } else { | 2891 } else { |
2900 // Slot is in the current function context. Move it into the | 2892 // Slot is in the current function context. Move it into the |
2901 // destination register in case we store into it (the write barrier | 2893 // destination register in case we store into it (the write barrier |
 2902 // cannot be allowed to destroy the context in cp). | 2894 // cannot be allowed to destroy the context in cp). |
2903 mov(dst, cp); | 2895 mr(dst, cp); |
2904 } | 2896 } |
2905 } | 2897 } |
2906 | 2898 |
2907 | 2899 |
2908 void MacroAssembler::LoadTransitionedArrayMapConditional( | 2900 void MacroAssembler::LoadTransitionedArrayMapConditional( |
2909 ElementsKind expected_kind, | 2901 ElementsKind expected_kind, |
2910 ElementsKind transitioned_kind, | 2902 ElementsKind transitioned_kind, |
2911 Register map_in_out, | 2903 Register map_in_out, |
2912 Register scratch, | 2904 Register scratch, |
2913 Label* no_map_match) { | 2905 Label* no_map_match) { |
2914 // Load the global or builtins object from the current context. | 2906 // Load the global or builtins object from the current context. |
2915 ldr(scratch, | 2907 LoadP(scratch, |
2916 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2908 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2917 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 2909 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
2918 | 2910 |
2919 // Check that the function's map is the same as the expected cached map. | 2911 // Check that the function's map is the same as the expected cached map. |
2920 ldr(scratch, | 2912 LoadP(scratch, |
2921 MemOperand(scratch, | 2913 MemOperand(scratch, |
2922 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); | 2914 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); |
2923 size_t offset = expected_kind * kPointerSize + | 2915 size_t offset = expected_kind * kPointerSize + |
2924 FixedArrayBase::kHeaderSize; | 2916 FixedArrayBase::kHeaderSize; |
2925 ldr(ip, FieldMemOperand(scratch, offset)); | 2917 LoadP(ip, FieldMemOperand(scratch, offset)); |
2926 cmp(map_in_out, ip); | 2918 cmp(map_in_out, ip); |
2927 b(ne, no_map_match); | 2919 bne(no_map_match); |
2928 | 2920 |
2929 // Use the transitioned cached map. | 2921 // Use the transitioned cached map. |
2930 offset = transitioned_kind * kPointerSize + | 2922 offset = transitioned_kind * kPointerSize + |
2931 FixedArrayBase::kHeaderSize; | 2923 FixedArrayBase::kHeaderSize; |
2932 ldr(map_in_out, FieldMemOperand(scratch, offset)); | 2924 LoadP(map_in_out, FieldMemOperand(scratch, offset)); |
2933 } | 2925 } |
2934 | 2926 |
2935 | 2927 |
2936 void MacroAssembler::LoadGlobalFunction(int index, Register function) { | 2928 void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
2937 // Load the global or builtins object from the current context. | 2929 // Load the global or builtins object from the current context. |
2938 ldr(function, | 2930 LoadP(function, |
2939 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2931 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2940 // Load the native context from the global or builtins object. | 2932 // Load the native context from the global or builtins object. |
2941 ldr(function, FieldMemOperand(function, | 2933 LoadP(function, FieldMemOperand(function, |
2942 GlobalObject::kNativeContextOffset)); | 2934 GlobalObject::kNativeContextOffset)); |
2943 // Load the function from the native context. | 2935 // Load the function from the native context. |
2944 ldr(function, MemOperand(function, Context::SlotOffset(index))); | 2936 LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0); |
2945 } | 2937 } |
2946 | 2938 |
2947 | 2939 |
2948 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, | 2940 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, |
2949 Register map, | 2941 Register map, |
2950 Register scratch) { | 2942 Register scratch) { |
2951 // Load the initial map. The global functions all have initial maps. | 2943 // Load the initial map. The global functions all have initial maps. |
2952 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2944 LoadP(map, |
| 2945 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
2953 if (emit_debug_code()) { | 2946 if (emit_debug_code()) { |
2954 Label ok, fail; | 2947 Label ok, fail; |
2955 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); | 2948 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); |
2956 b(&ok); | 2949 b(&ok); |
2957 bind(&fail); | 2950 bind(&fail); |
2958 Abort(kGlobalFunctionsMustHaveInitialMap); | 2951 Abort(kGlobalFunctionsMustHaveInitialMap); |
2959 bind(&ok); | 2952 bind(&ok); |
2960 } | 2953 } |
2961 } | 2954 } |
2962 | 2955 |
2963 | 2956 |
2964 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( | 2957 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( |
2965 Register reg, | 2958 Register reg, |
2966 Register scratch, | 2959 Register scratch, |
2967 Label* not_power_of_two_or_zero) { | 2960 Label* not_power_of_two_or_zero) { |
2968 sub(scratch, reg, Operand(1), SetCC); | 2961 subi(scratch, reg, Operand(1)); |
2969 b(mi, not_power_of_two_or_zero); | 2962 cmpi(scratch, Operand::Zero()); |
2970 tst(scratch, reg); | 2963 blt(not_power_of_two_or_zero); |
2971 b(ne, not_power_of_two_or_zero); | 2964 and_(r0, scratch, reg, SetRC); |
| 2965 bne(not_power_of_two_or_zero, cr0); |
2972 } | 2966 } |
2973 | 2967 |
2974 | 2968 |
2975 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg( | 2969 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg( |
2976 Register reg, | 2970 Register reg, |
2977 Register scratch, | 2971 Register scratch, |
2978 Label* zero_and_neg, | 2972 Label* zero_and_neg, |
2979 Label* not_power_of_two) { | 2973 Label* not_power_of_two) { |
2980 sub(scratch, reg, Operand(1), SetCC); | 2974 subi(scratch, reg, Operand(1)); |
2981 b(mi, zero_and_neg); | 2975 cmpi(scratch, Operand::Zero()); |
2982 tst(scratch, reg); | 2976 blt(zero_and_neg); |
2983 b(ne, not_power_of_two); | 2977 and_(r0, scratch, reg, SetRC); |
| 2978 bne(not_power_of_two, cr0); |
2984 } | 2979 } |
2985 | 2980 |
| 2981 #if !V8_TARGET_ARCH_PPC64 |
| 2982 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { |
| 2983 ASSERT(!reg.is(overflow)); |
| 2984 mr(overflow, reg); // Save original value. |
| 2985 SmiTag(reg); |
| 2986 xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0. |
| 2987 } |
| 2988 |
| 2989 |
| 2990 void MacroAssembler::SmiTagCheckOverflow(Register dst, |
| 2991 Register src, |
| 2992 Register overflow) { |
| 2993 if (dst.is(src)) { |
| 2994 // Fall back to slower case. |
| 2995 SmiTagCheckOverflow(dst, overflow); |
| 2996 } else { |
| 2997 ASSERT(!dst.is(src)); |
| 2998 ASSERT(!dst.is(overflow)); |
| 2999 ASSERT(!src.is(overflow)); |
| 3000 SmiTag(dst, src); |
| 3001 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0. |
| 3002 } |
| 3003 } |
| 3004 #endif |
2986 | 3005 |
2987 void MacroAssembler::JumpIfNotBothSmi(Register reg1, | 3006 void MacroAssembler::JumpIfNotBothSmi(Register reg1, |
2988 Register reg2, | 3007 Register reg2, |
2989 Label* on_not_both_smi) { | 3008 Label* on_not_both_smi) { |
2990 STATIC_ASSERT(kSmiTag == 0); | 3009 STATIC_ASSERT(kSmiTag == 0); |
2991 tst(reg1, Operand(kSmiTagMask)); | 3010 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); |
2992 tst(reg2, Operand(kSmiTagMask), eq); | 3011 orx(r0, reg1, reg2, LeaveRC); |
2993 b(ne, on_not_both_smi); | 3012 JumpIfNotSmi(r0, on_not_both_smi); |
2994 } | 3013 } |
2995 | 3014 |
2996 | 3015 |
2997 void MacroAssembler::UntagAndJumpIfSmi( | 3016 void MacroAssembler::UntagAndJumpIfSmi( |
2998 Register dst, Register src, Label* smi_case) { | 3017 Register dst, Register src, Label* smi_case) { |
2999 STATIC_ASSERT(kSmiTag == 0); | 3018 STATIC_ASSERT(kSmiTag == 0); |
3000 SmiUntag(dst, src, SetCC); | 3019 STATIC_ASSERT(kSmiTagSize == 1); |
3001 b(cc, smi_case); // Shifter carry is not set for a smi. | 3020 TestBit(src, 0, r0); |
| 3021 SmiUntag(dst, src); |
| 3022 beq(smi_case, cr0); |
3002 } | 3023 } |
3003 | 3024 |
3004 | 3025 |
3005 void MacroAssembler::UntagAndJumpIfNotSmi( | 3026 void MacroAssembler::UntagAndJumpIfNotSmi( |
3006 Register dst, Register src, Label* non_smi_case) { | 3027 Register dst, Register src, Label* non_smi_case) { |
3007 STATIC_ASSERT(kSmiTag == 0); | 3028 STATIC_ASSERT(kSmiTag == 0); |
3008 SmiUntag(dst, src, SetCC); | 3029 STATIC_ASSERT(kSmiTagSize == 1); |
3009 b(cs, non_smi_case); // Shifter carry is set for a non-smi. | 3030 TestBit(src, 0, r0); |
| 3031 SmiUntag(dst, src); |
| 3032 bne(non_smi_case, cr0); |
3010 } | 3033 } |
3011 | 3034 |
3012 | 3035 |
3013 void MacroAssembler::JumpIfEitherSmi(Register reg1, | 3036 void MacroAssembler::JumpIfEitherSmi(Register reg1, |
3014 Register reg2, | 3037 Register reg2, |
3015 Label* on_either_smi) { | 3038 Label* on_either_smi) { |
3016 STATIC_ASSERT(kSmiTag == 0); | 3039 STATIC_ASSERT(kSmiTag == 0); |
3017 tst(reg1, Operand(kSmiTagMask)); | 3040 JumpIfSmi(reg1, on_either_smi); |
3018 tst(reg2, Operand(kSmiTagMask), ne); | 3041 JumpIfSmi(reg2, on_either_smi); |
3019 b(eq, on_either_smi); | |
3020 } | 3042 } |
3021 | 3043 |
3022 | 3044 |
3023 void MacroAssembler::AssertNotSmi(Register object) { | 3045 void MacroAssembler::AssertNotSmi(Register object) { |
3024 if (emit_debug_code()) { | 3046 if (emit_debug_code()) { |
3025 STATIC_ASSERT(kSmiTag == 0); | 3047 STATIC_ASSERT(kSmiTag == 0); |
3026 tst(object, Operand(kSmiTagMask)); | 3048 TestIfSmi(object, r0); |
3027 Check(ne, kOperandIsASmi); | 3049 Check(ne, kOperandIsASmi, cr0); |
3028 } | 3050 } |
3029 } | 3051 } |
3030 | 3052 |
3031 | 3053 |
3032 void MacroAssembler::AssertSmi(Register object) { | 3054 void MacroAssembler::AssertSmi(Register object) { |
3033 if (emit_debug_code()) { | 3055 if (emit_debug_code()) { |
3034 STATIC_ASSERT(kSmiTag == 0); | 3056 STATIC_ASSERT(kSmiTag == 0); |
3035 tst(object, Operand(kSmiTagMask)); | 3057 TestIfSmi(object, r0); |
3036 Check(eq, kOperandIsNotSmi); | 3058 Check(eq, kOperandIsNotSmi, cr0); |
3037 } | 3059 } |
3038 } | 3060 } |
3039 | 3061 |
3040 | 3062 |
3041 void MacroAssembler::AssertString(Register object) { | 3063 void MacroAssembler::AssertString(Register object) { |
3042 if (emit_debug_code()) { | 3064 if (emit_debug_code()) { |
3043 STATIC_ASSERT(kSmiTag == 0); | 3065 STATIC_ASSERT(kSmiTag == 0); |
3044 tst(object, Operand(kSmiTagMask)); | 3066 TestIfSmi(object, r0); |
3045 Check(ne, kOperandIsASmiAndNotAString); | 3067 Check(ne, kOperandIsASmiAndNotAString, cr0); |
3046 push(object); | 3068 push(object); |
3047 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); | 3069 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
3048 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); | 3070 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); |
3049 pop(object); | 3071 pop(object); |
3050 Check(lo, kOperandIsNotAString); | 3072 Check(lt, kOperandIsNotAString); |
3051 } | 3073 } |
3052 } | 3074 } |
3053 | 3075 |
3054 | 3076 |
3055 void MacroAssembler::AssertName(Register object) { | 3077 void MacroAssembler::AssertName(Register object) { |
3056 if (emit_debug_code()) { | 3078 if (emit_debug_code()) { |
3057 STATIC_ASSERT(kSmiTag == 0); | 3079 STATIC_ASSERT(kSmiTag == 0); |
3058 tst(object, Operand(kSmiTagMask)); | 3080 TestIfSmi(object, r0); |
3059 Check(ne, kOperandIsASmiAndNotAName); | 3081 Check(ne, kOperandIsASmiAndNotAName, cr0); |
3060 push(object); | 3082 push(object); |
3061 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); | 3083 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
3062 CompareInstanceType(object, object, LAST_NAME_TYPE); | 3084 CompareInstanceType(object, object, LAST_NAME_TYPE); |
3063 pop(object); | 3085 pop(object); |
3064 Check(le, kOperandIsNotAName); | 3086 Check(le, kOperandIsNotAName); |
3065 } | 3087 } |
3066 } | 3088 } |
3067 | 3089 |
3068 | 3090 |
3069 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, | 3091 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, |
3070 Register scratch) { | 3092 Register scratch) { |
3071 if (emit_debug_code()) { | 3093 if (emit_debug_code()) { |
3072 Label done_checking; | 3094 Label done_checking; |
3073 AssertNotSmi(object); | 3095 AssertNotSmi(object); |
3074 CompareRoot(object, Heap::kUndefinedValueRootIndex); | 3096 CompareRoot(object, Heap::kUndefinedValueRootIndex); |
3075 b(eq, &done_checking); | 3097 beq(&done_checking); |
3076 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 3098 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
3077 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex); | 3099 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex); |
3078 Assert(eq, kExpectedUndefinedOrCell); | 3100 Assert(eq, kExpectedUndefinedOrCell); |
3079 bind(&done_checking); | 3101 bind(&done_checking); |
3080 } | 3102 } |
3081 } | 3103 } |
3082 | 3104 |
3083 | 3105 |
3084 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { | 3106 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { |
3085 if (emit_debug_code()) { | 3107 if (emit_debug_code()) { |
3086 CompareRoot(reg, index); | 3108 CompareRoot(reg, index); |
3087 Check(eq, kHeapNumberMapRegisterClobbered); | 3109 Check(eq, kHeapNumberMapRegisterClobbered); |
3088 } | 3110 } |
3089 } | 3111 } |
3090 | 3112 |
3091 | 3113 |
3092 void MacroAssembler::JumpIfNotHeapNumber(Register object, | 3114 void MacroAssembler::JumpIfNotHeapNumber(Register object, |
3093 Register heap_number_map, | 3115 Register heap_number_map, |
3094 Register scratch, | 3116 Register scratch, |
3095 Label* on_not_heap_number) { | 3117 Label* on_not_heap_number) { |
3096 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 3118 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
3097 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 3119 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
3098 cmp(scratch, heap_number_map); | 3120 cmp(scratch, heap_number_map); |
3099 b(ne, on_not_heap_number); | 3121 bne(on_not_heap_number); |
3100 } | 3122 } |
3101 | 3123 |
3102 | 3124 |
3103 void MacroAssembler::LookupNumberStringCache(Register object, | 3125 void MacroAssembler::LookupNumberStringCache(Register object, |
3104 Register result, | 3126 Register result, |
3105 Register scratch1, | 3127 Register scratch1, |
3106 Register scratch2, | 3128 Register scratch2, |
3107 Register scratch3, | 3129 Register scratch3, |
3108 Label* not_found) { | 3130 Label* not_found) { |
3109 // Use of registers. Register result is used as a temporary. | 3131 // Use of registers. Register result is used as a temporary. |
3110 Register number_string_cache = result; | 3132 Register number_string_cache = result; |
3111 Register mask = scratch3; | 3133 Register mask = scratch3; |
3112 | 3134 |
3113 // Load the number string cache. | 3135 // Load the number string cache. |
3114 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | 3136 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
3115 | 3137 |
3116 // Make the hash mask from the length of the number string cache. It | 3138 // Make the hash mask from the length of the number string cache. It |
3117 // contains two elements (number and string) for each cache entry. | 3139 // contains two elements (number and string) for each cache entry. |
3118 ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); | 3140 LoadP(mask, FieldMemOperand(number_string_cache, |
| 3141 FixedArray::kLengthOffset)); |
3119 // Divide length by two (length is a smi). | 3142 // Divide length by two (length is a smi). |
3120 mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); | 3143 ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1); |
3121 sub(mask, mask, Operand(1)); // Make mask. | 3144 subi(mask, mask, Operand(1)); // Make mask. |
3122 | 3145 |
3123 // Calculate the entry in the number string cache. The hash value in the | 3146 // Calculate the entry in the number string cache. The hash value in the |
3124 // number string cache for smis is just the smi value, and the hash for | 3147 // number string cache for smis is just the smi value, and the hash for |
3125 // doubles is the xor of the upper and lower words. See | 3148 // doubles is the xor of the upper and lower words. See |
3126 // Heap::GetNumberStringCache. | 3149 // Heap::GetNumberStringCache. |
3127 Label is_smi; | 3150 Label is_smi; |
3128 Label load_result_from_cache; | 3151 Label load_result_from_cache; |
3129 JumpIfSmi(object, &is_smi); | 3152 JumpIfSmi(object, &is_smi); |
3130 CheckMap(object, | 3153 CheckMap(object, |
3131 scratch1, | 3154 scratch1, |
3132 Heap::kHeapNumberMapRootIndex, | 3155 Heap::kHeapNumberMapRootIndex, |
3133 not_found, | 3156 not_found, |
3134 DONT_DO_SMI_CHECK); | 3157 DONT_DO_SMI_CHECK); |
3135 | 3158 |
3136 STATIC_ASSERT(8 == kDoubleSize); | 3159 STATIC_ASSERT(8 == kDoubleSize); |
3137 add(scratch1, | 3160 lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
3138 object, | 3161 lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
3139 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 3162 xor_(scratch1, scratch1, scratch2); |
3140 ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); | 3163 and_(scratch1, scratch1, mask); |
3141 eor(scratch1, scratch1, Operand(scratch2)); | |
3142 and_(scratch1, scratch1, Operand(mask)); | |
3143 | 3164 |
3144 // Calculate address of entry in string cache: each entry consists | 3165 // Calculate address of entry in string cache: each entry consists |
3145 // of two pointer sized fields. | 3166 // of two pointer sized fields. |
3146 add(scratch1, | 3167 ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1)); |
3147 number_string_cache, | 3168 add(scratch1, number_string_cache, scratch1); |
3148 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | |
3149 | 3169 |
3150 Register probe = mask; | 3170 Register probe = mask; |
3151 ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 3171 LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
3152 JumpIfSmi(probe, not_found); | 3172 JumpIfSmi(probe, not_found); |
3153 sub(scratch2, object, Operand(kHeapObjectTag)); | 3173 lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset)); |
3154 vldr(d0, scratch2, HeapNumber::kValueOffset); | 3174 lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset)); |
3155 sub(probe, probe, Operand(kHeapObjectTag)); | 3175 fcmpu(d0, d1); |
3156 vldr(d1, probe, HeapNumber::kValueOffset); | 3176 bne(not_found); // The cache did not contain this value. |
3157 VFPCompareAndSetFlags(d0, d1); | |
3158 b(ne, not_found); // The cache did not contain this value. | |
3159 b(&load_result_from_cache); | 3177 b(&load_result_from_cache); |
3160 | 3178 |
3161 bind(&is_smi); | 3179 bind(&is_smi); |
3162 Register scratch = scratch1; | 3180 Register scratch = scratch1; |
3163 and_(scratch, mask, Operand(object, ASR, 1)); | 3181 SmiUntag(scratch, object); |
| 3182 and_(scratch, mask, scratch); |
3164 // Calculate address of entry in string cache: each entry consists | 3183 // Calculate address of entry in string cache: each entry consists |
3165 // of two pointer sized fields. | 3184 // of two pointer sized fields. |
3166 add(scratch, | 3185 ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1)); |
3167 number_string_cache, | 3186 add(scratch, number_string_cache, scratch); |
3168 Operand(scratch, LSL, kPointerSizeLog2 + 1)); | |
3169 | 3187 |
3170 // Check if the entry is the smi we are looking for. | 3188 // Check if the entry is the smi we are looking for. |
3171 ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | 3189 LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
3172 cmp(object, probe); | 3190 cmp(object, probe); |
3173 b(ne, not_found); | 3191 bne(not_found); |
3174 | 3192 |
3175 // Get the result from the cache. | 3193 // Get the result from the cache. |
3176 bind(&load_result_from_cache); | 3194 bind(&load_result_from_cache); |
3177 ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); | 3195 LoadP(result, |
| 3196 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); |
3178 IncrementCounter(isolate()->counters()->number_to_string_native(), | 3197 IncrementCounter(isolate()->counters()->number_to_string_native(), |
3179 1, | 3198 1, |
3180 scratch1, | 3199 scratch1, |
3181 scratch2); | 3200 scratch2); |
3182 } | 3201 } |
3183 | 3202 |
3184 | 3203 |
3185 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( | 3204 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( |
3186 Register first, | 3205 Register first, |
3187 Register second, | 3206 Register second, |
3188 Register scratch1, | 3207 Register scratch1, |
3189 Register scratch2, | 3208 Register scratch2, |
3190 Label* failure) { | 3209 Label* failure) { |
3191 // Test that both first and second are sequential ASCII strings. | 3210 // Test that both first and second are sequential ASCII strings. |
3192 // Assume that they are non-smis. | 3211 // Assume that they are non-smis. |
3193 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); | 3212 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); |
3194 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); | 3213 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); |
3195 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | 3214 lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
3196 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); | 3215 lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); |
3197 | 3216 |
3198 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, | 3217 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, |
3199 scratch2, | 3218 scratch2, |
3200 scratch1, | 3219 scratch1, |
3201 scratch2, | 3220 scratch2, |
3202 failure); | 3221 failure); |
3203 } | 3222 } |
3204 | 3223 |
3205 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, | 3224 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, |
3206 Register second, | 3225 Register second, |
3207 Register scratch1, | 3226 Register scratch1, |
3208 Register scratch2, | 3227 Register scratch2, |
3209 Label* failure) { | 3228 Label* failure) { |
3210 // Check that neither is a smi. | 3229 // Check that neither is a smi. |
3211 and_(scratch1, first, Operand(second)); | 3230 and_(scratch1, first, second); |
3212 JumpIfSmi(scratch1, failure); | 3231 JumpIfSmi(scratch1, failure); |
3213 JumpIfNonSmisNotBothSequentialAsciiStrings(first, | 3232 JumpIfNonSmisNotBothSequentialAsciiStrings(first, |
3214 second, | 3233 second, |
3215 scratch1, | 3234 scratch1, |
3216 scratch2, | 3235 scratch2, |
3217 failure); | 3236 failure); |
3218 } | 3237 } |
3219 | 3238 |
3220 | 3239 |
3221 void MacroAssembler::JumpIfNotUniqueName(Register reg, | 3240 void MacroAssembler::JumpIfNotUniqueName(Register reg, |
3222 Label* not_unique_name) { | 3241 Label* not_unique_name) { |
3223 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 3242 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
3224 Label succeed; | 3243 Label succeed; |
3225 tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 3244 andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
3226 b(eq, &succeed); | 3245 beq(&succeed, cr0); |
3227 cmp(reg, Operand(SYMBOL_TYPE)); | 3246 cmpi(reg, Operand(SYMBOL_TYPE)); |
3228 b(ne, not_unique_name); | 3247 bne(not_unique_name); |
3229 | 3248 |
3230 bind(&succeed); | 3249 bind(&succeed); |
3231 } | 3250 } |
3232 | 3251 |
3233 | 3252 |
 3234 // Allocates a heap number, or jumps to the gc_required label if the young | 3253 // Allocates a heap number, or jumps to the gc_required label if the young |
 3235 // space is full and a scavenge is needed. | 3254 // space is full and a scavenge is needed. |
3236 void MacroAssembler::AllocateHeapNumber(Register result, | 3255 void MacroAssembler::AllocateHeapNumber(Register result, |
3237 Register scratch1, | 3256 Register scratch1, |
3238 Register scratch2, | 3257 Register scratch2, |
3239 Register heap_number_map, | 3258 Register heap_number_map, |
3240 Label* gc_required, | 3259 Label* gc_required, |
3241 TaggingMode tagging_mode, | 3260 TaggingMode tagging_mode, |
3242 MutableMode mode) { | 3261 MutableMode mode) { |
3243 // Allocate an object in the heap for the heap number and tag it as a heap | 3262 // Allocate an object in the heap for the heap number and tag it as a heap |
3244 // object. | 3263 // object. |
3245 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, | 3264 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, |
3246 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); | 3265 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); |
3247 | 3266 |
3248 Heap::RootListIndex map_index = mode == MUTABLE | 3267 Heap::RootListIndex map_index = mode == MUTABLE |
3249 ? Heap::kMutableHeapNumberMapRootIndex | 3268 ? Heap::kMutableHeapNumberMapRootIndex |
3250 : Heap::kHeapNumberMapRootIndex; | 3269 : Heap::kHeapNumberMapRootIndex; |
3251 AssertIsRoot(heap_number_map, map_index); | 3270 AssertIsRoot(heap_number_map, map_index); |
3252 | 3271 |
3253 // Store heap number map in the allocated object. | 3272 // Store heap number map in the allocated object. |
3254 if (tagging_mode == TAG_RESULT) { | 3273 if (tagging_mode == TAG_RESULT) { |
3255 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); | 3274 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset), |
| 3275 r0); |
3256 } else { | 3276 } else { |
3257 str(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); | 3277 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); |
3258 } | 3278 } |
3259 } | 3279 } |
3260 | 3280 |
3261 | 3281 |
3262 void MacroAssembler::AllocateHeapNumberWithValue(Register result, | 3282 void MacroAssembler::AllocateHeapNumberWithValue(Register result, |
3263 DwVfpRegister value, | 3283 DoubleRegister value, |
3264 Register scratch1, | 3284 Register scratch1, |
3265 Register scratch2, | 3285 Register scratch2, |
3266 Register heap_number_map, | 3286 Register heap_number_map, |
3267 Label* gc_required) { | 3287 Label* gc_required) { |
3268 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); | 3288 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); |
3269 sub(scratch1, result, Operand(kHeapObjectTag)); | 3289 stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset)); |
3270 vstr(value, scratch1, HeapNumber::kValueOffset); | |
3271 } | 3290 } |
3272 | 3291 |
3273 | 3292 |
3274 // Copies a fixed number of fields of heap objects from src to dst. | 3293 // Copies a fixed number of fields of heap objects from src to dst. |
3275 void MacroAssembler::CopyFields(Register dst, | 3294 void MacroAssembler::CopyFields(Register dst, |
3276 Register src, | 3295 Register src, |
3277 LowDwVfpRegister double_scratch, | 3296 RegList temps, |
3278 int field_count) { | 3297 int field_count) { |
3279 int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize); | 3298 // At least one bit set in the first 15 registers. |
3280 for (int i = 0; i < double_count; i++) { | 3299 ASSERT((temps & ((1 << 15) - 1)) != 0); |
3281 vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes)); | 3300 ASSERT((temps & dst.bit()) == 0); |
3282 vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes)); | 3301 ASSERT((temps & src.bit()) == 0); |
| 3302 // Primitive implementation using only one temporary register. |
| 3303 |
| 3304 Register tmp = no_reg; |
| 3305 // Find a temp register in temps list. |
| 3306 for (int i = 0; i < 15; i++) { |
| 3307 if ((temps & (1 << i)) != 0) { |
| 3308 tmp.set_code(i); |
| 3309 break; |
| 3310 } |
3283 } | 3311 } |
| 3312 ASSERT(!tmp.is(no_reg)); |
3284 | 3313 |
3285 STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize); | 3314 for (int i = 0; i < field_count; i++) { |
3286 STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes); | 3315 LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0); |
3287 | 3316 StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0); |
3288 int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize); | |
3289 if (remain != 0) { | |
3290 vldr(double_scratch.low(), | |
3291 FieldMemOperand(src, (field_count - 1) * kPointerSize)); | |
3292 vstr(double_scratch.low(), | |
3293 FieldMemOperand(dst, (field_count - 1) * kPointerSize)); | |
3294 } | 3317 } |
3295 } | 3318 } |
3296 | 3319 |
3297 | 3320 |
3298 void MacroAssembler::CopyBytes(Register src, | 3321 void MacroAssembler::CopyBytes(Register src, |
3299 Register dst, | 3322 Register dst, |
3300 Register length, | 3323 Register length, |
3301 Register scratch) { | 3324 Register scratch) { |
3302 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done; | 3325 Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done; |
| 3326 |
| 3327 ASSERT(!scratch.is(r0)); |
| 3328 |
| 3329 cmpi(length, Operand::Zero()); |
| 3330 beq(&done); |
| 3331 |
| 3332 // Check src alignment and length to see whether word_loop is possible |
| 3333 andi(scratch, src, Operand(kPointerSize - 1)); |
| 3334 beq(&aligned, cr0); |
| 3335 subfic(scratch, scratch, Operand(kPointerSize * 2)); |
| 3336 cmp(length, scratch); |
| 3337 blt(&byte_loop); |
3303 | 3338 |
3304 // Align src before copying in word size chunks. | 3339 // Align src before copying in word size chunks. |
3305 cmp(length, Operand(kPointerSize)); | 3340 subi(scratch, scratch, Operand(kPointerSize)); |
3306 b(le, &byte_loop); | 3341 mtctr(scratch); |
| 3342 bind(&align_loop); |
| 3343 lbz(scratch, MemOperand(src)); |
| 3344 addi(src, src, Operand(1)); |
| 3345 subi(length, length, Operand(1)); |
| 3346 stb(scratch, MemOperand(dst)); |
| 3347 addi(dst, dst, Operand(1)); |
| 3348 bdnz(&align_loop); |
3307 | 3349 |
3308 bind(&align_loop_1); | 3350 bind(&aligned); |
3309 tst(src, Operand(kPointerSize - 1)); | 3351 |
3310 b(eq, &word_loop); | |
3311 ldrb(scratch, MemOperand(src, 1, PostIndex)); | |
3312 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3313 sub(length, length, Operand(1), SetCC); | |
3314 b(&align_loop_1); | |
3315 // Copy bytes in word size chunks. | 3352 // Copy bytes in word size chunks. |
| 3353 if (emit_debug_code()) { |
| 3354 andi(r0, src, Operand(kPointerSize - 1)); |
| 3355 Assert(eq, kExpectingAlignmentForCopyBytes, cr0); |
| 3356 } |
| 3357 |
| 3358 ShiftRightImm(scratch, length, Operand(kPointerSizeLog2)); |
| 3359 cmpi(scratch, Operand::Zero()); |
| 3360 beq(&byte_loop); |
| 3361 |
| 3362 mtctr(scratch); |
3316 bind(&word_loop); | 3363 bind(&word_loop); |
3317 if (emit_debug_code()) { | 3364 LoadP(scratch, MemOperand(src)); |
3318 tst(src, Operand(kPointerSize - 1)); | 3365 addi(src, src, Operand(kPointerSize)); |
3319 Assert(eq, kExpectingAlignmentForCopyBytes); | 3366 subi(length, length, Operand(kPointerSize)); |
| 3367 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { |
| 3368 // currently false for PPC - but possible future opt |
| 3369 StoreP(scratch, MemOperand(dst)); |
| 3370 addi(dst, dst, Operand(kPointerSize)); |
| 3371 } else { |
| 3372 #if V8_TARGET_LITTLE_ENDIAN |
| 3373 stb(scratch, MemOperand(dst, 0)); |
| 3374 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3375 stb(scratch, MemOperand(dst, 1)); |
| 3376 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3377 stb(scratch, MemOperand(dst, 2)); |
| 3378 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3379 stb(scratch, MemOperand(dst, 3)); |
| 3380 #if V8_TARGET_ARCH_PPC64 |
| 3381 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3382 stb(scratch, MemOperand(dst, 4)); |
| 3383 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3384 stb(scratch, MemOperand(dst, 5)); |
| 3385 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3386 stb(scratch, MemOperand(dst, 6)); |
| 3387 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3388 stb(scratch, MemOperand(dst, 7)); |
| 3389 #endif |
| 3390 #else |
| 3391 #if V8_TARGET_ARCH_PPC64 |
| 3392 stb(scratch, MemOperand(dst, 7)); |
| 3393 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3394 stb(scratch, MemOperand(dst, 6)); |
| 3395 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3396 stb(scratch, MemOperand(dst, 5)); |
| 3397 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3398 stb(scratch, MemOperand(dst, 4)); |
| 3399 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3400 #endif |
| 3401 stb(scratch, MemOperand(dst, 3)); |
| 3402 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3403 stb(scratch, MemOperand(dst, 2)); |
| 3404 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3405 stb(scratch, MemOperand(dst, 1)); |
| 3406 ShiftRightImm(scratch, scratch, Operand(8)); |
| 3407 stb(scratch, MemOperand(dst, 0)); |
| 3408 #endif |
| 3409 addi(dst, dst, Operand(kPointerSize)); |
3320 } | 3410 } |
3321 cmp(length, Operand(kPointerSize)); | 3411 bdnz(&word_loop); |
3322 b(lt, &byte_loop); | |
3323 ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); | |
3324 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { | |
3325 str(scratch, MemOperand(dst, kPointerSize, PostIndex)); | |
3326 } else { | |
3327 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3328 mov(scratch, Operand(scratch, LSR, 8)); | |
3329 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3330 mov(scratch, Operand(scratch, LSR, 8)); | |
3331 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3332 mov(scratch, Operand(scratch, LSR, 8)); | |
3333 strb(scratch, MemOperand(dst, 1, PostIndex)); | |
3334 } | |
3335 sub(length, length, Operand(kPointerSize)); | |
3336 b(&word_loop); | |
3337 | 3412 |
3338 // Copy the last bytes if any left. | 3413 // Copy the last bytes if any left. |
| 3414 cmpi(length, Operand::Zero()); |
| 3415 beq(&done); |
| 3416 |
3339 bind(&byte_loop); | 3417 bind(&byte_loop); |
3340 cmp(length, Operand::Zero()); | 3418 mtctr(length); |
3341 b(eq, &done); | |
3342 bind(&byte_loop_1); | 3419 bind(&byte_loop_1); |
3343 ldrb(scratch, MemOperand(src, 1, PostIndex)); | 3420 lbz(scratch, MemOperand(src)); |
3344 strb(scratch, MemOperand(dst, 1, PostIndex)); | 3421 addi(src, src, Operand(1)); |
3345 sub(length, length, Operand(1), SetCC); | 3422 stb(scratch, MemOperand(dst)); |
3346 b(ne, &byte_loop_1); | 3423 addi(dst, dst, Operand(1)); |
| 3424 bdnz(&byte_loop_1); |
| 3425 |
3347 bind(&done); | 3426 bind(&done); |
3348 } | 3427 } |
3349 | 3428 |
3350 | 3429 |
| 3430 void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset, |
| 3431 Register count, |
| 3432 Register filler) { |
| 3433 Label loop; |
| 3434 mtctr(count); |
| 3435 bind(&loop); |
| 3436 StoreP(filler, MemOperand(start_offset)); |
| 3437 addi(start_offset, start_offset, Operand(kPointerSize)); |
| 3438 bdnz(&loop); |
| 3439 } |
| 3440 |
3351 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, | 3441 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, |
3352 Register end_offset, | 3442 Register end_offset, |
3353 Register filler) { | 3443 Register filler) { |
3354 Label loop, entry; | 3444 Label done; |
3355 b(&entry); | 3445 sub(r0, end_offset, start_offset, LeaveOE, SetRC); |
3356 bind(&loop); | 3446 beq(&done, cr0); |
3357 str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); | 3447 ShiftRightImm(r0, r0, Operand(kPointerSizeLog2)); |
3358 bind(&entry); | 3448 InitializeNFieldsWithFiller(start_offset, r0, filler); |
3359 cmp(start_offset, end_offset); | 3449 bind(&done); |
3360 b(lt, &loop); | |
3361 } | 3450 } |
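
Note: the new InitializeFieldsWithFiller computes the slot count as (end_offset - start_offset) >> kPointerSizeLog2 and delegates to the counted loop above. A small sketch of the intended semantics, assuming kPointerSizeLog2 == log2(sizeof(void*)) as in V8:

    #include <cstddef>
    #include <cstdint>

    // Fill every pointer-sized slot in [start, end) with `filler`.
    void FillFieldsSketch(uintptr_t* start, uintptr_t* end, uintptr_t filler) {
      ptrdiff_t count = end - start;  // (end_offset - start_offset) >> kPointerSizeLog2
      for (ptrdiff_t i = 0; i < count; ++i) {
        start[i] = filler;            // StoreP(filler, MemOperand(start_offset))
      }
    }
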
3362 | 3451 |
3363 | 3452 |
3364 void MacroAssembler::CheckFor32DRegs(Register scratch) { | 3453 void MacroAssembler::SaveFPRegs(Register location, int first, int count) { |
3365 mov(scratch, Operand(ExternalReference::cpu_features())); | 3454 ASSERT(count > 0); |
3366 ldr(scratch, MemOperand(scratch)); | 3455 int cur = first; |
3367 tst(scratch, Operand(1u << VFP32DREGS)); | 3456 subi(location, location, Operand(count * kDoubleSize)); |
| 3457 for (int i = 0; i < count; i++) { |
| 3458 DoubleRegister reg = DoubleRegister::from_code(cur++); |
| 3459 stfd(reg, MemOperand(location, i * kDoubleSize)); |
| 3460 } |
3368 } | 3461 } |
3369 | 3462 |
3370 | 3463 |
3371 void MacroAssembler::SaveFPRegs(Register location, Register scratch) { | 3464 void MacroAssembler::RestoreFPRegs(Register location, int first, int count) { |
3372 CheckFor32DRegs(scratch); | 3465 ASSERT(count > 0); |
3373 vstm(db_w, location, d16, d31, ne); | 3466 int cur = first + count - 1; |
3374 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); | 3467 for (int i = count - 1; i >= 0; i--) { |
3375 vstm(db_w, location, d0, d15); | 3468 DoubleRegister reg = DoubleRegister::from_code(cur--); |
| 3469 lfd(reg, MemOperand(location, i * kDoubleSize)); |
| 3470 } |
| 3471 addi(location, location, Operand(count * kDoubleSize)); |
3376 } | 3472 } |
3377 | 3473 |
3378 | 3474 |
3379 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { | |
3380 CheckFor32DRegs(scratch); | |
3381 vldm(ia_w, location, d0, d15); | |
3382 vldm(ia_w, location, d16, d31, ne); | |
3383 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); | |
3384 } | |
3385 | |
3386 | |
3387 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( | 3475 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( |
3388 Register first, | 3476 Register first, |
3389 Register second, | 3477 Register second, |
3390 Register scratch1, | 3478 Register scratch1, |
3391 Register scratch2, | 3479 Register scratch2, |
3392 Label* failure) { | 3480 Label* failure) { |
3393 const int kFlatAsciiStringMask = | 3481 const int kFlatAsciiStringMask = |
3394 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | 3482 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
3395 const int kFlatAsciiStringTag = | 3483 const int kFlatAsciiStringTag = |
3396 kStringTag | kOneByteStringTag | kSeqStringTag; | 3484 kStringTag | kOneByteStringTag | kSeqStringTag; |
3397 and_(scratch1, first, Operand(kFlatAsciiStringMask)); | 3485 andi(scratch1, first, Operand(kFlatAsciiStringMask)); |
3398 and_(scratch2, second, Operand(kFlatAsciiStringMask)); | 3486 andi(scratch2, second, Operand(kFlatAsciiStringMask)); |
3399 cmp(scratch1, Operand(kFlatAsciiStringTag)); | 3487 cmpi(scratch1, Operand(kFlatAsciiStringTag)); |
3400 // Ignore second test if first test failed. | 3488 bne(failure); |
3401 cmp(scratch2, Operand(kFlatAsciiStringTag), eq); | 3489 cmpi(scratch2, Operand(kFlatAsciiStringTag)); |
3402 b(ne, failure); | 3490 bne(failure); |
3403 } | 3491 } |
3404 | 3492 |
3405 | 3493 |
3406 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, | 3494 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, |
3407 Register scratch, | 3495 Register scratch, |
3408 Label* failure) { | 3496 Label* failure) { |
3409 const int kFlatAsciiStringMask = | 3497 const int kFlatAsciiStringMask = |
3410 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | 3498 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
3411 const int kFlatAsciiStringTag = | 3499 const int kFlatAsciiStringTag = |
3412 kStringTag | kOneByteStringTag | kSeqStringTag; | 3500 kStringTag | kOneByteStringTag | kSeqStringTag; |
3413 and_(scratch, type, Operand(kFlatAsciiStringMask)); | 3501 andi(scratch, type, Operand(kFlatAsciiStringMask)); |
3414 cmp(scratch, Operand(kFlatAsciiStringTag)); | 3502 cmpi(scratch, Operand(kFlatAsciiStringTag)); |
3415 b(ne, failure); | 3503 bne(failure); |
3416 } | 3504 } |
3417 | 3505 |
3418 static const int kRegisterPassedArguments = 4; | 3506 static const int kRegisterPassedArguments = 8; |
3419 | 3507 |
3420 | 3508 |
3421 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, | 3509 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, |
3422 int num_double_arguments) { | 3510 int num_double_arguments) { |
3423 int stack_passed_words = 0; | 3511 int stack_passed_words = 0; |
3424 if (use_eabi_hardfloat()) { | 3512 if (num_double_arguments > DoubleRegister::NumRegisters()) { |
3425 // In the hard floating point calling convention, we can use | |
3426 // all double registers to pass doubles. | |
3427 if (num_double_arguments > DoubleRegister::NumRegisters()) { | |
3428 stack_passed_words += | 3513 stack_passed_words += |
3429 2 * (num_double_arguments - DoubleRegister::NumRegisters()); | 3514 2 * (num_double_arguments - DoubleRegister::NumRegisters()); |
3430 } | |
3431 } else { | |
3432 // In the soft floating point calling convention, every double | |
3433 // argument is passed using two registers. | |
3434 num_reg_arguments += 2 * num_double_arguments; | |
3435 } | 3515 } |
3436 // Up to four simple arguments are passed in registers r0..r3. | 3516 // Up to 8 simple arguments are passed in registers r3..r10. |
3437 if (num_reg_arguments > kRegisterPassedArguments) { | 3517 if (num_reg_arguments > kRegisterPassedArguments) { |
3438 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; | 3518 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; |
3439 } | 3519 } |
3440 return stack_passed_words; | 3520 return stack_passed_words; |
3441 } | 3521 } |
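
Note: with kRegisterPassedArguments now 8 (r3..r10), the accounting above can be sanity-checked with a direct mirror of the formula; the FP register count is a parameter here because DoubleRegister::NumRegisters() is target-specific (illustrative sketch, not V8 code):

    #include <cassert>

    // Mirror of CalculateStackPassedWords: integer args beyond 8 GPRs spill one
    // word each; doubles beyond the available FP registers spill two words each.
    int StackPassedWordsSketch(int reg_args, int double_args, int fp_regs) {
      int words = 0;
      if (double_args > fp_regs) words += 2 * (double_args - fp_regs);
      if (reg_args > 8) words += reg_args - 8;
      return words;
    }

    int main() {
      // 13 is just an example FP register count, not a V8 constant.
      assert(StackPassedWordsSketch(10, 0, 13) == 2);  // two integer args on the stack
      assert(StackPassedWordsSketch(2, 15, 13) == 4);  // two doubles on the stack
      return 0;
    }
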
3442 | 3522 |
3443 | 3523 |
3444 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, | 3524 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, |
3445 Register index, | 3525 Register index, |
3446 Register value, | 3526 Register value, |
3447 uint32_t encoding_mask) { | 3527 uint32_t encoding_mask) { |
3448 Label is_object; | 3528 Label is_object; |
3449 SmiTst(string); | 3529 TestIfSmi(string, r0); |
3450 Check(ne, kNonObject); | 3530 Check(ne, kNonObject, cr0); |
3451 | 3531 |
3452 ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); | 3532 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset)); |
3453 ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); | 3533 lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); |
3454 | 3534 |
3455 and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); | 3535 andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); |
3456 cmp(ip, Operand(encoding_mask)); | 3536 cmpi(ip, Operand(encoding_mask)); |
3457 Check(eq, kUnexpectedStringType); | 3537 Check(eq, kUnexpectedStringType); |
3458 | 3538 |
3459 // The index is assumed to be untagged coming in, tag it to compare with the | 3539 // The index is assumed to be untagged coming in, tag it to compare with the |
3460 // string length without using a temp register, it is restored at the end of | 3540 // string length without using a temp register, it is restored at the end of |
3461 // this function. | 3541 // this function. |
| 3542 #if !V8_TARGET_ARCH_PPC64 |
3462 Label index_tag_ok, index_tag_bad; | 3543 Label index_tag_ok, index_tag_bad; |
3463 TrySmiTag(index, index, &index_tag_bad); | 3544 JumpIfNotSmiCandidate(index, r0, &index_tag_bad); |
| 3545 #endif |
| 3546 SmiTag(index, index); |
| 3547 #if !V8_TARGET_ARCH_PPC64 |
3464 b(&index_tag_ok); | 3548 b(&index_tag_ok); |
3465 bind(&index_tag_bad); | 3549 bind(&index_tag_bad); |
3466 Abort(kIndexIsTooLarge); | 3550 Abort(kIndexIsTooLarge); |
3467 bind(&index_tag_ok); | 3551 bind(&index_tag_ok); |
| 3552 #endif |
3468 | 3553 |
3469 ldr(ip, FieldMemOperand(string, String::kLengthOffset)); | 3554 LoadP(ip, FieldMemOperand(string, String::kLengthOffset)); |
3470 cmp(index, ip); | 3555 cmp(index, ip); |
3471 Check(lt, kIndexIsTooLarge); | 3556 Check(lt, kIndexIsTooLarge); |
3472 | 3557 |
3473 cmp(index, Operand(Smi::FromInt(0))); | 3558 ASSERT(Smi::FromInt(0) == 0); |
| 3559 cmpi(index, Operand::Zero()); |
3474 Check(ge, kIndexIsNegative); | 3560 Check(ge, kIndexIsNegative); |
3475 | 3561 |
3476 SmiUntag(index, index); | 3562 SmiUntag(index, index); |
3477 } | 3563 } |
3478 | 3564 |
3479 | 3565 |
3480 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3566 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3481 int num_double_arguments, | 3567 int num_double_arguments, |
3482 Register scratch) { | 3568 Register scratch) { |
3483 int frame_alignment = ActivationFrameAlignment(); | 3569 int frame_alignment = ActivationFrameAlignment(); |
3484 int stack_passed_arguments = CalculateStackPassedWords( | 3570 int stack_passed_arguments = CalculateStackPassedWords( |
3485 num_reg_arguments, num_double_arguments); | 3571 num_reg_arguments, num_double_arguments); |
| 3572 int stack_space = kNumRequiredStackFrameSlots; |
| 3573 |
3486 if (frame_alignment > kPointerSize) { | 3574 if (frame_alignment > kPointerSize) { |
3487 // Make stack end at alignment and make room for num_arguments - 4 words | 3575 // Make stack end at alignment and make room for stack arguments |
3488 // and the original value of sp. | 3576 // while preserving the original value of sp.
3489 mov(scratch, sp); | 3577 mr(scratch, sp); |
3490 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); | 3578 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize)); |
3491 ASSERT(IsPowerOf2(frame_alignment)); | 3579 ASSERT(IsPowerOf2(frame_alignment)); |
3492 and_(sp, sp, Operand(-frame_alignment)); | 3580 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); |
3493 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3581 StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
3494 } else { | 3582 } else { |
3495 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 3583 // Make room for stack arguments |
| 3584 stack_space += stack_passed_arguments; |
3496 } | 3585 } |
| 3586 |
| 3587 // Allocate frame with required slots to make ABI work. |
| 3588 li(r0, Operand::Zero()); |
| 3589 StorePU(r0, MemOperand(sp, -stack_space * kPointerSize)); |
3497 } | 3590 } |
3498 | 3591 |
3499 | 3592 |
3500 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3593 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3501 Register scratch) { | 3594 Register scratch) { |
3502 PrepareCallCFunction(num_reg_arguments, 0, scratch); | 3595 PrepareCallCFunction(num_reg_arguments, 0, scratch); |
3503 } | 3596 } |
3504 | 3597 |
3505 | 3598 |
3506 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { | 3599 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { |
3507 ASSERT(src.is(d0)); | 3600 Move(d1, src); |
3508 if (!use_eabi_hardfloat()) { | 3601 } |
3509 vmov(r0, r1, src); | 3602 |
| 3603 |
| 3604 void MacroAssembler::MovToFloatResult(DoubleRegister src) { |
| 3605 Move(d1, src); |
| 3606 } |
| 3607 |
| 3608 |
| 3609 void MacroAssembler::MovToFloatParameters(DoubleRegister src1, |
| 3610 DoubleRegister src2) { |
| 3611 if (src2.is(d1)) { |
| 3612 ASSERT(!src1.is(d2)); |
| 3613 Move(d2, src2); |
| 3614 Move(d1, src1); |
| 3615 } else { |
| 3616 Move(d1, src1); |
| 3617 Move(d2, src2); |
3510 } | 3618 } |
3511 } | 3619 } |
3512 | 3620 |
3513 | |
3514 // On ARM this is just a synonym to make the purpose clear. | |
3515 void MacroAssembler::MovToFloatResult(DwVfpRegister src) { | |
3516 MovToFloatParameter(src); | |
3517 } | |
3518 | |
3519 | |
3520 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, | |
3521 DwVfpRegister src2) { | |
3522 ASSERT(src1.is(d0)); | |
3523 ASSERT(src2.is(d1)); | |
3524 if (!use_eabi_hardfloat()) { | |
3525 vmov(r0, r1, src1); | |
3526 vmov(r2, r3, src2); | |
3527 } | |
3528 } | |
3529 | |
3530 | 3621 |
3531 void MacroAssembler::CallCFunction(ExternalReference function, | 3622 void MacroAssembler::CallCFunction(ExternalReference function, |
3532 int num_reg_arguments, | 3623 int num_reg_arguments, |
3533 int num_double_arguments) { | 3624 int num_double_arguments) { |
3534 mov(ip, Operand(function)); | 3625 mov(ip, Operand(function)); |
3535 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); | 3626 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); |
3536 } | 3627 } |
3537 | 3628 |
3538 | 3629 |
3539 void MacroAssembler::CallCFunction(Register function, | 3630 void MacroAssembler::CallCFunction(Register function, |
(...skipping 12 matching lines...) |
3552 void MacroAssembler::CallCFunction(Register function, | 3643 void MacroAssembler::CallCFunction(Register function, |
3553 int num_arguments) { | 3644 int num_arguments) { |
3554 CallCFunction(function, num_arguments, 0); | 3645 CallCFunction(function, num_arguments, 0); |
3555 } | 3646 } |
3556 | 3647 |
3557 | 3648 |
3558 void MacroAssembler::CallCFunctionHelper(Register function, | 3649 void MacroAssembler::CallCFunctionHelper(Register function, |
3559 int num_reg_arguments, | 3650 int num_reg_arguments, |
3560 int num_double_arguments) { | 3651 int num_double_arguments) { |
3561 ASSERT(has_frame()); | 3652 ASSERT(has_frame()); |
3562 // Make sure that the stack is aligned before calling a C function unless | |
3563 // running in the simulator. The simulator has its own alignment check which | |
3564 // provides more information. | |
3565 #if V8_HOST_ARCH_ARM | |
3566 if (emit_debug_code()) { | |
3567 int frame_alignment = base::OS::ActivationFrameAlignment(); | |
3568 int frame_alignment_mask = frame_alignment - 1; | |
3569 if (frame_alignment > kPointerSize) { | |
3570 ASSERT(IsPowerOf2(frame_alignment)); | |
3571 Label alignment_as_expected; | |
3572 tst(sp, Operand(frame_alignment_mask)); | |
3573 b(eq, &alignment_as_expected); | |
3574 // Don't use Check here, as it will call Runtime_Abort possibly | |
3575 // re-entering here. | |
3576 stop("Unexpected alignment"); | |
3577 bind(&alignment_as_expected); | |
3578 } | |
3579 } | |
3580 #endif | |
3581 | |
3582 // Just call directly. The function called cannot cause a GC, or | 3653 // Just call directly. The function called cannot cause a GC, or |
3583 // allow preemption, so the return address in the link register | 3654 // allow preemption, so the return address in the link register |
3584 // stays correct. | 3655 // stays correct. |
3585 Call(function); | 3656 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) |
| 3657 // AIX uses a function descriptor. When calling C code be aware |
| 3658 // of this descriptor and pick up the entry point and TOC pointer from it
| 3659 LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize)); |
| 3660 LoadP(ip, MemOperand(function, 0)); |
| 3661 Register dest = ip; |
| 3662 #elif ABI_TOC_ADDRESSABILITY_VIA_IP |
| 3663 Move(ip, function); |
| 3664 Register dest = ip; |
| 3665 #else |
| 3666 Register dest = function; |
| 3667 #endif |
| 3668 |
| 3669 Call(dest); |
| 3670 |
| 3671 // Remove the frame created in PrepareCallCFunction
3586 int stack_passed_arguments = CalculateStackPassedWords( | 3672 int stack_passed_arguments = CalculateStackPassedWords( |
3587 num_reg_arguments, num_double_arguments); | 3673 num_reg_arguments, num_double_arguments); |
| 3674 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments; |
3588 if (ActivationFrameAlignment() > kPointerSize) { | 3675 if (ActivationFrameAlignment() > kPointerSize) { |
3589 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3676 LoadP(sp, MemOperand(sp, stack_space * kPointerSize)); |
3590 } else { | 3677 } else { |
3591 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); | 3678 addi(sp, sp, Operand(stack_space * kPointerSize)); |
3592 } | 3679 } |
3593 } | 3680 } |
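
Note: the descriptor-aware call path above assumes the AIX / 64-bit ELFv1 convention in which a function symbol points at a descriptor rather than at code. A plain-struct illustration of the layout the two LoadP calls rely on (entry point at offset 0, TOC pointer at offset kPointerSize); this is only a sketch, not a V8 definition:

    // AIX / ELFv1 function descriptor as assumed by CallCFunctionHelper.
    struct FunctionDescriptorSketch {
      void* entry;        // loaded into ip and branched to via CTR
      void* toc;          // loaded into the ABI TOC register (typically r2)
      void* environment;  // static chain; unused on this call path
    };
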
3594 | 3681 |
3595 | 3682 |
3596 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, | 3683 void MacroAssembler::FlushICache(Register address, size_t size, |
3597 Register result, | 3684 Register scratch) { |
3598 Register scratch) { | 3685 Label done; |
3599 Label small_constant_pool_load, load_result; | 3686 |
3600 ldr(result, MemOperand(ldr_location)); | 3687 dcbf(r0, address); |
3601 | 3688 sync(); |
3602 if (FLAG_enable_ool_constant_pool) { | 3689 icbi(r0, address); |
3603 // Check if this is an extended constant pool load. | 3690 isync(); |
3604 and_(scratch, result, Operand(GetConsantPoolLoadMask())); | 3691 |
3605 teq(scratch, Operand(GetConsantPoolLoadPattern())); | 3692 // This code handles ranges which cross a single cacheline boundary. |
3606 b(eq, &small_constant_pool_load); | 3693 // scratch is last cacheline which intersects range. |
3607 if (emit_debug_code()) { | 3694 const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size()); |
3608 // Check that the instruction sequence is: | 3695 |
3609 // movw reg, #offset_low | 3696 ASSERT(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2)); |
3610 // movt reg, #offset_high | 3697 addi(scratch, address, Operand(size - 1)); |
3611 // ldr reg, [pp, reg] | 3698 ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2)); |
3612 Instr patterns[] = {GetMovWPattern(), GetMovTPattern(), | 3699 cmpl(scratch, address); |
3613 GetLdrPpRegOffsetPattern()}; | 3700 ble(&done); |
3614 for (int i = 0; i < 3; i++) { | 3701 |
3615 ldr(result, MemOperand(ldr_location, i * kInstrSize)); | 3702 dcbf(r0, scratch); |
3616 and_(result, result, Operand(patterns[i])); | 3703 sync(); |
3617 cmp(result, Operand(patterns[i])); | 3704 icbi(r0, scratch); |
3618 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); | 3705 isync(); |
3619 } | 3706 |
3620 // Result was clobbered. Restore it. | 3707 bind(&done); |
3621 ldr(result, MemOperand(ldr_location)); | 3708 } |
3622 } | 3709 |
3623 | 3710 |
3624 // Get the offset into the constant pool. First extract movw immediate into | 3711 void MacroAssembler::SetRelocatedValue(Register location, |
3625 // result. | 3712 Register scratch, |
3626 and_(scratch, result, Operand(0xfff)); | 3713 Register new_value) { |
3627 mov(ip, Operand(result, LSR, 4)); | 3714 lwz(scratch, MemOperand(location)); |
3628 and_(ip, ip, Operand(0xf000)); | 3715 |
3629 orr(result, scratch, Operand(ip)); | 3716 #if V8_OOL_CONSTANT_POOL |
3630 // Then extract movt immediate and or into result. | 3717 if (emit_debug_code()) { |
3631 ldr(scratch, MemOperand(ldr_location, kInstrSize)); | 3718 // Check that the instruction sequence is a load from the constant pool |
3632 and_(ip, scratch, Operand(0xf0000)); | 3719 #if V8_TARGET_ARCH_PPC64 |
3633 orr(result, result, Operand(ip, LSL, 12)); | 3720 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16))); |
3634 and_(scratch, scratch, Operand(0xfff)); | 3721 Cmpi(scratch, Operand(ADDI), r0); |
3635 orr(result, result, Operand(scratch, LSL, 16)); | 3722 Check(eq, kTheInstructionShouldBeALi); |
3636 | 3723 lwz(scratch, MemOperand(location, kInstrSize)); |
3637 b(&load_result); | 3724 #endif |
3638 } | 3725 ExtractBitMask(scratch, scratch, 0x1f * B16); |
3639 | 3726 cmpi(scratch, Operand(kConstantPoolRegister.code())); |
3640 bind(&small_constant_pool_load); | |
3641 if (emit_debug_code()) { | |
3642 // Check that the instruction is a ldr reg, [<pc or pp> + offset] . | |
3643 and_(result, result, Operand(GetConsantPoolLoadPattern())); | |
3644 cmp(result, Operand(GetConsantPoolLoadPattern())); | |
3645 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); | 3727 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); |
3646 // Result was clobbered. Restore it. | 3728 // Scratch was clobbered. Restore it. |
3647 ldr(result, MemOperand(ldr_location)); | 3729 lwz(scratch, MemOperand(location)); |
3648 } | 3730 } |
3649 | 3731 // Get the address of the constant and patch it. |
3650 // Get the offset into the constant pool. | 3732 andi(scratch, scratch, Operand(kImm16Mask)); |
3651 const uint32_t kLdrOffsetMask = (1 << 12) - 1; | 3733 StorePX(new_value, MemOperand(kConstantPoolRegister, scratch)); |
3652 and_(result, result, Operand(kLdrOffsetMask)); | 3734 #else |
3653 | 3735 // This code assumes a FIXED_SEQUENCE for lis/ori |
3654 bind(&load_result); | 3736 |
3655 // Get the address of the constant. | 3737 // At this point scratch is a lis instruction. |
3656 if (FLAG_enable_ool_constant_pool) { | 3738 if (emit_debug_code()) { |
3657 add(result, pp, Operand(result)); | 3739 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16))); |
3658 } else { | 3740 Cmpi(scratch, Operand(ADDIS), r0); |
3659 add(result, ldr_location, Operand(result)); | 3741 Check(eq, kTheInstructionToPatchShouldBeALis); |
3660 add(result, result, Operand(Instruction::kPCReadOffset)); | 3742 lwz(scratch, MemOperand(location)); |
3661 } | 3743 } |
| 3744 |
| 3745 // insert new high word into lis instruction |
| 3746 #if V8_TARGET_ARCH_PPC64 |
| 3747 srdi(ip, new_value, Operand(32)); |
| 3748 rlwimi(scratch, ip, 16, 16, 31); |
| 3749 #else |
| 3750 rlwimi(scratch, new_value, 16, 16, 31); |
| 3751 #endif |
| 3752 |
| 3753 stw(scratch, MemOperand(location)); |
| 3754 |
| 3755 lwz(scratch, MemOperand(location, kInstrSize)); |
| 3756 // scratch is now ori. |
| 3757 if (emit_debug_code()) { |
| 3758 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3759 Cmpi(scratch, Operand(ORI), r0); |
| 3760 Check(eq, kTheInstructionShouldBeAnOri); |
| 3761 lwz(scratch, MemOperand(location, kInstrSize)); |
| 3762 } |
| 3763 |
| 3764 // insert new low word into ori instruction |
| 3765 #if V8_TARGET_ARCH_PPC64 |
| 3766 rlwimi(scratch, ip, 0, 16, 31); |
| 3767 #else |
| 3768 rlwimi(scratch, new_value, 0, 16, 31); |
| 3769 #endif |
| 3770 stw(scratch, MemOperand(location, kInstrSize)); |
| 3771 |
| 3772 #if V8_TARGET_ARCH_PPC64 |
| 3773 if (emit_debug_code()) { |
| 3774 lwz(scratch, MemOperand(location, 2*kInstrSize)); |
| 3775 // scratch is now sldi. |
| 3776 And(scratch, scratch, Operand(kOpcodeMask|kExt5OpcodeMask)); |
| 3777 Cmpi(scratch, Operand(EXT5|RLDICR), r0); |
| 3778 Check(eq, kTheInstructionShouldBeASldi); |
| 3779 } |
| 3780 |
| 3781 lwz(scratch, MemOperand(location, 3*kInstrSize)); |
| 3782 // scratch is now ori. |
| 3783 if (emit_debug_code()) { |
| 3784 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3785 Cmpi(scratch, Operand(ORIS), r0); |
| 3786 Check(eq, kTheInstructionShouldBeAnOris); |
| 3787 lwz(scratch, MemOperand(location, 3*kInstrSize)); |
| 3788 } |
| 3789 |
| 3790 rlwimi(scratch, new_value, 16, 16, 31); |
| 3791 stw(scratch, MemOperand(location, 3*kInstrSize)); |
| 3792 |
| 3793 lwz(scratch, MemOperand(location, 4*kInstrSize)); |
| 3794 // scratch is now ori. |
| 3795 if (emit_debug_code()) { |
| 3796 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3797 Cmpi(scratch, Operand(ORI), r0); |
| 3798 Check(eq, kTheInstructionShouldBeAnOri); |
| 3799 lwz(scratch, MemOperand(location, 4*kInstrSize)); |
| 3800 } |
| 3801 rlwimi(scratch, new_value, 0, 16, 31); |
| 3802 stw(scratch, MemOperand(location, 4*kInstrSize)); |
| 3803 #endif |
| 3804 |
| 3805 // Update the I-cache so the patched lis/ori sequence can be executed.
| 3806 #if V8_TARGET_ARCH_PPC64 |
| 3807 FlushICache(location, 5 * kInstrSize, scratch); |
| 3808 #else |
| 3809 FlushICache(location, 2 * kInstrSize, scratch); |
| 3810 #endif |
| 3811 #endif |
| 3812 } |
| 3813 |
| 3814 |
| 3815 void MacroAssembler::GetRelocatedValue(Register location, |
| 3816 Register result, |
| 3817 Register scratch) { |
| 3818 lwz(result, MemOperand(location)); |
| 3819 |
| 3820 #if V8_OOL_CONSTANT_POOL |
| 3821 if (emit_debug_code()) { |
| 3822 // Check that the instruction sequence is a load from the constant pool |
| 3823 #if V8_TARGET_ARCH_PPC64 |
| 3824 And(result, result, Operand(kOpcodeMask | (0x1f * B16))); |
| 3825 Cmpi(result, Operand(ADDI), r0); |
| 3826 Check(eq, kTheInstructionShouldBeALi); |
| 3827 lwz(result, MemOperand(location, kInstrSize)); |
| 3828 #endif |
| 3829 ExtractBitMask(result, result, 0x1f * B16); |
| 3830 cmpi(result, Operand(kConstantPoolRegister.code())); |
| 3831 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); |
| 3832 lwz(result, MemOperand(location)); |
| 3833 } |
| 3834 // Get the address of the constant and retrieve it. |
| 3835 andi(result, result, Operand(kImm16Mask)); |
| 3836 LoadPX(result, MemOperand(kConstantPoolRegister, result)); |
| 3837 #else |
| 3838 // This code assumes a FIXED_SEQUENCE for lis/ori |
| 3839 if (emit_debug_code()) { |
| 3840 And(result, result, Operand(kOpcodeMask | (0x1f * B16))); |
| 3841 Cmpi(result, Operand(ADDIS), r0); |
| 3842 Check(eq, kTheInstructionShouldBeALis); |
| 3843 lwz(result, MemOperand(location)); |
| 3844 } |
| 3845 |
| 3846 // result now holds a lis instruction. Extract the immediate. |
| 3847 slwi(result, result, Operand(16)); |
| 3848 |
| 3849 lwz(scratch, MemOperand(location, kInstrSize)); |
| 3850 if (emit_debug_code()) { |
| 3851 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3852 Cmpi(scratch, Operand(ORI), r0); |
| 3853 Check(eq, kTheInstructionShouldBeAnOri); |
| 3854 lwz(scratch, MemOperand(location, kInstrSize)); |
| 3855 } |
| 3856 // Copy the low 16 bits from the ori instruction into result
| 3857 rlwimi(result, scratch, 0, 16, 31); |
| 3858 |
| 3859 #if V8_TARGET_ARCH_PPC64 |
| 3860 if (emit_debug_code()) { |
| 3861 lwz(scratch, MemOperand(location, 2*kInstrSize)); |
| 3862 // scratch is now sldi. |
| 3863 And(scratch, scratch, Operand(kOpcodeMask|kExt5OpcodeMask)); |
| 3864 Cmpi(scratch, Operand(EXT5|RLDICR), r0); |
| 3865 Check(eq, kTheInstructionShouldBeASldi); |
| 3866 } |
| 3867 |
| 3868 lwz(scratch, MemOperand(location, 3*kInstrSize)); |
| 3869 // scratch is now ori. |
| 3870 if (emit_debug_code()) { |
| 3871 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3872 Cmpi(scratch, Operand(ORIS), r0); |
| 3873 Check(eq, kTheInstructionShouldBeAnOris); |
| 3874 lwz(scratch, MemOperand(location, 3*kInstrSize)); |
| 3875 } |
| 3876 sldi(result, result, Operand(16)); |
| 3877 rldimi(result, scratch, 0, 48); |
| 3878 |
| 3879 lwz(scratch, MemOperand(location, 4*kInstrSize)); |
| 3880 // scratch is now ori. |
| 3881 if (emit_debug_code()) { |
| 3882 And(scratch, scratch, Operand(kOpcodeMask)); |
| 3883 Cmpi(scratch, Operand(ORI), r0); |
| 3884 Check(eq, kTheInstructionShouldBeAnOri); |
| 3885 lwz(scratch, MemOperand(location, 4*kInstrSize)); |
| 3886 } |
| 3887 sldi(result, result, Operand(16)); |
| 3888 rldimi(result, scratch, 0, 48); |
| 3889 #endif |
| 3890 #endif |
3662 } | 3891 } |
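
Note: both SetRelocatedValue and GetRelocatedValue rely on the FIXED_SEQUENCE materialization of a 32-bit constant as lis (high 16 bits) followed by ori (low 16 bits), each half carried in a 16-bit immediate field. A self-contained sketch of that split and of patching an immediate field (the ori encoding shown is an example, not taken from the V8 sources):

    #include <cassert>
    #include <cstdint>

    uint16_t HighHalf(uint32_t value) { return static_cast<uint16_t>(value >> 16); }  // lis imm
    uint16_t LowHalf(uint32_t value)  { return static_cast<uint16_t>(value); }        // ori imm

    // Patching keeps the opcode/register bits and replaces only the low 16 bits of
    // the instruction word -- the effect of rlwimi(scratch, new_value, 0, 16, 31).
    uint32_t PatchImmediate(uint32_t instr, uint16_t imm) {
      return (instr & 0xffff0000u) | imm;
    }

    int main() {
      uint32_t value = 0x12345678;
      assert(HighHalf(value) == 0x1234 && LowHalf(value) == 0x5678);
      uint32_t ori_r3_r3_0 = 0x60630000u;  // ori r3, r3, 0 (illustrative encoding)
      assert(PatchImmediate(ori_r3_r3_0, LowHalf(value)) == 0x60635678u);
      return 0;
    }
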
3663 | 3892 |
3664 | 3893 |
3665 void MacroAssembler::CheckPageFlag( | 3894 void MacroAssembler::CheckPageFlag( |
3666 Register object, | 3895 Register object, |
3667 Register scratch, | 3896 Register scratch, // scratch may be the same register as object
3668 int mask, | 3897 int mask, |
3669 Condition cc, | 3898 Condition cc, |
3670 Label* condition_met) { | 3899 Label* condition_met) { |
3671 Bfc(scratch, object, 0, kPageSizeBits); | 3900 ASSERT(cc == ne || cc == eq); |
3672 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | 3901 ClearRightImm(scratch, object, Operand(kPageSizeBits)); |
3673 tst(scratch, Operand(mask)); | 3902 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
3674 b(cc, condition_met); | 3903 |
3675 } | 3904 And(r0, scratch, Operand(mask), SetRC); |
3676 | 3905 |
3677 | 3906 if (cc == ne) { |
| 3907 bne(condition_met, cr0); |
| 3908 } |
| 3909 if (cc == eq) { |
| 3910 beq(condition_met, cr0); |
| 3911 } |
| 3912 } |
| 3913 |
| 3914 |
3678 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, | 3915 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, |
3679 Register scratch, | 3916 Register scratch, |
3680 Label* if_deprecated) { | 3917 Label* if_deprecated) { |
3681 if (map->CanBeDeprecated()) { | 3918 if (map->CanBeDeprecated()) { |
3682 mov(scratch, Operand(map)); | 3919 mov(scratch, Operand(map)); |
3683 ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); | 3920 lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); |
3684 tst(scratch, Operand(Map::Deprecated::kMask)); | 3921 ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC); |
3685 b(ne, if_deprecated); | 3922 bne(if_deprecated, cr0); |
3686 } | 3923 } |
3687 } | 3924 } |
3688 | 3925 |
3689 | 3926 |
3690 void MacroAssembler::JumpIfBlack(Register object, | 3927 void MacroAssembler::JumpIfBlack(Register object, |
3691 Register scratch0, | 3928 Register scratch0, |
3692 Register scratch1, | 3929 Register scratch1, |
3693 Label* on_black) { | 3930 Label* on_black) { |
3694 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. | 3931 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. |
3695 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 3932 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
3696 } | 3933 } |
3697 | 3934 |
3698 | 3935 |
3699 void MacroAssembler::HasColor(Register object, | 3936 void MacroAssembler::HasColor(Register object, |
3700 Register bitmap_scratch, | 3937 Register bitmap_scratch, |
3701 Register mask_scratch, | 3938 Register mask_scratch, |
3702 Label* has_color, | 3939 Label* has_color, |
3703 int first_bit, | 3940 int first_bit, |
3704 int second_bit) { | 3941 int second_bit) { |
3705 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); | 3942 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); |
3706 | 3943 |
3707 GetMarkBits(object, bitmap_scratch, mask_scratch); | 3944 GetMarkBits(object, bitmap_scratch, mask_scratch); |
3708 | 3945 |
3709 Label other_color, word_boundary; | 3946 Label other_color, word_boundary; |
3710 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3947 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3711 tst(ip, Operand(mask_scratch)); | 3948 // Test the first bit |
3712 b(first_bit == 1 ? eq : ne, &other_color); | 3949 and_(r0, ip, mask_scratch, SetRC); |
3713 // Shift left 1 by adding. | 3950 b(first_bit == 1 ? eq : ne, &other_color, cr0); |
3714 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); | 3951 // Shift left 1 |
3715 b(eq, &word_boundary); | 3952 // May need to load the next cell |
3716 tst(ip, Operand(mask_scratch)); | 3953 slwi(mask_scratch, mask_scratch, Operand(1), SetRC); |
3717 b(second_bit == 1 ? ne : eq, has_color); | 3954 beq(&word_boundary, cr0); |
3718 jmp(&other_color); | 3955 // Test the second bit |
| 3956 and_(r0, ip, mask_scratch, SetRC); |
| 3957 b(second_bit == 1 ? ne : eq, has_color, cr0); |
| 3958 b(&other_color); |
3719 | 3959 |
3720 bind(&word_boundary); | 3960 bind(&word_boundary); |
3721 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); | 3961 lwz(ip, MemOperand(bitmap_scratch, |
3722 tst(ip, Operand(1)); | 3962 MemoryChunk::kHeaderSize + kIntSize)); |
3723 b(second_bit == 1 ? ne : eq, has_color); | 3963 andi(r0, ip, Operand(1)); |
| 3964 b(second_bit == 1 ? ne : eq, has_color, cr0); |
3724 bind(&other_color); | 3965 bind(&other_color); |
3725 } | 3966 } |
3726 | 3967 |
3727 | 3968 |
3728 // Detect some, but not all, common pointer-free objects. This is used by the | 3969 // Detect some, but not all, common pointer-free objects. This is used by the |
3729 // incremental write barrier which doesn't care about oddballs (they are always | 3970 // incremental write barrier which doesn't care about oddballs (they are always |
3730 // marked black immediately so this code is not hit). | 3971 // marked black immediately so this code is not hit). |
3731 void MacroAssembler::JumpIfDataObject(Register value, | 3972 void MacroAssembler::JumpIfDataObject(Register value, |
3732 Register scratch, | 3973 Register scratch, |
3733 Label* not_data_object) { | 3974 Label* not_data_object) { |
3734 Label is_data_object; | 3975 Label is_data_object; |
3735 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); | 3976 LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); |
3736 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); | 3977 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); |
3737 b(eq, &is_data_object); | 3978 beq(&is_data_object); |
3738 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 3979 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
3739 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 3980 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
3740 // If it's a string and it's not a cons string then it's an object containing | 3981 // If it's a string and it's not a cons string then it's an object containing |
3741 // no GC pointers. | 3982 // no GC pointers. |
3742 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 3983 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
3743 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); | 3984 STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81); |
3744 b(ne, not_data_object); | 3985 andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
| 3986 bne(not_data_object, cr0); |
3745 bind(&is_data_object); | 3987 bind(&is_data_object); |
3746 } | 3988 } |
3747 | 3989 |
3748 | 3990 |
3749 void MacroAssembler::GetMarkBits(Register addr_reg, | 3991 void MacroAssembler::GetMarkBits(Register addr_reg, |
3750 Register bitmap_reg, | 3992 Register bitmap_reg, |
3751 Register mask_reg) { | 3993 Register mask_reg) { |
3752 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); | 3994 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); |
3753 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); | 3995 ASSERT((~Page::kPageAlignmentMask & 0xffff) == 0); |
3754 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | 3996 lis(r0, Operand((~Page::kPageAlignmentMask >> 16))); |
| 3997 and_(bitmap_reg, addr_reg, r0); |
3755 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | 3998 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
3756 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); | 3999 ExtractBitRange(mask_reg, addr_reg, |
3757 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); | 4000 kLowBits - 1, |
3758 mov(ip, Operand(1)); | 4001 kPointerSizeLog2); |
3759 mov(mask_reg, Operand(ip, LSL, mask_reg)); | 4002 ExtractBitRange(ip, addr_reg, |
| 4003 kPageSizeBits - 1, |
| 4004 kLowBits); |
| 4005 ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2)); |
| 4006 add(bitmap_reg, bitmap_reg, ip); |
| 4007 li(ip, Operand(1)); |
| 4008 slw(mask_reg, ip, mask_reg); |
3760 } | 4009 } |
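
Note: the address arithmetic in GetMarkBits splits an object address into the page base (where the mark bitmap lives), the index of a bitmap cell, and the bit within that cell. A sketch under the usual V8 assumptions of one mark bit per pointer-sized word and 32-bit bitmap cells (illustrative only):

    #include <cstdint>

    struct MarkBitLocationSketch {
      uintptr_t page;        // addr & ~kPageAlignmentMask
      uintptr_t cell_index;  // which 32-bit cell inside the page's bitmap
      uint32_t  mask;        // single bit for this word within the cell
    };

    MarkBitLocationSketch GetMarkBitsSketch(uintptr_t addr,
                                            unsigned pointer_size_log2,
                                            uintptr_t page_alignment_mask) {
      const unsigned kBitsPerCellLog2 = 5;  // 32 mark bits per cell
      MarkBitLocationSketch loc;
      loc.page = addr & ~page_alignment_mask;
      unsigned bit = (addr >> pointer_size_log2) & ((1u << kBitsPerCellLog2) - 1);
      loc.cell_index =
          (addr & page_alignment_mask) >> (pointer_size_log2 + kBitsPerCellLog2);
      loc.mask = 1u << bit;
      return loc;
    }
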
3761 | 4010 |
3762 | 4011 |
3763 void MacroAssembler::EnsureNotWhite( | 4012 void MacroAssembler::EnsureNotWhite( |
3764 Register value, | 4013 Register value, |
3765 Register bitmap_scratch, | 4014 Register bitmap_scratch, |
3766 Register mask_scratch, | 4015 Register mask_scratch, |
3767 Register load_scratch, | 4016 Register load_scratch, |
3768 Label* value_is_white_and_not_data) { | 4017 Label* value_is_white_and_not_data) { |
3769 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); | 4018 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); |
3770 GetMarkBits(value, bitmap_scratch, mask_scratch); | 4019 GetMarkBits(value, bitmap_scratch, mask_scratch); |
3771 | 4020 |
3772 // If the value is black or grey we don't need to do anything. | 4021 // If the value is black or grey we don't need to do anything. |
3773 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); | 4022 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
3774 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 4023 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
3775 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); | 4024 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); |
3776 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | 4025 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
3777 | 4026 |
3778 Label done; | 4027 Label done; |
3779 | 4028 |
3780 // Since both black and grey have a 1 in the first position and white does | 4029 // Since both black and grey have a 1 in the first position and white does |
3781 // not have a 1 there we only need to check one bit. | 4030 // not have a 1 there we only need to check one bit. |
3782 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 4031 lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3783 tst(mask_scratch, load_scratch); | 4032 and_(r0, mask_scratch, load_scratch, SetRC); |
3784 b(ne, &done); | 4033 bne(&done, cr0); |
3785 | 4034 |
3786 if (emit_debug_code()) { | 4035 if (emit_debug_code()) { |
3787 // Check for impossible bit pattern. | 4036 // Check for impossible bit pattern. |
3788 Label ok; | 4037 Label ok; |
3789 // LSL may overflow, making the check conservative. | 4038 // LSL may overflow, making the check conservative. |
3790 tst(load_scratch, Operand(mask_scratch, LSL, 1)); | 4039 slwi(r0, mask_scratch, Operand(1)); |
3791 b(eq, &ok); | 4040 and_(r0, load_scratch, r0, SetRC); |
| 4041 beq(&ok, cr0); |
3792 stop("Impossible marking bit pattern"); | 4042 stop("Impossible marking bit pattern"); |
3793 bind(&ok); | 4043 bind(&ok); |
3794 } | 4044 } |
3795 | 4045 |
3796 // Value is white. We check whether it is data that doesn't need scanning. | 4046 // Value is white. We check whether it is data that doesn't need scanning. |
3797 // Currently only checks for HeapNumber and non-cons strings. | 4047 // Currently only checks for HeapNumber and non-cons strings. |
3798 Register map = load_scratch; // Holds map while checking type. | 4048 Register map = load_scratch; // Holds map while checking type. |
3799 Register length = load_scratch; // Holds length of object after testing type. | 4049 Register length = load_scratch; // Holds length of object after testing type. |
3800 Label is_data_object; | 4050 Label is_data_object, maybe_string_object, is_string_object, is_encoded; |
| 4051 #if V8_TARGET_ARCH_PPC64 |
| 4052 Label length_computed; |
| 4053 #endif |
| 4054 |
3801 | 4055 |
3802 // Check for heap-number | 4056 // Check for heap-number |
3803 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); | 4057 LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
3804 CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 4058 CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
3805 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); | 4059 bne(&maybe_string_object); |
3806 b(eq, &is_data_object); | 4060 li(length, Operand(HeapNumber::kSize)); |
| 4061 b(&is_data_object); |
| 4062 bind(&maybe_string_object); |
3807 | 4063 |
3808 // Check for strings. | 4064 // Check for strings. |
3809 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 4065 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
3810 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 4066 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
3811 // If it's a string and it's not a cons string then it's an object containing | 4067 // If it's a string and it's not a cons string then it's an object containing |
3812 // no GC pointers. | 4068 // no GC pointers. |
3813 Register instance_type = load_scratch; | 4069 Register instance_type = load_scratch; |
3814 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 4070 lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
3815 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); | 4071 andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
3816 b(ne, value_is_white_and_not_data); | 4072 bne(value_is_white_and_not_data, cr0); |
3817 // It's a non-indirect (non-cons and non-slice) string. | 4073 // It's a non-indirect (non-cons and non-slice) string. |
3818 // If it's external, the length is just ExternalString::kSize. | 4074 // If it's external, the length is just ExternalString::kSize. |
3819 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). | 4075 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). |
3820 // External strings are the only ones with the kExternalStringTag bit | 4076 // External strings are the only ones with the kExternalStringTag bit |
3821 // set. | 4077 // set. |
3822 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); | 4078 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); |
3823 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); | 4079 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); |
3824 tst(instance_type, Operand(kExternalStringTag)); | 4080 andi(r0, instance_type, Operand(kExternalStringTag)); |
3825 mov(length, Operand(ExternalString::kSize), LeaveCC, ne); | 4081 beq(&is_string_object, cr0); |
3826 b(ne, &is_data_object); | 4082 li(length, Operand(ExternalString::kSize)); |
| 4083 b(&is_data_object); |
| 4084 bind(&is_string_object); |
3827 | 4085 |
3828 // Sequential string, either ASCII or UC16. | 4086 // Sequential string, either ASCII or UC16. |
3829 // For ASCII (char-size of 1) we shift the smi tag away to get the length. | 4087 // For ASCII (char-size of 1) we untag the smi to get the length. |
3830 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby | 4088 // For UC16 (char-size of 2): |
3831 // getting the length multiplied by 2. | 4089 // - (32-bit) we just leave the smi tag in place, thereby getting |
| 4090 // the length multiplied by 2. |
| 4091 // - (64-bit) we compute the offset in the 2-byte array |
3832 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); | 4092 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); |
3833 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 4093 LoadP(ip, FieldMemOperand(value, String::kLengthOffset)); |
3834 ldr(ip, FieldMemOperand(value, String::kLengthOffset)); | 4094 andi(r0, instance_type, Operand(kStringEncodingMask)); |
3835 tst(instance_type, Operand(kStringEncodingMask)); | 4095 beq(&is_encoded, cr0); |
3836 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); | 4096 SmiUntag(ip); |
3837 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); | 4097 #if V8_TARGET_ARCH_PPC64 |
3838 and_(length, length, Operand(~kObjectAlignmentMask)); | 4098 b(&length_computed); |
| 4099 #endif |
| 4100 bind(&is_encoded); |
| 4101 #if V8_TARGET_ARCH_PPC64 |
| 4102 SmiToShortArrayOffset(ip, ip); |
| 4103 bind(&length_computed); |
| 4104 #else |
| 4105 ASSERT(kSmiShift == 1); |
| 4106 #endif |
| 4107 addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); |
| 4108 li(r0, Operand(~kObjectAlignmentMask)); |
| 4109 and_(length, length, r0); |
3839 | 4110 |
3840 bind(&is_data_object); | 4111 bind(&is_data_object); |
3841 // Value is a data object, and it is white. Mark it black. Since we know | 4112 // Value is a data object, and it is white. Mark it black. Since we know |
3842 // that the object is white we can make it black by flipping one bit. | 4113 // that the object is white we can make it black by flipping one bit. |
3843 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 4114 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3844 orr(ip, ip, Operand(mask_scratch)); | 4115 orx(ip, ip, mask_scratch); |
3845 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 4116 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3846 | 4117 |
3847 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); | 4118 mov(ip, Operand(~Page::kPageAlignmentMask)); |
3848 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); | 4119 and_(bitmap_scratch, bitmap_scratch, ip); |
3849 add(ip, ip, Operand(length)); | 4120 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
3850 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); | 4121 add(ip, ip, length); |
| 4122 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
3851 | 4123 |
3852 bind(&done); | 4124 bind(&done); |
3853 } | 4125 } |
3854 | 4126 |
3855 | 4127 |
| 4128 // Saturate a value into an 8-bit unsigned integer
| 4129 // if input_value < 0, output_value is 0 |
| 4130 // if input_value > 255, output_value is 255 |
| 4131 // otherwise output_value is the input_value |
3856 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { | 4132 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { |
3857 Usat(output_reg, 8, Operand(input_reg)); | 4133 Label done, negative_label, overflow_label; |
| 4134 int satval = (1 << 8) - 1; |
| 4135 |
| 4136 cmpi(input_reg, Operand::Zero()); |
| 4137 blt(&negative_label); |
| 4138 |
| 4139 cmpi(input_reg, Operand(satval)); |
| 4140 bgt(&overflow_label); |
| 4141 if (!output_reg.is(input_reg)) { |
| 4142 mr(output_reg, input_reg); |
| 4143 } |
| 4144 b(&done); |
| 4145 |
| 4146 bind(&negative_label); |
| 4147 li(output_reg, Operand::Zero()); // set to 0 if negative |
| 4148 b(&done); |
| 4149 |
| 4150 |
| 4151 bind(&overflow_label); // set to satval if > satval |
| 4152 li(output_reg, Operand(satval)); |
| 4153 |
| 4154 bind(&done); |
| 4155 } |
| 4156 |
| 4157 |
| 4158 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { |
| 4159 mtfsfi(7, RN); |
| 4160 } |
| 4161 |
| 4162 |
| 4163 void MacroAssembler::ResetRoundingMode() { |
| 4164 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest) |
3858 } | 4165 } |
3859 | 4166 |
3860 | 4167 |
3861 void MacroAssembler::ClampDoubleToUint8(Register result_reg, | 4168 void MacroAssembler::ClampDoubleToUint8(Register result_reg, |
3862 DwVfpRegister input_reg, | 4169 DoubleRegister input_reg, |
3863 LowDwVfpRegister double_scratch) { | 4170 DoubleRegister double_scratch) { |
| 4171 Label above_zero; |
3864 Label done; | 4172 Label done; |
| 4173 Label in_bounds; |
3865 | 4174 |
3866 // Handle inputs >= 255 (including +infinity). | 4175 LoadDoubleLiteral(double_scratch, 0.0, result_reg); |
3867 Vmov(double_scratch, 255.0, result_reg); | 4176 fcmpu(input_reg, double_scratch); |
3868 mov(result_reg, Operand(255)); | 4177 bgt(&above_zero); |
3869 VFPCompareAndSetFlags(input_reg, double_scratch); | |
3870 b(ge, &done); | |
3871 | 4178 |
3872 // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest | 4179 // Double value is less than zero, NaN or Inf, return 0. |
3873 // rounding mode will provide the correct result. | 4180 LoadIntLiteral(result_reg, 0); |
3874 vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding); | 4181 b(&done); |
3875 vmov(result_reg, double_scratch.low()); | 4182 |
| 4183 // Double value is >= 255, return 255. |
| 4184 bind(&above_zero); |
| 4185 LoadDoubleLiteral(double_scratch, 255.0, result_reg); |
| 4186 fcmpu(input_reg, double_scratch); |
| 4187 ble(&in_bounds); |
| 4188 LoadIntLiteral(result_reg, 255); |
| 4189 b(&done); |
| 4190 |
| 4191 // In 0-255 range, round and truncate. |
| 4192 bind(&in_bounds); |
| 4193 |
| 4194 // round to nearest (default rounding mode) |
| 4195 fctiw(double_scratch, input_reg); |
| 4196 |
| 4197 // reserve a slot on the stack |
| 4198 stfdu(double_scratch, MemOperand(sp, -kDoubleSize)); |
| 4199 nop(); // avoid a load-hit-store (RAW) hazard between the stfdu and the lwz
| 4200 lwz(result_reg, MemOperand(sp, Register::kMantissaOffset)); |
| 4201 // restore the stack |
| 4202 addi(sp, sp, Operand(kDoubleSize)); |
3876 | 4203 |
3877 bind(&done); | 4204 bind(&done); |
3878 } | 4205 } |
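
Note: both clamping helpers implement the usual saturating conversion; for comparison with the fctiw path above (which rounds to nearest under the default FPSCR mode), a reference implementation of the intended result, illustrative only:

    #include <cmath>
    #include <cstdint>

    // NaN, -0.0 and negatives clamp to 0; values >= 255 clamp to 255;
    // everything in between rounds to nearest (ties to even, matching fctiw
    // under the default rounding mode).
    uint8_t ClampDoubleToUint8Sketch(double value) {
      if (!(value > 0.0)) return 0;
      if (value >= 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(value));
    }
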
3879 | 4206 |
3880 | 4207 |
3881 void MacroAssembler::LoadInstanceDescriptors(Register map, | 4208 void MacroAssembler::LoadInstanceDescriptors(Register map, |
3882 Register descriptors) { | 4209 Register descriptors) { |
3883 ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); | 4210 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); |
3884 } | 4211 } |
3885 | 4212 |
3886 | 4213 |
3887 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { | 4214 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { |
3888 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 4215 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
3889 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); | 4216 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); |
3890 } | 4217 } |
3891 | 4218 |
3892 | 4219 |
3893 void MacroAssembler::EnumLength(Register dst, Register map) { | 4220 void MacroAssembler::EnumLength(Register dst, Register map) { |
3894 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); | 4221 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); |
3895 ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 4222 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
3896 and_(dst, dst, Operand(Map::EnumLengthBits::kMask)); | 4223 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask); |
3897 SmiTag(dst); | 4224 SmiTag(dst); |
3898 } | 4225 } |
3899 | 4226 |
3900 | 4227 |
3901 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { | 4228 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { |
3902 Register empty_fixed_array_value = r6; | 4229 Register empty_fixed_array_value = r9; |
3903 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); | 4230 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); |
3904 Label next, start; | 4231 Label next, start; |
3905 mov(r2, r0); | 4232 mr(r5, r3); |
3906 | 4233 |
3907 // Check if the enum length field is properly initialized, indicating that | 4234 // Check if the enum length field is properly initialized, indicating that |
3908 // there is an enum cache. | 4235 // there is an enum cache. |
3909 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); | 4236 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); |
3910 | 4237 |
3911 EnumLength(r3, r1); | 4238 EnumLength(r6, r4); |
3912 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel))); | 4239 CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0); |
3913 b(eq, call_runtime); | 4240 beq(call_runtime); |
3914 | 4241 |
3915 jmp(&start); | 4242 b(&start); |
3916 | 4243 |
3917 bind(&next); | 4244 bind(&next); |
3918 ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); | 4245 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); |
3919 | 4246 |
3920 // For all objects but the receiver, check that the cache is empty. | 4247 // For all objects but the receiver, check that the cache is empty. |
3921 EnumLength(r3, r1); | 4248 EnumLength(r6, r4); |
3922 cmp(r3, Operand(Smi::FromInt(0))); | 4249 CmpSmiLiteral(r6, Smi::FromInt(0), r0); |
3923 b(ne, call_runtime); | 4250 bne(call_runtime); |
3924 | 4251 |
3925 bind(&start); | 4252 bind(&start); |
3926 | 4253 |
3927 // Check that there are no elements. Register r2 contains the current JS | 4254 // Check that there are no elements. Register r5 contains the current JS |
3928 // object we've reached through the prototype chain. | 4255 // object we've reached through the prototype chain. |
3929 Label no_elements; | 4256 Label no_elements; |
3930 ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset)); | 4257 LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset)); |
3931 cmp(r2, empty_fixed_array_value); | 4258 cmp(r5, empty_fixed_array_value); |
3932 b(eq, &no_elements); | 4259 beq(&no_elements); |
3933 | 4260 |
3934 // Second chance, the object may be using the empty slow element dictionary. | 4261 // Second chance, the object may be using the empty slow element dictionary. |
3935 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex); | 4262 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex); |
3936 b(ne, call_runtime); | 4263 bne(call_runtime); |
3937 | 4264 |
3938 bind(&no_elements); | 4265 bind(&no_elements); |
3939 ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset)); | 4266 LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset)); |
3940 cmp(r2, null_value); | 4267 cmp(r5, null_value); |
3941 b(ne, &next); | 4268 bne(&next); |
3942 } | 4269 } |
3943 | 4270 |
3944 | 4271 |
| 4272 //////////////////////////////////////////////////////////////////////////////// |
| 4273 // |
| 4274 // New MacroAssembler Interfaces added for PPC |
| 4275 // |
| 4276 //////////////////////////////////////////////////////////////////////////////// |
| 4277 void MacroAssembler::LoadIntLiteral(Register dst, int value) { |
| 4278 mov(dst, Operand(value)); |
| 4279 } |
| 4280 |
| 4281 |
| 4282 void MacroAssembler::LoadSmiLiteral(Register dst, Smi *smi) { |
| 4283 mov(dst, Operand(smi)); |
| 4284 } |
| 4285 |
| 4286 |
| 4287 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, |
| 4288 double value, |
| 4289 Register scratch) { |
| 4290 #if V8_OOL_CONSTANT_POOL |
| 4291 // TODO(mbrandy): enable extended constant pool usage for doubles. |
| 4292 // See ARM commit e27ab337 for a reference. |
| 4293 if (is_constant_pool_available() && !use_extended_constant_pool()) { |
| 4294 RelocInfo rinfo(pc_, value); |
| 4295 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); |
| 4296 if (section == ConstantPoolArray::EXTENDED_SECTION) { |
| 4297 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 4298 addis(scratch, kConstantPoolRegister, Operand::Zero()); |
| 4299 lfd(result, MemOperand(scratch, 0)); |
| 4300 } else { |
| 4301 ASSERT(section == ConstantPoolArray::SMALL_SECTION); |
| 4302 #if V8_TARGET_ARCH_PPC64 |
| 4303 // We use 2 instruction sequence here for consistency with mov. |
| 4304 li(scratch, Operand::Zero()); |
| 4305 lfdx(result, MemOperand(kConstantPoolRegister, scratch)); |
| 4306 #else |
| 4307 lfd(result, MemOperand(kConstantPoolRegister, 0)); |
| 4308 #endif |
| 4309 } |
| 4310 return; |
| 4311 } |
| 4312 #endif |
| 4313 |
| 4314 addi(sp, sp, Operand(-8)); // reserve 1 temp double on the stack |
| 4315 |
| 4316 // avoid a gcc strict-aliasing error by using a union cast
| 4317 union { |
| 4318 double dval; |
| 4319 #if V8_TARGET_ARCH_PPC64 |
| 4320 intptr_t ival; |
| 4321 #else |
| 4322 intptr_t ival[2]; |
| 4323 #endif |
| 4324 } litVal; |
| 4325 |
| 4326 litVal.dval = value; |
| 4327 #if V8_TARGET_ARCH_PPC64 |
| 4328 mov(scratch, Operand(litVal.ival)); |
| 4329 std(scratch, MemOperand(sp)); |
| 4330 #else |
| 4331 LoadIntLiteral(scratch, litVal.ival[0]); |
| 4332 stw(scratch, MemOperand(sp, 0)); |
| 4333 LoadIntLiteral(scratch, litVal.ival[1]); |
| 4334 stw(scratch, MemOperand(sp, 4)); |
| 4335 #endif |
| 4336 nop(); // avoid a load-hit-store (RAW) hazard between the stw and the lfd
| 4337 lfd(result, MemOperand(sp, 0)); |
| 4338 |
| 4339 addi(sp, sp, Operand(8)); // restore the stack ptr |
| 4340 } |
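
Note: the anonymous union above is the conventional way to reinterpret a double's bit pattern without violating GCC's strict-aliasing rules when the value is stored word by word; memcpy is the other common spelling. A tiny illustration (not V8 code):

    #include <cstdint>
    #include <cstring>

    uint64_t DoubleBitsViaUnion(double d) {
      union { double dval; uint64_t ival; } u;
      u.dval = d;
      return u.ival;                         // the words the code stores to the stack
    }

    uint64_t DoubleBitsViaMemcpy(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));  // strictly-conforming alternative
      return bits;
    }
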
| 4341 |
| 4342 |
| 4343 void MacroAssembler::Add(Register dst, Register src, |
| 4344 intptr_t value, Register scratch) { |
| 4345 if (is_int16(value)) { |
| 4346 addi(dst, src, Operand(value)); |
| 4347 } else { |
| 4348 mov(scratch, Operand(value)); |
| 4349 add(dst, src, scratch); |
| 4350 } |
| 4351 } |
| 4352 |
| 4353 |
| 4354 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch, |
| 4355 CRegister cr) { |
| 4356 intptr_t value = src2.immediate(); |
| 4357 if (is_int16(value)) { |
| 4358 cmpi(src1, src2, cr); |
| 4359 } else { |
| 4360 mov(scratch, src2); |
| 4361 cmp(src1, scratch, cr); |
| 4362 } |
| 4363 } |
| 4364 |
| 4365 |
| 4366 void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch, |
| 4367 CRegister cr) { |
| 4368 intptr_t value = src2.immediate(); |
| 4369 if (is_uint16(value)) { |
| 4370 cmpli(src1, src2, cr); |
| 4371 } else { |
| 4372 mov(scratch, src2); |
| 4373 cmpl(src1, scratch, cr); |
| 4374 } |
| 4375 } |
| 4376 |
| 4377 |
| 4378 void MacroAssembler::Cmpwi(Register src1, const Operand& src2, |
| 4379 Register scratch, CRegister cr) { |
| 4380 intptr_t value = src2.immediate(); |
| 4381 if (is_int16(value)) { |
| 4382 cmpwi(src1, src2, cr); |
| 4383 } else { |
| 4384 mov(scratch, src2); |
| 4385 cmpw(src1, scratch, cr); |
| 4386 } |
| 4387 } |
| 4388 |
| 4389 |
| 4390 void MacroAssembler::Cmplwi(Register src1, const Operand& src2, |
| 4391 Register scratch, CRegister cr) { |
| 4392 intptr_t value = src2.immediate(); |
| 4393 if (is_uint16(value)) { |
| 4394 cmplwi(src1, src2, cr); |
| 4395 } else { |
| 4396 mov(scratch, src2); |
| 4397 cmplw(src1, scratch, cr); |
| 4398 } |
| 4399 } |
| 4400 |
| 4401 |
| 4402 void MacroAssembler::And(Register ra, Register rs, const Operand& rb, |
| 4403 RCBit rc) { |
| 4404 if (rb.is_reg()) { |
| 4405 and_(ra, rs, rb.rm(), rc); |
| 4406 } else { |
| 4407 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) { |
| 4408 andi(ra, rs, rb); |
| 4409 } else { |
| 4410 // mov handles the relocation. |
| 4411 ASSERT(!rs.is(r0)); |
| 4412 mov(r0, rb); |
| 4413 and_(ra, rs, r0, rc); |
| 4414 } |
| 4415 } |
| 4416 } |
| 4417 |
| 4418 |
| 4419 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) { |
| 4420 if (rb.is_reg()) { |
| 4421 orx(ra, rs, rb.rm(), rc); |
| 4422 } else { |
| 4423 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) { |
| 4424 ori(ra, rs, rb); |
| 4425 } else { |
| 4426 // mov handles the relocation. |
| 4427 ASSERT(!rs.is(r0)); |
| 4428 mov(r0, rb); |
| 4429 orx(ra, rs, r0, rc); |
| 4430 } |
| 4431 } |
| 4432 } |
| 4433 |
| 4434 |
| 4435 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb, |
| 4436 RCBit rc) { |
| 4437 if (rb.is_reg()) { |
| 4438 xor_(ra, rs, rb.rm(), rc); |
| 4439 } else { |
| 4440 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) { |
| 4441 xori(ra, rs, rb); |
| 4442 } else { |
| 4443 // mov handles the relocation. |
| 4444 ASSERT(!rs.is(r0)); |
| 4445 mov(r0, rb); |
| 4446 xor_(ra, rs, r0, rc); |
| 4447 } |
| 4448 } |
| 4449 } |
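The asymmetric RC conditions in And/Or/Xor above follow from the PPC ISA: `andi.` always records to CR0, while `ori`/`xori` never set the record bit, so the 16-bit immediate form is only usable when the requested RC bit matches. A rough sketch of that selection rule, ignoring the relocation check (editor illustration, not part of the patch):

    #include <stdint.h>
    enum RCBitSketch { kLeaveRC, kSetRC };
    static bool CanUseImmediateForm(uintptr_t imm, RCBitSketch rc, bool is_and) {
      if (imm > 0xFFFF) return false;     // only a 16-bit unsigned field
      return is_and ? (rc == kSetRC)      // andi. unconditionally sets CR0
                    : (rc == kLeaveRC);   // ori/xori can never set it
    }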
| 4450 |
| 4451 |
| 4452 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch, |
| 4453 CRegister cr) { |
| 4454 #if V8_TARGET_ARCH_PPC64 |
| 4455 LoadSmiLiteral(scratch, smi); |
| 4456 cmp(src1, scratch, cr); |
| 4457 #else |
| 4458 Cmpi(src1, Operand(smi), scratch, cr); |
| 4459 #endif |
| 4460 } |
| 4461 |
| 4462 |
| 4463 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch, |
| 4464 CRegister cr) { |
| 4465 #if V8_TARGET_ARCH_PPC64 |
| 4466 LoadSmiLiteral(scratch, smi); |
| 4467 cmpl(src1, scratch, cr); |
| 4468 #else |
| 4469 Cmpli(src1, Operand(smi), scratch, cr); |
| 4470 #endif |
| 4471 } |
| 4472 |
| 4473 |
| 4474 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi, |
| 4475 Register scratch) { |
| 4476 #if V8_TARGET_ARCH_PPC64 |
| 4477 LoadSmiLiteral(scratch, smi); |
| 4478 add(dst, src, scratch); |
| 4479 #else |
| 4480 Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch); |
| 4481 #endif |
| 4482 } |
| 4483 |
| 4484 |
| 4485 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi, |
| 4486 Register scratch) { |
| 4487 #if V8_TARGET_ARCH_PPC64 |
| 4488 LoadSmiLiteral(scratch, smi); |
| 4489 sub(dst, src, scratch); |
| 4490 #else |
| 4491 Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch); |
| 4492 #endif |
| 4493 } |
| 4494 |
| 4495 |
| 4496 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi, |
| 4497 Register scratch, RCBit rc) { |
| 4498 #if V8_TARGET_ARCH_PPC64 |
| 4499 LoadSmiLiteral(scratch, smi); |
| 4500 and_(dst, src, scratch, rc); |
| 4501 #else |
| 4502 And(dst, src, Operand(smi), rc); |
| 4503 #endif |
| 4504 } |
| 4505 |
| 4506 |
| 4507 // Load a "pointer" sized value from the memory location |
| 4508 void MacroAssembler::LoadP(Register dst, const MemOperand& mem, |
| 4509 Register scratch) { |
| 4510 int offset = mem.offset(); |
| 4511 |
| 4512 if (!scratch.is(no_reg) && !is_int16(offset)) { |
| 4513 /* cannot use d-form */ |
| 4514 LoadIntLiteral(scratch, offset); |
| 4515 #if V8_TARGET_ARCH_PPC64 |
| 4516 ldx(dst, MemOperand(mem.ra(), scratch)); |
| 4517 #else |
| 4518 lwzx(dst, MemOperand(mem.ra(), scratch)); |
| 4519 #endif |
| 4520 } else { |
| 4521 #if V8_TARGET_ARCH_PPC64 |
| 4522 int misaligned = (offset & 3); |
| 4523 if (misaligned) { |
| 4524 // adjust base to conform to offset alignment requirements |
| 4525 // TODO: enhance to use scratch if dst is unsuitable |
| 4526 ASSERT(!dst.is(r0)); |
| 4527 addi(dst, mem.ra(), Operand((offset & 3) - 4)); |
| 4528 ld(dst, MemOperand(dst, (offset & ~3) + 4)); |
| 4529 } else { |
| 4530 ld(dst, mem); |
| 4531 } |
| 4532 #else |
| 4533 lwz(dst, mem); |
| 4534 #endif |
| 4535 } |
| 4536 } |
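The misaligned branch above exists because ld/std/lwa are DS-form instructions whose 16-bit displacement must be a multiple of 4; rebasing keeps the effective address unchanged while making the displacement 4-aligned. A minimal check of that arithmetic (editor's sketch, not in the patch):

    #include <assert.h>
    #include <stdint.h>
    static void CheckDsFormRebase(intptr_t base, int16_t offset) {
      intptr_t new_base = base + (offset & 3) - 4;
      intptr_t new_disp = (offset & ~3) + 4;
      assert((new_disp & 3) == 0);                   // displacement now DS-form legal
      assert(new_base + new_disp == base + offset);  // same effective address
    }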
| 4537 |
| 4538 |
| 4539 // Store a "pointer" sized value to the memory location |
| 4540 void MacroAssembler::StoreP(Register src, const MemOperand& mem, |
| 4541 Register scratch) { |
| 4542 int offset = mem.offset(); |
| 4543 |
| 4544 if (!scratch.is(no_reg) && !is_int16(offset)) { |
| 4545 /* cannot use d-form */ |
| 4546 LoadIntLiteral(scratch, offset); |
| 4547 #if V8_TARGET_ARCH_PPC64 |
| 4548 stdx(src, MemOperand(mem.ra(), scratch)); |
| 4549 #else |
| 4550 stwx(src, MemOperand(mem.ra(), scratch)); |
| 4551 #endif |
| 4552 } else { |
| 4553 #if V8_TARGET_ARCH_PPC64 |
| 4554 int misaligned = (offset & 3); |
| 4555 if (misaligned) { |
| 4556 // adjust base to conform to offset alignment requirements |
| 4557 // a suitable scratch is required here |
| 4558 ASSERT(!scratch.is(no_reg)); |
| 4559 if (scratch.is(r0)) { |
| 4560 LoadIntLiteral(scratch, offset); |
| 4561 stdx(src, MemOperand(mem.ra(), scratch)); |
| 4562 } else { |
| 4563 addi(scratch, mem.ra(), Operand((offset & 3) - 4)); |
| 4564 std(src, MemOperand(scratch, (offset & ~3) + 4)); |
| 4565 } |
| 4566 } else { |
| 4567 std(src, mem); |
| 4568 } |
| 4569 #else |
| 4570 stw(src, mem); |
| 4571 #endif |
| 4572 } |
| 4573 } |
| 4574 |
| 4575 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem, |
| 4576 Register scratch) { |
| 4577 int offset = mem.offset(); |
| 4578 |
| 4579 if (!scratch.is(no_reg) && !is_int16(offset)) { |
| 4580 /* cannot use d-form */ |
| 4581 LoadIntLiteral(scratch, offset); |
| 4582 #if V8_TARGET_ARCH_PPC64 |
| 4583 // lwax(dst, MemOperand(mem.ra(), scratch)); |
| 4584 ASSERT(0); // lwax not yet implemented |
| 4585 #else |
| 4586 lwzx(dst, MemOperand(mem.ra(), scratch)); |
| 4587 #endif |
| 4588 } else { |
| 4589 #if V8_TARGET_ARCH_PPC64 |
| 4590 int misaligned = (offset & 3); |
| 4591 if (misaligned) { |
| 4592 // adjust base to conform to offset alignment requirements |
| 4593 // TODO: enhance to use scratch if dst is unsuitable |
| 4594 ASSERT(!dst.is(r0)); |
| 4595 addi(dst, mem.ra(), Operand((offset & 3) - 4)); |
| 4596 lwa(dst, MemOperand(dst, (offset & ~3) + 4)); |
| 4597 } else { |
| 4598 lwa(dst, mem); |
| 4599 } |
| 4600 #else |
| 4601 lwz(dst, mem); |
| 4602 #endif |
| 4603 } |
| 4604 } |
| 4605 |
| 4606 |
| 4607 // Variable length depending on whether offset fits into immediate field |
| 4608 // MemOperand currently only supports d-form |
| 4609 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem, |
| 4610 Register scratch, bool updateForm) { |
| 4611 Register base = mem.ra(); |
| 4612 int offset = mem.offset(); |
| 4613 |
| 4614 bool use_dform = true; |
| 4615 if (!is_int16(offset)) { |
| 4616 use_dform = false; |
| 4617 LoadIntLiteral(scratch, offset); |
| 4618 } |
| 4619 |
| 4620 if (!updateForm) { |
| 4621 if (use_dform) { |
| 4622 lwz(dst, mem); |
| 4623 } else { |
| 4624 lwzx(dst, MemOperand(base, scratch)); |
| 4625 } |
| 4626 } else { |
| 4627 if (use_dform) { |
| 4628 lwzu(dst, mem); |
| 4629 } else { |
| 4630 lwzux(dst, MemOperand(base, scratch)); |
| 4631 } |
| 4632 } |
| 4633 } |
| 4634 |
| 4635 |
| 4636 // Variable length depending on whether offset fits into immediate field |
| 4637 // MemOperand currently only supports d-form |
| 4638 void MacroAssembler::StoreWord(Register src, const MemOperand& mem, |
| 4639 Register scratch, bool updateForm) { |
| 4640 Register base = mem.ra(); |
| 4641 int offset = mem.offset(); |
| 4642 |
| 4643 bool use_dform = true; |
| 4644 if (!is_int16(offset)) { |
| 4645 use_dform = false; |
| 4646 LoadIntLiteral(scratch, offset); |
| 4647 } |
| 4648 |
| 4649 if (!updateForm) { |
| 4650 if (use_dform) { |
| 4651 stw(src, mem); |
| 4652 } else { |
| 4653 stwx(src, MemOperand(base, scratch)); |
| 4654 } |
| 4655 } else { |
| 4656 if (use_dform) { |
| 4657 stwu(src, mem); |
| 4658 } else { |
| 4659 stwux(src, MemOperand(base, scratch)); |
| 4660 } |
| 4661 } |
| 4662 } |
| 4663 |
| 4664 |
| 4665 // Variable length depending on whether offset fits into immediate field |
| 4666 // MemOperand currently only supports d-form |
| 4667 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem, |
| 4668 Register scratch, bool updateForm) { |
| 4669 Register base = mem.ra(); |
| 4670 int offset = mem.offset(); |
| 4671 |
| 4672 bool use_dform = true; |
| 4673 if (!is_int16(offset)) { |
| 4674 use_dform = false; |
| 4675 LoadIntLiteral(scratch, offset); |
| 4676 } |
| 4677 |
| 4678 if (!updateForm) { |
| 4679 if (use_dform) { |
| 4680 lhz(dst, mem); |
| 4681 } else { |
| 4682 lhzx(dst, MemOperand(base, scratch)); |
| 4683 } |
| 4684 } else { |
| 4685 // If updateForm is ever true, then lhzu will |
| 4686 // need to be implemented |
| 4687 assert(0); |
| 4688 #if 0 // LoadHalfWord w/ update not yet needed |
| 4689 if (use_dform) { |
| 4690 lhzu(dst, mem); |
| 4691 } else { |
| 4692 lhzux(dst, MemOperand(base, scratch)); |
| 4693 } |
| 4694 #endif |
| 4695 } |
| 4696 } |
| 4697 |
| 4698 |
| 4699 // Variable length depending on whether offset fits into immediate field |
| 4700 // MemOperand currently only supports d-form |
| 4701 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem, |
| 4702 Register scratch, bool updateForm) { |
| 4703 Register base = mem.ra(); |
| 4704 int offset = mem.offset(); |
| 4705 |
| 4706 bool use_dform = true; |
| 4707 if (!is_int16(offset)) { |
| 4708 use_dform = false; |
| 4709 LoadIntLiteral(scratch, offset); |
| 4710 } |
| 4711 |
| 4712 if (!updateForm) { |
| 4713 if (use_dform) { |
| 4714 sth(src, mem); |
| 4715 } else { |
| 4716 sthx(src, MemOperand(base, scratch)); |
| 4717 } |
| 4718 } else { |
| 4719 // If updateForm is ever true, then sthu will |
| 4720 // need to be implemented |
| 4721 assert(0); |
| 4722 #if 0 // StoreHalfWord w/ update not yet needed |
| 4723 if (use_dform) { |
| 4724 sthu(src, mem); |
| 4725 } else { |
| 4726 sthux(src, MemOperand(base, scratch)); |
| 4727 } |
| 4728 #endif |
| 4729 } |
| 4730 } |
| 4731 |
| 4732 |
| 4733 // Variable length depending on whether offset fits into immediate field |
| 4734 // MemOperand currently only supports d-form |
| 4735 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem, |
| 4736 Register scratch, bool updateForm) { |
| 4737 Register base = mem.ra(); |
| 4738 int offset = mem.offset(); |
| 4739 |
| 4740 bool use_dform = true; |
| 4741 if (!is_int16(offset)) { |
| 4742 use_dform = false; |
| 4743 LoadIntLiteral(scratch, offset); |
| 4744 } |
| 4745 |
| 4746 if (!updateForm) { |
| 4747 if (use_dform) { |
| 4748 lbz(dst, mem); |
| 4749 } else { |
| 4750 lbzx(dst, MemOperand(base, scratch)); |
| 4751 } |
| 4752 } else { |
| 4753 // If updateForm is ever true, then lbzu will |
| 4754 // need to be implemented |
| 4755 assert(0); |
| 4756 #if 0 // LoadByte w/ update not yet needed |
| 4757 if (use_dform) { |
| 4758 lbzu(dst, mem); |
| 4759 } else { |
| 4760 lbzux(dst, MemOperand(base, scratch)); |
| 4761 } |
| 4762 #endif |
| 4763 } |
| 4764 } |
| 4765 |
| 4766 |
| 4767 // Variable length depending on whether offset fits into immediate field |
| 4768 // MemOperand currently only supports d-form |
| 4769 void MacroAssembler::StoreByte(Register src, const MemOperand& mem, |
| 4770 Register scratch, bool updateForm) { |
| 4771 Register base = mem.ra(); |
| 4772 int offset = mem.offset(); |
| 4773 |
| 4774 bool use_dform = true; |
| 4775 if (!is_int16(offset)) { |
| 4776 use_dform = false; |
| 4777 LoadIntLiteral(scratch, offset); |
| 4778 } |
| 4779 |
| 4780 if (!updateForm) { |
| 4781 if (use_dform) { |
| 4782 stb(src, mem); |
| 4783 } else { |
| 4784 stbx(src, MemOperand(base, scratch)); |
| 4785 } |
| 4786 } else { |
| 4787 // If updateForm is ever true, then stbu will |
| 4788 // need to be implemented |
| 4789 assert(0); |
| 4790 #if 0 // StoreByte w/ update not yet needed |
| 4791 if (use_dform) { |
| 4792 stbu(src, mem); |
| 4793 } else { |
| 4794 stbux(src, MemOperand(base, scratch)); |
| 4795 } |
| 4796 #endif |
| 4797 } |
| 4798 } |
| 4799 |
| 4800 |
| 4801 void MacroAssembler::LoadRepresentation(Register dst, |
| 4802 const MemOperand& mem, |
| 4803 Representation r, |
| 4804 Register scratch) { |
| 4805 ASSERT(!r.IsDouble()); |
| 4806 if (r.IsInteger8()) { |
| 4807 LoadByte(dst, mem, scratch); |
| 4808 extsb(dst, dst); |
| 4809 } else if (r.IsUInteger8()) { |
| 4810 LoadByte(dst, mem, scratch); |
| 4811 } else if (r.IsInteger16()) { |
| 4812 LoadHalfWord(dst, mem, scratch); |
| 4813 extsh(dst, dst); |
| 4814 } else if (r.IsUInteger16()) { |
| 4815 LoadHalfWord(dst, mem, scratch); |
| 4816 #if V8_TARGET_ARCH_PPC64 |
| 4817 } else if (r.IsInteger32()) { |
| 4818 LoadWord(dst, mem, scratch); |
| 4819 #endif |
| 4820 } else { |
| 4821 LoadP(dst, mem, scratch); |
| 4822 } |
| 4823 } |
| 4824 |
| 4825 |
| 4826 void MacroAssembler::StoreRepresentation(Register src, |
| 4827 const MemOperand& mem, |
| 4828 Representation r, |
| 4829 Register scratch) { |
| 4830 ASSERT(!r.IsDouble()); |
| 4831 if (r.IsInteger8() || r.IsUInteger8()) { |
| 4832 StoreByte(src, mem, scratch); |
| 4833 } else if (r.IsInteger16() || r.IsUInteger16()) { |
| 4834 StoreHalfWord(src, mem, scratch); |
| 4835 #if V8_TARGET_ARCH_PPC64 |
| 4836 } else if (r.IsInteger32()) { |
| 4837 StoreWord(src, mem, scratch); |
| 4838 #endif |
| 4839 } else { |
| 4840 if (r.IsHeapObject()) { |
| 4841 AssertNotSmi(src); |
| 4842 } else if (r.IsSmi()) { |
| 4843 AssertSmi(src); |
| 4844 } |
| 4845 StoreP(src, mem, scratch); |
| 4846 } |
| 4847 } |
| 4848 |
| 4849 |
3945 void MacroAssembler::TestJSArrayForAllocationMemento( | 4850 void MacroAssembler::TestJSArrayForAllocationMemento( |
3946 Register receiver_reg, | 4851 Register receiver_reg, |
3947 Register scratch_reg, | 4852 Register scratch_reg, |
3948 Label* no_memento_found) { | 4853 Label* no_memento_found) { |
3949 ExternalReference new_space_start = | 4854 ExternalReference new_space_start = |
3950 ExternalReference::new_space_start(isolate()); | 4855 ExternalReference::new_space_start(isolate()); |
3951 ExternalReference new_space_allocation_top = | 4856 ExternalReference new_space_allocation_top = |
3952 ExternalReference::new_space_allocation_top_address(isolate()); | 4857 ExternalReference::new_space_allocation_top_address(isolate()); |
3953 add(scratch_reg, receiver_reg, | 4858 addi(scratch_reg, receiver_reg, |
3954 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); | 4859 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); |
3955 cmp(scratch_reg, Operand(new_space_start)); | 4860 Cmpi(scratch_reg, Operand(new_space_start), r0); |
3956 b(lt, no_memento_found); | 4861 blt(no_memento_found); |
3957 mov(ip, Operand(new_space_allocation_top)); | 4862 mov(ip, Operand(new_space_allocation_top)); |
3958 ldr(ip, MemOperand(ip)); | 4863 LoadP(ip, MemOperand(ip)); |
3959 cmp(scratch_reg, ip); | 4864 cmp(scratch_reg, ip); |
3960 b(gt, no_memento_found); | 4865 bgt(no_memento_found); |
3961 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); | 4866 LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); |
3962 cmp(scratch_reg, | 4867 Cmpi(scratch_reg, |
3963 Operand(isolate()->factory()->allocation_memento_map())); | 4868 Operand(isolate()->factory()->allocation_memento_map()), r0); |
3964 } | 4869 } |
3965 | 4870 |
3966 | 4871 |
3967 Register GetRegisterThatIsNotOneOf(Register reg1, | 4872 Register GetRegisterThatIsNotOneOf(Register reg1, |
3968 Register reg2, | 4873 Register reg2, |
3969 Register reg3, | 4874 Register reg3, |
3970 Register reg4, | 4875 Register reg4, |
3971 Register reg5, | 4876 Register reg5, |
3972 Register reg6) { | 4877 Register reg6) { |
3973 RegList regs = 0; | 4878 RegList regs = 0; |
(...skipping 18 matching lines...)
3992 Register object, | 4897 Register object, |
3993 Register scratch0, | 4898 Register scratch0, |
3994 Register scratch1, | 4899 Register scratch1, |
3995 Label* found) { | 4900 Label* found) { |
3996 ASSERT(!scratch1.is(scratch0)); | 4901 ASSERT(!scratch1.is(scratch0)); |
3997 Factory* factory = isolate()->factory(); | 4902 Factory* factory = isolate()->factory(); |
3998 Register current = scratch0; | 4903 Register current = scratch0; |
3999 Label loop_again; | 4904 Label loop_again; |
4000 | 4905 |
4001 // scratch contained elements pointer. | 4906 // scratch contained elements pointer. |
4002 mov(current, object); | 4907 mr(current, object); |
4003 | 4908 |
4004 // Loop based on the map going up the prototype chain. | 4909 // Loop based on the map going up the prototype chain. |
4005 bind(&loop_again); | 4910 bind(&loop_again); |
4006 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); | 4911 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset)); |
4007 ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); | 4912 lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); |
4008 DecodeField<Map::ElementsKindBits>(scratch1); | 4913 DecodeField<Map::ElementsKindBits>(scratch1); |
4009 cmp(scratch1, Operand(DICTIONARY_ELEMENTS)); | 4914 cmpi(scratch1, Operand(DICTIONARY_ELEMENTS)); |
4010 b(eq, found); | 4915 beq(found); |
4011 ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); | 4916 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset)); |
4012 cmp(current, Operand(factory->null_value())); | 4917 Cmpi(current, Operand(factory->null_value()), r0); |
4013 b(ne, &loop_again); | 4918 bne(&loop_again); |
4014 } | 4919 } |
4015 | 4920 |
4016 | 4921 |
4017 #ifdef DEBUG | 4922 #ifdef DEBUG |
4018 bool AreAliased(Register reg1, | 4923 bool AreAliased(Register reg1, |
4019 Register reg2, | 4924 Register reg2, |
4020 Register reg3, | 4925 Register reg3, |
4021 Register reg4, | 4926 Register reg4, |
4022 Register reg5, | 4927 Register reg5, |
4023 Register reg6, | 4928 Register reg6, |
(...skipping 43 matching lines...)
4067 ASSERT(masm_.pc_ == address_ + size_); | 4972 ASSERT(masm_.pc_ == address_ + size_); |
4068 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 4973 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
4069 } | 4974 } |
4070 | 4975 |
4071 | 4976 |
4072 void CodePatcher::Emit(Instr instr) { | 4977 void CodePatcher::Emit(Instr instr) { |
4073 masm()->emit(instr); | 4978 masm()->emit(instr); |
4074 } | 4979 } |
4075 | 4980 |
4076 | 4981 |
4077 void CodePatcher::Emit(Address addr) { | |
4078 masm()->emit(reinterpret_cast<Instr>(addr)); | |
4079 } | |
4080 | |
4081 | |
4082 void CodePatcher::EmitCondition(Condition cond) { | 4982 void CodePatcher::EmitCondition(Condition cond) { |
4083 Instr instr = Assembler::instr_at(masm_.pc_); | 4983 Instr instr = Assembler::instr_at(masm_.pc_); |
4084 instr = (instr & ~kCondMask) | cond; | 4984 switch (cond) { |
| 4985 case eq: |
| 4986 instr = (instr & ~kCondMask) | BT; |
| 4987 break; |
| 4988 case ne: |
| 4989 instr = (instr & ~kCondMask) | BF; |
| 4990 break; |
| 4991 default: |
| 4992 UNIMPLEMENTED(); |
| 4993 } |
4085 masm_.emit(instr); | 4994 masm_.emit(instr); |
4086 } | 4995 } |
4087 | 4996 |
4088 | 4997 |
4089 void MacroAssembler::TruncatingDiv(Register result, | 4998 void MacroAssembler::TruncatingDiv(Register result, |
4090 Register dividend, | 4999 Register dividend, |
4091 int32_t divisor) { | 5000 int32_t divisor) { |
4092 ASSERT(!dividend.is(result)); | 5001 ASSERT(!dividend.is(result)); |
4093 ASSERT(!dividend.is(ip)); | 5002 ASSERT(!dividend.is(r0)); |
4094 ASSERT(!result.is(ip)); | 5003 ASSERT(!result.is(r0)); |
4095 MultiplierAndShift ms(divisor); | 5004 MultiplierAndShift ms(divisor); |
4096 mov(ip, Operand(ms.multiplier())); | 5005 mov(r0, Operand(ms.multiplier())); |
4097 smull(ip, result, dividend, ip); | 5006 mulhw(result, dividend, r0); |
4098 if (divisor > 0 && ms.multiplier() < 0) { | 5007 if (divisor > 0 && ms.multiplier() < 0) { |
4099 add(result, result, Operand(dividend)); | 5008 add(result, result, dividend); |
4100 } | 5009 } |
4101 if (divisor < 0 && ms.multiplier() > 0) { | 5010 if (divisor < 0 && ms.multiplier() > 0) { |
4102 sub(result, result, Operand(dividend)); | 5011 sub(result, result, dividend); |
4103 } | 5012 } |
4104 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); | 5013 if (ms.shift() > 0) srawi(result, result, ms.shift()); |
4105 add(result, result, Operand(dividend, LSR, 31)); | 5014 ExtractBit(r0, dividend, 31); |
| 5015 add(result, result, r0); |
4106 } | 5016 } |
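For reference, the emitted sequence computes a round-toward-zero division by a constant via a high multiply. A C-level sketch of the same computation, assuming m and s come from V8's MultiplierAndShift(divisor) (values not reproduced here; editor illustration only):

    #include <stdint.h>
    static int32_t TruncatingDivRef(int32_t dividend, int32_t divisor,
                                    int32_t m, int s) {
      int32_t q = (int32_t)(((int64_t)dividend * m) >> 32);  // mulhw
      if (divisor > 0 && m < 0) q += dividend;
      if (divisor < 0 && m > 0) q -= dividend;
      if (s > 0) q >>= s;                            // srawi: arithmetic shift
      return q + (int32_t)((uint32_t)dividend >> 31);  // add dividend's sign bit
    }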
4107 | 5017 |
4108 | 5018 |
4109 } } // namespace v8::internal | 5019 } } // namespace v8::internal |
4110 | 5020 |
4111 #endif // V8_TARGET_ARCH_ARM | 5021 #endif // V8_TARGET_ARCH_PPC |