OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/ppc/codegen-ppc.h" | 5 #include "src/ppc/codegen-ppc.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_PPC | 7 #if V8_TARGET_ARCH_PPC |
8 | 8 |
9 #include <memory> | 9 #include <memory> |
10 | 10 |
(...skipping 55 matching lines...)
66 DCHECK(masm->has_frame()); | 66 DCHECK(masm->has_frame()); |
67 masm->set_has_frame(false); | 67 masm->set_has_frame(false); |
68 } | 68 } |
69 | 69 |
70 | 70 |
71 // ------------------------------------------------------------------------- | 71 // ------------------------------------------------------------------------- |
72 // Code generators | 72 // Code generators |
73 | 73 |
74 #define __ ACCESS_MASM(masm) | 74 #define __ ACCESS_MASM(masm) |
75 | 75 |
76 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | |
77 MacroAssembler* masm, Register receiver, Register key, Register value, | |
78 Register target_map, AllocationSiteMode mode, | |
79 Label* allocation_memento_found) { | |
80 Register scratch_elements = r7; | |
81 DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements)); | |
82 | |
83 if (mode == TRACK_ALLOCATION_SITE) { | |
84 DCHECK(allocation_memento_found != NULL); | |
85 __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11, | |
86 allocation_memento_found); | |
87 } | |
88 | |
89 // Set transitioned map. | |
90 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0); | |
91 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11, | |
92 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
93 OMIT_SMI_CHECK); | |
94 } | |
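
For orientation, the generator deleted above emits PPC code whose runtime effect is simply to install the transitioned map on the receiver and notify the GC of the store. A minimal C++ sketch of that effect, using hypothetical stand-in types and a hypothetical WriteBarrier helper (the emitted code actually calls RecordWriteField):

```cpp
#include <cstdio>

// Hypothetical stand-ins for V8's heap types, for illustration only.
struct Map {};
struct JSObject { Map* map = nullptr; };

// Assumed write-barrier hook; stands in for the emitted RecordWriteField.
void WriteBarrier(JSObject* host, Map* value) {
  std::printf("remember store of %p into %p\n",
              static_cast<void*>(value), static_cast<void*>(host));
}

// The whole transition: set the new map, then keep the GC's books straight.
void TransitionMap(JSObject* receiver, Map* target_map) {
  receiver->map = target_map;
  WriteBarrier(receiver, target_map);
}
```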
95 | |
96 | |
97 void ElementsTransitionGenerator::GenerateSmiToDouble( | |
98 MacroAssembler* masm, Register receiver, Register key, Register value, | |
99 Register target_map, AllocationSiteMode mode, Label* fail) { | |
100 // lr contains the return address | |
101 Label loop, entry, convert_hole, only_change_map, done; | |
102 Register elements = r7; | |
103 Register length = r8; | |
104 Register array = r9; | |
105 Register array_end = array; | |
106 | |
107 // target_map parameter can be clobbered. | |
108 Register scratch1 = target_map; | |
109 Register scratch2 = r10; | |
110 Register scratch3 = r11; | |
111 Register scratch4 = r14; | |
112 | |
113 // Verify input registers don't conflict with locals. | |
114 DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array, | |
115 scratch2)); | |
116 | |
117 if (mode == TRACK_ALLOCATION_SITE) { | |
118 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail); | |
119 } | |
120 | |
121 // Check for empty arrays, which only require a map transition and no changes | |
122 // to the backing store. | |
123 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
124 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex); | |
125 __ beq(&only_change_map); | |
126 | |
127 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
128 // length: number of elements (smi-tagged) | |
129 | |
130 // Allocate new FixedDoubleArray. | |
131 __ SmiToDoubleArrayOffset(scratch3, length); | |
132 __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize)); | |
133 __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT); | |
134 __ subi(array, array, Operand(kHeapObjectTag)); | |
135 // array: destination FixedDoubleArray, not tagged as heap object. | |
136 // elements: source FixedArray. | |
137 | |
138 // Set destination FixedDoubleArray's length and map. | |
139 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex); | |
140 __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); | |
141 // Set the new array's map; the receiver's map is updated below. | |
142 __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset)); | |
143 | |
144 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0); | |
145 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2, | |
146 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, | |
147 OMIT_SMI_CHECK); | |
148 // Replace receiver's backing store with newly created FixedDoubleArray. | |
149 __ addi(scratch1, array, Operand(kHeapObjectTag)); | |
150 __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0); | |
151 __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2, | |
152 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
153 OMIT_SMI_CHECK); | |
154 | |
155 // Prepare for conversion loop. | |
156 __ addi(scratch1, elements, | |
157 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
158 __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize)); | |
159 __ SmiToDoubleArrayOffset(array_end, length); | |
160 __ add(array_end, scratch2, array_end); | |
161 // Repurpose registers no longer in use. | |
162 #if V8_TARGET_ARCH_PPC64 | |
163 Register hole_int64 = elements; | |
164 __ mov(hole_int64, Operand(kHoleNanInt64)); | |
165 #else | |
166 Register hole_lower = elements; | |
167 Register hole_upper = length; | |
168 __ mov(hole_lower, Operand(kHoleNanLower32)); | |
169 __ mov(hole_upper, Operand(kHoleNanUpper32)); | |
170 #endif | |
171 // scratch1: begin of source FixedArray element fields, not tagged | |
172 // hole_lower: kHoleNanLower32 OR hole_int64 | |
173 // hole_upper: kHoleNanUpper32 | |
174 // array_end: end of destination FixedDoubleArray, not tagged | |
175 // scratch2: begin of FixedDoubleArray element fields, not tagged | |
176 | |
177 __ b(&entry); | |
178 | |
179 __ bind(&only_change_map); | |
180 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0); | |
181 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2, | |
182 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, | |
183 OMIT_SMI_CHECK); | |
184 __ b(&done); | |
185 | |
186 // Convert and copy elements. | |
187 __ bind(&loop); | |
188 __ LoadP(scratch3, MemOperand(scratch1)); | |
189 __ addi(scratch1, scratch1, Operand(kPointerSize)); | |
190 // scratch3: current element | |
191 __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole); | |
192 | |
193 // Normal smi, convert to double and store. | |
194 __ ConvertIntToDouble(scratch3, d0); | |
195 __ stfd(d0, MemOperand(scratch2, 0)); | |
196 __ addi(scratch2, scratch2, Operand(8)); | |
197 __ b(&entry); | |
198 | |
199 // Hole found, store the-hole NaN. | |
200 __ bind(&convert_hole); | |
201 if (FLAG_debug_code) { | |
202 __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize)); | |
203 __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex); | |
204 __ Assert(eq, kObjectFoundInSmiOnlyArray); | |
205 } | |
206 #if V8_TARGET_ARCH_PPC64 | |
207 __ std(hole_int64, MemOperand(scratch2, 0)); | |
208 #else | |
209 __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset)); | |
210 __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset)); | |
211 #endif | |
212 __ addi(scratch2, scratch2, Operand(8)); | |
213 | |
214 __ bind(&entry); | |
215 __ cmp(scratch2, array_end); | |
216 __ blt(&loop); | |
217 | |
218 __ bind(&done); | |
219 } | |
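
The loop above converts a FixedArray of smis into a FixedDoubleArray, writing the hole NaN bit pattern wherever it finds the-hole. A rough, self-contained C++ sketch of that per-element conversion (the smi tagging scheme and the kHoleNanInt64Sketch value below are simplified placeholders, not V8's actual constants):

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Placeholder bit pattern standing in for V8's kHoleNanInt64.
constexpr uint64_t kHoleNanInt64Sketch = 0xFFF7FFFFFFF7FFFFULL;

// Simplified smi tagging: low bit clear means smi, value stored shifted by 1.
inline bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }
inline intptr_t SmiValue(intptr_t tagged) { return tagged >> 1; }

std::vector<double> SmiArrayToDoubleArray(const std::vector<intptr_t>& src) {
  std::vector<double> dst(src.size());
  for (size_t i = 0; i < src.size(); ++i) {
    if (IsSmi(src[i])) {
      // Normal smi: untag and convert to double (ConvertIntToDouble above).
      dst[i] = static_cast<double>(SmiValue(src[i]));
    } else {
      // The-hole: store the hole NaN bit pattern verbatim.
      std::memcpy(&dst[i], &kHoleNanInt64Sketch, sizeof(double));
    }
  }
  return dst;
}
```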
220 | |
221 | |
222 void ElementsTransitionGenerator::GenerateDoubleToObject( | |
223 MacroAssembler* masm, Register receiver, Register key, Register value, | |
224 Register target_map, AllocationSiteMode mode, Label* fail) { | |
225 // Register lr contains the return address. | |
226 Label loop, convert_hole, gc_required, only_change_map; | |
227 Register elements = r7; | |
228 Register array = r9; | |
229 Register length = r8; | |
230 Register scratch = r10; | |
231 Register scratch3 = r11; | |
232 Register hole_value = r14; | |
233 | |
234 // Verify input registers don't conflict with locals. | |
235 DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length, | |
236 scratch)); | |
237 | |
238 if (mode == TRACK_ALLOCATION_SITE) { | |
239 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail); | |
240 } | |
241 | |
242 // Check for empty arrays, which only require a map transition and no changes | |
243 // to the backing store. | |
244 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
245 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex); | |
246 __ beq(&only_change_map); | |
247 | |
248 __ Push(target_map, receiver, key, value); | |
249 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
250 // elements: source FixedDoubleArray | |
251 // length: number of elements (smi-tagged) | |
252 | |
253 // Allocate new FixedArray. | |
254 // Re-use value and target_map registers, as they have been saved on the | |
255 // stack. | |
256 Register array_size = value; | |
257 Register allocate_scratch = target_map; | |
258 __ li(array_size, Operand(FixedDoubleArray::kHeaderSize)); | |
259 __ SmiToPtrArrayOffset(r0, length); | |
260 __ add(array_size, array_size, r0); | |
261 __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required, | |
262 NO_ALLOCATION_FLAGS); | |
263 // array: destination FixedArray, tagged as heap object | |
264 // Set destination FixedDoubleArray's length and map. | |
265 __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); | |
266 __ StoreP(length, FieldMemOperand(array, | |
267 FixedDoubleArray::kLengthOffset), r0); | |
268 __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0); | |
269 | |
270 // Prepare for conversion loop. | |
271 Register src_elements = elements; | |
272 Register dst_elements = target_map; | |
273 Register dst_end = length; | |
274 Register heap_number_map = scratch; | |
275 __ addi(src_elements, elements, | |
276 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | |
277 __ SmiToPtrArrayOffset(length, length); | |
278 __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex); | |
279 | |
280 Label initialization_loop, loop_done; | |
281 __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC); | |
282 __ beq(&loop_done, cr0); | |
283 | |
284 // Allocating heap numbers in the loop below can fail and cause a jump to | |
285 // gc_required. We can't leave a partly initialized FixedArray behind, | |
286 // so pessimistically fill it with holes now. | |
287 __ mtctr(r0); | |
288 __ addi(dst_elements, array, | |
289 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); | |
290 __ bind(&initialization_loop); | |
291 __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize)); | |
292 __ bdnz(&initialization_loop); | |
293 | |
294 __ addi(dst_elements, array, | |
295 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
296 __ add(dst_end, dst_elements, length); | |
297 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
298 // Using offset addresses in src_elements to take full advantage of | |
299 // post-indexing. | |
300 // dst_elements: begin of destination FixedArray element fields, not tagged | |
301 // src_elements: begin of source FixedDoubleArray element fields, | |
302 // not tagged, +4 | |
303 // dst_end: end of destination FixedArray, not tagged | |
304 // array: destination FixedArray | |
305 // hole_value: the-hole pointer | |
306 // heap_number_map: heap number map | |
307 __ b(&loop); | |
308 | |
309 // Call into runtime if GC is required. | |
310 __ bind(&gc_required); | |
311 __ Pop(target_map, receiver, key, value); | |
312 __ b(fail); | |
313 | |
314 // Replace the-hole NaN with the-hole pointer. | |
315 __ bind(&convert_hole); | |
316 __ StoreP(hole_value, MemOperand(dst_elements)); | |
317 __ addi(dst_elements, dst_elements, Operand(kPointerSize)); | |
318 __ cmpl(dst_elements, dst_end); | |
319 __ bge(&loop_done); | |
320 | |
321 __ bind(&loop); | |
322 Register upper_bits = key; | |
323 __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset)); | |
324 __ addi(src_elements, src_elements, Operand(kDoubleSize)); | |
325 // upper_bits: current element's upper 32 bits | |
326 // src_elements: address of next element's upper 32 bits | |
327 __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0); | |
328 __ beq(&convert_hole); | |
329 | |
330 // Non-hole double, copy value into a heap number. | |
331 Register heap_number = receiver; | |
332 Register scratch2 = value; | |
333 __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map, | |
334 &gc_required); | |
335 // heap_number: new heap number | |
336 #if V8_TARGET_ARCH_PPC64 | |
337 __ ld(scratch2, MemOperand(src_elements, -kDoubleSize)); | |
338 // subtract tag for std | |
339 __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag)); | |
340 __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset)); | |
341 #else | |
342 __ lwz(scratch2, | |
343 MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize)); | |
344 __ lwz(upper_bits, | |
345 MemOperand(src_elements, Register::kExponentOffset - kDoubleSize)); | |
346 __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset)); | |
347 __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset)); | |
348 #endif | |
349 __ mr(scratch2, dst_elements); | |
350 __ StoreP(heap_number, MemOperand(dst_elements)); | |
351 __ addi(dst_elements, dst_elements, Operand(kPointerSize)); | |
352 __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved, | |
353 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
354 __ cmpl(dst_elements, dst_end); | |
355 __ blt(&loop); | |
356 __ bind(&loop_done); | |
357 | |
358 __ Pop(target_map, receiver, key, value); | |
359 // Replace receiver's backing store with newly created and filled FixedArray. | |
360 __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0); | |
361 __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch, | |
362 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
363 OMIT_SMI_CHECK); | |
364 | |
365 __ bind(&only_change_map); | |
366 // Update receiver's map. | |
367 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0); | |
368 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch, | |
369 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, | |
370 OMIT_SMI_CHECK); | |
371 } | |
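
Going the other way, the code above turns a FixedDoubleArray back into a FixedArray of tagged values: the destination is first filled with the-hole so a GC triggered by heap-number allocation never sees uninitialized slots, and each non-hole double is then boxed into a freshly allocated HeapNumber. A self-contained C++ sketch of that logic (the hole sentinel bits and the Tagged/HeapNumber types are illustrative assumptions, and ownership of the boxed numbers is ignored for brevity):

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative tagged slot: either the-hole or a pointer to a boxed double.
struct HeapNumber { double value; };
struct Tagged { bool is_hole; HeapNumber* number; };

// Assumed sentinel: upper 32 bits that mark a double slot as the-hole.
constexpr uint32_t kHoleNanUpper32Sketch = 0xFFF7FFFF;

std::vector<Tagged> DoubleArrayToObjectArray(const std::vector<double>& src) {
  // Pessimistically pre-fill with holes, mirroring the initialization_loop:
  // a failed allocation part-way through must not expose garbage slots.
  std::vector<Tagged> dst(src.size(), Tagged{true, nullptr});
  for (size_t i = 0; i < src.size(); ++i) {
    uint64_t bits;
    std::memcpy(&bits, &src[i], sizeof(bits));
    if (static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Sketch) {
      continue;  // hole stays a hole
    }
    // Non-hole double: box it (AllocateHeapNumber + RecordWrite above).
    dst[i] = Tagged{false, new HeapNumber{src[i]}};
  }
  return dst;
}
```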
372 | |
373 | |
374 // assume ip can be used as a scratch register below | 76 // assume ip can be used as a scratch register below |
375 void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string, | 77 void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string, |
376 Register index, Register result, | 78 Register index, Register result, |
377 Label* call_runtime) { | 79 Label* call_runtime) { |
378 // Fetch the instance type of the receiver into result register. | 80 // Fetch the instance type of the receiver into result register. |
379 __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset)); | 81 __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset)); |
380 __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); | 82 __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset)); |
381 | 83 |
382 // We need special handling for indirect strings. | 84 // We need special handling for indirect strings. |
383 Label check_sequential; | 85 Label check_sequential; |
(...skipping 145 matching lines...)
529 patcher.masm()->Jump(r3); | 231 patcher.masm()->Jump(r3); |
530 for (int i = 0; i < kCodeAgingSequenceNops; i++) { | 232 for (int i = 0; i < kCodeAgingSequenceNops; i++) { |
531 patcher.masm()->nop(); | 233 patcher.masm()->nop(); |
532 } | 234 } |
533 } | 235 } |
534 } | 236 } |
535 } // namespace internal | 237 } // namespace internal |
536 } // namespace v8 | 238 } // namespace v8 |
537 | 239 |
538 #endif // V8_TARGET_ARCH_PPC | 240 #endif // V8_TARGET_ARCH_PPC |