Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/ppc/codegen-ppc.cc

Issue 901083004: Contribution of PowerPC port (continuation of 422063005) - PPC dir update (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Contribution of PowerPC port (continuation of 422063005) - PPC dir update -comments and rebase Created 5 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/ppc/code-stubs-ppc.cc ('k') | src/ppc/constants-ppc.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #if V8_TARGET_ARCH_PPC 7 #if V8_TARGET_ARCH_PPC
8 8
9 #include "src/codegen.h" 9 #include "src/codegen.h"
10 #include "src/macro-assembler.h" 10 #include "src/macro-assembler.h"
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after
148 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11, 148 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
149 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, 149 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
150 OMIT_SMI_CHECK); 150 OMIT_SMI_CHECK);
151 } 151 }
152 152
153 153
154 void ElementsTransitionGenerator::GenerateSmiToDouble( 154 void ElementsTransitionGenerator::GenerateSmiToDouble(
155 MacroAssembler* masm, Register receiver, Register key, Register value, 155 MacroAssembler* masm, Register receiver, Register key, Register value,
156 Register target_map, AllocationSiteMode mode, Label* fail) { 156 Register target_map, AllocationSiteMode mode, Label* fail) {
157 // lr contains the return address 157 // lr contains the return address
158 Label loop, entry, convert_hole, gc_required, only_change_map, done; 158 Label loop, entry, convert_hole, only_change_map, done;
159 Register elements = r7; 159 Register elements = r7;
160 Register length = r8; 160 Register length = r8;
161 Register array = r9; 161 Register array = r9;
162 Register array_end = array; 162 Register array_end = array;
163 163
164 // target_map parameter can be clobbered. 164 // target_map parameter can be clobbered.
165 Register scratch1 = target_map; 165 Register scratch1 = target_map;
166 Register scratch2 = r11; 166 Register scratch2 = r10;
167 Register scratch3 = r11;
168 Register scratch4 = r14;
167 169
168 // Verify input registers don't conflict with locals. 170 // Verify input registers don't conflict with locals.
169 DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array, 171 DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
170 scratch2)); 172 scratch2));
171 173
172 if (mode == TRACK_ALLOCATION_SITE) { 174 if (mode == TRACK_ALLOCATION_SITE) {
173 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); 175 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
174 } 176 }
175 177
176 // Check for empty arrays, which only require a map transition and no changes 178 // Check for empty arrays, which only require a map transition and no changes
177 // to the backing store. 179 // to the backing store.
178 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); 180 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
179 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex); 181 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
180 __ beq(&only_change_map); 182 __ beq(&only_change_map);
181 183
182 // Preserve lr and use r17 as a temporary register.
183 __ mflr(r0);
184 __ Push(r0);
185
186 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); 184 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
187 // length: number of elements (smi-tagged) 185 // length: number of elements (smi-tagged)
188 186
189 // Allocate new FixedDoubleArray. 187 // Allocate new FixedDoubleArray.
190 __ SmiToDoubleArrayOffset(r17, length); 188 __ SmiToDoubleArrayOffset(scratch3, length);
191 __ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize)); 189 __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
192 __ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT); 190 __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
191 // array: destination FixedDoubleArray, not tagged as heap object.
192 // elements: source FixedArray.
193 193
194 // Set destination FixedDoubleArray's length and map. 194 // Set destination FixedDoubleArray's length and map.
195 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex); 195 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
196 __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); 196 __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
197 // Update receiver's map. 197 // Update receiver's map.
198 __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset)); 198 __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
199 199
200 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0); 200 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
201 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2, 201 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
202 kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, 202 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
203 OMIT_SMI_CHECK); 203 OMIT_SMI_CHECK);
204 // Replace receiver's backing store with newly created FixedDoubleArray. 204 // Replace receiver's backing store with newly created FixedDoubleArray.
205 __ addi(scratch1, array, Operand(kHeapObjectTag)); 205 __ addi(scratch1, array, Operand(kHeapObjectTag));
206 __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0); 206 __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
207 __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2, 207 __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
208 kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, 208 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
209 OMIT_SMI_CHECK); 209 OMIT_SMI_CHECK);
210 210
211 // Prepare for conversion loop. 211 // Prepare for conversion loop.
212 __ addi(target_map, elements, 212 __ addi(scratch1, elements,
213 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 213 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
214 __ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize)); 214 __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
215 __ SmiToDoubleArrayOffset(array, length); 215 __ SmiToDoubleArrayOffset(array_end, length);
216 __ add(array_end, r10, array); 216 __ add(array_end, scratch2, array_end);
217 // Repurpose registers no longer in use. 217 // Repurpose registers no longer in use.
218 #if V8_TARGET_ARCH_PPC64 218 #if V8_TARGET_ARCH_PPC64
219 Register hole_int64 = elements; 219 Register hole_int64 = elements;
220 __ mov(hole_int64, Operand(kHoleNanInt64));
220 #else 221 #else
221 Register hole_lower = elements; 222 Register hole_lower = elements;
222 Register hole_upper = length; 223 Register hole_upper = length;
224 __ mov(hole_lower, Operand(kHoleNanLower32));
225 __ mov(hole_upper, Operand(kHoleNanUpper32));
223 #endif 226 #endif
224 // scratch1: begin of source FixedArray element fields, not tagged 227 // scratch1: begin of source FixedArray element fields, not tagged
225 // hole_lower: kHoleNanLower32 OR hole_int64 228 // hole_lower: kHoleNanLower32 OR hole_int64
226 // hole_upper: kHoleNanUpper32 229 // hole_upper: kHoleNanUpper32
227 // array_end: end of destination FixedDoubleArray, not tagged 230 // array_end: end of destination FixedDoubleArray, not tagged
228 // scratch2: begin of FixedDoubleArray element fields, not tagged 231 // scratch2: begin of FixedDoubleArray element fields, not tagged
229 232
230 __ b(&entry); 233 __ b(&entry);
231 234
232 __ bind(&only_change_map); 235 __ bind(&only_change_map);
233 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0); 236 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
234 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2, 237 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
235 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, 238 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
236 OMIT_SMI_CHECK); 239 OMIT_SMI_CHECK);
237 __ b(&done); 240 __ b(&done);
238 241
239 // Call into runtime if GC is required.
240 __ bind(&gc_required);
241 __ Pop(r0);
242 __ mtlr(r0);
243 __ b(fail);
244
245 // Convert and copy elements. 242 // Convert and copy elements.
246 __ bind(&loop); 243 __ bind(&loop);
247 __ LoadP(r11, MemOperand(scratch1)); 244 __ LoadP(scratch3, MemOperand(scratch1));
248 __ addi(scratch1, scratch1, Operand(kPointerSize)); 245 __ addi(scratch1, scratch1, Operand(kPointerSize));
249 // r11: current element 246 // scratch3: current element
250 __ UntagAndJumpIfNotSmi(r11, r11, &convert_hole); 247 __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);
251 248
252 // Normal smi, convert to double and store. 249 // Normal smi, convert to double and store.
253 __ ConvertIntToDouble(r11, d0); 250 __ ConvertIntToDouble(scratch3, d0);
254 __ stfd(d0, MemOperand(scratch2, 0)); 251 __ stfd(d0, MemOperand(scratch2, 0));
255 __ addi(r10, r10, Operand(8)); 252 __ addi(scratch2, scratch2, Operand(8));
256
257 __ b(&entry); 253 __ b(&entry);
258 254
259 // Hole found, store the-hole NaN. 255 // Hole found, store the-hole NaN.
260 __ bind(&convert_hole); 256 __ bind(&convert_hole);
261 if (FLAG_debug_code) { 257 if (FLAG_debug_code) {
262 // Restore a "smi-untagged" heap object. 258 __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
263 __ LoadP(r11, MemOperand(r6, -kPointerSize)); 259 __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
264 __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
265 __ Assert(eq, kObjectFoundInSmiOnlyArray); 260 __ Assert(eq, kObjectFoundInSmiOnlyArray);
266 } 261 }
267 #if V8_TARGET_ARCH_PPC64 262 #if V8_TARGET_ARCH_PPC64
268 __ std(hole_int64, MemOperand(r10, 0)); 263 __ std(hole_int64, MemOperand(scratch2, 0));
269 #else 264 #else
270 __ stw(hole_upper, MemOperand(r10, Register::kExponentOffset)); 265 __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
271 __ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset)); 266 __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
272 #endif 267 #endif
273 __ addi(r10, r10, Operand(8)); 268 __ addi(scratch2, scratch2, Operand(8));
274 269
275 __ bind(&entry); 270 __ bind(&entry);
276 __ cmp(r10, array_end); 271 __ cmp(scratch2, array_end);
277 __ blt(&loop); 272 __ blt(&loop);
278 273
279 __ Pop(r0);
280 __ mtlr(r0);
281 __ bind(&done); 274 __ bind(&done);
282 } 275 }
283 276
284 277
285 void ElementsTransitionGenerator::GenerateDoubleToObject( 278 void ElementsTransitionGenerator::GenerateDoubleToObject(
286 MacroAssembler* masm, Register receiver, Register key, Register value, 279 MacroAssembler* masm, Register receiver, Register key, Register value,
287 Register target_map, AllocationSiteMode mode, Label* fail) { 280 Register target_map, AllocationSiteMode mode, Label* fail) {
288 // Register lr contains the return address. 281 // Register lr contains the return address.
289 Label entry, loop, convert_hole, gc_required, only_change_map; 282 Label loop, convert_hole, gc_required, only_change_map;
290 Register elements = r7; 283 Register elements = r7;
291 Register array = r9; 284 Register array = r9;
292 Register length = r8; 285 Register length = r8;
293 Register scratch = r11; 286 Register scratch = r10;
287 Register scratch3 = r11;
288 Register hole_value = r14;
294 289
295 // Verify input registers don't conflict with locals. 290 // Verify input registers don't conflict with locals.
296 DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length, 291 DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
297 scratch)); 292 scratch));
298 293
299 if (mode == TRACK_ALLOCATION_SITE) { 294 if (mode == TRACK_ALLOCATION_SITE) {
300 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); 295 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
301 } 296 }
302 297
303 // Check for empty arrays, which only require a map transition and no changes 298 // Check for empty arrays, which only require a map transition and no changes
(...skipping 25 matching lines...) Expand all
329 __ addi(array, array, Operand(kHeapObjectTag)); 324 __ addi(array, array, Operand(kHeapObjectTag));
330 325
331 // Prepare for conversion loop. 326 // Prepare for conversion loop.
332 Register src_elements = elements; 327 Register src_elements = elements;
333 Register dst_elements = target_map; 328 Register dst_elements = target_map;
334 Register dst_end = length; 329 Register dst_end = length;
335 Register heap_number_map = scratch; 330 Register heap_number_map = scratch;
336 __ addi(src_elements, elements, 331 __ addi(src_elements, elements,
337 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); 332 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
338 __ SmiToPtrArrayOffset(length, length); 333 __ SmiToPtrArrayOffset(length, length);
339 __ LoadRoot(r10, Heap::kTheHoleValueRootIndex); 334 __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
340 335
341 Label initialization_loop, loop_done; 336 Label initialization_loop, loop_done;
342 __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC); 337 __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
343 __ beq(&loop_done, cr0); 338 __ beq(&loop_done, cr0);
344 339
345 // Allocating heap numbers in the loop below can fail and cause a jump to 340 // Allocating heap numbers in the loop below can fail and cause a jump to
346 // gc_required. We can't leave a partly initialized FixedArray behind, 341 // gc_required. We can't leave a partly initialized FixedArray behind,
347 // so pessimistically fill it with holes now. 342 // so pessimistically fill it with holes now.
348 __ mtctr(r0); 343 __ mtctr(r0);
349 __ addi(dst_elements, array, 344 __ addi(dst_elements, array,
350 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); 345 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
351 __ bind(&initialization_loop); 346 __ bind(&initialization_loop);
352 __ StorePU(r10, MemOperand(dst_elements, kPointerSize)); 347 __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
353 __ bdnz(&initialization_loop); 348 __ bdnz(&initialization_loop);
354 349
355 __ addi(dst_elements, array, 350 __ addi(dst_elements, array,
356 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 351 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
357 __ add(dst_end, dst_elements, length); 352 __ add(dst_end, dst_elements, length);
358 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 353 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
359 // Using offsetted addresses in src_elements to fully take advantage of 354 // Using offsetted addresses in src_elements to fully take advantage of
360 // post-indexing. 355 // post-indexing.
361 // dst_elements: begin of destination FixedArray element fields, not tagged 356 // dst_elements: begin of destination FixedArray element fields, not tagged
362 // src_elements: begin of source FixedDoubleArray element fields, 357 // src_elements: begin of source FixedDoubleArray element fields,
363 // not tagged, +4 358 // not tagged, +4
364 // dst_end: end of destination FixedArray, not tagged 359 // dst_end: end of destination FixedArray, not tagged
365 // array: destination FixedArray 360 // array: destination FixedArray
366 // r10: the-hole pointer 361 // hole_value: the-hole pointer
367 // heap_number_map: heap number map 362 // heap_number_map: heap number map
368 __ b(&loop); 363 __ b(&loop);
369 364
370 // Call into runtime if GC is required. 365 // Call into runtime if GC is required.
371 __ bind(&gc_required); 366 __ bind(&gc_required);
372 __ Pop(target_map, receiver, key, value); 367 __ Pop(target_map, receiver, key, value);
373 __ b(fail); 368 __ b(fail);
374 369
375 // Replace the-hole NaN with the-hole pointer. 370 // Replace the-hole NaN with the-hole pointer.
376 __ bind(&convert_hole); 371 __ bind(&convert_hole);
377 __ StoreP(r10, MemOperand(dst_elements)); 372 __ StoreP(hole_value, MemOperand(dst_elements));
378 __ addi(dst_elements, dst_elements, Operand(kPointerSize)); 373 __ addi(dst_elements, dst_elements, Operand(kPointerSize));
379 __ cmpl(dst_elements, dst_end); 374 __ cmpl(dst_elements, dst_end);
380 __ bge(&loop_done); 375 __ bge(&loop_done);
381 376
382 __ bind(&loop); 377 __ bind(&loop);
383 Register upper_bits = key; 378 Register upper_bits = key;
384 __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset)); 379 __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
385 __ addi(src_elements, src_elements, Operand(kDoubleSize)); 380 __ addi(src_elements, src_elements, Operand(kDoubleSize));
386 // upper_bits: current element's upper 32 bit 381 // upper_bits: current element's upper 32 bit
387 // src_elements: address of next element's upper 32 bit 382 // src_elements: address of next element's upper 32 bit
388 __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0); 383 __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
389 __ beq(&convert_hole); 384 __ beq(&convert_hole);
390 385
391 // Non-hole double, copy value into a heap number. 386 // Non-hole double, copy value into a heap number.
392 Register heap_number = receiver; 387 Register heap_number = receiver;
393 Register scratch2 = value; 388 Register scratch2 = value;
394 __ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map, 389 __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
395 &gc_required); 390 &gc_required);
396 // heap_number: new heap number 391 // heap_number: new heap number
397 #if V8_TARGET_ARCH_PPC64 392 #if V8_TARGET_ARCH_PPC64
398 __ ld(scratch2, MemOperand(src_elements, -kDoubleSize)); 393 __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
399 // subtract tag for std 394 // subtract tag for std
400 __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag)); 395 __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
401 __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset)); 396 __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
402 #else 397 #else
403 __ lwz(scratch2, 398 __ lwz(scratch2,
404 MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize)); 399 MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
405 __ lwz(upper_bits, 400 __ lwz(upper_bits,
406 MemOperand(src_elements, Register::kExponentOffset - kDoubleSize)); 401 MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
407 __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset)); 402 __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
408 __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset)); 403 __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
409 #endif 404 #endif
410 __ mr(scratch2, dst_elements); 405 __ mr(scratch2, dst_elements);
411 __ StoreP(heap_number, MemOperand(dst_elements)); 406 __ StoreP(heap_number, MemOperand(dst_elements));
412 __ addi(dst_elements, dst_elements, Operand(kPointerSize)); 407 __ addi(dst_elements, dst_elements, Operand(kPointerSize));
413 __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved, 408 __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
414 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); 409 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
415 __ b(&entry);
416
417 // Replace the-hole NaN with the-hole pointer.
418 __ bind(&convert_hole);
419 __ StoreP(r10, MemOperand(dst_elements));
420 __ addi(dst_elements, dst_elements, Operand(kPointerSize));
421
422 __ bind(&entry);
423 __ cmpl(dst_elements, dst_end); 410 __ cmpl(dst_elements, dst_end);
424 __ blt(&loop); 411 __ blt(&loop);
425 __ bind(&loop_done); 412 __ bind(&loop_done);
426 413
427 __ Pop(target_map, receiver, key, value); 414 __ Pop(target_map, receiver, key, value);
428 // Replace receiver's backing store with newly created and filled FixedArray. 415 // Replace receiver's backing store with newly created and filled FixedArray.
429 __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0); 416 __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
430 __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch, 417 __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
431 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, 418 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
432 OMIT_SMI_CHECK); 419 OMIT_SMI_CHECK);
(...skipping 254 matching lines...) Expand 10 before | Expand all | Expand 10 after
687 patcher.masm()->Jump(r3); 674 patcher.masm()->Jump(r3);
688 for (int i = 0; i < kCodeAgingSequenceNops; i++) { 675 for (int i = 0; i < kCodeAgingSequenceNops; i++) {
689 patcher.masm()->nop(); 676 patcher.masm()->nop();
690 } 677 }
691 } 678 }
692 } 679 }
693 } 680 }
694 } // namespace v8::internal 681 } // namespace v8::internal
695 682
696 #endif // V8_TARGET_ARCH_PPC 683 #endif // V8_TARGET_ARCH_PPC
OLDNEW
« no previous file with comments | « src/ppc/code-stubs-ppc.cc ('k') | src/ppc/constants-ppc.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698