| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/s390/codegen-s390.h" | 5 #include "src/s390/codegen-s390.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_S390 | 7 #if V8_TARGET_ARCH_S390 |
| 8 | 8 |
| 9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
| 10 #include "src/macro-assembler.h" | 10 #include "src/macro-assembler.h" |
| (...skipping 252 matching lines...) |
| 263 | 263 |
| 264 void ElementsTransitionGenerator::GenerateDoubleToObject( | 264 void ElementsTransitionGenerator::GenerateDoubleToObject( |
| 265 MacroAssembler* masm, Register receiver, Register key, Register value, | 265 MacroAssembler* masm, Register receiver, Register key, Register value, |
| 266 Register target_map, AllocationSiteMode mode, Label* fail) { | 266 Register target_map, AllocationSiteMode mode, Label* fail) { |
| 267 // Register lr contains the return address. | 267 // Register lr contains the return address. |
| 268 Label loop, convert_hole, gc_required, only_change_map; | 268 Label loop, convert_hole, gc_required, only_change_map; |
| 269 Register elements = r6; | 269 Register elements = r6; |
| 270 Register array = r8; | 270 Register array = r8; |
| 271 Register length = r7; | 271 Register length = r7; |
| 272 Register scratch = r1; | 272 Register scratch = r1; |
| | 273 Register scratch3 = r9; |
| | 274 Register hole_value = r9; |
| 273 | 275 |
| 274 // Verify input registers don't conflict with locals. | 276 // Verify input registers don't conflict with locals. |
| 275 DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length, | 277 DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length, |
| 276 scratch)); | 278 scratch)); |
| 277 | 279 |
| 278 if (mode == TRACK_ALLOCATION_SITE) { | 280 if (mode == TRACK_ALLOCATION_SITE) { |
| 279 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); | 281 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); |
| 280 } | 282 } |
| 281 | 283 |
| 282 // Check for empty arrays, which only require a map transition and no changes | 284 // Check for empty arrays, which only require a map transition and no changes |
| (...skipping 25 matching lines...) |
| 308 __ AddP(array, Operand(kHeapObjectTag)); | 310 __ AddP(array, Operand(kHeapObjectTag)); |
| 309 | 311 |
| 310 // Prepare for conversion loop. | 312 // Prepare for conversion loop. |
| 311 Register src_elements = elements; | 313 Register src_elements = elements; |
| 312 Register dst_elements = target_map; | 314 Register dst_elements = target_map; |
| 313 Register dst_end = length; | 315 Register dst_end = length; |
| 314 Register heap_number_map = scratch; | 316 Register heap_number_map = scratch; |
| 315 __ AddP(src_elements, | 317 __ AddP(src_elements, |
| 316 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 318 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); |
| 317 __ SmiToPtrArrayOffset(length, length); | 319 __ SmiToPtrArrayOffset(length, length); |
| 318 __ LoadRoot(r9, Heap::kTheHoleValueRootIndex); | 320 __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex); |
| 319 | 321 |
| 320 Label initialization_loop, loop_done; | 322 Label initialization_loop, loop_done; |
| 321 __ ShiftRightP(r0, length, Operand(kPointerSizeLog2)); | 323 __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2)); |
| 322 __ beq(&loop_done, Label::kNear /*, cr0*/); | 324 __ beq(&loop_done, Label::kNear); |
| 323 | 325 |
| 324 // Allocating heap numbers in the loop below can fail and cause a jump to | 326 // Allocating heap numbers in the loop below can fail and cause a jump to |
| 325 // gc_required. We can't leave a partly initialized FixedArray behind, | 327 // gc_required. We can't leave a partly initialized FixedArray behind, |
| 326 // so pessimistically fill it with holes now. | 328 // so pessimistically fill it with holes now. |
| 327 __ AddP(dst_elements, array, | 329 __ AddP(dst_elements, array, |
| 328 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); | 330 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); |
| 329 __ bind(&initialization_loop); | 331 __ bind(&initialization_loop); |
| 330 __ StoreP(r9, MemOperand(dst_elements, kPointerSize)); | 332 __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize)); |
| 331 __ lay(dst_elements, MemOperand(dst_elements, kPointerSize)); | 333 __ lay(dst_elements, MemOperand(dst_elements, kPointerSize)); |
| 332 __ BranchOnCount(r0, &initialization_loop); | 334 __ BranchOnCount(scratch, &initialization_loop); |
| 333 | 335 |
| 334 __ AddP(dst_elements, array, | 336 __ AddP(dst_elements, array, |
| 335 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 337 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 336 __ AddP(dst_end, dst_elements, length); | 338 __ AddP(dst_end, dst_elements, length); |
| 337 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 339 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 338 // Using offsetted addresses in src_elements to fully take advantage of | 340 // Using offsetted addresses in src_elements to fully take advantage of |
| 339 // post-indexing. | 341 // post-indexing. |
| 340 // dst_elements: begin of destination FixedArray element fields, not tagged | 342 // dst_elements: begin of destination FixedArray element fields, not tagged |
| 341 // src_elements: begin of source FixedDoubleArray element fields, | 343 // src_elements: begin of source FixedDoubleArray element fields, |
| 342 // not tagged, +4 | 344 // not tagged, +4 |
| 343 // dst_end: end of destination FixedArray, not tagged | 345 // dst_end: end of destination FixedArray, not tagged |
| 344 // array: destination FixedArray | 346 // array: destination FixedArray |
| 345 // r9: the-hole pointer | 347 // hole_value: the-hole pointer |
| 346 // heap_number_map: heap number map | 348 // heap_number_map: heap number map |
| 347 __ b(&loop, Label::kNear); | 349 __ b(&loop, Label::kNear); |
| 348 | 350 |
| 349 // Call into runtime if GC is required. | 351 // Call into runtime if GC is required. |
| 350 __ bind(&gc_required); | 352 __ bind(&gc_required); |
| 351 __ Pop(target_map, receiver, key, value); | 353 __ Pop(target_map, receiver, key, value); |
| 352 __ b(fail); | 354 __ b(fail); |
| 353 | 355 |
| 354 // Replace the-hole NaN with the-hole pointer. | 356 // Replace the-hole NaN with the-hole pointer. |
| 355 __ bind(&convert_hole); | 357 __ bind(&convert_hole); |
| 356 __ StoreP(r9, MemOperand(dst_elements)); | 358 __ StoreP(hole_value, MemOperand(dst_elements)); |
| 357 __ AddP(dst_elements, Operand(kPointerSize)); | 359 __ AddP(dst_elements, Operand(kPointerSize)); |
| 358 __ CmpLogicalP(dst_elements, dst_end); | 360 __ CmpLogicalP(dst_elements, dst_end); |
| 359 __ bge(&loop_done); | 361 __ bge(&loop_done); |
| 360 | 362 |
| 361 __ bind(&loop); | 363 __ bind(&loop); |
| 362 Register upper_bits = key; | 364 Register upper_bits = key; |
| 363 __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset)); | 365 __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset)); |
| 364 __ AddP(src_elements, Operand(kDoubleSize)); | 366 __ AddP(src_elements, Operand(kDoubleSize)); |
| 365 // upper_bits: current element's upper 32 bit | 367 // upper_bits: current element's upper 32 bit |
| 366 // src_elements: address of next element's upper 32 bit | 368 // src_elements: address of next element's upper 32 bit |
| 367 __ Cmp32(upper_bits, Operand(kHoleNanUpper32)); | 369 __ Cmp32(upper_bits, Operand(kHoleNanUpper32)); |
| 368 __ beq(&convert_hole, Label::kNear); | 370 __ beq(&convert_hole, Label::kNear); |
| 369 | 371 |
| 370 // Non-hole double, copy value into a heap number. | 372 // Non-hole double, copy value into a heap number. |
| 371 Register heap_number = receiver; | 373 Register heap_number = receiver; |
| 372 Register scratch2 = value; | 374 Register scratch2 = value; |
| 373 __ AllocateHeapNumber(heap_number, scratch2, r1, heap_number_map, | 375 __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map, |
| 374 &gc_required); | 376 &gc_required); |
| 375 // heap_number: new heap number | 377 // heap_number: new heap number |
| 376 #if V8_TARGET_ARCH_S390X | 378 #if V8_TARGET_ARCH_S390X |
| 377 __ lg(scratch2, MemOperand(src_elements, -kDoubleSize)); | 379 __ lg(scratch2, MemOperand(src_elements, -kDoubleSize)); |
| 378 // subtract tag for std | 380 // subtract tag for std |
| 379 __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag)); | 381 __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag)); |
| 380 __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset)); | 382 __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset)); |
| 381 #else | 383 #else |
| 382 __ LoadlW(scratch2, | 384 __ LoadlW(scratch2, |
| 383 MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize)); | 385 MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize)); |
| (...skipping 216 matching lines...) |
| 600 USE(isolate); | 602 USE(isolate); |
| 601 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); | 603 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); |
| 602 // Since patcher is a large object, allocate it dynamically when needed, | 604 // Since patcher is a large object, allocate it dynamically when needed, |
| 603 // to avoid overloading the stack in stress conditions. | 605 // to avoid overloading the stack in stress conditions. |
| 604 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in | 606 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in |
| 605 // the process, before ARM simulator ICache is setup. | 607 // the process, before ARM simulator ICache is setup. |
| 606 base::SmartPointer<CodePatcher> patcher( | 608 base::SmartPointer<CodePatcher> patcher( |
| 607 new CodePatcher(isolate, young_sequence_.start(), | 609 new CodePatcher(isolate, young_sequence_.start(), |
| 608 young_sequence_.length(), CodePatcher::DONT_FLUSH)); | 610 young_sequence_.length(), CodePatcher::DONT_FLUSH)); |
| 609 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); | 611 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); |
| 610 patcher->masm()->PushFixedFrame(r3); | 612 patcher->masm()->PushStandardFrame(r3); |
| 611 patcher->masm()->la( | |
| 612 fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp)); | |
| 613 } | 613 } |
| 614 | 614 |
| 615 #ifdef DEBUG | 615 #ifdef DEBUG |
| 616 bool CodeAgingHelper::IsOld(byte* candidate) const { | 616 bool CodeAgingHelper::IsOld(byte* candidate) const { |
| 617 return Assembler::IsNop(Assembler::instr_at(candidate)); | 617 return Assembler::IsNop(Assembler::instr_at(candidate)); |
| 618 } | 618 } |
| 619 #endif | 619 #endif |
| 620 | 620 |
| 621 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { | 621 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { |
| 622 bool result = isolate->code_aging_helper()->IsYoung(sequence); | 622 bool result = isolate->code_aging_helper()->IsYoung(sequence); |
| (...skipping 43 matching lines...) |
| 666 // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes. | 666 // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes. |
| 667 patcher.masm()->nop(); // 2-byte nops(). | 667 patcher.masm()->nop(); // 2-byte nops(). |
| 668 } | 668 } |
| 669 } | 669 } |
| 670 } | 670 } |
| 671 | 671 |
| 672 } // namespace internal | 672 } // namespace internal |
| 673 } // namespace v8 | 673 } // namespace v8 |
| 674 | 674 |
| 675 #endif // V8_TARGET_ARCH_S390 | 675 #endif // V8_TARGET_ARCH_S390 |
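Note on the hole check in the rewritten conversion loop (`__ Cmp32(upper_bits, Operand(kHoleNanUpper32))` followed by the branch to `convert_hole`): holes in a FixedDoubleArray are encoded as a reserved NaN bit pattern, so the loop only needs to inspect the upper 32 bits of each element before deciding whether to store the the-hole pointer or allocate a HeapNumber. The sketch below is illustrative only and is not part of this change; `kIllustrativeHoleNanUpper32` is a made-up stand-in for V8's `kHoleNanUpper32`, whose actual value is defined in V8's headers and may differ.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

namespace {

// Made-up stand-in for V8's kHoleNanUpper32; the real constant lives in V8's
// headers and may use a different bit pattern.
constexpr uint32_t kIllustrativeHoleNanUpper32 = 0xFFF7FFFFu;

// Mirrors what the conversion loop does: look only at the upper (exponent)
// word of the double and compare it against the hole-NaN pattern.
bool IsHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kIllustrativeHoleNanUpper32;
}

}  // namespace

int main() {
  // Build a "hole" double from the sentinel upper word.
  uint64_t hole_bits = static_cast<uint64_t>(kIllustrativeHoleNanUpper32) << 32;
  double hole;
  std::memcpy(&hole, &hole_bits, sizeof(hole));

  const double elements[] = {1.5, hole, 2.5};
  for (double d : elements) {
    std::printf("%s\n", IsHole(d) ? "hole  -> store the-hole pointer"
                                  : "value -> allocate a HeapNumber");
  }
  return 0;
}
```

This is also why the 32-bit path in the diff loads via `Register::kExponentOffset`: only the word that holds the exponent carries the sentinel, and its position within the double depends on endianness.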