| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_PPC | 5 #if V8_TARGET_ARCH_PPC |
| 6 | 6 |
| 7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
| 8 #include "src/ic/ic.h" | 8 #include "src/ic/ic.h" |
| 9 #include "src/ic/ic-compiler.h" | 9 #include "src/ic/ic-compiler.h" |
| 10 #include "src/ic/stub-cache.h" | 10 #include "src/ic/stub-cache.h" |
| (...skipping 480 matching lines...) | |
| 491 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, | 491 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, |
| 492 Register value, Register key, Register receiver, Register receiver_map, | 492 Register value, Register key, Register receiver, Register receiver_map, |
| 493 Register elements_map, Register elements) { | 493 Register elements_map, Register elements) { |
| 494 Label transition_smi_elements; | 494 Label transition_smi_elements; |
| 495 Label finish_object_store, non_double_value, transition_double_elements; | 495 Label finish_object_store, non_double_value, transition_double_elements; |
| 496 Label fast_double_without_map_check; | 496 Label fast_double_without_map_check; |
| 497 | 497 |
| 498 // Fast case: Do the store, could be either Object or double. | 498 // Fast case: Do the store, could be either Object or double. |
| 499 __ bind(fast_object); | 499 __ bind(fast_object); |
| 500 Register scratch = r7; | 500 Register scratch = r7; |
| 501 Register scratch2 = r0; | |
| 502 Register address = r8; | 501 Register address = r8; |
| 503 DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, | 502 DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, |
| 504 scratch, scratch2, address)); | 503 scratch, address)); |
| 505 | 504 |
| 506 if (check_map == kCheckMap) { | 505 if (check_map == kCheckMap) { |
| 507 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 506 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 508 __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map())); | 507 __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map())); |
| 509 __ cmp(elements_map, scratch); | 508 __ cmp(elements_map, scratch); |
| 510 __ bne(fast_double); | 509 __ bne(fast_double); |
| 511 } | 510 } |
| 512 | 511 |
| 513 // HOLECHECK: guards "A[i] = V" | 512 // HOLECHECK: guards "A[i] = V" |
| 514 // We have to go to the runtime if the current value is the hole because | 513 // We have to go to the runtime if the current value is the hole because |
| 515 // there may be a callback on the element | 514 // there may be a callback on the element |
| 516 Label holecheck_passed1; | 515 Label holecheck_passed1; |
| 517 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 516 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 518 __ SmiToPtrArrayOffset(scratch, key); | 517 __ SmiToPtrArrayOffset(scratch, key); |
| 519 __ LoadPX(scratch, MemOperand(address, scratch)); | 518 __ LoadPX(scratch, MemOperand(address, scratch)); |
| 520 __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), | 519 __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0); |
| 521 scratch2); | |
| 522 __ bne(&holecheck_passed1); | 520 __ bne(&holecheck_passed1); |
| 523 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); | 521 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); |
| 524 | 522 |
| 525 __ bind(&holecheck_passed1); | 523 __ bind(&holecheck_passed1); |
| 526 | 524 |
| 527 // Smi stores don't require further checks. | 525 // Smi stores don't require further checks. |
| 528 Label non_smi_value; | 526 Label non_smi_value; |
| 529 __ JumpIfNotSmi(value, &non_smi_value); | 527 __ JumpIfNotSmi(value, &non_smi_value); |
| 530 | 528 |
| 531 if (increment_length == kIncrementLength) { | 529 if (increment_length == kIncrementLength) { |
| 532 // Add 1 to receiver->length. | 530 // Add 1 to receiver->length. |
| 533 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), scratch2); | 531 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); |
| 534 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), | 532 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); |
| 535 scratch2); | |
| 536 } | 533 } |
| 537 // It's irrelevant whether array is smi-only or not when writing a smi. | 534 // It's irrelevant whether array is smi-only or not when writing a smi. |
| 538 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 535 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 539 __ SmiToPtrArrayOffset(scratch, key); | 536 __ SmiToPtrArrayOffset(scratch, key); |
| 540 __ StorePX(value, MemOperand(address, scratch)); | 537 __ StorePX(value, MemOperand(address, scratch)); |
| 541 __ Ret(); | 538 __ Ret(); |
| 542 | 539 |
| 543 __ bind(&non_smi_value); | 540 __ bind(&non_smi_value); |
| 544 // Escape to elements kind transition case. | 541 // Escape to elements kind transition case. |
| 545 __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); | 542 __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); |
| 546 | 543 |
| 547 // Fast elements array, store the value to the elements backing store. | 544 // Fast elements array, store the value to the elements backing store. |
| 548 __ bind(&finish_object_store); | 545 __ bind(&finish_object_store); |
| 549 if (increment_length == kIncrementLength) { | 546 if (increment_length == kIncrementLength) { |
| 550 // Add 1 to receiver->length. | 547 // Add 1 to receiver->length. |
| 551 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), scratch2); | 548 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); |
| 552 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), | 549 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); |
| 553 scratch2); | |
| 554 } | 550 } |
| 555 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 551 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 556 __ SmiToPtrArrayOffset(scratch, key); | 552 __ SmiToPtrArrayOffset(scratch, key); |
| 557 __ StorePUX(value, MemOperand(address, scratch)); | 553 __ StorePUX(value, MemOperand(address, scratch)); |
| 558 // Update write barrier for the elements array address. | 554 // Update write barrier for the elements array address. |
| 559 __ mr(scratch, value); // Preserve the value which is returned. | 555 __ mr(scratch, value); // Preserve the value which is returned. |
| 560 __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved, | 556 __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved, |
| 561 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 557 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| 562 __ Ret(); | 558 __ Ret(); |
| 563 | 559 |
| 564 __ bind(fast_double); | 560 __ bind(fast_double); |
| 565 if (check_map == kCheckMap) { | 561 if (check_map == kCheckMap) { |
| 566 // Check for fast double array case. If this fails, call through to the | 562 // Check for fast double array case. If this fails, call through to the |
| 567 // runtime. | 563 // runtime. |
| 568 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); | 564 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); |
| 569 __ bne(slow); | 565 __ bne(slow); |
| 570 } | 566 } |
| 571 | 567 |
| 572 // HOLECHECK: guards "A[i] double hole?" | 568 // HOLECHECK: guards "A[i] double hole?" |
| 573 // We have to see if the double version of the hole is present. If so | 569 // We have to see if the double version of the hole is present. If so |
| 574 // go to the runtime. | 570 // go to the runtime. |
| 575 __ addi(address, elements, | 571 __ addi(address, elements, |
| 576 Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset - | 572 Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset - |
| 577 kHeapObjectTag))); | 573 kHeapObjectTag))); |
| 578 __ SmiToDoubleArrayOffset(scratch, key); | 574 __ SmiToDoubleArrayOffset(scratch, key); |
| 579 __ lwzx(scratch, MemOperand(address, scratch)); | 575 __ lwzx(scratch, MemOperand(address, scratch)); |
| 580 __ Cmpi(scratch, Operand(kHoleNanUpper32), scratch2); | 576 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); |
| 581 __ bne(&fast_double_without_map_check); | 577 __ bne(&fast_double_without_map_check); |
| 582 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); | 578 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); |
| 583 | 579 |
| 584 __ bind(&fast_double_without_map_check); | 580 __ bind(&fast_double_without_map_check); |
| 585 __ StoreNumberToDoubleElements(value, key, elements, scratch, d0, | 581 __ StoreNumberToDoubleElements(value, key, elements, scratch, d0, |
| 586 &transition_double_elements); | 582 &transition_double_elements); |
| 587 if (increment_length == kIncrementLength) { | 583 if (increment_length == kIncrementLength) { |
| 588 // Add 1 to receiver->length. | 584 // Add 1 to receiver->length. |
| 589 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), scratch2); | 585 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); |
| 590 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), | 586 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); |
| 591 scratch2); | |
| 592 } | 587 } |
| 593 __ Ret(); | 588 __ Ret(); |
| 594 | 589 |
| 595 __ bind(&transition_smi_elements); | 590 __ bind(&transition_smi_elements); |
| 596 // Transition the array appropriately depending on the value type. | 591 // Transition the array appropriately depending on the value type. |
| 597 __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset)); | 592 __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 598 __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex); | 593 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); |
| 599 __ bne(&non_double_value); | 594 __ bne(&non_double_value); |
| 600 | 595 |
| 601 // Value is a double. Transition FAST_SMI_ELEMENTS -> | 596 // Value is a double. Transition FAST_SMI_ELEMENTS -> |
| 602 // FAST_DOUBLE_ELEMENTS and complete the store. | 597 // FAST_DOUBLE_ELEMENTS and complete the store. |
| 603 __ LoadTransitionedArrayMapConditional( | 598 __ LoadTransitionedArrayMapConditional( |
| 604 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow); | 599 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow); |
| 605 AllocationSiteMode mode = | 600 AllocationSiteMode mode = |
| 606 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); | 601 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); |
| 607 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, | 602 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, |
| 608 receiver_map, mode, slow); | 603 receiver_map, mode, slow); |
| (...skipping 302 matching lines...) | |
| 911 patcher.EmitCondition(ne); | 906 patcher.EmitCondition(ne); |
| 912 } else { | 907 } else { |
| 913 DCHECK(Assembler::GetCondition(branch_instr) == ne); | 908 DCHECK(Assembler::GetCondition(branch_instr) == ne); |
| 914 patcher.EmitCondition(eq); | 909 patcher.EmitCondition(eq); |
| 915 } | 910 } |
| 916 } | 911 } |
| 917 } // namespace internal | 912 } // namespace internal |
| 918 } // namespace v8 | 913 } // namespace v8 |
| 919 | 914 |
| 920 #endif // V8_TARGET_ARCH_PPC | 915 #endif // V8_TARGET_ARCH_PPC |