OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_MIPS64 | 5 #if V8_TARGET_ARCH_MIPS64 |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/ic/ic.h" | 8 #include "src/ic/ic.h" |
9 #include "src/ic/ic-compiler.h" | 9 #include "src/ic/ic-compiler.h" |
10 #include "src/ic/stub-cache.h" | 10 #include "src/ic/stub-cache.h" |
(...skipping 456 matching lines...)
467 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, | 467 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, |
468 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, | 468 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, |
469 Register value, Register key, Register receiver, Register receiver_map, | 469 Register value, Register key, Register receiver, Register receiver_map, |
470 Register elements_map, Register elements) { | 470 Register elements_map, Register elements) { |
471 Label transition_smi_elements; | 471 Label transition_smi_elements; |
472 Label finish_object_store, non_double_value, transition_double_elements; | 472 Label finish_object_store, non_double_value, transition_double_elements; |
473 Label fast_double_without_map_check; | 473 Label fast_double_without_map_check; |
474 | 474 |
475 // Fast case: Do the store, could be either Object or double. | 475 // Fast case: Do the store, could be either Object or double. |
476 __ bind(fast_object); | 476 __ bind(fast_object); |
477 Register scratch_value = a4; | 477 Register scratch = a4; |
| 478 Register scratch2 = t0; |
478 Register address = a5; | 479 Register address = a5; |
| 480 DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, |
| 481 scratch, scratch2, address)); |
| 482 |
479 if (check_map == kCheckMap) { | 483 if (check_map == kCheckMap) { |
480 __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 484 __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
481 __ Branch(fast_double, ne, elements_map, | 485 __ Branch(fast_double, ne, elements_map, |
482 Operand(masm->isolate()->factory()->fixed_array_map())); | 486 Operand(masm->isolate()->factory()->fixed_array_map())); |
483 } | 487 } |
484 | 488 |
485 // HOLECHECK: guards "A[i] = V" | 489 // HOLECHECK: guards "A[i] = V" |
486 // We have to go to the runtime if the current value is the hole because | 490 // We have to go to the runtime if the current value is the hole because |
487 // there may be a callback on the element. | 491 // there may be a callback on the element. |
488 Label holecheck_passed1; | 492 Label holecheck_passed1; |
489 __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag); | 493 __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag); |
490 __ SmiScale(at, key, kPointerSizeLog2); | 494 __ SmiScale(at, key, kPointerSizeLog2); |
491 __ daddu(address, address, at); | 495 __ daddu(address, address, at); |
492 __ ld(scratch_value, MemOperand(address)); | 496 __ ld(scratch, MemOperand(address)); |
493 | 497 |
494 __ Branch(&holecheck_passed1, ne, scratch_value, | 498 __ Branch(&holecheck_passed1, ne, scratch, |
495 Operand(masm->isolate()->factory()->the_hole_value())); | 499 Operand(masm->isolate()->factory()->the_hole_value())); |
496 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, | 500 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); |
497 slow); | |
498 | 501 |
499 __ bind(&holecheck_passed1); | 502 __ bind(&holecheck_passed1); |
500 | 503 |
501 // Smi stores don't require further checks. | 504 // Smi stores don't require further checks. |
502 Label non_smi_value; | 505 Label non_smi_value; |
503 __ JumpIfNotSmi(value, &non_smi_value); | 506 __ JumpIfNotSmi(value, &non_smi_value); |
504 | 507 |
505 if (increment_length == kIncrementLength) { | 508 if (increment_length == kIncrementLength) { |
506 // Add 1 to receiver->length. | 509 // Add 1 to receiver->length. |
507 __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); | 510 __ Daddu(scratch, key, Operand(Smi::FromInt(1))); |
508 __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 511 __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
509 } | 512 } |
510 // It's irrelevant whether array is smi-only or not when writing a smi. | 513 // It's irrelevant whether array is smi-only or not when writing a smi. |
511 __ Daddu(address, elements, | 514 __ Daddu(address, elements, |
512 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 515 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
513 __ SmiScale(scratch_value, key, kPointerSizeLog2); | 516 __ SmiScale(scratch, key, kPointerSizeLog2); |
514 __ Daddu(address, address, scratch_value); | 517 __ Daddu(address, address, scratch); |
515 __ sd(value, MemOperand(address)); | 518 __ sd(value, MemOperand(address)); |
516 __ Ret(); | 519 __ Ret(); |
517 | 520 |
518 __ bind(&non_smi_value); | 521 __ bind(&non_smi_value); |
519 // Escape to elements kind transition case. | 522 // Escape to elements kind transition case. |
520 __ CheckFastObjectElements(receiver_map, scratch_value, | 523 __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); |
521 &transition_smi_elements); | |
522 | 524 |
523 // Fast elements array, store the value to the elements backing store. | 525 // Fast elements array, store the value to the elements backing store. |
524 __ bind(&finish_object_store); | 526 __ bind(&finish_object_store); |
525 if (increment_length == kIncrementLength) { | 527 if (increment_length == kIncrementLength) { |
526 // Add 1 to receiver->length. | 528 // Add 1 to receiver->length. |
527 __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); | 529 __ Daddu(scratch, key, Operand(Smi::FromInt(1))); |
528 __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 530 __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
529 } | 531 } |
530 __ Daddu(address, elements, | 532 __ Daddu(address, elements, |
531 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 533 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
532 __ SmiScale(scratch_value, key, kPointerSizeLog2); | 534 __ SmiScale(scratch, key, kPointerSizeLog2); |
533 __ Daddu(address, address, scratch_value); | 535 __ Daddu(address, address, scratch); |
534 __ sd(value, MemOperand(address)); | 536 __ sd(value, MemOperand(address)); |
535 // Update write barrier for the elements array address. | 537 // Update write barrier for the elements array address. |
536 __ mov(scratch_value, value); // Preserve the value which is returned. | 538 __ mov(scratch, value); // Preserve the value which is returned. |
537 __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved, | 539 __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved, |
538 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 540 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
539 __ Ret(); | 541 __ Ret(); |
540 | 542 |
541 __ bind(fast_double); | 543 __ bind(fast_double); |
542 if (check_map == kCheckMap) { | 544 if (check_map == kCheckMap) { |
543 // Check for fast double array case. If this fails, call through to the | 545 // Check for fast double array case. If this fails, call through to the |
544 // runtime. | 546 // runtime. |
545 __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); | 547 __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); |
546 __ Branch(slow, ne, elements_map, Operand(at)); | 548 __ Branch(slow, ne, elements_map, Operand(at)); |
547 } | 549 } |
548 | 550 |
549 // HOLECHECK: guards "A[i] double hole?" | 551 // HOLECHECK: guards "A[i] double hole?" |
550 // We have to see if the double version of the hole is present. If so | 552 // We have to see if the double version of the hole is present. If so |
551 // go to the runtime. | 553 // go to the runtime. |
552 __ Daddu(address, elements, | 554 __ Daddu(address, elements, |
553 Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset - | 555 Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset - |
554 kHeapObjectTag)); | 556 kHeapObjectTag)); |
555 __ SmiScale(at, key, kPointerSizeLog2); | 557 __ SmiScale(at, key, kPointerSizeLog2); |
556 __ daddu(address, address, at); | 558 __ daddu(address, address, at); |
557 __ lw(scratch_value, MemOperand(address)); | 559 __ lw(scratch, MemOperand(address)); |
558 __ Branch(&fast_double_without_map_check, ne, scratch_value, | 560 __ Branch(&fast_double_without_map_check, ne, scratch, |
559 Operand(static_cast<int32_t>(kHoleNanUpper32))); | 561 Operand(static_cast<int32_t>(kHoleNanUpper32))); |
560 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, | 562 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); |
561 slow); | |
562 | 563 |
563 __ bind(&fast_double_without_map_check); | 564 __ bind(&fast_double_without_map_check); |
564 __ StoreNumberToDoubleElements(value, key, | 565 __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2, |
565 elements, // Overwritten. | 566 &transition_double_elements); |
566 a3, // Scratch regs... | |
567 a4, &transition_double_elements); | |
568 if (increment_length == kIncrementLength) { | 567 if (increment_length == kIncrementLength) { |
569 // Add 1 to receiver->length. | 568 // Add 1 to receiver->length. |
570 __ Daddu(scratch_value, key, Operand(Smi::FromInt(1))); | 569 __ Daddu(scratch, key, Operand(Smi::FromInt(1))); |
571 __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 570 __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
572 } | 571 } |
573 __ Ret(); | 572 __ Ret(); |
574 | 573 |
575 __ bind(&transition_smi_elements); | 574 __ bind(&transition_smi_elements); |
576 // Transition the array appropriately depending on the value type. | 575 // Transition the array appropriately depending on the value type. |
577 __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset)); | 576 __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); |
578 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 577 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
579 __ Branch(&non_double_value, ne, a4, Operand(at)); | 578 __ Branch(&non_double_value, ne, scratch, Operand(at)); |
580 | 579 |
581 // Value is a double. Transition FAST_SMI_ELEMENTS -> | 580 // Value is a double. Transition FAST_SMI_ELEMENTS -> |
582 // FAST_DOUBLE_ELEMENTS and complete the store. | 581 // FAST_DOUBLE_ELEMENTS and complete the store. |
583 __ LoadTransitionedArrayMapConditional( | 582 __ LoadTransitionedArrayMapConditional( |
584 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, a4, slow); | 583 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow); |
585 AllocationSiteMode mode = | 584 AllocationSiteMode mode = |
586 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); | 585 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); |
587 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, | 586 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, |
588 receiver_map, mode, slow); | 587 receiver_map, mode, slow); |
589 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 588 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
590 __ jmp(&fast_double_without_map_check); | 589 __ jmp(&fast_double_without_map_check); |
591 | 590 |
592 __ bind(&non_double_value); | 591 __ bind(&non_double_value); |
593 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS | 592 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS |
594 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, | 593 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, |
595 receiver_map, a4, slow); | 594 receiver_map, scratch, slow); |
596 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); | 595 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); |
597 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | 596 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
598 masm, receiver, key, value, receiver_map, mode, slow); | 597 masm, receiver, key, value, receiver_map, mode, slow); |
599 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 598 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
600 __ jmp(&finish_object_store); | 599 __ jmp(&finish_object_store); |
601 | 600 |
602 __ bind(&transition_double_elements); | 601 __ bind(&transition_double_elements); |
603 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a | 602 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a |
604 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and | 603 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and |
605 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS | 604 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS |
606 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, | 605 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, |
607 receiver_map, a4, slow); | 606 receiver_map, scratch, slow); |
608 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); | 607 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); |
609 ElementsTransitionGenerator::GenerateDoubleToObject( | 608 ElementsTransitionGenerator::GenerateDoubleToObject( |
610 masm, receiver, key, value, receiver_map, mode, slow); | 609 masm, receiver, key, value, receiver_map, mode, slow); |
611 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 610 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
612 __ jmp(&finish_object_store); | 611 __ jmp(&finish_object_store); |
613 } | 612 } |
614 | 613 |
615 | 614 |
616 void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, | 615 void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, |
617 LanguageMode language_mode) { | 616 LanguageMode language_mode) { |
(...skipping 276 matching lines...)
894 patcher.ChangeBranchCondition(ne); | 893 patcher.ChangeBranchCondition(ne); |
895 } else { | 894 } else { |
896 DCHECK(Assembler::IsBne(branch_instr)); | 895 DCHECK(Assembler::IsBne(branch_instr)); |
897 patcher.ChangeBranchCondition(eq); | 896 patcher.ChangeBranchCondition(eq); |
898 } | 897 } |
899 } | 898 } |
900 } // namespace internal | 899 } // namespace internal |
901 } // namespace v8 | 900 } // namespace v8 |
902 | 901 |
903 #endif // V8_TARGET_ARCH_MIPS64 | 902 #endif // V8_TARGET_ARCH_MIPS64 |