OLD | NEW |
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 72 matching lines...) |
83 Register addr, | 83 Register addr, |
84 Register scratch) { | 84 Register scratch) { |
85 if (FLAG_debug_code) { | 85 if (FLAG_debug_code) { |
86 // Check that the object is not in new space. | 86 // Check that the object is not in new space. |
87 Label not_in_new_space; | 87 Label not_in_new_space; |
88 InNewSpace(object, scratch, not_equal, &not_in_new_space); | 88 InNewSpace(object, scratch, not_equal, &not_in_new_space); |
89 Abort("new-space object passed to RecordWriteHelper"); | 89 Abort("new-space object passed to RecordWriteHelper"); |
90 bind(&not_in_new_space); | 90 bind(&not_in_new_space); |
91 } | 91 } |
92 | 92 |
| 93 Label fast; |
| 94 |
93 // Compute the page start address from the heap object pointer, and reuse | 95 // Compute the page start address from the heap object pointer, and reuse |
94 // the 'object' register for it. | 96 // the 'object' register for it. |
95 and_(object, Immediate(~Page::kPageAlignmentMask)); | 97 ASSERT(is_int32(~Page::kPageAlignmentMask)); |
| 98 and_(object, |
| 99 Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask))); |
| 100 Register page_start = object; |
96 | 101 |
97 // Compute number of region covering addr. See Page::GetRegionNumberForAddress | 102 // Compute the bit addr in the remembered set/index of the pointer in the |
98 // method for more details. | 103 // page. Reuse 'addr' as pointer_offset. |
99 and_(addr, Immediate(Page::kPageAlignmentMask)); | 104 subq(addr, page_start); |
100 shrl(addr, Immediate(Page::kRegionSizeLog2)); | 105 shr(addr, Immediate(kPointerSizeLog2)); |
| 106 Register pointer_offset = addr; |
101 | 107 |
102 // Set dirty mark for region. | 108 // If the bit offset lies beyond the normal remembered set range, it is in |
103 bts(Operand(object, Page::kDirtyFlagOffset), addr); | 109 // the extra remembered set area of a large object. |
| 110 cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize)); |
| 111 j(below, &fast); |
| 112 |
| 113 // We have a large object containing pointers. It must be a FixedArray. |
| 114 |
| 115 // Adjust 'page_start' so that addressing using 'pointer_offset' hits the |
| 116 // extra remembered set after the large object. |
| 117 |
| 118 // Load the array length into 'scratch'. |
| 119 movl(scratch, |
| 120 Operand(page_start, |
| 121 Page::kObjectStartOffset + FixedArray::kLengthOffset)); |
| 122 Register array_length = scratch; |
| 123 |
| 124 // Extra remembered set starts right after the large object (a FixedArray), at |
| 125 // page_start + kObjectStartOffset + objectSize |
| 126 // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length. |
| 127 // Add the delta between the end of the normal RSet and the start of the |
| 128 // extra RSet to 'page_start', so that addressing the bit using |
| 129 // 'pointer_offset' hits the extra RSet words. |
| 130 lea(page_start, |
| 131 Operand(page_start, array_length, times_pointer_size, |
| 132 Page::kObjectStartOffset + FixedArray::kHeaderSize |
| 133 - Page::kRSetEndOffset)); |
| 134 |
| 135 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction |
| 136 // to limit code size. We should probably evaluate this decision by |
| 137 // measuring the performance of an equivalent implementation using |
| 138 // "simpler" instructions |
| 139 bind(&fast); |
| 140 bts(Operand(page_start, Page::kRSetOffset), pointer_offset); |
104 } | 141 } |
105 | 142 |
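
Annotation: the fast path in the new RecordWriteHelper reduces to two masks and a shift. A minimal stand-alone sketch of that arithmetic, using illustrative constants (the real kPageSizeLog2 and friends live in spaces.h and are assumed here, not taken from this hunk):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageSizeLog2 = 13;  // assumption: 8K pages
    const uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeLog2) - 1;
    const int kPointerSizeLog2 = 3;      // x64: 8-byte pointers

    // Mirrors and_(object, ~mask) + subq(addr, page_start) + shr: a slot's
    // remembered-set bit index is its pointer-sized offset within the page.
    uintptr_t RSetBitIndex(uintptr_t slot_addr) {
      uintptr_t page_start = slot_addr & ~kPageAlignmentMask;
      return (slot_addr - page_start) >> kPointerSizeLog2;
    }

    int main() {
      uintptr_t page = 0x10000000;           // hypothetical page-aligned address
      assert(RSetBitIndex(page + 40) == 5);  // 40 bytes into the page = bit 5
      return 0;
    }

The large-object branch then rebases page_start so the same bts addressing lands in the extra remembered set stored after the FixedArray.
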
106 | 143 |
107 // For page containing |object| mark region covering [object+offset] dirty. | 144 // Set the remembered set bit for [object+offset]. |
108 // object is the object being stored into, value is the object being stored. | 145 // object is the object being stored into, value is the object being stored. |
109 // If offset is zero, then the smi_index register contains the array index into | 146 // If offset is zero, then the smi_index register contains the array index into |
110 // the elements array represented as a smi. Otherwise it can be used as a | 147 // the elements array represented as a smi. Otherwise it can be used as a |
111 // scratch register. | 148 // scratch register. |
112 // All registers are clobbered by the operation. | 149 // All registers are clobbered by the operation. |
113 void MacroAssembler::RecordWrite(Register object, | 150 void MacroAssembler::RecordWrite(Register object, |
114 int offset, | 151 int offset, |
115 Register value, | 152 Register value, |
116 Register smi_index) { | 153 Register smi_index) { |
117 // The compiled code assumes that record write doesn't change the | 154 // The compiled code assumes that record write doesn't change the |
118 // context register, so we check that none of the clobbered | 155 // context register, so we check that none of the clobbered |
119 // registers are rsi. | 156 // registers are rsi. |
120 ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi)); | 157 ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi)); |
121 | 158 |
122 // First, check if a write barrier is even needed. The tests below | 159 // First, check if a remembered set write is even needed. The tests below |
123 // catch stores of Smis and stores into young gen. | 160 // catch stores of Smis and stores into young gen (which does not have space |
| 161 // for the remembered set bits). |
124 Label done; | 162 Label done; |
125 JumpIfSmi(value, &done); | 163 JumpIfSmi(value, &done); |
126 | 164 |
127 RecordWriteNonSmi(object, offset, value, smi_index); | 165 RecordWriteNonSmi(object, offset, value, smi_index); |
128 bind(&done); | 166 bind(&done); |
129 | 167 |
130 // Clobber all input registers when running with the debug-code flag | 168 // Clobber all input registers when running with the debug-code flag |
131 // turned on to provoke errors. This clobbering repeats the | 169 // turned on to provoke errors. This clobbering repeats the |
132 // clobbering done inside RecordWriteNonSmi but it's necessary to | 170 // clobbering done inside RecordWriteNonSmi but it's necessary to |
133 // avoid having the fast case for smis leave the registers | 171 // avoid having the fast case for smis leave the registers |
(...skipping 12 matching lines...) |
146 Register smi_index) { | 184 Register smi_index) { |
147 Label done; | 185 Label done; |
148 | 186 |
149 if (FLAG_debug_code) { | 187 if (FLAG_debug_code) { |
150 Label okay; | 188 Label okay; |
151 JumpIfNotSmi(object, &okay); | 189 JumpIfNotSmi(object, &okay); |
152 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); | 190 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); |
153 bind(&okay); | 191 bind(&okay); |
154 } | 192 } |
155 | 193 |
156 // Test that the object address is not in the new space. We cannot | 194 // Test that the object address is not in the new space. We cannot |
157 // update page dirty marks for new space pages. | 195 // set remembered set bits in the new space. |
158 InNewSpace(object, scratch, equal, &done); | 196 InNewSpace(object, scratch, equal, &done); |
159 | 197 |
160 // The offset is relative to a tagged or untagged HeapObject pointer, | 198 // The offset is relative to a tagged or untagged HeapObject pointer, |
161 // so either offset or offset + kHeapObjectTag must be a | 199 // so either offset or offset + kHeapObjectTag must be a |
162 // multiple of kPointerSize. | 200 // multiple of kPointerSize. |
163 ASSERT(IsAligned(offset, kPointerSize) || | 201 ASSERT(IsAligned(offset, kPointerSize) || |
164 IsAligned(offset + kHeapObjectTag, kPointerSize)); | 202 IsAligned(offset + kHeapObjectTag, kPointerSize)); |
165 | 203 |
166 Register dst = smi_index; | 204 // We use optimized write barrier code if the word being written to is not in |
167 if (offset != 0) { | 205 // a large object page, or is in the first "page" of a large object page. |
168 lea(dst, Operand(object, offset)); | 206 // We make sure that an offset is inside the right limits whether it is |
| 207 // tagged or untagged. |
| 208 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) { |
| 209 // Compute the bit offset in the remembered set, leave it in 'scratch'. |
| 210 lea(scratch, Operand(object, offset)); |
| 211 ASSERT(is_int32(Page::kPageAlignmentMask)); |
| 212 and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask))); |
| 213 shr(scratch, Immediate(kPointerSizeLog2)); |
| 214 |
| 215 // Compute the page address from the heap object pointer, leave it in |
| 216 // 'object' (immediate value is sign extended). |
| 217 and_(object, Immediate(~Page::kPageAlignmentMask)); |
| 218 |
| 219 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction |
| 220 // to limit code size. We should probably evaluate this decision by |
| 221 // measuring the performance of an equivalent implementation using |
| 222 // "simpler" instructions |
| 223 bts(Operand(object, Page::kRSetOffset), scratch); |
169 } else { | 224 } else { |
170 // array access: calculate the destination address in the same manner as | 225 Register dst = smi_index; |
171 // KeyedStoreIC::GenerateGeneric. | 226 if (offset != 0) { |
172 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); | 227 lea(dst, Operand(object, offset)); |
173 lea(dst, FieldOperand(object, | 228 } else { |
174 index.reg, | 229 // array access: calculate the destination address in the same manner as |
175 index.scale, | 230 // KeyedStoreIC::GenerateGeneric. |
176 FixedArray::kHeaderSize)); | 231 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); |
| 232 lea(dst, FieldOperand(object, |
| 233 index.reg, |
| 234 index.scale, |
| 235 FixedArray::kHeaderSize)); |
| 236 } |
| 237 // If we are already generating a shared stub, not inlining the |
| 238 // record write code isn't going to save us any memory. |
| 239 if (generating_stub()) { |
| 240 RecordWriteHelper(object, dst, scratch); |
| 241 } else { |
| 242 RecordWriteStub stub(object, dst, scratch); |
| 243 CallStub(&stub); |
| 244 } |
177 } | 245 } |
178 RecordWriteHelper(object, dst, scratch); | |
179 | 246 |
180 bind(&done); | 247 bind(&done); |
181 | 248 |
182 // Clobber all input registers when running with the debug-code flag | 249 // Clobber all input registers when running with the debug-code flag |
183 // turned on to provoke errors. | 250 // turned on to provoke errors. |
184 if (FLAG_debug_code) { | 251 if (FLAG_debug_code) { |
185 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 252 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
186 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 253 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
187 movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 254 movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
188 } | 255 } |
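
Annotation: the (offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag) guard above is what keeps the inlined bts safe; a statically known offset in that range lands in a regular page's remembered set whether the base pointer is tagged or untagged. A hedged restatement in plain C++ (the kMaxHeapObjectSize value below is illustrative, not taken from this patch):

    const int kHeapObjectTag = 1;             // V8 heap pointers carry a 1 tag
    const int kMaxHeapObjectSize = 8 * 1024;  // assumption: page-sized cap

    // True when a compile-time offset is guaranteed to stay inside the
    // normal remembered-set range for both tagged and untagged bases.
    bool CanInlineRecordWrite(int offset) {
      return offset > 0 && offset < kMaxHeapObjectSize - kHeapObjectTag;
    }
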
(...skipping 381 matching lines...) |
570 movq(dst, src); | 637 movq(dst, src); |
571 } | 638 } |
572 if (power < kSmiShift) { | 639 if (power < kSmiShift) { |
573 sar(dst, Immediate(kSmiShift - power)); | 640 sar(dst, Immediate(kSmiShift - power)); |
574 } else if (power > kSmiShift) { | 641 } else if (power > kSmiShift) { |
575 shl(dst, Immediate(power - kSmiShift)); | 642 shl(dst, Immediate(power - kSmiShift)); |
576 } | 643 } |
577 } | 644 } |
578 | 645 |
579 | 646 |
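
Annotation: the sar/shl choice ending here (the helper's name sits above this hunk) works because an x64 smi keeps its 32-bit payload in the high half of the word, so untag-and-multiply-by-2^power folds into one net shift of power - kSmiShift bits. A sketch under that layout assumption:

    #include <cassert>
    #include <cstdint>

    const int kSmiShift = 32;  // x64 smi layout: payload in the high 32 bits

    // One net shift, matching the sar/shl pair above; a negative net
    // shift amount means an arithmetic right shift.
    int64_t SmiTimesPowerOfTwo(int64_t smi, int power) {
      int net = power - kSmiShift;
      return net < 0 ? (smi >> -net) : (smi << net);
    }

    int main() {
      int64_t five = int64_t{5} << kSmiShift;
      assert(SmiTimesPowerOfTwo(five, 2) == 20);  // untagged 5 * 4
      return 0;
    }
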
580 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, | |
581 Register src, | |
582 int power) { | |
583 ASSERT((0 <= power) && (power < 32)); | |
584 if (dst.is(src)) { | |
585 shr(dst, Immediate(power + kSmiShift)); | |
586 } else { | |
587 UNIMPLEMENTED(); // Not used. | |
588 } | |
589 } | |
590 | |
591 | |
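
Annotation: the helper deleted on the left, PositiveSmiDivPowerOfTwoToInteger32, folded untag-and-divide into a single logical right shift of power + kSmiShift bits. A sketch of that effect (x64 smi layout assumed, positive values only, as the original name implies):

    #include <cassert>
    #include <cstdint>

    const int kSmiShift = 32;  // x64 smi layout: payload in the high 32 bits

    int64_t SmiTag(int32_t value) {          // tag a small integer as a smi
      return static_cast<int64_t>(value) << kSmiShift;
    }

    // Untag and divide by 2^power in one logical shift, like the deleted shr.
    int32_t PositiveSmiDivPowerOfTwo(int64_t smi, int power) {
      return static_cast<int32_t>(
          static_cast<uint64_t>(smi) >> (power + kSmiShift));
    }

    int main() {
      assert(PositiveSmiDivPowerOfTwo(SmiTag(40), 3) == 5);  // 40 / 8 == 5
      return 0;
    }
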
592 Condition MacroAssembler::CheckSmi(Register src) { | 647 Condition MacroAssembler::CheckSmi(Register src) { |
593 ASSERT_EQ(0, kSmiTag); | 648 ASSERT_EQ(0, kSmiTag); |
594 testb(src, Immediate(kSmiTagMask)); | 649 testb(src, Immediate(kSmiTagMask)); |
595 return zero; | 650 return zero; |
596 } | 651 } |
597 | 652 |
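
Annotation: CheckSmi leans on kSmiTag being 0, so testing just the low byte against kSmiTagMask sets ZF exactly for smis. The same predicate in plain C++ (tag constants as commonly defined in V8; treated as assumptions here):

    #include <cassert>
    #include <cstdint>

    const uint64_t kSmiTag = 0;      // smis carry a zero tag bit
    const uint64_t kSmiTagMask = 1;  // the tag is the lowest bit

    // Equivalent of testb(src, Immediate(kSmiTagMask)) + the `zero` condition.
    bool IsSmi(uint64_t word) { return (word & kSmiTagMask) == kSmiTag; }

    int main() {
      assert(IsSmi(uint64_t{5} << 32));  // x64 smi: payload high, tag bit clear
      assert(!IsSmi(0x1));               // heap pointers have the tag bit set
      return 0;
    }
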
598 | 653 |
599 Condition MacroAssembler::CheckPositiveSmi(Register src) { | 654 Condition MacroAssembler::CheckPositiveSmi(Register src) { |
600 ASSERT_EQ(0, kSmiTag); | 655 ASSERT_EQ(0, kSmiTag); |
601 movq(kScratchRegister, src); | 656 movq(kScratchRegister, src); |
(...skipping 138 matching lines...) |
740 } else { | 795 } else { |
741 movq(dst, src1); | 796 movq(dst, src1); |
742 subq(dst, src2); | 797 subq(dst, src2); |
743 j(overflow, on_not_smi_result); | 798 j(overflow, on_not_smi_result); |
744 } | 799 } |
745 } | 800 } |
746 | 801 |
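
Annotation: the SmiSub variants here subtract the raw 64-bit words directly; because both operands share the same tag layout, the difference is itself a correctly tagged smi and only signed overflow needs the jump. A sketch under the x64 smi layout:

    #include <cassert>
    #include <cstdint>

    const int kSmiShift = 32;

    // Raw subtraction of two x64 smis is itself a valid smi: the zero
    // low halves cancel, matching the bare subq above.
    int64_t SmiSubValue(int64_t a, int64_t b) { return a - b; }

    int main() {
      int64_t seven = int64_t{7} << kSmiShift;
      int64_t three = int64_t{3} << kSmiShift;
      assert(SmiSubValue(seven, three) == int64_t{4} << kSmiShift);
      return 0;
    }
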
747 | 802 |
748 void MacroAssembler::SmiSub(Register dst, | 803 void MacroAssembler::SmiSub(Register dst, |
749 Register src1, | 804 Register src1, |
750 const Operand& src2, | 805 Operand const& src2, |
751 Label* on_not_smi_result) { | 806 Label* on_not_smi_result) { |
752 if (on_not_smi_result == NULL) { | 807 if (on_not_smi_result == NULL) { |
753 // No overflow checking. Use only when it's known that | 808 // No overflow checking. Use only when it's known that |
754 // overflowing is impossible (e.g., subtracting two positive smis). | 809 // overflowing is impossible (e.g., subtracting two positive smis). |
755 if (dst.is(src1)) { | 810 if (dst.is(src1)) { |
756 subq(dst, src2); | 811 subq(dst, src2); |
757 } else { | 812 } else { |
758 movq(dst, src1); | 813 movq(dst, src1); |
759 subq(dst, src2); | 814 subq(dst, src2); |
760 } | 815 } |
(...skipping 97 matching lines...) |
858 | 913 |
859 Move(kScratchRegister, constant); | 914 Move(kScratchRegister, constant); |
860 addq(dst, kScratchRegister); | 915 addq(dst, kScratchRegister); |
861 } else { | 916 } else { |
862 Move(dst, constant); | 917 Move(dst, constant); |
863 addq(dst, src); | 918 addq(dst, src); |
864 } | 919 } |
865 } | 920 } |
866 | 921 |
867 | 922 |
868 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { | |
869 ASSERT(!dst.is(kScratchRegister)); | |
870 if (constant->value() != 0) { | |
871 Move(kScratchRegister, constant); | |
872 addq(dst, kScratchRegister); | |
873 } | |
874 } | |
875 | |
876 | |
877 void MacroAssembler::SmiAddConstant(Register dst, | 923 void MacroAssembler::SmiAddConstant(Register dst, |
878 Register src, | 924 Register src, |
879 Smi* constant, | 925 Smi* constant, |
880 Label* on_not_smi_result) { | 926 Label* on_not_smi_result) { |
881 if (constant->value() == 0) { | 927 if (constant->value() == 0) { |
882 if (!dst.is(src)) { | 928 if (!dst.is(src)) { |
883 movq(dst, src); | 929 movq(dst, src); |
884 } | 930 } |
885 } else if (dst.is(src)) { | 931 } else if (dst.is(src)) { |
886 ASSERT(!dst.is(kScratchRegister)); | 932 ASSERT(!dst.is(kScratchRegister)); |
(...skipping 1659 matching lines...) |
2546 scratch2, | 2592 scratch2, |
2547 scratch3, | 2593 scratch3, |
2548 gc_required, | 2594 gc_required, |
2549 TAG_OBJECT); | 2595 TAG_OBJECT); |
2550 | 2596 |
2551 // Set the map, length and hash field. | 2597 // Set the map, length and hash field. |
2552 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex); | 2598 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex); |
2553 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); | 2599 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); |
2554 Integer32ToSmi(scratch1, length); | 2600 Integer32ToSmi(scratch1, length); |
2555 movq(FieldOperand(result, String::kLengthOffset), scratch1); | 2601 movq(FieldOperand(result, String::kLengthOffset), scratch1); |
2556 movq(FieldOperand(result, String::kHashFieldOffset), | 2602 movl(FieldOperand(result, String::kHashFieldOffset), |
2557 Immediate(String::kEmptyHashField)); | 2603 Immediate(String::kEmptyHashField)); |
2558 } | 2604 } |
2559 | 2605 |
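
Annotation: the movq -> movl narrowing above looks like the substantive fix in this hunk: the hash field is a 32-bit slot, and an 8-byte store there would also clobber whatever follows it. A stand-alone illustration (the struct below is illustrative only, not String's real layout):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Header { uint32_t hash_field; uint32_t neighbor; };  // hypothetical

    int main() {
      Header h = {0, 0xdeadbeef};
      uint32_t empty_hash = 0x3;  // stand-in for String::kEmptyHashField
      // movl-style 4-byte store: the neighboring field survives.
      std::memcpy(&h.hash_field, &empty_hash, sizeof(uint32_t));
      assert(h.neighbor == 0xdeadbeef);
      // A movq-style 8-byte store at &h.hash_field would overwrite
      // 'neighbor' as well -- hence the switch to movl in this patch.
      return 0;
    }

The same narrowing is applied in AllocateAsciiString below.
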
2560 | 2606 |
2561 void MacroAssembler::AllocateAsciiString(Register result, | 2607 void MacroAssembler::AllocateAsciiString(Register result, |
2562 Register length, | 2608 Register length, |
2563 Register scratch1, | 2609 Register scratch1, |
2564 Register scratch2, | 2610 Register scratch2, |
2565 Register scratch3, | 2611 Register scratch3, |
2566 Label* gc_required) { | 2612 Label* gc_required) { |
(...skipping 17 matching lines...) |
2584 scratch2, | 2630 scratch2, |
2585 scratch3, | 2631 scratch3, |
2586 gc_required, | 2632 gc_required, |
2587 TAG_OBJECT); | 2633 TAG_OBJECT); |
2588 | 2634 |
2589 // Set the map, length and hash field. | 2635 // Set the map, length and hash field. |
2590 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex); | 2636 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex); |
2591 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); | 2637 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); |
2592 Integer32ToSmi(scratch1, length); | 2638 Integer32ToSmi(scratch1, length); |
2593 movq(FieldOperand(result, String::kLengthOffset), scratch1); | 2639 movq(FieldOperand(result, String::kLengthOffset), scratch1); |
2594 movq(FieldOperand(result, String::kHashFieldOffset), | 2640 movl(FieldOperand(result, String::kHashFieldOffset), |
2595 Immediate(String::kEmptyHashField)); | 2641 Immediate(String::kEmptyHashField)); |
2596 } | 2642 } |
2597 | 2643 |
2598 | 2644 |
2599 void MacroAssembler::AllocateConsString(Register result, | 2645 void MacroAssembler::AllocateConsString(Register result, |
2600 Register scratch1, | 2646 Register scratch1, |
2601 Register scratch2, | 2647 Register scratch2, |
2602 Label* gc_required) { | 2648 Label* gc_required) { |
2603 // Allocate heap number in new space. | 2649 // Allocate heap number in new space. |
2604 AllocateInNewSpace(ConsString::kSize, | 2650 AllocateInNewSpace(ConsString::kSize, |
(...skipping 110 matching lines...) |
2715 CPU::FlushICache(address_, size_); | 2761 CPU::FlushICache(address_, size_); |
2716 | 2762 |
2717 // Check that the code was patched as expected. | 2763 // Check that the code was patched as expected. |
2718 ASSERT(masm_.pc_ == address_ + size_); | 2764 ASSERT(masm_.pc_ == address_ + size_); |
2719 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2765 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
2720 } | 2766 } |
2721 | 2767 |
2722 } } // namespace v8::internal | 2768 } } // namespace v8::internal |
2723 | 2769 |
2724 #endif // V8_TARGET_ARCH_X64 | 2770 #endif // V8_TARGET_ARCH_X64 |