OLD | NEW |
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
83 Register addr, | 83 Register addr, |
84 Register scratch) { | 84 Register scratch) { |
85 if (FLAG_debug_code) { | 85 if (FLAG_debug_code) { |
86 // Check that the object is not in new space. | 86 // Check that the object is not in new space. |
87 Label not_in_new_space; | 87 Label not_in_new_space; |
88 InNewSpace(object, scratch, not_equal, &not_in_new_space); | 88 InNewSpace(object, scratch, not_equal, &not_in_new_space); |
89 Abort("new-space object passed to RecordWriteHelper"); | 89 Abort("new-space object passed to RecordWriteHelper"); |
90 bind(&not_in_new_space); | 90 bind(&not_in_new_space); |
91 } | 91 } |
92 | 92 |
93 Label fast; | |
94 | |
95 // Compute the page start address from the heap object pointer, and reuse | 93 // Compute the page start address from the heap object pointer, and reuse |
96 // the 'object' register for it. | 94 // the 'object' register for it. |
97 ASSERT(is_int32(~Page::kPageAlignmentMask)); | 95 and_(object, Immediate(~Page::kPageAlignmentMask)); |
98 and_(object, | |
99 Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask))); | |
100 Register page_start = object; | |
101 | 96 |
102 // Compute the bit addr in the remembered set/index of the pointer in the | 97 // Compute number of region covering addr. See Page::GetRegionNumberForAddress |
103 // page. Reuse 'addr' as pointer_offset. | 98 // method for more details. |
104 subq(addr, page_start); | 99 and_(addr, Immediate(Page::kPageAlignmentMask)); |
105 shr(addr, Immediate(kPointerSizeLog2)); | 100 shrl(addr, Immediate(Page::kRegionSizeLog2)); |
106 Register pointer_offset = addr; | |
107 | 101 |
108 // If the bit offset lies beyond the normal remembered set range, it is in | 102 // Set dirty mark for region. |
109 // the extra remembered set area of a large object. | 103 bts(Operand(object, Page::kDirtyFlagOffset), addr); |
110 cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize)); | |
111 j(below, &fast); | |
112 | |
113 // We have a large object containing pointers. It must be a FixedArray. | |
114 | |
115 // Adjust 'page_start' so that addressing using 'pointer_offset' hits the | |
116 // extra remembered set after the large object. | |
117 | |
118 // Load the array length into 'scratch'. | |
119 movl(scratch, | |
120 Operand(page_start, | |
121 Page::kObjectStartOffset + FixedArray::kLengthOffset)); | |
122 Register array_length = scratch; | |
123 | |
124 // Extra remembered set starts right after the large object (a FixedArray), at | |
125 // page_start + kObjectStartOffset + objectSize | |
126 // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length. | |
127 // Add the delta between the end of the normal RSet and the start of the | |
128 // extra RSet to 'page_start', so that addressing the bit using | |
129 // 'pointer_offset' hits the extra RSet words. | |
130 lea(page_start, | |
131 Operand(page_start, array_length, times_pointer_size, | |
132 Page::kObjectStartOffset + FixedArray::kHeaderSize | |
133 - Page::kRSetEndOffset)); | |
134 | |
135 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | |
136 // to limit code size. We should probably evaluate this decision by | |
137 // measuring the performance of an equivalent implementation using | |
138 // "simpler" instructions | |
139 bind(&fast); | |
140 bts(Operand(page_start, Page::kRSetOffset), pointer_offset); | |
141 } | 104 } |
142 | 105 |
143 | 106 |
144 // Set the remembered set bit for [object+offset]. | 107 // For page containing |object| mark region covering [object+offset] dirty. |
145 // object is the object being stored into, value is the object being stored. | 108 // object is the object being stored into, value is the object being stored. |
146 // If offset is zero, then the smi_index register contains the array index into | 109 // If offset is zero, then the smi_index register contains the array index into |
147 // the elements array represented as a smi. Otherwise it can be used as a | 110 // the elements array represented as a smi. Otherwise it can be used as a |
148 // scratch register. | 111 // scratch register. |
149 // All registers are clobbered by the operation. | 112 // All registers are clobbered by the operation. |
150 void MacroAssembler::RecordWrite(Register object, | 113 void MacroAssembler::RecordWrite(Register object, |
151 int offset, | 114 int offset, |
152 Register value, | 115 Register value, |
153 Register smi_index) { | 116 Register smi_index) { |
154 // The compiled code assumes that record write doesn't change the | 117 // The compiled code assumes that record write doesn't change the |
155 // context register, so we check that none of the clobbered | 118 // context register, so we check that none of the clobbered |
156 // registers are rsi. | 119 // registers are rsi. |
157 ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi)); | 120 ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi)); |
158 | 121 |
159 // First, check if a remembered set write is even needed. The tests below | 122 // First, check if a write barrier is even needed. The tests below |
160 // catch stores of Smis and stores into young gen (which does not have space | 123 // catch stores of Smis and stores into young gen. |
161 // for the remembered set bits). | |
162 Label done; | 124 Label done; |
163 JumpIfSmi(value, &done); | 125 JumpIfSmi(value, &done); |
164 | 126 |
165 RecordWriteNonSmi(object, offset, value, smi_index); | 127 RecordWriteNonSmi(object, offset, value, smi_index); |
166 bind(&done); | 128 bind(&done); |
167 | 129 |
168 // Clobber all input registers when running with the debug-code flag | 130 // Clobber all input registers when running with the debug-code flag |
169 // turned on to provoke errors. This clobbering repeats the | 131 // turned on to provoke errors. This clobbering repeats the |
170 // clobbering done inside RecordWriteNonSmi but it's necessary to | 132 // clobbering done inside RecordWriteNonSmi but it's necessary to |
171 // avoid having the fast case for smis leave the registers | 133 // avoid having the fast case for smis leave the registers |
(...skipping 12 matching lines...) Expand all Loading... |
184 Register smi_index) { | 146 Register smi_index) { |
185 Label done; | 147 Label done; |
186 | 148 |
187 if (FLAG_debug_code) { | 149 if (FLAG_debug_code) { |
188 Label okay; | 150 Label okay; |
189 JumpIfNotSmi(object, &okay); | 151 JumpIfNotSmi(object, &okay); |
190 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); | 152 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); |
191 bind(&okay); | 153 bind(&okay); |
192 } | 154 } |
193 | 155 |
194 // Test that the object address is not in the new space. We cannot | 156 // Test that the object address is not in the new space. We cannot |
195 // set remembered set bits in the new space. | 157 // update page dirty marks for new space pages. |
196 InNewSpace(object, scratch, equal, &done); | 158 InNewSpace(object, scratch, equal, &done); |
197 | 159 |
198 // The offset is relative to a tagged or untagged HeapObject pointer, | 160 // The offset is relative to a tagged or untagged HeapObject pointer, |
199 // so either offset or offset + kHeapObjectTag must be a | 161 // so either offset or offset + kHeapObjectTag must be a |
200 // multiple of kPointerSize. | 162 // multiple of kPointerSize. |
201 ASSERT(IsAligned(offset, kPointerSize) || | 163 ASSERT(IsAligned(offset, kPointerSize) || |
202 IsAligned(offset + kHeapObjectTag, kPointerSize)); | 164 IsAligned(offset + kHeapObjectTag, kPointerSize)); |
203 | 165 |
204 // We use optimized write barrier code if the word being written to is not in | 166 Register dst = smi_index; |
205 // a large object page, or is in the first "page" of a large object page. | 167 if (offset != 0) { |
206 // We make sure that an offset is inside the right limits whether it is | 168 lea(dst, Operand(object, offset)); |
207 // tagged or untagged. | |
208 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) { | |
209 // Compute the bit offset in the remembered set, leave it in 'scratch'. | |
210 lea(scratch, Operand(object, offset)); | |
211 ASSERT(is_int32(Page::kPageAlignmentMask)); | |
212 and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask))); | |
213 shr(scratch, Immediate(kPointerSizeLog2)); | |
214 | |
215 // Compute the page address from the heap object pointer, leave it in | |
216 // 'object' (immediate value is sign extended). | |
217 and_(object, Immediate(~Page::kPageAlignmentMask)); | |
218 | |
219 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | |
220 // to limit code size. We should probably evaluate this decision by | |
221 // measuring the performance of an equivalent implementation using | |
222 // "simpler" instructions | |
223 bts(Operand(object, Page::kRSetOffset), scratch); | |
224 } else { | 169 } else { |
225 Register dst = smi_index; | 170 // array access: calculate the destination address in the same manner as |
226 if (offset != 0) { | 171 // KeyedStoreIC::GenerateGeneric. |
227 lea(dst, Operand(object, offset)); | 172 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); |
228 } else { | 173 lea(dst, FieldOperand(object, |
229 // array access: calculate the destination address in the same manner as | 174 index.reg, |
230 // KeyedStoreIC::GenerateGeneric. | 175 index.scale, |
231 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); | 176 FixedArray::kHeaderSize)); |
232 lea(dst, FieldOperand(object, | |
233 index.reg, | |
234 index.scale, | |
235 FixedArray::kHeaderSize)); | |
236 } | |
237 // If we are already generating a shared stub, not inlining the | |
238 // record write code isn't going to save us any memory. | |
239 if (generating_stub()) { | |
240 RecordWriteHelper(object, dst, scratch); | |
241 } else { | |
242 RecordWriteStub stub(object, dst, scratch); | |
243 CallStub(&stub); | |
244 } | |
245 } | 177 } |
| 178 RecordWriteHelper(object, dst, scratch); |
246 | 179 |
247 bind(&done); | 180 bind(&done); |
248 | 181 |
249 // Clobber all input registers when running with the debug-code flag | 182 // Clobber all input registers when running with the debug-code flag |
250 // turned on to provoke errors. | 183 // turned on to provoke errors. |
251 if (FLAG_debug_code) { | 184 if (FLAG_debug_code) { |
252 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 185 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
253 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 186 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
254 movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 187 movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
255 } | 188 } |
(...skipping 381 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
637 movq(dst, src); | 570 movq(dst, src); |
638 } | 571 } |
639 if (power < kSmiShift) { | 572 if (power < kSmiShift) { |
640 sar(dst, Immediate(kSmiShift - power)); | 573 sar(dst, Immediate(kSmiShift - power)); |
641 } else if (power > kSmiShift) { | 574 } else if (power > kSmiShift) { |
642 shl(dst, Immediate(power - kSmiShift)); | 575 shl(dst, Immediate(power - kSmiShift)); |
643 } | 576 } |
644 } | 577 } |
645 | 578 |
646 | 579 |
| 580 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, |
| 581 Register src, |
| 582 int power) { |
| 583 ASSERT((0 <= power) && (power < 32)); |
| 584 if (dst.is(src)) { |
| 585 shr(dst, Immediate(power + kSmiShift)); |
| 586 } else { |
| 587 UNIMPLEMENTED(); // Not used. |
| 588 } |
| 589 } |
| 590 |
| 591 |
647 Condition MacroAssembler::CheckSmi(Register src) { | 592 Condition MacroAssembler::CheckSmi(Register src) { |
648 ASSERT_EQ(0, kSmiTag); | 593 ASSERT_EQ(0, kSmiTag); |
649 testb(src, Immediate(kSmiTagMask)); | 594 testb(src, Immediate(kSmiTagMask)); |
650 return zero; | 595 return zero; |
651 } | 596 } |
652 | 597 |
653 | 598 |
654 Condition MacroAssembler::CheckPositiveSmi(Register src) { | 599 Condition MacroAssembler::CheckPositiveSmi(Register src) { |
655 ASSERT_EQ(0, kSmiTag); | 600 ASSERT_EQ(0, kSmiTag); |
656 movq(kScratchRegister, src); | 601 movq(kScratchRegister, src); |
(...skipping 1943 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2600 scratch2, | 2545 scratch2, |
2601 scratch3, | 2546 scratch3, |
2602 gc_required, | 2547 gc_required, |
2603 TAG_OBJECT); | 2548 TAG_OBJECT); |
2604 | 2549 |
2605 // Set the map, length and hash field. | 2550 // Set the map, length and hash field. |
2606 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex); | 2551 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex); |
2607 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); | 2552 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); |
2608 Integer32ToSmi(scratch1, length); | 2553 Integer32ToSmi(scratch1, length); |
2609 movq(FieldOperand(result, String::kLengthOffset), scratch1); | 2554 movq(FieldOperand(result, String::kLengthOffset), scratch1); |
2610 movl(FieldOperand(result, String::kHashFieldOffset), | 2555 movq(FieldOperand(result, String::kHashFieldOffset), |
2611 Immediate(String::kEmptyHashField)); | 2556 Immediate(String::kEmptyHashField)); |
2612 } | 2557 } |
2613 | 2558 |
2614 | 2559 |
2615 void MacroAssembler::AllocateAsciiString(Register result, | 2560 void MacroAssembler::AllocateAsciiString(Register result, |
2616 Register length, | 2561 Register length, |
2617 Register scratch1, | 2562 Register scratch1, |
2618 Register scratch2, | 2563 Register scratch2, |
2619 Register scratch3, | 2564 Register scratch3, |
2620 Label* gc_required) { | 2565 Label* gc_required) { |
(...skipping 17 matching lines...) Expand all Loading... |
2638 scratch2, | 2583 scratch2, |
2639 scratch3, | 2584 scratch3, |
2640 gc_required, | 2585 gc_required, |
2641 TAG_OBJECT); | 2586 TAG_OBJECT); |
2642 | 2587 |
2643 // Set the map, length and hash field. | 2588 // Set the map, length and hash field. |
2644 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex); | 2589 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex); |
2645 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); | 2590 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); |
2646 Integer32ToSmi(scratch1, length); | 2591 Integer32ToSmi(scratch1, length); |
2647 movq(FieldOperand(result, String::kLengthOffset), scratch1); | 2592 movq(FieldOperand(result, String::kLengthOffset), scratch1); |
2648 movl(FieldOperand(result, String::kHashFieldOffset), | 2593 movq(FieldOperand(result, String::kHashFieldOffset), |
2649 Immediate(String::kEmptyHashField)); | 2594 Immediate(String::kEmptyHashField)); |
2650 } | 2595 } |
2651 | 2596 |
2652 | 2597 |
2653 void MacroAssembler::AllocateConsString(Register result, | 2598 void MacroAssembler::AllocateConsString(Register result, |
2654 Register scratch1, | 2599 Register scratch1, |
2655 Register scratch2, | 2600 Register scratch2, |
2656 Label* gc_required) { | 2601 Label* gc_required) { |
2657 // Allocate heap number in new space. | 2602 // Allocate heap number in new space. |
2658 AllocateInNewSpace(ConsString::kSize, | 2603 AllocateInNewSpace(ConsString::kSize, |
(...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2769 CPU::FlushICache(address_, size_); | 2714 CPU::FlushICache(address_, size_); |
2770 | 2715 |
2771 // Check that the code was patched as expected. | 2716 // Check that the code was patched as expected. |
2772 ASSERT(masm_.pc_ == address_ + size_); | 2717 ASSERT(masm_.pc_ == address_ + size_); |
2773 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2718 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
2774 } | 2719 } |
2775 | 2720 |
2776 } } // namespace v8::internal | 2721 } } // namespace v8::internal |
2777 | 2722 |
2778 #endif // V8_TARGET_ARCH_X64 | 2723 #endif // V8_TARGET_ARCH_X64 |
OLD | NEW |