OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
139 // If offset is zero, then the scratch register contains the array index into | 139 // If offset is zero, then the scratch register contains the array index into |
140 // the elements array represented as a Smi. | 140 // the elements array represented as a Smi. |
141 // All registers are clobbered by the operation. | 141 // All registers are clobbered by the operation. |
142 void MacroAssembler::RecordWrite(Register object, int offset, | 142 void MacroAssembler::RecordWrite(Register object, int offset, |
143 Register value, Register scratch) { | 143 Register value, Register scratch) { |
144 // First, check if a remembered set write is even needed. The tests below | 144 // First, check if a remembered set write is even needed. The tests below |
145 // catch stores of Smis and stores into young gen (which does not have space | 145 // catch stores of Smis and stores into young gen (which does not have space |
146 // for the remembered set bits). | 146 // for the remembered set bits). |
147 Label done; | 147 Label done; |
148 | 148 |
149 // This optimization cannot survive serialization and deserialization, | 149 // Skip barrier if writing a smi. |
150 // so we disable it as long as serialization can take place. | 150 ASSERT_EQ(0, kSmiTag); |
151 int32_t new_space_start = | 151 test(value, Immediate(kSmiTagMask)); |
152 reinterpret_cast<int32_t>(ExternalReference::new_space_start().address()); | 152 j(zero, &done); |
153 if (Serializer::enabled() || new_space_start < 0) { | 153 |
154 // Cannot do smart bit-twiddling. Need to do two consecutive checks. | 154 if (Serializer::enabled()) { |
155 // Check for Smi first. | 155 // Can't do arithmetic on external references if it might get serialized. |
156 test(value, Immediate(kSmiTagMask)); | |
157 j(zero, &done); | |
158 // Test that the object address is not in the new space. We cannot | |
159 // set remembered set bits in the new space. | |
160 mov(value, Operand(object)); | 156 mov(value, Operand(object)); |
161 and_(value, Heap::NewSpaceMask()); | 157 and_(value, Heap::NewSpaceMask()); |
162 cmp(Operand(value), Immediate(ExternalReference::new_space_start())); | 158 cmp(Operand(value), Immediate(ExternalReference::new_space_start())); |
163 j(equal, &done); | 159 j(equal, &done); |
164 } else { | 160 } else { |
165 // move the value SmiTag into the sign bit | 161 int32_t new_space_start = reinterpret_cast<int32_t>( |
166 shl(value, 31); | 162 ExternalReference::new_space_start().address()); |
167 // combine the object with value SmiTag | 163 lea(value, Operand(object, -new_space_start)); |
168 or_(value, Operand(object)); | 164 and_(value, Heap::NewSpaceMask()); |
169 // remove the uninteresting bits inside the page | 165 j(equal, &done); |
170 and_(value, Heap::NewSpaceMask() | (1 << 31)); | |
171 // xor has two effects: | |
172 // - if the value was a smi, then the result will be negative | |
173 // - if the object is pointing into new space area the page bits will | |
174 // all be zero | |
175 xor_(value, new_space_start | (1 << 31)); | |
176 // Check for both conditions in one branch | |
177 j(less_equal, &done); | |
178 } | 166 } |
179 | 167 |
180 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) { | 168 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) { |
181 // Compute the bit offset in the remembered set, leave it in 'value'. | 169 // Compute the bit offset in the remembered set, leave it in 'value'. |
182 mov(value, Operand(object)); | 170 lea(value, Operand(object, offset)); |
183 and_(value, Page::kPageAlignmentMask); | 171 and_(value, Page::kPageAlignmentMask); |
184 add(Operand(value), Immediate(offset)); | 172 shr(value, kPointerSizeLog2); |
185 shr(value, kObjectAlignmentBits); | |
186 | 173 |
187 // Compute the page address from the heap object pointer, leave it in | 174 // Compute the page address from the heap object pointer, leave it in |
188 // 'object'. | 175 // 'object'. |
189 and_(object, ~Page::kPageAlignmentMask); | 176 and_(object, ~Page::kPageAlignmentMask); |
190 | 177 |
191 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | 178 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction |
192 // to limit code size. We should probably evaluate this decision by | 179 // to limit code size. We should probably evaluate this decision by |
193 // measuring the performance of an equivalent implementation using | 180 // measuring the performance of an equivalent implementation using |
194 // "simpler" instructions | 181 // "simpler" instructions |
195 bts(Operand(object, 0), value); | 182 bts(Operand(object, Page::kRSetOffset), value); |
196 } else { | 183 } else { |
197 Register dst = scratch; | 184 Register dst = scratch; |
198 if (offset != 0) { | 185 if (offset != 0) { |
199 lea(dst, Operand(object, offset)); | 186 lea(dst, Operand(object, offset)); |
200 } else { | 187 } else { |
201 // array access: calculate the destination address in the same manner as | 188 // array access: calculate the destination address in the same manner as |
202 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset | 189 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset |
203 // into an array of words. | 190 // into an array of words. |
204 lea(dst, Operand(object, dst, times_2, | 191 ASSERT_EQ(1, kSmiTagSize); |
| 192 ASSERT_EQ(0, kSmiTag); |
| 193 lea(dst, Operand(object, dst, times_half_pointer_size, |
205 FixedArray::kHeaderSize - kHeapObjectTag)); | 194 FixedArray::kHeaderSize - kHeapObjectTag)); |
206 } | 195 } |
207 // If we are already generating a shared stub, not inlining the | 196 // If we are already generating a shared stub, not inlining the |
208 // record write code isn't going to save us any memory. | 197 // record write code isn't going to save us any memory. |
209 if (generating_stub()) { | 198 if (generating_stub()) { |
210 RecordWriteHelper(this, object, dst, value); | 199 RecordWriteHelper(this, object, dst, value); |
211 } else { | 200 } else { |
212 RecordWriteStub stub(object, dst, value); | 201 RecordWriteStub stub(object, dst, value); |
213 CallStub(&stub); | 202 CallStub(&stub); |
214 } | 203 } |
(...skipping 825 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1040 // Indicate that code has changed. | 1029 // Indicate that code has changed. |
1041 CPU::FlushICache(address_, size_); | 1030 CPU::FlushICache(address_, size_); |
1042 | 1031 |
1043 // Check that the code was patched as expected. | 1032 // Check that the code was patched as expected. |
1044 ASSERT(masm_.pc_ == address_ + size_); | 1033 ASSERT(masm_.pc_ == address_ + size_); |
1045 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 1034 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
1046 } | 1035 } |
1047 | 1036 |
1048 | 1037 |
1049 } } // namespace v8::internal | 1038 } } // namespace v8::internal |
OLD | NEW |