OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 42 matching lines...)
53 Register addr, | 53 Register addr, |
54 Register scratch) { | 54 Register scratch) { |
55 if (FLAG_debug_code) { | 55 if (FLAG_debug_code) { |
56 // Check that the object is not in new space. | 56 // Check that the object is not in new space. |
57 Label not_in_new_space; | 57 Label not_in_new_space; |
58 InNewSpace(object, scratch, not_equal, &not_in_new_space); | 58 InNewSpace(object, scratch, not_equal, &not_in_new_space); |
59 Abort("new-space object passed to RecordWriteHelper"); | 59 Abort("new-space object passed to RecordWriteHelper"); |
60 bind(&not_in_new_space); | 60 bind(&not_in_new_space); |
61 } | 61 } |
62 | 62 |
63 Label fast; | |
64 | |
65 // Compute the page start address from the heap object pointer, and reuse | 63 // Compute the page start address from the heap object pointer, and reuse |
66 // the 'object' register for it. | 64 // the 'object' register for it. |
67 and_(object, ~Page::kPageAlignmentMask); | 65 and_(object, ~Page::kPageAlignmentMask); |
68 Register page_start = object; | |
69 | 66 |
70 // Compute the bit addr in the remembered set/index of the pointer in the | 67 // Compute number of region covering addr. See Page::GetRegionNumberForAddress |
71 // page. Reuse 'addr' as pointer_offset. | 68 // method for more details. |
72 sub(addr, Operand(page_start)); | 69 and_(addr, Page::kPageAlignmentMask); |
73 shr(addr, kObjectAlignmentBits); | 70 shr(addr, Page::kRegionSizeLog2); |
74 Register pointer_offset = addr; | |
75 | 71 |
76 // If the bit offset lies beyond the normal remembered set range, it is in | 72 // Set dirty mark for region. |
77 // the extra remembered set area of a large object. | 73 bts(Operand(object, Page::kDirtyFlagOffset), addr); |
78 cmp(pointer_offset, Page::kPageSize / kPointerSize); | |
79 j(less, &fast); | |
80 | |
81 // Adjust 'page_start' so that addressing using 'pointer_offset' hits the | |
82 // extra remembered set after the large object. | |
83 | |
84 // Find the length of the large object (FixedArray). | |
85 mov(scratch, Operand(page_start, Page::kObjectStartOffset | |
86 + FixedArray::kLengthOffset)); | |
87 Register array_length = scratch; | |
88 | |
89 // Extra remembered set starts right after the large object (a FixedArray), at | |
90 // page_start + kObjectStartOffset + objectSize | |
91 // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length. | |
92 // Add the delta between the end of the normal RSet and the start of the | |
93 // extra RSet to 'page_start', so that addressing the bit using | |
94 // 'pointer_offset' hits the extra RSet words. | |
95 lea(page_start, | |
96 Operand(page_start, array_length, times_pointer_size, | |
97 Page::kObjectStartOffset + FixedArray::kHeaderSize | |
98 - Page::kRSetEndOffset)); | |
99 | |
100 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | |
101 // to limit code size. We should probably evaluate this decision by | |
102 // measuring the performance of an equivalent implementation using | |
103 // "simpler" instructions | |
104 bind(&fast); | |
105 bts(Operand(page_start, Page::kRSetOffset), pointer_offset); | |
106 } | 74 } |
107 | 75 |
108 | 76 |
109 void MacroAssembler::InNewSpace(Register object, | 77 void MacroAssembler::InNewSpace(Register object, |
110 Register scratch, | 78 Register scratch, |
111 Condition cc, | 79 Condition cc, |
112 Label* branch) { | 80 Label* branch) { |
113 ASSERT(cc == equal || cc == not_equal); | 81 ASSERT(cc == equal || cc == not_equal); |
114 if (Serializer::enabled()) { | 82 if (Serializer::enabled()) { |
115 // Can't do arithmetic on external references if it might get serialized. | 83 // Can't do arithmetic on external references if it might get serialized. |
116 mov(scratch, Operand(object)); | 84 mov(scratch, Operand(object)); |
117 // The mask isn't really an address. We load it as an external reference in | 85 // The mask isn't really an address. We load it as an external reference in |
118 // case the size of the new space is different between the snapshot maker | 86 // case the size of the new space is different between the snapshot maker |
119 // and the running system. | 87 // and the running system. |
120 and_(Operand(scratch), Immediate(ExternalReference::new_space_mask())); | 88 and_(Operand(scratch), Immediate(ExternalReference::new_space_mask())); |
121 cmp(Operand(scratch), Immediate(ExternalReference::new_space_start())); | 89 cmp(Operand(scratch), Immediate(ExternalReference::new_space_start())); |
122 j(cc, branch); | 90 j(cc, branch); |
123 } else { | 91 } else { |
124 int32_t new_space_start = reinterpret_cast<int32_t>( | 92 int32_t new_space_start = reinterpret_cast<int32_t>( |
125 ExternalReference::new_space_start().address()); | 93 ExternalReference::new_space_start().address()); |
126 lea(scratch, Operand(object, -new_space_start)); | 94 lea(scratch, Operand(object, -new_space_start)); |
127 and_(scratch, Heap::NewSpaceMask()); | 95 and_(scratch, Heap::NewSpaceMask()); |
128 j(cc, branch); | 96 j(cc, branch); |
129 } | 97 } |
130 } | 98 } |
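
Editorial note: the non-serializer path of InNewSpace relies on new space being an aligned, power-of-two-sized range, so one subtract-mask-compare decides membership. A minimal sketch under that assumption (new_space_start and new_space_mask stand in for the ExternalReference/Heap::NewSpaceMask values used above, with the mask taken to clear every bit inside the range):

    #include <cstdint>

    // Sketch: the lea/and_ pair above leaves zero exactly when 'addr'
    // lies inside new space.
    inline bool InNewSpaceSketch(uintptr_t addr,
                                 uintptr_t new_space_start,
                                 uintptr_t new_space_mask) {
      return ((addr - new_space_start) & new_space_mask) == 0;
    }
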
131 | 99 |
132 | 100 |
133 // Set the remembered set bit for [object+offset]. | 101 // For page containing |object| mark region covering [object+offset] dirty. |
134 // object is the object being stored into, value is the object being stored. | 102 // object is the object being stored into, value is the object being stored. |
135 // If offset is zero, then the scratch register contains the array index into | 103 // If offset is zero, then the scratch register contains the array index into |
136 // the elements array represented as a Smi. | 104 // the elements array represented as a Smi. |
137 // All registers are clobbered by the operation. | 105 // All registers are clobbered by the operation. |
138 void MacroAssembler::RecordWrite(Register object, int offset, | 106 void MacroAssembler::RecordWrite(Register object, int offset, |
139 Register value, Register scratch) { | 107 Register value, Register scratch) { |
140 // The compiled code assumes that record write doesn't change the | 108 // The compiled code assumes that record write doesn't change the |
141 // context register, so we check that none of the clobbered | 109 // context register, so we check that none of the clobbered |
142 // registers are esi. | 110 // registers are esi. |
143 ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi)); | 111 ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi)); |
144 | 112 |
145 // First, check if a remembered set write is even needed. The tests below | 113 // First, check if a write barrier is even needed. The tests below |
146 // catch stores of Smis and stores into young gen (which does not have space | 114 // catch stores of Smis and stores into young gen. |
147 // for the remembered set bits). | |
148 Label done; | 115 Label done; |
149 | 116 |
150 // Skip barrier if writing a smi. | 117 // Skip barrier if writing a smi. |
151 ASSERT_EQ(0, kSmiTag); | 118 ASSERT_EQ(0, kSmiTag); |
152 test(value, Immediate(kSmiTagMask)); | 119 test(value, Immediate(kSmiTagMask)); |
153 j(zero, &done); | 120 j(zero, &done); |
154 | 121 |
155 InNewSpace(object, value, equal, &done); | 122 InNewSpace(object, value, equal, &done); |
156 | 123 |
157 // The offset is relative to a tagged or untagged HeapObject pointer, | 124 // The offset is relative to a tagged or untagged HeapObject pointer, |
158 // so either offset or offset + kHeapObjectTag must be a | 125 // so either offset or offset + kHeapObjectTag must be a |
159 // multiple of kPointerSize. | 126 // multiple of kPointerSize. |
160 ASSERT(IsAligned(offset, kPointerSize) || | 127 ASSERT(IsAligned(offset, kPointerSize) || |
161 IsAligned(offset + kHeapObjectTag, kPointerSize)); | 128 IsAligned(offset + kHeapObjectTag, kPointerSize)); |
162 | 129 |
163 // We use optimized write barrier code if the word being written to is not in | 130 Register dst = scratch; |
164 // a large object chunk or is in the first page of a large object chunk. | 131 if (offset != 0) { |
165 // We make sure that an offset is inside the right limits whether it is | 132 lea(dst, Operand(object, offset)); |
166 // tagged or untagged. | |
167 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) { | |
168 // Compute the bit offset in the remembered set, leave it in 'value'. | |
169 lea(value, Operand(object, offset)); | |
170 and_(value, Page::kPageAlignmentMask); | |
171 shr(value, kPointerSizeLog2); | |
172 | |
173 // Compute the page address from the heap object pointer, leave it in | |
174 // 'object'. | |
175 and_(object, ~Page::kPageAlignmentMask); | |
176 | |
177 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | |
178 // to limit code size. We should probably evaluate this decision by | |
179 // measuring the performance of an equivalent implementation using | |
180 // "simpler" instructions | |
181 bts(Operand(object, Page::kRSetOffset), value); | |
182 } else { | 133 } else { |
183 Register dst = scratch; | 134 // Array access: calculate the destination address in the same manner as |
184 if (offset != 0) { | 135 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset |
185 lea(dst, Operand(object, offset)); | 136 // into an array of words. |
186 } else { | 137 ASSERT_EQ(1, kSmiTagSize); |
187 // array access: calculate the destination address in the same manner as | 138 ASSERT_EQ(0, kSmiTag); |
188 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset | 139 lea(dst, Operand(object, dst, times_half_pointer_size, |
189 // into an array of words. | 140 FixedArray::kHeaderSize - kHeapObjectTag)); |
190 ASSERT_EQ(1, kSmiTagSize); | |
191 ASSERT_EQ(0, kSmiTag); | |
192 lea(dst, Operand(object, dst, times_half_pointer_size, | |
193 FixedArray::kHeaderSize - kHeapObjectTag)); | |
194 } | |
195 // If we are already generating a shared stub, not inlining the | |
196 // record write code isn't going to save us any memory. | |
197 if (generating_stub()) { | |
198 RecordWriteHelper(object, dst, value); | |
199 } else { | |
200 RecordWriteStub stub(object, dst, value); | |
201 CallStub(&stub); | |
202 } | |
203 } | 141 } |
| 142 RecordWriteHelper(object, dst, value); |
204 | 143 |
205 bind(&done); | 144 bind(&done); |
206 | 145 |
207 // Clobber all input registers when running with the debug-code flag | 146 // Clobber all input registers when running with the debug-code flag |
208 // turned on to provoke errors. | 147 // turned on to provoke errors. |
209 if (FLAG_debug_code) { | 148 if (FLAG_debug_code) { |
210 mov(object, Immediate(BitCast<int32_t>(kZapValue))); | 149 mov(object, Immediate(BitCast<int32_t>(kZapValue))); |
211 mov(value, Immediate(BitCast<int32_t>(kZapValue))); | 150 mov(value, Immediate(BitCast<int32_t>(kZapValue))); |
212 mov(scratch, Immediate(BitCast<int32_t>(kZapValue))); | 151 mov(scratch, Immediate(BitCast<int32_t>(kZapValue))); |
213 } | 152 } |
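
Editorial note on the array-index case in RecordWrite: the scratch register holds the element index as a smi, which on ia32 already carries a factor of two, so scaling by times_half_pointer_size yields the byte offset of the element slot. A hedged arithmetic sketch of the lea above, assuming ia32 smis (value << 1), 4-byte pointers and kHeapObjectTag == 1; fixed_array_header_size is left as a parameter rather than asserting its value:

    #include <cstdint>

    // Sketch: recomputes lea(dst, Operand(object, dst, times_half_pointer_size,
    // FixedArray::kHeaderSize - kHeapObjectTag)) in plain arithmetic.
    inline uintptr_t ElementSlotSketch(uintptr_t tagged_object,
                                       intptr_t smi_index,
                                       int fixed_array_header_size) {
      const int kPointerSize = 4;
      const int kHeapObjectTag = 1;
      // smi_index == element_index << 1, so scaling by kPointerSize / 2
      // gives element_index * kPointerSize.
      return tagged_object + smi_index * (kPointerSize / 2) +
             fixed_array_header_size - kHeapObjectTag;
    }
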
(...skipping 1163 matching lines...)
1377 } | 1316 } |
1378 | 1317 |
1379 | 1318 |
1380 void MacroAssembler::InvokeFunction(Register fun, | 1319 void MacroAssembler::InvokeFunction(Register fun, |
1381 const ParameterCount& actual, | 1320 const ParameterCount& actual, |
1382 InvokeFlag flag) { | 1321 InvokeFlag flag) { |
1383 ASSERT(fun.is(edi)); | 1322 ASSERT(fun.is(edi)); |
1384 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); | 1323 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); |
1385 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); | 1324 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); |
1386 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); | 1325 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1326 SmiUntag(ebx); |
1387 mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); | 1327 mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); |
1388 lea(edx, FieldOperand(edx, Code::kHeaderSize)); | 1328 lea(edx, FieldOperand(edx, Code::kHeaderSize)); |
1389 | 1329 |
1390 ParameterCount expected(ebx); | 1330 ParameterCount expected(ebx); |
1391 InvokeCode(Operand(edx), expected, actual, flag); | 1331 InvokeCode(Operand(edx), expected, actual, flag); |
1392 } | 1332 } |
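
Editorial note: the added SmiUntag(ebx) suggests the formal parameter count field is now read as a smi and must be converted to a plain integer before being used as the expected argument count. On ia32 a smi stores its value shifted left by one bit (kSmiTag == 0, kSmiTagSize == 1, as asserted elsewhere in this file), so untagging is an arithmetic shift right; a small sketch:

    #include <cstdint>

    // Sketch of ia32 smi tagging/untagging under the assumptions above.
    inline int32_t SmiTagSketch(int32_t value) { return value << 1; }
    inline int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }  // sar by 1
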
1393 | 1333 |
1394 | 1334 |
1395 void MacroAssembler::InvokeFunction(JSFunction* function, | 1335 void MacroAssembler::InvokeFunction(JSFunction* function, |
1396 const ParameterCount& actual, | 1336 const ParameterCount& actual, |
(...skipping 306 matching lines...)
1703 | 1643 |
1704 // Check that the code was patched as expected. | 1644 // Check that the code was patched as expected. |
1705 ASSERT(masm_.pc_ == address_ + size_); | 1645 ASSERT(masm_.pc_ == address_ + size_); |
1706 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 1646 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
1707 } | 1647 } |
1708 | 1648 |
1709 | 1649 |
1710 } } // namespace v8::internal | 1650 } } // namespace v8::internal |
1711 | 1651 |
1712 #endif // V8_TARGET_ARCH_IA32 | 1652 #endif // V8_TARGET_ARCH_IA32 |