OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 42 matching lines...) |
53 Register addr, | 53 Register addr, |
54 Register scratch) { | 54 Register scratch) { |
55 if (FLAG_debug_code) { | 55 if (FLAG_debug_code) { |
56 // Check that the object is not in new space. | 56 // Check that the object is not in new space. |
57 Label not_in_new_space; | 57 Label not_in_new_space; |
58 InNewSpace(object, scratch, not_equal, &not_in_new_space); | 58 InNewSpace(object, scratch, not_equal, &not_in_new_space); |
59 Abort("new-space object passed to RecordWriteHelper"); | 59 Abort("new-space object passed to RecordWriteHelper"); |
60 bind(&not_in_new_space); | 60 bind(&not_in_new_space); |
61 } | 61 } |
62 | 62 |
| 63 Label fast; |
| 64 |
63 // Compute the page start address from the heap object pointer, and reuse | 65 // Compute the page start address from the heap object pointer, and reuse |
64 // the 'object' register for it. | 66 // the 'object' register for it. |
65 and_(object, ~Page::kPageAlignmentMask); | 67 and_(object, ~Page::kPageAlignmentMask); |
| 68 Register page_start = object; |
66 | 69 |
67 // Compute number of region covering addr. See Page::GetRegionNumberForAddress | 70 // Compute the bit addr in the remembered set/index of the pointer in the |
68 // method for more details. | 71 // page. Reuse 'addr' as pointer_offset. |
69 and_(addr, Page::kPageAlignmentMask); | 72 sub(addr, Operand(page_start)); |
70 shr(addr, Page::kRegionSizeLog2); | 73 shr(addr, kObjectAlignmentBits); |
| 74 Register pointer_offset = addr; |
71 | 75 |
72 // Set dirty mark for region. | 76 // If the bit offset lies beyond the normal remembered set range, it is in |
73 bts(Operand(object, Page::kDirtyFlagOffset), addr); | 77 // the extra remembered set area of a large object. |
| 78 cmp(pointer_offset, Page::kPageSize / kPointerSize); |
| 79 j(less, &fast); |
| 80 |
| 81 // Adjust 'page_start' so that addressing using 'pointer_offset' hits the |
| 82 // extra remembered set after the large object. |
| 83 |
| 84 // Find the length of the large object (FixedArray). |
| 85 mov(scratch, Operand(page_start, Page::kObjectStartOffset |
| 86 + FixedArray::kLengthOffset)); |
| 87 Register array_length = scratch; |
| 88 |
| 89 // Extra remembered set starts right after the large object (a FixedArray), at |
| 90 // page_start + kObjectStartOffset + objectSize |
| 91 // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length. |
| 92 // Add the delta between the end of the normal RSet and the start of the |
| 93 // extra RSet to 'page_start', so that addressing the bit using |
| 94 // 'pointer_offset' hits the extra RSet words. |
| 95 lea(page_start, |
| 96 Operand(page_start, array_length, times_pointer_size, |
| 97 Page::kObjectStartOffset + FixedArray::kHeaderSize |
| 98 - Page::kRSetEndOffset)); |
| 99 |
| 100 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction |
| 101 // to limit code size. We should probably evaluate this decision by |
| 102 // measuring the performance of an equivalent implementation using |
| 103 // "simpler" instructions |
| 104 bind(&fast); |
| 105 bts(Operand(page_start, Page::kRSetOffset), pointer_offset); |
74 } | 106 } |
75 | 107 |
76 | 108 |
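The NEW side of RecordWriteHelper above boils down to address arithmetic on the slot pointer. As a rough C++ sketch of that arithmetic (the page size, RSet offsets, and FixedArray header size below are illustrative assumptions for the sketch, not values taken from V8's headers):

#include <cstdint>
#include <utility>

// Illustrative constants for the sketch; ia32-flavoured values are assumed.
constexpr uintptr_t kPageSize = 8 * 1024;
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kPointerSize = 4;
constexpr uintptr_t kObjectAlignmentBits = 2;             // log2(kPointerSize)
constexpr uintptr_t kRSetEndOffset = kPageSize / kPointerSize / 8;
constexpr uintptr_t kObjectStartOffset = kRSetEndOffset;
constexpr uintptr_t kFixedArrayHeaderSize = 2 * kPointerSize;

// Returns {bit_base, bit_index}: the remembered-set bit covering 'slot' is bit
// 'bit_index' counted from address 'bit_base', mirroring the bts operand above.
std::pair<uintptr_t, uintptr_t> RSetBitFor(uintptr_t slot,
                                           uintptr_t large_array_length) {
  uintptr_t page_start = slot & ~kPageAlignmentMask;
  uintptr_t bit = (slot - page_start) >> kObjectAlignmentBits;
  if (bit >= kPageSize / kPointerSize) {
    // Slot lies past the normal RSet range, so it belongs to a large
    // FixedArray: rebase onto the extra RSet placed right after its body.
    page_start += kObjectStartOffset + kFixedArrayHeaderSize +
                  large_array_length * kPointerSize - kRSetEndOffset;
  }
  return {page_start, bit};  // Page::kRSetOffset is treated as 0 in this sketch
}

The bts in the fast path then sets exactly that bit relative to page_start + Page::kRSetOffset, which is why a single instruction suffices once the rebasing is done.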
77 void MacroAssembler::InNewSpace(Register object, | 109 void MacroAssembler::InNewSpace(Register object, |
78 Register scratch, | 110 Register scratch, |
79 Condition cc, | 111 Condition cc, |
80 Label* branch) { | 112 Label* branch) { |
81 ASSERT(cc == equal || cc == not_equal); | 113 ASSERT(cc == equal || cc == not_equal); |
82 if (Serializer::enabled()) { | 114 if (Serializer::enabled()) { |
83 // Can't do arithmetic on external references if it might get serialized. | 115 // Can't do arithmetic on external references if it might get serialized. |
84 mov(scratch, Operand(object)); | 116 mov(scratch, Operand(object)); |
85 // The mask isn't really an address. We load it as an external reference in | 117 // The mask isn't really an address. We load it as an external reference in |
86 // case the size of the new space is different between the snapshot maker | 118 // case the size of the new space is different between the snapshot maker |
87 // and the running system. | 119 // and the running system. |
88 and_(Operand(scratch), Immediate(ExternalReference::new_space_mask())); | 120 and_(Operand(scratch), Immediate(ExternalReference::new_space_mask())); |
89 cmp(Operand(scratch), Immediate(ExternalReference::new_space_start())); | 121 cmp(Operand(scratch), Immediate(ExternalReference::new_space_start())); |
90 j(cc, branch); | 122 j(cc, branch); |
91 } else { | 123 } else { |
92 int32_t new_space_start = reinterpret_cast<int32_t>( | 124 int32_t new_space_start = reinterpret_cast<int32_t>( |
93 ExternalReference::new_space_start().address()); | 125 ExternalReference::new_space_start().address()); |
94 lea(scratch, Operand(object, -new_space_start)); | 126 lea(scratch, Operand(object, -new_space_start)); |
95 and_(scratch, Heap::NewSpaceMask()); | 127 and_(scratch, Heap::NewSpaceMask()); |
96 j(cc, branch); | 128 j(cc, branch); |
97 } | 129 } |
98 } | 130 } |
99 | 131 |
100 | 132 |
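The non-serializer branch of InNewSpace relies on new space being an aligned, power-of-two-sized region, so the membership test is one subtraction and one mask. A minimal sketch of that predicate, assuming the mask has the form ~(new_space_size - 1):

#include <cstdint>

// Sketch: true when 'addr' lies inside the new-space reservation starting at
// 'new_space_start'. 'new_space_mask' is assumed to be ~(new_space_size - 1),
// matching the lea/and_ sequence above; cc == equal branches on a zero result.
bool InNewSpaceSketch(uintptr_t addr, uintptr_t new_space_start,
                      uintptr_t new_space_mask) {
  return ((addr - new_space_start) & new_space_mask) == 0;
}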
101 // For page containing |object| mark region covering [object+offset] dirty. | 133 // Set the remembered set bit for [object+offset]. |
102 // object is the object being stored into, value is the object being stored. | 134 // object is the object being stored into, value is the object being stored. |
103 // If offset is zero, then the scratch register contains the array index into | 135 // If offset is zero, then the scratch register contains the array index into |
104 // the elements array represented as a Smi. | 136 // the elements array represented as a Smi. |
105 // All registers are clobbered by the operation. | 137 // All registers are clobbered by the operation. |
106 void MacroAssembler::RecordWrite(Register object, int offset, | 138 void MacroAssembler::RecordWrite(Register object, int offset, |
107 Register value, Register scratch) { | 139 Register value, Register scratch) { |
108 // The compiled code assumes that record write doesn't change the | 140 // The compiled code assumes that record write doesn't change the |
109 // context register, so we check that none of the clobbered | 141 // context register, so we check that none of the clobbered |
110 // registers are esi. | 142 // registers are esi. |
111 ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi)); | 143 ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi)); |
112 | 144 |
113 // First, check if a write barrier is even needed. The tests below | 145 // First, check if a remembered set write is even needed. The tests below |
114 // catch stores of Smis and stores into young gen. | 146 // catch stores of Smis and stores into young gen (which does not have space |
| 147 // for the remembered set bits). |
115 Label done; | 148 Label done; |
116 | 149 |
117 // Skip barrier if writing a smi. | 150 // Skip barrier if writing a smi. |
118 ASSERT_EQ(0, kSmiTag); | 151 ASSERT_EQ(0, kSmiTag); |
119 test(value, Immediate(kSmiTagMask)); | 152 test(value, Immediate(kSmiTagMask)); |
120 j(zero, &done); | 153 j(zero, &done); |
121 | 154 |
122 InNewSpace(object, value, equal, &done); | 155 InNewSpace(object, value, equal, &done); |
123 | 156 |
124 // The offset is relative to a tagged or untagged HeapObject pointer, | 157 // The offset is relative to a tagged or untagged HeapObject pointer, |
125 // so either offset or offset + kHeapObjectTag must be a | 158 // so either offset or offset + kHeapObjectTag must be a |
126 // multiple of kPointerSize. | 159 // multiple of kPointerSize. |
127 ASSERT(IsAligned(offset, kPointerSize) || | 160 ASSERT(IsAligned(offset, kPointerSize) || |
128 IsAligned(offset + kHeapObjectTag, kPointerSize)); | 161 IsAligned(offset + kHeapObjectTag, kPointerSize)); |
129 | 162 |
130 Register dst = scratch; | 163 // We use optimized write barrier code if the word being written to is not in |
131 if (offset != 0) { | 164 // a large object chunk or is in the first page of a large object chunk. |
132 lea(dst, Operand(object, offset)); | 165 // We make sure that an offset is inside the right limits whether it is |
| 166 // tagged or untagged. |
| 167 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) { |
| 168 // Compute the bit offset in the remembered set, leave it in 'value'. |
| 169 lea(value, Operand(object, offset)); |
| 170 and_(value, Page::kPageAlignmentMask); |
| 171 shr(value, kPointerSizeLog2); |
| 172 |
| 173 // Compute the page address from the heap object pointer, leave it in |
| 174 // 'object'. |
| 175 and_(object, ~Page::kPageAlignmentMask); |
| 176 |
| 177 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction |
| 178 // to limit code size. We should probably evaluate this decision by |
| 179 // measuring the performance of an equivalent implementation using |
| 180 // "simpler" instructions |
| 181 bts(Operand(object, Page::kRSetOffset), value); |
133 } else { | 182 } else { |
134 // Array access: calculate the destination address in the same manner as | 183 Register dst = scratch; |
135 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset | 184 if (offset != 0) { |
136 // into an array of words. | 185 lea(dst, Operand(object, offset)); |
137 ASSERT_EQ(1, kSmiTagSize); | 186 } else { |
138 ASSERT_EQ(0, kSmiTag); | 187 // array access: calculate the destination address in the same manner as |
139 lea(dst, Operand(object, dst, times_half_pointer_size, | 188 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset |
140 FixedArray::kHeaderSize - kHeapObjectTag)); | 189 // into an array of words. |
| 190 ASSERT_EQ(1, kSmiTagSize); |
| 191 ASSERT_EQ(0, kSmiTag); |
| 192 lea(dst, Operand(object, dst, times_half_pointer_size, |
| 193 FixedArray::kHeaderSize - kHeapObjectTag)); |
| 194 } |
| 195 // If we are already generating a shared stub, not inlining the |
| 196 // record write code isn't going to save us any memory. |
| 197 if (generating_stub()) { |
| 198 RecordWriteHelper(object, dst, value); |
| 199 } else { |
| 200 RecordWriteStub stub(object, dst, value); |
| 201 CallStub(&stub); |
| 202 } |
141 } | 203 } |
142 RecordWriteHelper(object, dst, value); | |
143 | 204 |
144 bind(&done); | 205 bind(&done); |
145 | 206 |
146 // Clobber all input registers when running with the debug-code flag | 207 // Clobber all input registers when running with the debug-code flag |
147 // turned on to provoke errors. | 208 // turned on to provoke errors. |
148 if (FLAG_debug_code) { | 209 if (FLAG_debug_code) { |
149 mov(object, Immediate(BitCast<int32_t>(kZapValue))); | 210 mov(object, Immediate(BitCast<int32_t>(kZapValue))); |
150 mov(value, Immediate(BitCast<int32_t>(kZapValue))); | 211 mov(value, Immediate(BitCast<int32_t>(kZapValue))); |
151 mov(scratch, Immediate(BitCast<int32_t>(kZapValue))); | 212 mov(scratch, Immediate(BitCast<int32_t>(kZapValue))); |
152 } | 213 } |
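Taken together, the NEW side of RecordWrite makes three decisions before touching the remembered set. A hedged C++ sketch of that control flow (IsSmi, InNewSpace, SetRSetBit, RecordWriteSlow, and the constants are illustrative stand-ins for the generated code paths, not V8 APIs):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins, trivially stubbed so the sketch is self-contained.
bool IsSmi(uintptr_t value) { return (value & 1u) == 0; }      // kSmiTag == 0
bool InNewSpace(uintptr_t addr) { (void)addr; return false; }
void SetRSetBit(uintptr_t object, int offset) {
  std::printf("fast path: bts for %#lx+%d\n", (unsigned long)object, offset);
}
void RecordWriteSlow(uintptr_t object, int offset) {
  std::printf("slow path: helper/stub for %#lx+%d\n",
              (unsigned long)object, offset);
}

constexpr int kMaxHeapObjectSize = 8 * 1024 - 256;  // assumed per-page limit
constexpr int kHeapObjectTag = 1;

void RecordWriteSketch(uintptr_t object, int offset, uintptr_t value) {
  if (IsSmi(value)) return;                 // smis never need a barrier
  if (InNewSpace(object)) return;           // young-gen pages carry no RSet
  if (offset > 0 && offset < kMaxHeapObjectSize - kHeapObjectTag) {
    SetRSetBit(object, offset);             // slot statically in the normal RSet
  } else {
    RecordWriteSlow(object, offset);        // dynamic index or large-object slot
  }
}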
(...skipping 1163 matching lines...) |
1316 } | 1377 } |
1317 | 1378 |
1318 | 1379 |
1319 void MacroAssembler::InvokeFunction(Register fun, | 1380 void MacroAssembler::InvokeFunction(Register fun, |
1320 const ParameterCount& actual, | 1381 const ParameterCount& actual, |
1321 InvokeFlag flag) { | 1382 InvokeFlag flag) { |
1322 ASSERT(fun.is(edi)); | 1383 ASSERT(fun.is(edi)); |
1323 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); | 1384 mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); |
1324 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); | 1385 mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); |
1325 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); | 1386 mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); |
1326 SmiUntag(ebx); | |
1327 mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); | 1387 mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); |
1328 lea(edx, FieldOperand(edx, Code::kHeaderSize)); | 1388 lea(edx, FieldOperand(edx, Code::kHeaderSize)); |
1329 | 1389 |
1330 ParameterCount expected(ebx); | 1390 ParameterCount expected(ebx); |
1331 InvokeCode(Operand(edx), expected, actual, flag); | 1391 InvokeCode(Operand(edx), expected, actual, flag); |
1332 } | 1392 } |
1333 | 1393 |
1334 | 1394 |
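After the loads in InvokeFunction above, ebx carries the expected parameter count and edx the code entry; the invoke then dispatches on whether the expected count matches the actual one. A minimal sketch of that decision (CallDirect and AdaptArgumentsAndCall are illustrative names, not V8's InvokeCode machinery):

#include <cstdio>

// Illustrative stand-ins; V8's real path goes through InvokeCode/InvokePrologue.
void CallDirect(void* entry, int argc) {
  std::printf("direct call, %d args (%p)\n", argc, entry);
}
void AdaptArgumentsAndCall(void* entry, int expected, int actual) {
  std::printf("adaptor call, expected %d, actual %d (%p)\n",
              expected, actual, entry);
}

void InvokeSketch(void* code_entry, int expected_params, int actual_params) {
  if (expected_params == actual_params) {
    CallDirect(code_entry, actual_params);      // counts match: call directly
  } else {
    // Mismatch: pad or drop arguments so the callee sees 'expected_params'.
    AdaptArgumentsAndCall(code_entry, expected_params, actual_params);
  }
}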
1335 void MacroAssembler::InvokeFunction(JSFunction* function, | 1395 void MacroAssembler::InvokeFunction(JSFunction* function, |
1336 const ParameterCount& actual, | 1396 const ParameterCount& actual, |
(...skipping 306 matching lines...) |
1643 | 1703 |
1644 // Check that the code was patched as expected. | 1704 // Check that the code was patched as expected. |
1645 ASSERT(masm_.pc_ == address_ + size_); | 1705 ASSERT(masm_.pc_ == address_ + size_); |
1646 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 1706 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
1647 } | 1707 } |
1648 | 1708 |
1649 | 1709 |
1650 } } // namespace v8::internal | 1710 } } // namespace v8::internal |
1651 | 1711 |
1652 #endif // V8_TARGET_ARCH_IA32 | 1712 #endif // V8_TARGET_ARCH_IA32 |