Chromium Code Reviews

Unified Diff: src/ia32/macro-assembler-ia32.cc

Issue 2101002: Cardmarking writebarrier. (Closed)
Patch Set: fixed review comments (created 10 years, 7 months ago)
 // Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 40 matching lines...)
                                        Register addr,
                                        Register scratch) {
   if (FLAG_debug_code) {
     // Check that the object is not in new space.
     Label not_in_new_space;
     InNewSpace(object, scratch, not_equal, &not_in_new_space);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
   }

-  Label fast;
-
   // Compute the page start address from the heap object pointer, and reuse
   // the 'object' register for it.
   and_(object, ~Page::kPageAlignmentMask);
-  Register page_start = object;

-  // Compute the bit addr in the remembered set/index of the pointer in the
-  // page. Reuse 'addr' as pointer_offset.
-  sub(addr, Operand(page_start));
-  shr(addr, kObjectAlignmentBits);
-  Register pointer_offset = addr;
+  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
+  // method for more details.
+  and_(addr, Page::kPageAlignmentMask);
+  shr(addr, Page::kRegionSizeLog2);

-  // If the bit offset lies beyond the normal remembered set range, it is in
-  // the extra remembered set area of a large object.
-  cmp(pointer_offset, Page::kPageSize / kPointerSize);
-  j(less, &fast);
-
-  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
-  // extra remembered set after the large object.
-
-  // Find the length of the large object (FixedArray).
-  mov(scratch, Operand(page_start, Page::kObjectStartOffset
-                                   + FixedArray::kLengthOffset));
-  Register array_length = scratch;
-
-  // Extra remembered set starts right after the large object (a FixedArray), at
-  //   page_start + kObjectStartOffset + objectSize
-  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
-  // Add the delta between the end of the normal RSet and the start of the
-  // extra RSet to 'page_start', so that addressing the bit using
-  // 'pointer_offset' hits the extra RSet words.
-  lea(page_start,
-      Operand(page_start, array_length, times_pointer_size,
-              Page::kObjectStartOffset + FixedArray::kHeaderSize
-              - Page::kRSetEndOffset));
-
-  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
-  // to limit code size. We should probably evaluate this decision by
-  // measuring the performance of an equivalent implementation using
-  // "simpler" instructions
-  bind(&fast);
-  bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+  // Set dirty mark for region.
+  bts(Operand(object, Page::kDirtyFlagOffset), addr);
 }


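The new helper drops the remembered-set bookkeeping (including the extra remembered set for large objects) in favour of a single card-marking step: mask the object pointer down to its page start, turn the slot address into a region number, and set that region's bit in the page's dirty-flags word. A minimal C++ sketch of the same arithmetic follows; the constants are assumptions standing in for the real Page layout, not the values defined by the Page class.

    // Sketch only: mirrors the assembler sequence above in plain C++.
    // Constant values are illustrative, not the actual V8 page layout.
    #include <stdint.h>

    static const uintptr_t kPageAlignmentMask = (1 << 13) - 1;  // assume 8 KB pages
    static const int kRegionSizeLog2 = 8;                       // assume 256-byte regions
    static const int kDirtyFlagOffset = 0;                      // assume flags word at page start

    void RecordWriteHelperSketch(uintptr_t object, uintptr_t slot_addr) {
      // and_(object, ~Page::kPageAlignmentMask): page start of the object.
      uintptr_t page_start = object & ~kPageAlignmentMask;
      // and_ + shr: offset of the slot within its page, divided by the region size.
      uintptr_t region = (slot_addr & kPageAlignmentMask) >> kRegionSizeLog2;
      // bts(Operand(object, Page::kDirtyFlagOffset), addr): set the region's dirty bit.
      uint32_t* dirty_flags =
          reinterpret_cast<uint32_t*>(page_start + kDirtyFlagOffset);
      *dirty_flags |= 1u << region;
    }

Under these assumed sizes there are 32 regions per page, so all the dirty marks for a page fit in one 32-bit word and a single bts against the flags word is enough.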
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
                                 Label* branch) {
   ASSERT(cc == equal || cc == not_equal);
   if (Serializer::enabled()) {
     // Can't do arithmetic on external references if it might get serialized.
     mov(scratch, Operand(object));
     // The mask isn't really an address. We load it as an external reference in
     // case the size of the new space is different between the snapshot maker
     // and the running system.
     and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
     cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
     j(cc, branch);
   } else {
     int32_t new_space_start = reinterpret_cast<int32_t>(
         ExternalReference::new_space_start().address());
     lea(scratch, Operand(object, -new_space_start));
     and_(scratch, Heap::NewSpaceMask());
     j(cc, branch);
   }
 }


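InNewSpace itself is unchanged, but the trick in the non-serializer branch is worth spelling out: because new space occupies one aligned, power-of-two-sized address range, membership can be tested with a subtract and a mask, and a zero result means the address lies inside new space (which is why the callers pass equal to mean "in new space"). A small sketch of that test, with the start address and mask as illustrative parameters rather than the real heap configuration:

    // Sketch only: the subtract-and-mask range test used by the else branch above.
    #include <stdint.h>

    bool InNewSpaceSketch(uintptr_t addr,
                          uintptr_t new_space_start,   // assumed aligned base of new space
                          uintptr_t new_space_mask) {  // assumed ~(new space size - 1)
      // lea(scratch, Operand(object, -new_space_start)); and_(scratch, mask);
      // The result is zero exactly when addr lies inside the new-space range.
      return ((addr - new_space_start) & new_space_mask) == 0;
    }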
-// Set the remembered set bit for [object+offset].
+// For page containing |object| mark region covering [object+offset] dirty.
 // object is the object being stored into, value is the object being stored.
 // If offset is zero, then the scratch register contains the array index into
 // the elements array represented as a Smi.
 // All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object, int offset,
                                  Register value, Register scratch) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are esi.
   ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));

-  // First, check if a remembered set write is even needed. The tests below
-  // catch stores of Smis and stores into young gen (which does not have space
-  // for the remembered set bits).
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
   Label done;

   // Skip barrier if writing a smi.
   ASSERT_EQ(0, kSmiTag);
   test(value, Immediate(kSmiTagMask));
   j(zero, &done);

   InNewSpace(object, value, equal, &done);

   // The offset is relative to a tagged or untagged HeapObject pointer,
   // so either offset or offset + kHeapObjectTag must be a
   // multiple of kPointerSize.
   ASSERT(IsAligned(offset, kPointerSize) ||
          IsAligned(offset + kHeapObjectTag, kPointerSize));

-  // We use optimized write barrier code if the word being written to is not in
-  // a large object chunk or is in the first page of a large object chunk.
-  // We make sure that an offset is inside the right limits whether it is
-  // tagged or untagged.
-  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
-    // Compute the bit offset in the remembered set, leave it in 'value'.
-    lea(value, Operand(object, offset));
-    and_(value, Page::kPageAlignmentMask);
-    shr(value, kPointerSizeLog2);
-
-    // Compute the page address from the heap object pointer, leave it in
-    // 'object'.
-    and_(object, ~Page::kPageAlignmentMask);
-
-    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
-    // to limit code size. We should probably evaluate this decision by
-    // measuring the performance of an equivalent implementation using
-    // "simpler" instructions
-    bts(Operand(object, Page::kRSetOffset), value);
+  Register dst = scratch;
+  if (offset != 0) {
+    lea(dst, Operand(object, offset));
   } else {
-    Register dst = scratch;
-    if (offset != 0) {
-      lea(dst, Operand(object, offset));
-    } else {
-      // array access: calculate the destination address in the same manner as
-      // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
-      // into an array of words.
-      ASSERT_EQ(1, kSmiTagSize);
-      ASSERT_EQ(0, kSmiTag);
-      lea(dst, Operand(object, dst, times_half_pointer_size,
-                       FixedArray::kHeaderSize - kHeapObjectTag));
-    }
-    // If we are already generating a shared stub, not inlining the
-    // record write code isn't going to save us any memory.
-    if (generating_stub()) {
-      RecordWriteHelper(object, dst, value);
-    } else {
-      RecordWriteStub stub(object, dst, value);
-      CallStub(&stub);
-    }
+    // Array access: calculate the destination address in the same manner as
+    // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+    // into an array of words.
+    ASSERT_EQ(1, kSmiTagSize);
+    ASSERT_EQ(0, kSmiTag);
+    lea(dst, Operand(object, dst, times_half_pointer_size,
+                     FixedArray::kHeaderSize - kHeapObjectTag));
   }
+  RecordWriteHelper(object, dst, value);

   bind(&done);

   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (FLAG_debug_code) {
     mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
     mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
   }
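Two things change in RecordWrite. First, the old fast path that set a remembered-set bit inline (and the fallback through RecordWriteStub) is gone: the code now always computes the destination slot address into scratch and calls RecordWriteHelper directly. Second, the array-access path relies on the 32-bit smi encoding asserted in the code (kSmiTag == 0, kSmiTagSize == 1): a smi holding index i is the integer i << 1, so scaling it with times_half_pointer_size (a factor of 2 on ia32) yields i * kPointerSize. A hedged sketch of that address computation; the FixedArray header size below is an assumption written out for a 32-bit heap, not taken from the V8 headers.

    // Sketch only: the slot address for the offset == 0 (keyed store) case above.
    #include <stdint.h>

    static const int kHeapObjectTag = 1;         // tagged heap pointers have the low bit set
    static const int kFixedArrayHeaderSize = 8;  // assumed header size (map + length) on ia32

    uintptr_t ElementSlotSketch(uintptr_t tagged_elements, int32_t smi_index) {
      // lea(dst, Operand(object, dst, times_half_pointer_size,
      //                  FixedArray::kHeaderSize - kHeapObjectTag));
      // smi_index is index << 1, so smi_index * 2 == index * 4 == index * kPointerSize.
      return tagged_elements + smi_index * 2 + kFixedArrayHeaderSize - kHeapObjectTag;
    }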
(...skipping 1163 matching lines...)
 }


 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
                                     InvokeFlag flag) {
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  SmiUntag(ebx);
   mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
   lea(edx, FieldOperand(edx, Code::kHeaderSize));

   ParameterCount expected(ebx);
   InvokeCode(Operand(edx), expected, actual, flag);
 }
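The only change in this function is the added SmiUntag(ebx): the formal parameter count loaded from the SharedFunctionInfo is treated as a smi here and is untagged before being used as the expected parameter count. With the 32-bit smi encoding noted above (kSmiTag == 0, kSmiTagSize == 1), untagging is just an arithmetic shift right by one; a one-line illustration:

    // Sketch only: on a 32-bit heap a smi stores value << 1 (kSmiTag == 0, kSmiTagSize == 1),
    // so untagging is an arithmetic shift right by one.
    #include <stdint.h>
    static inline int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }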


 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
(...skipping 304 matching lines...)
   // Indicate that code has changed.
   CPU::FlushICache(address_, size_);

   // Check that the code was patched as expected.
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }


 } }  // namespace v8::internal