Chromium Code Reviews

Diff: src/ia32/macro-assembler-ia32.cc

Issue 7945009: Merge experimental/gc branch to the bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 3 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 35 matching lines...)
       generating_stub_(false),
       allow_stub_calls_(true),
       has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
   }
 }


-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
-  }
-
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, ~Page::kPageAlignmentMask);
-
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shr(addr, Page::kRegionSizeLog2);
-  and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2);
-
-  // Set dirty mark for region.
-  // Bit tests with a memory operand should be avoided on Intel processors,
-  // as they usually have long latency and multiple uops. We load the bit base
-  // operand to a register at first and store it back after bit set.
-  mov(scratch, Operand(object, Page::kDirtyFlagOffset));
-  bts(Operand(scratch), addr);
-  mov(Operand(object, Page::kDirtyFlagOffset), scratch);
-}
+void MacroAssembler::InNewSpace(
+    Register object,
+    Register scratch,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, Operand(object));
+  }
+  // Check that we can use a test_b.
+  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
+  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
+           | (1 << MemoryChunk::IN_TO_SPACE);
+  // If non-zero, the page belongs to new-space.
+  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+         static_cast<uint8_t>(mask));
+  j(cc, condition_met, condition_met_distance);
+}


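Both the new InNewSpace and the CheckPageFlag helper added near the end of this file rely on the same trick: heap pages are aligned, so masking the low bits off any pointer into a page yields the page start, where a flags word can be read. A minimal standalone sketch of that idea follows; the page size, flag-bit positions, and header layout are illustrative assumptions, not values taken from this patch.

    #include <cstdint>

    // Illustrative constants; the real Page::kPageAlignmentMask and
    // MemoryChunk flag indices are defined elsewhere in V8.
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;
    const uintptr_t kInFromSpaceBit = uintptr_t(1) << 3;
    const uintptr_t kInToSpaceBit = uintptr_t(1) << 4;

    struct PageHeader {
      uintptr_t flags;  // assumed: flags word sits at the start of each page
    };

    inline bool InNewSpace(const void* object) {
      // Clearing the low bits of an interior pointer yields the page start.
      uintptr_t page =
          reinterpret_cast<uintptr_t>(object) & ~kPageAlignmentMask;
      const PageHeader* header = reinterpret_cast<const PageHeader*>(page);
      // A page is in new space iff it is tagged as from-space or to-space.
      return (header->flags & (kInFromSpaceBit | kInToSpaceBit)) != 0;
    }
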
+void MacroAssembler::RememberedSetHelper(
+    Register addr,
+    Register scratch,
+    SaveFPRegsMode save_fp,
+    MacroAssembler::RememberedSetFinalAction and_then) {
+  Label done;
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(addr, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(scratch, Operand::StaticVariable(store_buffer));
+  // Store pointer to buffer.
+  mov(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  add(Operand(scratch), Immediate(kPointerSize));
+  // Write back new top of buffer.
+  mov(Operand::StaticVariable(store_buffer), scratch);
+  // Call stub on end of buffer.
+  // Check for end of buffer.
+  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
+}


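In C++ terms, RememberedSetHelper is a bump-pointer append into the store buffer followed by an overflow check on the new top. A rough sketch under stated assumptions (the buffer layout, the overflow-bit trick, and the slow-path callback are illustrative, not the real StoreBuffer API):

    #include <cstdint>

    struct StoreBuffer {
      void** top;              // next free slot in the buffer
      uintptr_t overflow_bit;  // assumed: set in 'top' once the end is crossed
      void (*handle_overflow)(StoreBuffer*);  // stands in for StoreBufferOverflowStub
    };

    inline void RememberSlot(StoreBuffer* sb, void** slot_address) {
      *sb->top = slot_address;  // record the address of the written slot
      ++sb->top;                // bump the buffer top
      // The buffer is assumed sized and placed so that crossing its end sets a
      // known address bit, mirroring StoreBuffer::kStoreBufferOverflowBit.
      if (reinterpret_cast<uintptr_t>(sb->top) & sb->overflow_bit) {
        sb->handle_overflow(sb);
      }
    }
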
 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                         XMMRegister scratch_reg,
                                         Register result_reg) {
   Label done;
   ExternalReference zero_ref = ExternalReference::address_of_zero();
   movdbl(scratch_reg, Operand::StaticVariable(zero_ref));
   Set(result_reg, Immediate(0));
   ucomisd(input_reg, scratch_reg);
   j(below, &done, Label::kNear);
   ExternalReference half_ref = ExternalReference::address_of_one_half();
(...skipping 10 matching lines...)
 void MacroAssembler::ClampUint8(Register reg) {
   Label done;
   test(reg, Immediate(0xFFFFFF00));
   j(zero, &done, Label::kNear);
   setcc(negative, reg);  // 1 if negative, 0 if positive.
   dec_b(reg);  // 0 if negative, 255 if positive.
   bind(&done);
 }


-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch,
-                                Label::Distance branch_near) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(scratch, Operand(object));
-    // The mask isn't really an address. We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(scratch),
-         Immediate(ExternalReference::new_space_mask(isolate())));
-    cmp(Operand(scratch),
-        Immediate(ExternalReference::new_space_start(isolate())));
-    j(cc, branch, branch_near);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start(isolate()).address());
-    lea(scratch, Operand(object, -new_space_start));
-    and_(scratch, isolate()->heap()->NewSpaceMask());
-    j(cc, branch, branch_near);
-  }
-}
+void MacroAssembler::RecordWriteArray(Register object,
+                                      Register value,
+                                      Register index,
+                                      SaveFPRegsMode save_fp,
+                                      RememberedSetAction remembered_set_action,
+                                      SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    test(value, Immediate(kSmiTagMask));
+    j(zero, &done);
+  }
+
+  // Array access: calculate the destination address in the same manner as
+  // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+  // into an array of words.
+  Register dst = index;
+  lea(dst, Operand(object, index, times_half_pointer_size,
+                   FixedArray::kHeaderSize - kHeapObjectTag));
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+  }
+}


-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register scratch) {
-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
-  Label done;
-
-  // Skip barrier if writing a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfSmi(value, &done, Label::kNear);
-
-  InNewSpace(object, value, equal, &done, Label::kNear);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = scratch;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // Array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
-    // into an array of words.
-    STATIC_ASSERT(kSmiTagSize == 1);
-    STATIC_ASSERT(kSmiTag == 0);
-    lea(dst, Operand(object, dst, times_half_pointer_size,
-                     FixedArray::kHeaderSize - kHeapObjectTag));
-  }
-  RecordWriteHelper(object, dst, value);
-
-  bind(&done);
-
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (emit_debug_code()) {
-    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
-  }
-}
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done, Label::kNear);
+  }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    test_b(Operand(dst), (1 << kPointerSizeLog2) - 1);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+  }
+}


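The lea(dst, FieldOperand(object, offset)) plus the debug-only test_b above amount to untagging the object pointer and checking that the resulting slot address is pointer aligned. A small sketch of that arithmetic; the tag value and pointer size are stated assumptions for ia32:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kHeapObjectTag = 1;  // assumed heap-object tag
    const int kPointerSizeLog2 = 2;      // 4-byte pointers on ia32

    inline void* FieldAddress(uintptr_t tagged_object, int offset) {
      // FieldOperand(object, offset) behaves like Operand(object, offset - tag).
      uintptr_t slot = tagged_object + offset - kHeapObjectTag;
      // Mirrors the debug check: the low bits of the slot address must be zero.
      assert((slot & ((uintptr_t(1) << kPointerSizeLog2) - 1)) == 0);
      return reinterpret_cast<void*>(slot);
    }
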
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmp(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;

-  // Skip barrier if writing a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfSmi(value, &done, Label::kNear);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip barrier if writing a smi.
+    JumpIfSmi(value, &done, Label::kNear);
+  }

-  InNewSpace(object, value, equal, &done);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);

-  RecordWriteHelper(object, address, value);
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);

   bind(&done);

-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(address, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }


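Taken together, the rewritten RecordWrite is a filter in front of a slow-path stub: smi values never need a barrier, and the stub is only reached when the value's page is marked "pointers to here are interesting" and the host object's page is marked "pointers from here are interesting". A compact sketch of that control flow, with the same illustrative page-layout assumptions as the earlier sketch (flag bits and smi tagging are assumptions, and the slow-path callback stands in for RecordWriteStub):

    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;        // assumed page size
    const uintptr_t kPointersToHereAreInteresting = uintptr_t(1) << 0;    // assumed flag bit
    const uintptr_t kPointersFromHereAreInteresting = uintptr_t(1) << 1;  // assumed flag bit

    // Assumed layout: a flags word at the start of each page.
    inline uintptr_t PageFlagsOf(const void* p) {
      uintptr_t page = reinterpret_cast<uintptr_t>(p) & ~kPageAlignmentMask;
      return *reinterpret_cast<const uintptr_t*>(page);
    }

    inline bool IsSmi(uintptr_t value) { return (value & 1) == 0; }  // smi-tag assumption

    inline void RecordWriteFilter(void* object, void** slot, uintptr_t value,
                                  void (*slow_path)(void*, void**, uintptr_t)) {
      if (IsSmi(value)) return;  // the INLINE_SMI_CHECK fast exit
      if ((PageFlagsOf(reinterpret_cast<void*>(value)) &
           kPointersToHereAreInteresting) == 0) return;
      if ((PageFlagsOf(object) & kPointersFromHereAreInteresting) == 0) return;
      slow_path(object, slot, value);  // corresponds to RecordWriteStub + CallStub
    }
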
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
   Set(eax, Immediate(0));
   mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
(...skipping 1221 matching lines...)

 void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
   CallRuntime(Runtime::FunctionForId(id), num_arguments);
 }


 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(eax, Immediate(function->nargs));
   mov(ebx, Immediate(ExternalReference(function, isolate())));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }


 MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
                                             int num_arguments) {
   return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
 }


(...skipping 824 matching lines...)

   call(Operand(function));
   if (OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
     add(Operand(esp), Immediate(num_arguments * kPointerSize));
   }
 }


+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}


 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
       masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }


 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
   CPU::FlushICache(address_, size_);

   // Check that the code was patched as expected.
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }


+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, Operand(object));
+  }
+  if (mask < (1 << kBitsPerByte)) {
+    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+           static_cast<uint8_t>(mask));
+  } else {
+    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}


+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black,
+                                 Label::Distance on_black_near) {
+  HasColor(object, scratch0, scratch1,
+           on_black, on_black_near,
+           1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}


+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              Label::Distance has_color_distance,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color, word_boundary;
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
+  add(mask_scratch, Operand(mask_scratch));  // Shift left 1 by adding.
+  j(zero, &word_boundary, Label::kNear);
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+  jmp(&other_color, Label::kNear);
+
+  bind(&word_boundary);
+  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+
+  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+  bind(&other_color);
+}


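JumpIfBlack and HasColor read a two-bit color per object from the mark bitmap; the asserted bit patterns are white = 00, black = 10, grey = 11. Ignoring the cell-boundary case that HasColor handles with its word_boundary path, the test reduces to the following sketch (the helpers are illustrative, not V8 API):

    #include <cstdint>

    // 'cell' is one 32-bit word of the mark bitmap; 'mask' selects the first
    // of the object's two color bits (as produced by GetMarkBits).
    inline bool IsBlack(uint32_t cell, uint32_t mask) {
      bool first = (cell & mask) != 0;          // set for black and grey
      bool second = (cell & (mask << 1)) != 0;  // set only for grey
      return first && !second;                  // "10" == black
    }

    inline bool IsWhite(uint32_t cell, uint32_t mask) {
      return (cell & mask) == 0;  // white is the only color whose first bit is 0
    }
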
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  and_(Operand(bitmap_reg), addr_reg);
+  mov(ecx, Operand(addr_reg));
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shr(ecx, shift);
+  and_(ecx,
+       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));

+  add(bitmap_reg, Operand(ecx));
+  mov(ecx, Operand(addr_reg));
+  shr(ecx, kPointerSizeLog2);
+  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
+  mov(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}


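The shift-and-mask sequence in GetMarkBits splits an address into a page base, a bitmap cell, and a bit within that cell: one mark bit per pointer-aligned word, 32 bits per cell. A sketch of the same arithmetic with assumed ia32 constants (the real page size and the bitmap's offset inside MemoryChunk are not restated here):

    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;  // assumed page size
    const int kPointerSizeLog2 = 2;  // 4-byte words on ia32
    const int kBitsPerCellLog2 = 5;  // 32 mark bits per bitmap cell

    struct MarkBitLocation {
      uintptr_t page;        // start of the page containing 'addr'
      uintptr_t cell_index;  // index of the 32-bit cell in the page's mark bitmap
      uint32_t mask;         // single bit within that cell
    };

    inline MarkBitLocation GetMarkBits(uintptr_t addr) {
      uintptr_t word_index = (addr & kPageAlignmentMask) >> kPointerSizeLog2;
      MarkBitLocation loc;
      loc.page = addr & ~kPageAlignmentMask;
      loc.cell_index = word_index >> kBitsPerCellLog2;
      loc.mask = uint32_t(1) << (word_index & ((1u << kBitsPerCellLog2) - 1));
      return loc;
    }
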
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);

+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

+  Label done;

+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(not_zero, &done, Label::kNear);

+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // shl. May overflow making the check conservative.
+    add(mask_scratch, Operand(mask_scratch));
+    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }

+  // Value is white. We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = ecx;  // Holds map while checking type.
+  Register length = ecx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;

+  // Check for heap-number
+  mov(map, FieldOperand(value, HeapObject::kMapOffset));
+  cmp(map, FACTORY->heap_number_map());
+  j(not_equal, &not_heap_number, Label::kNear);
+  mov(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);

+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = ecx;
+  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  test_b(Operand(instance_type), kIsIndirectStringMask | kIsNotStringMask);
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  test_b(Operand(instance_type), kExternalStringTag);
+  j(zero, &not_external, Label::kNear);
+  mov(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);

+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
+  and_(Operand(length), Immediate(kStringEncodingMask));
+  xor_(Operand(length), Immediate(kStringEncodingMask));
+  add(Operand(length), Immediate(0x04));
+  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+  // by 2. If we multiply the string length as smi by this, it still
+  // won't overflow a 32-bit value.
+  ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
+  ASSERT(SeqAsciiString::kMaxSize <=
+         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, 2 + kSmiTagSize);
+  add(Operand(length),
+      Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(Operand(length),
+       Immediate(~kObjectAlignmentMask));

+  bind(&is_data_object);
+  // Value is a data object, and it is white. Mark it black. Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
+      length);

+  bind(&done);
+}

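The mask/xor/imul/shift sequence at the end of EnsureNotWhite computes the byte size of a sequential string so it can be added to the page's live-bytes counter: header plus length times character width, rounded up to the allocation alignment. The equivalent arithmetic in plain C++, with the header size and alignment as stated assumptions for ia32:

    const int kSeqStringHeaderSize = 12;  // assumed SeqString::kHeaderSize
    const int kObjectAlignmentMask = 3;   // assumed 4-byte object alignment on ia32

    inline int SeqStringSize(int length, bool is_ascii) {
      int char_size = is_ascii ? 1 : 2;   // ASCII vs. two-byte (UC16) characters
      int size = kSeqStringHeaderSize + length * char_size;
      // Round up to the allocation alignment, as the add/and_ pair does above.
      return (size + kObjectAlignmentMask) & ~kObjectAlignmentMask;
    }
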
 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_IA32