Chromium Code Reviews

Unified diff: src/x64/macro-assembler-x64.cc

Issue 7945009: Merge experimental/gc branch to the bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 3 months ago
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 179 matching lines...)

void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  ASSERT(root_array_available_);
  ASSERT(!with.AddressUsesRegister(kScratchRegister));
  LoadRoot(kScratchRegister, index);
  cmpq(with, kScratchRegister);
}

-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
-  }
-
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, Immediate(~Page::kPageAlignmentMask));
-
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shrl(addr, Immediate(Page::kRegionSizeLog2));
-  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
-  // Set dirty mark for region.
-  bts(Operand(object, Page::kDirtyFlagOffset), addr);
-}
+void MacroAssembler::RememberedSetHelper(Register addr,
+                                         Register scratch,
+                                         SaveFPRegsMode save_fp,
+                                         RememberedSetFinalAction and_then) {
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(addr, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+  // Load store buffer top.
+  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Store pointer to buffer.
+  movq(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  addq(scratch, Immediate(kPointerSize));
+  // Write back new top of buffer.
+  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Call stub on end of buffer.
+  Label done;
+  // Check for end of buffer.
+  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
+}

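The old RecordWriteHelper marked a dirty region on the page; the new RememberedSetHelper instead appends the slot address to the store buffer and calls a stub when the buffer is about to overflow. A minimal C++ sketch of that append logic follows; everything except the names visible in the diff (kStoreBufferOverflowBit, the overflow stub call) is a hypothetical stand-in.

    #include <cstdint>

    typedef uintptr_t Address;

    // Hypothetical stand-in for StoreBufferOverflowStub.
    void HandleStoreBufferOverflow() {}

    // Sketch of RememberedSetHelper: append 'slot' at the buffer top, bump
    // the top, and drain when the top address reaches the overflow bit.
    void RememberedSetInsert(Address** top, Address* slot,
                             uintptr_t overflow_bit) {
      **top = reinterpret_cast<Address>(slot);  // movq(Operand(scratch, 0), addr)
      *top += 1;                                // addq(scratch, kPointerSize)
      if (reinterpret_cast<uintptr_t>(*top) & overflow_bit) {
        HandleStoreBufferOverflow();            // CallStub(&store_buffer_overflow)
      }
    }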
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
                                 Label* branch,
-                                Label::Distance near_jump) {
+                                Label::Distance distance) {
   if (Serializer::enabled()) {
     // Can't do arithmetic on external references if it might get serialized.
     // The mask isn't really an address. We load it as an external reference in
     // case the size of the new space is different between the snapshot maker
     // and the running system.
     if (scratch.is(object)) {
       movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
       and_(scratch, kScratchRegister);
     } else {
       movq(scratch, ExternalReference::new_space_mask(isolate()));
       and_(scratch, object);
     }
     movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
     cmpq(scratch, kScratchRegister);
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   } else {
     ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
     movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
     if (scratch.is(object)) {
       addq(scratch, kScratchRegister);
     } else {
       lea(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
     and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   }
 }

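Both paths reduce the new-space check to a subtraction and a mask: after adding -new_space_start, the and_ leaves zero exactly when the address lies inside new space, and the conditional jump tests that flag. As plain C++ (constants are illustrative, not V8's real values):

    #include <cstdint>

    // InNewSpace in C++: the masked difference is zero iff 'addr' lies in
    // the new-space region starting at 'start' (start is mask-aligned).
    bool InNewSpace(uintptr_t addr, uintptr_t start, uintptr_t mask) {
      return ((addr - start) & mask) == 0;
    }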
-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register index) {
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
+  ASSERT(!value.is(rsi) && !dst.is(rsi));

   // First, check if a write barrier is even needed. The tests below
-  // catch stores of smis and stores into the young generation.
+  // catch stores of Smis.
   Label done;
-  JumpIfSmi(value, &done);

-  RecordWriteNonSmi(object, offset, value, index);
+  // Skip the barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
   bind(&done);

-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors. This clobbering repeats the
-  // clobbering done inside RecordWriteNonSmi but it's necessary to
-  // avoid having the fast case for smis leave the registers
-  // unchanged.
+  // Clobber the clobbered input registers when running with the debug-code
+  // flag turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }

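A hedged example of how a call site might use the new interface: the caller stores into the field and then hands RecordWriteField the object, the field offset, the stored value, and a scratch register that receives the slot address. The receiver and offset below are purely illustrative, and EMIT_REMEMBERED_SET is assumed to be the counterpart of the OMIT_REMEMBERED_SET value visible above.

    // Store a pointer into a tagged field, then emit the barrier.
    movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
    RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx,
                     kSaveFPRegs, EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);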
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+  ASSERT(!value.is(rsi) && !address.is(rsi));
+
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmpq(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }

   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
-  JumpIfSmi(value, &done);

-  InNewSpace(object, value, equal, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip the barrier if writing a smi.
+    JumpIfSmi(value, &done);
+  }

-  RecordWriteHelper(object, address, value);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);

   bind(&done);

-  // Clobber all input registers when running with the debug-code flag
+  // Clobber the clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }

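The rewritten barrier filters with two page-header flags before calling the stub: it bails out early unless the value's page is interesting as a pointer target and the object's page is interesting as a pointer source. A simplified C++ rendering, where the page-header layout and mask width are assumptions for illustration:

    #include <cstdint>

    struct PageHeader { uintptr_t flags; };  // simplified MemoryChunk header

    const uintptr_t kPageAlignmentMask = (1 << 20) - 1;  // illustrative size

    PageHeader* PageOf(uintptr_t addr) {
      return reinterpret_cast<PageHeader*>(addr & ~kPageAlignmentMask);
    }

    // The stub runs only if neither CheckPageFlag test jumps to 'done'.
    bool NeedsBarrier(uintptr_t object, uintptr_t value,
                      uintptr_t to_here_mask, uintptr_t from_here_mask) {
      return (PageOf(value)->flags & to_here_mask) != 0 &&
             (PageOf(object)->flags & from_here_mask) != 0;
    }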
-void MacroAssembler::RecordWriteNonSmi(Register object,
-                                       int offset,
-                                       Register scratch,
-                                       Register index) {
-  Label done;
-
-  if (emit_debug_code()) {
-    Label okay;
-    JumpIfNotSmi(object, &okay, Label::kNear);
-    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
-    bind(&okay);
-
-    if (offset == 0) {
-      // index must be int32.
-      Register tmp = index.is(rax) ? rbx : rax;
-      push(tmp);
-      movl(tmp, index);
-      cmpq(tmp, index);
-      Check(equal, "Index register for RecordWrite must be untagged int32.");
-      pop(tmp);
-    }
-  }
-
-  // Test that the object address is not in the new space. We cannot
-  // update page dirty marks for new space pages.
-  InNewSpace(object, scratch, equal, &done);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = index;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.
-    lea(dst, FieldOperand(object,
-                          index,
-                          times_pointer_size,
-                          FixedArray::kHeaderSize));
-  }
-  RecordWriteHelper(object, dst, scratch);
-
-  bind(&done);
-
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-  }
-}
-
void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (emit_debug_code()) Check(cc, msg);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
(...skipping 157 matching lines...)

void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(id), num_arguments);
}


void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  Set(rax, function->nargs);
  LoadAddress(rbx, ExternalReference(function, isolate()));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
  CallStub(&ces);
}


MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
                                            int num_arguments) {
  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
}

(...skipping 263 matching lines...)


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(rdi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(rdi, id);
  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}


+static const Register saved_regs[] =
+    { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+                                     Register exclusion1,
+                                     Register exclusion2,
+                                     Register exclusion3) {
+  // We don't allow a GC during a store buffer overflow, so there is no need
+  // to store the registers in any particular way, but we do have to store
+  // and restore them.
+  for (int i = 0; i < kNumberOfSavedRegs; i++) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      push(reg);
+    }
+  }
+  // r12 to r15 are callee-save on all platforms.
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(Operand(rsp, i * kDoubleSize), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+                                    Register exclusion1,
+                                    Register exclusion2,
+                                    Register exclusion3) {
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(reg, Operand(rsp, i * kDoubleSize));
+    }
+    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+  }
+  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      pop(reg);
+    }
+  }
+}

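These two helpers must be used as a matched pair with the same fp_mode and the same exclusions, since PopCallerSaved pops in exactly the reverse order of the pushes. A hedged usage sketch (the stub name is illustrative):

    // Preserve caller-saved registers, and all XMM registers, around a
    // call, keeping rax (e.g. a result register) untouched.
    PushCallerSaved(kSaveFPRegs, rax);
    CallStub(&some_stub);  // 'some_stub' stands in for any clobbering call.
    PopCallerSaved(kSaveFPRegs, rax);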

void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(dst, x, RelocInfo::NONE);
  }
(...skipping 3040 matching lines...)

  call(function);
  ASSERT(OS::ActivationFrameAlignment() != 0);
  ASSERT(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
}

+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}

CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(Isolate::Current(), address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // 'size' bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

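A hedged usage sketch for CodePatcher: patch in place, and let the destructor flush the instruction cache and verify that exactly 'size' bytes were emitted. The masm() accessor is an assumption based on how the class is used elsewhere in V8.

    // Overwrite one byte at 'address' with a breakpoint instruction.
    {
      CodePatcher patcher(address, 1);
      patcher.masm()->int3();
    }  // ~CodePatcher flushes the icache and checks the patched size.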

+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    movq(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
+  }
+  if (mask < (1 << kBitsPerByte)) {
+    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+          Immediate(static_cast<uint8_t>(mask)));
+  } else {
+    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}

+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register bitmap_scratch,
+                                 Register mask_scratch,
+                                 Label* on_black,
+                                 Label::Distance on_black_distance) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  // The mask_scratch register contains a 1 at the position of the first bit
+  // and a 0 at all other positions, including the position of the second bit.
+  movq(rcx, mask_scratch);
+  // Make rcx into a mask that covers both marking bits using the operation
+  // rcx = mask | (mask << 1).
+  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+  // Note that we are using a 4-byte aligned 8-byte load.
+  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  cmpq(mask_scratch, rcx);
+  j(equal, on_black, on_black_distance);
+}

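The lea with times_2 computes mask + 2 * mask; since the mask has exactly one bit set, that equals mask | (mask << 1), covering both mark bits in one operation. The black test in C++:

    #include <cstdint>

    // Black is the bit pattern "10": first mark bit set, second clear.
    // Masking the bitmap cell with both bits and comparing against
    // 'mask' is true exactly in that case.
    bool IsBlack(uint64_t cell, uint64_t mask) {
      uint64_t both_bits = mask | (mask << 1);  // lea(rcx, [mask + mask * 2])
      return (cell & both_bits) == mask;
    }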
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+    Register value,
+    Register scratch,
+    Label* not_data_object,
+    Label::Distance not_data_object_distance) {
+  Label is_data_object;
+  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  j(equal, &is_data_object, Label::kNear);
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+        Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, not_data_object, not_data_object_distance);
+  bind(&is_data_object);
+}

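The instance-type test relies on the bit layout asserted above: strings have the 0x80 bit clear, and direct (non-cons, non-slice) strings also have the 0x01 bit clear. The whole classification in C++:

    // Data objects (no GC pointers) recognized here: heap numbers and
    // direct strings. Mask values mirror the ASSERTs in the diff.
    const int kIsIndirectStringMask = 0x01;
    const int kIsNotStringMask = 0x80;

    bool IsDataObject(bool has_heap_number_map, int instance_type) {
      if (has_heap_number_map) return true;
      return (instance_type & (kIsIndirectStringMask | kIsNotStringMask)) == 0;
    }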
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+  movq(bitmap_reg, addr_reg);
+  // Sign extended 32 bit immediate.
+  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  movq(rcx, addr_reg);
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shrl(rcx, Immediate(shift));
+  and_(rcx,
+       Immediate((Page::kPageAlignmentMask >> shift) &
+                 ~(Bitmap::kBytesPerCell - 1)));
+
+  addq(bitmap_reg, rcx);
+  movq(rcx, addr_reg);
+  shrl(rcx, Immediate(kPointerSizeLog2));
+  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+  movl(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}

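GetMarkBits turns a heap address into a bitmap cell address plus a single-bit mask: one mark bit per pointer-sized word, with the bits grouped into cells stored at MemoryChunk::kHeaderSize past the page start. Equivalent index arithmetic in C++ (the page size, header offset, and cell width are illustrative assumptions):

    #include <cstdint>

    const int kPointerSizeLog2 = 3;                      // x64 word size
    const int kBitsPerCellLog2 = 5;                      // 32 bits per cell
    const uintptr_t kPageAlignmentMask = (1 << 20) - 1;  // illustrative
    const uintptr_t kHeaderSize = 64;                    // illustrative

    void GetMarkBits(uintptr_t addr, uint32_t** cell, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;
      uintptr_t word_index = (addr & kPageAlignmentMask) >> kPointerSizeLog2;
      *cell = reinterpret_cast<uint32_t*>(page + kHeaderSize)
              + (word_index >> kBitsPerCellLog2);
      *mask = 1u << (word_index & ((1u << kBitsPerCellLog2) - 1));
    }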
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+  j(not_zero, &done, Label::kNear);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // shl. May overflow, making the check conservative.
+    addq(mask_scratch, mask_scratch);
+    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }
+
+  // Value is white. We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = rcx;  // Holds map while checking type.
+  Register length = rcx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for heap-number.
+  movq(map, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  j(not_equal, &not_heap_number, Label::kNear);
+  movq(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = rcx;
+  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  testb(instance_type, Immediate(kExternalStringTag));
+  j(zero, &not_external, Label::kNear);
+  movq(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  addq(length, Immediate(0x04));
+  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, Immediate(2 + kSmiTagSize));
+  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, Immediate(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white. Mark it black. Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
+
+  bind(&done);
+}

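The sequential-string size computation uses the fact that kAsciiStringTag is 0x04: masking the instance type with kStringEncodingMask, xoring with the same mask, and adding 4 yields 4 for ASCII strings and 8 for two-byte strings, i.e. the character size shifted left by two, which the subsequent imul/shr folds into the byte length. The trick in isolation (assuming kStringEncodingMask equals kAsciiStringTag, per the ASSERT above):

    // Returns char size << 2: 4 for ASCII (one-byte), 8 for UC16 (two-byte).
    int CharSizeShiftedByTwo(int instance_type) {
      const int kStringEncodingMask = 0x04;  // assumed equal to kAsciiStringTag
      return ((instance_type & kStringEncodingMask) ^ kStringEncodingMask) + 4;
    }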
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64