Chromium Code Reviews

Side by Side Diff: src/x64/macro-assembler-x64.cc

Issue 7104107: Incremental mode now works for x64. The only difference (Closed)
Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 6 months ago
1 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 2 // Redistribution and use in source and binary forms, with or without
3 3 // modification, are permitted provided that the following conditions are
4 4 // met:
5 5 //
6 6 // * Redistributions of source code must retain the above copyright
7 7 // notice, this list of conditions and the following disclaimer.
8 8 // * Redistributions in binary form must reproduce the above
9 9 // copyright notice, this list of conditions and the following
10 10 // disclaimer in the documentation and/or other materials provided
(...skipping 182 matching lines...)
193 193 ASSERT(!with.AddressUsesRegister(kScratchRegister));
194 194 LoadRoot(kScratchRegister, index);
195 195 cmpq(with, kScratchRegister);
196 196 }
197 197
198 198
199 199 void MacroAssembler::RememberedSetHelper(Register addr,
200 200 Register scratch,
201 201 SaveFPRegsMode save_fp,
202 202 RememberedSetFinalAction and_then) {
203 if (FLAG_debug_code) {
204 Label ok;
205 JumpIfNotInNewSpace(addr, scratch, &ok, Label::kNear);
206 int3();
207 bind(&ok);
208 }
203 209 // Load store buffer top.
204 210 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
205 211 // Store pointer to buffer.
206 212 movq(Operand(scratch, 0), addr);
207 213 // Increment buffer top.
208 214 addq(scratch, Immediate(kPointerSize));
209 215 // Write back new top of buffer.
210 216 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
211 217 // Call stub on end of buffer.
212 218 Label done;
(...skipping 17 matching lines...)
230 236 ASSERT(and_then == kFallThroughAtEnd);
231 237 bind(&done);
232 238 }
233 239 }
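The helper appends the address of the written-to slot to the store buffer: load the current top from the roots, store the address through it, bump the top by one pointer, and write it back; when the buffer fills, control reaches a stub (the overflow test itself is in the elided lines). A minimal C++ sketch of the same append protocol; StoreBuffer, kCapacity, and the callback are illustrative names, not the actual V8 declarations, and the real overflow test differs from this explicit capacity check:

    // Sketch only: append one slot address, spilling to a slow path when full.
    struct StoreBuffer {
      static const int kCapacity = 1024;   // hypothetical size
      void** top;                          // next free slot (the root above)
      void* slots[kCapacity];

      void Append(void* slot_address, void (*overflow_stub)(StoreBuffer*)) {
        *top = slot_address;               // store pointer to buffer
        top++;                             // increment buffer top
        if (top == slots + kCapacity) {    // end of buffer reached?
          overflow_stub(this);             // call stub on end of buffer
        }
      }
    };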
234 240
235 241
236 242 void MacroAssembler::InNewSpace(Register object,
237 243 Register scratch,
238 244 Condition cc,
239 245 Label* branch,
240 Label::Distance near_jump) { 246 Label::Distance distance) {
241 247 if (Serializer::enabled()) {
242 248 // Can't do arithmetic on external references if it might get serialized.
243 249 // The mask isn't really an address. We load it as an external reference in
244 250 // case the size of the new space is different between the snapshot maker
245 251 // and the running system.
246 252 if (scratch.is(object)) {
247 253 movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
248 254 and_(scratch, kScratchRegister);
249 255 } else {
250 256 movq(scratch, ExternalReference::new_space_mask(isolate()));
251 257 and_(scratch, object);
252 258 }
253 259 movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
254 260 cmpq(scratch, kScratchRegister);
255 j(cc, branch, near_jump); 261 j(cc, branch, distance);
256 262 } else {
257 263 ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
258 264 intptr_t new_space_start =
259 265 reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
260 266 movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
261 267 if (scratch.is(object)) {
262 268 addq(scratch, kScratchRegister);
263 269 } else {
264 270 lea(scratch, Operand(object, kScratchRegister, times_1, 0));
265 271 }
266 272 and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
267 j(cc, branch, near_jump); 273 j(cc, branch, distance);
268 274 }
269 275 }
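Both branches compute the same predicate; the serializer path just avoids baking the numeric new-space bounds into the snapshot by loading them as external references. New space is a single aligned region, so one mask test decides membership: with the mask covering the bits above the region, (addr & mask) == start is equivalent to ((addr - start) & mask) == 0, and the second form is what the add/lea plus and_ sequence computes (the and_ leaves the flags the final conditional jump consumes). A sketch, assuming the power-of-two size and alignment the mask trick requires:

    // Equivalent C++ for both code paths above (illustrative).
    bool InNewSpace(uintptr_t addr, uintptr_t start, uintptr_t mask) {
      // Serializer path:     (addr & mask) == start
      // Non-serializer path: ((addr - start) & mask) == 0
      return ((addr - start) & mask) == 0;
    }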
270 276
271 277
272 278 void MacroAssembler::RecordWriteField(
273 279 Register object,
274 280 int offset,
275 281 Register value,
276 282 Register dst,
277 283 SaveFPRegsMode save_fp,
(...skipping 43 matching lines...)
321 327 void MacroAssembler::RecordWrite(Register object,
322 328 Register address,
323 329 Register value,
324 330 SaveFPRegsMode fp_mode,
325 331 RememberedSetAction remembered_set_action,
326 332 SmiCheck smi_check) {
327 333 // The compiled code assumes that record write doesn't change the
328 334 // context register, so we check that none of the clobbered
329 335 // registers are rsi.
330 336 ASSERT(!value.is(rsi) && !address.is(rsi));
331 if (remembered_set_action == OMIT_REMEMBERED_SET &&
332 !FLAG_incremental_marking) {
333 return;
334 }
335 337
336 338 ASSERT(!object.is(value));
337 339 ASSERT(!object.is(address));
338 340 ASSERT(!value.is(address));
339 341 if (emit_debug_code()) {
340 342 AbortIfSmi(object);
341 343 }
342 344
343 345 if (remembered_set_action == OMIT_REMEMBERED_SET &&
344 346 !FLAG_incremental_marking) {
(...skipping 129 matching lines...)
474 476 push(kScratchRegister);
475 477 CallRuntime(Runtime::kAbort, 2);
476 478 // will not return here
477 479 int3();
478 480 }
479 481
480 482
481 483 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
482 484 // ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
483 485 // TODO(gc): Fix this!
484 // TODO(gc): Fix this!
485 486 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
486 487 }
487 488
488 489
489 490 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
490 491 ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
491 492 MaybeObject* result = stub->TryGetCode();
492 493 if (!result->IsFailure()) {
493 494 call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
494 495 RelocInfo::CODE_TARGET);
(...skipping 338 matching lines...)
833 834
834 835
835 836 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
836 837 ASSERT(!target.is(rdi));
837 838 // Load the JavaScript builtin function from the builtins object.
838 839 GetBuiltinFunction(rdi, id);
839 840 movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
840 841 }
841 842
842 843
844 static const Register saved_regs[] =
845 { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
846 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
847
848
849 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
850 Register exclusion1,
851 Register exclusion2,
852 Register exclusion3) {
853 // We don't allow a GC during a store buffer overflow so there is no need to
854 // store the registers in any particular way, but we do have to store and
855 // restore them.
856 for (int i = 0; i < kNumberOfSavedRegs; i++) {
857 Register reg = saved_regs[i];
858 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
859 push(reg);
860 }
861 }
862 // r12 to r15 are callee-saved on all platforms.
863 if (fp_mode == kSaveFPRegs) {
864 CpuFeatures::Scope scope(SSE2);
865 subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
866 for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
867 XMMRegister reg = XMMRegister::from_code(i);
868 movsd(Operand(rsp, i * kDoubleSize), reg);
869 }
870 }
871 }
872
873
874 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
875 Register exclusion1,
876 Register exclusion2,
877 Register exclusion3) {
878 if (fp_mode == kSaveFPRegs) {
879 CpuFeatures::Scope scope(SSE2);
880 for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
881 XMMRegister reg = XMMRegister::from_code(i);
882 movsd(reg, Operand(rsp, i * kDoubleSize));
883 }
884 addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
885 }
886 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
887 Register reg = saved_regs[i];
888 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
889 pop(reg);
890 }
891 }
892 }
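PushCallerSaved and PopCallerSaved must be used as a strictly matched pair — the same fp_mode and the same exclusion registers on both sides — or the pops restore the wrong slots. The exclusions are typically registers whose post-call values the caller wants to keep. A hedged usage sketch; the external reference is a made-up placeholder, not a real V8 function:

    // Hypothetical call site inside a MacroAssembler method: preserve all
    // caller-saved registers except rax, which will carry a result we keep.
    PushCallerSaved(kSaveFPRegs, rax);
    PrepareCallCFunction(2);
    // ... load the two argument registers here ...
    CallCFunction(ExternalReference::some_helper(isolate()), 2);  // placeholder
    PopCallerSaved(kSaveFPRegs, rax);  // must mirror the push exactly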
893
894
843 895 void MacroAssembler::Set(Register dst, int64_t x) {
844 896 if (x == 0) {
845 897 xorl(dst, dst);
846 898 } else if (is_uint32(x)) {
847 899 movl(dst, Immediate(static_cast<uint32_t>(x)));
848 900 } else if (is_int32(x)) {
849 901 movq(dst, Immediate(static_cast<int32_t>(x)));
850 902 } else {
851 903 movq(dst, x, RelocInfo::NONE);
852 904 }
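Set picks the cheapest encoding for the constant, relying on two x64 rules: a write to a 32-bit register zero-extends into the full 64 bits (so movl covers any uint32), and movq with a 32-bit immediate sign-extends (so it covers any int32); only values outside both ranges need the long full-immediate form. Worked cases:

    // How the dispatch above plays out (standard x64 encodings):
    //   Set(rax, 0)                  -> xorl rax, rax  (shortest, no immediate)
    //   Set(rax, 0x80000000)         -> movl: fits uint32, zero-extends
    //   Set(rax, -1)                 -> movq imm32: fits int32, sign-extends
    //   Set(rax, 0x123456789ABCDEF0) -> movq imm64: full 64-bit immediate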
(...skipping 2933 matching lines...)
3786 3838 }
3787 3839 if (flag < kBitsPerByte) {
3788 3840 testb(Operand(scratch, MemoryChunk::kFlagsOffset),
3789 3841 Immediate(static_cast<uint8_t>(1u << flag)));
3790 3842 } else {
3791 3843 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(1 << flag));
3792 3844 }
3793 3845 j(cc, condition_met, condition_met_distance);
3794 3846 }
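This is the tail of a page-flag test (its head is in the elided lines): the flags word lives at MemoryChunk::kFlagsOffset in the page header, and flags in the low byte can use the shorter testb encoding. A sketch of the whole check, assuming the elided head masks the address down to its containing chunk and that the low 32 bits of the flags word are what gets tested:

    // Illustrative C++; the field width and the masking step are assumptions.
    bool PageFlagSet(uintptr_t addr, int flag) {
      uintptr_t chunk = addr & ~Page::kPageAlignmentMask;  // containing chunk
      uint32_t flags =
          *reinterpret_cast<uint32_t*>(chunk + MemoryChunk::kFlagsOffset);
      return (flags & (1u << flag)) != 0;  // the testb/testl above
    }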
3795 3847
3848
3849 void MacroAssembler::IsBlack(Register object,
3850 Register bitmap_scratch,
3851 Register mask_scratch,
3852 Label* is_black,
3853 Label::Distance is_black_distance) {
3854 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
3855 GetMarkBits(object, bitmap_scratch, mask_scratch);
3856
3857 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3858 // The mask_scratch register contains a 1 at the position of the first bit
3859 // and a 0 at all other positions, including the position of the second bit.
3860 movq(rcx, mask_scratch);
3861 // Make rcx into a mask that covers both marking bits using the operation
3862 // rcx = mask | (mask << 1).
3863 lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
3864 // Note that we are using a 4-byte aligned 8-byte load.
Lasse Reichstein 2011/06/10 13:55:44 Badness. If it also overlaps a cache boundary, it
Erik Corry 2011/06/10 21:57:29 Let's see. The alternative on IA32 is a mispredic
3865 and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3866 cmpq(mask_scratch, rcx);
3867 j(equal, is_black, is_black_distance);
3868 }
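The trick: each object's color is a pair of adjacent bits in the mark bitmap, and per the ASSERT black is the pattern "10" — first bit set, second clear. The lea builds mask | (mask << 1) in one instruction (base + index*2 = 3*mask), covering both bits, and the compare then demands exactly the first one. The same predicate for one bitmap cell in plain C++ (illustrative):

    bool CellIsBlack(uintptr_t cell, uintptr_t mask) {
      uintptr_t both_bits = mask | (mask << 1);  // what the lea puts in rcx
      // Black <=> first mark bit set and second mark bit clear.
      return (cell & both_bits) == mask;
    }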
3869
3870
3871 void MacroAssembler::IsDataObject(Register value,
3872 Register scratch,
3873 Label* not_data_object,
3874 Label::Distance not_data_object_distance) {
3875 Label is_data_object;
3876 movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
3877 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3878 j(equal, &is_data_object, Label::kNear);
3879 ASSERT(kConsStringTag == 1 && kIsConsStringMask == 1);
3880 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3881 // If it's a string and it's not a cons string then it's an object that
3882 // doesn't need scanning.
3883 testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
3884 Immediate(kIsConsStringMask | kIsNotStringMask));
3885 // Jump if we need to mark it grey and push it.
3886 j(not_zero, not_data_object, not_data_object_distance);
3887 bind(&is_data_object);
3888 }
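The classification leans on the tag layout the two ASSERTs document: in the instance-type byte, bit 0x01 marks a cons string and bit 0x80 marks a non-string. Heap numbers and non-cons strings contain no pointers the incremental marker needs to visit, so both bits clear means the object is pure data. A sketch of the same predicate:

    // Illustrative predicate matching the map check and testb above.
    bool IsDataObjectType(uint8_t instance_type, bool has_heap_number_map) {
      if (has_heap_number_map) return true;  // heap numbers are pure data
      // A string (0x80 clear) that is not a cons string (0x01 clear) holds
      // no pointers that need scanning.
      return (instance_type & (kIsConsStringMask | kIsNotStringMask)) == 0;
    }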
3889
3890
3891 void MacroAssembler::GetMarkBits(Register addr_reg,
3892 Register bitmap_reg,
3893 Register mask_reg) {
3894 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
3895 movq(bitmap_reg, addr_reg);
3896 // Sign extended 32 bit immediate.
3897 and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
3898 movq(rcx, addr_reg);
3899 int shift =
3900 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
3901 shrl(rcx, Immediate(shift));
3902 and_(rcx,
3903 Immediate((Page::kPageAlignmentMask >> shift) &
3904 ~(Bitmap::kBytesPerCell - 1)));
3905
3906 addq(bitmap_reg, rcx);
3907 movq(rcx, addr_reg);
3908 shrl(rcx, Immediate(kPointerSizeLog2));
3909 and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
3910 movl(mask_reg, Immediate(1));
3911 shl_cl(mask_reg);
3912 }
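GetMarkBits turns an object address into the page-relative byte address of the bitmap cell holding its mark bits, plus a one-bit mask within that cell. The combined shift folds three steps into one — drop the pointer-alignment bits, drop the bits-per-cell bits, scale back up by bytes per cell — and the and_ keeps the result page-relative and cell-aligned. The same arithmetic step by step in C++ (illustrative; as in IsBlack above, callers add MemoryChunk::kHeaderSize when indexing the bitmap):

    // Unoptimized equivalent of the shift/mask sequence above.
    void GetMarkBitsSketch(uintptr_t addr,
                           uintptr_t* bitmap_cell, uint32_t* mask) {
      uintptr_t page = addr & ~Page::kPageAlignmentMask;   // containing page
      uintptr_t word = (addr & Page::kPageAlignmentMask) >> kPointerSizeLog2;
      // bitmap_reg above: page base plus the cell's byte offset (the
      // bitmap's kHeaderSize displacement is supplied at the use site).
      *bitmap_cell = page +
          (word >> Bitmap::kBitsPerCellLog2) * Bitmap::kBytesPerCell;
      // One set bit selecting this word's first mark bit within the cell.
      *mask = 1u << (word & ((1 << Bitmap::kBitsPerCellLog2) - 1));
    }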
3913
3796 3914 } } // namespace v8::internal
3797 3915
3798 3916 #endif // V8_TARGET_ARCH_X64