Chromium Code Reviews

Diff: src/x64/macro-assembler-x64.cc

Issue 2274001: Revert r4715. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 7 months ago
 // Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided

(... 72 unchanged lines skipped ...)
                                        Register addr,
                                        Register scratch) {
   if (FLAG_debug_code) {
     // Check that the object is not in new space.
     Label not_in_new_space;
     InNewSpace(object, scratch, not_equal, &not_in_new_space);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
   }

+  Label fast;
+
   // Compute the page start address from the heap object pointer, and reuse
   // the 'object' register for it.
-  and_(object, Immediate(~Page::kPageAlignmentMask));
+  ASSERT(is_int32(~Page::kPageAlignmentMask));
+  and_(object,
+       Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+  Register page_start = object;

-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  and_(addr, Immediate(Page::kPageAlignmentMask));
-  shrl(addr, Immediate(Page::kRegionSizeLog2));
+  // Compute the bit addr in the remembered set/index of the pointer in the
+  // page. Reuse 'addr' as pointer_offset.
+  subq(addr, page_start);
+  shr(addr, Immediate(kPointerSizeLog2));
+  Register pointer_offset = addr;

-  // Set dirty mark for region.
-  bts(Operand(object, Page::kDirtyFlagOffset), addr);
+  // If the bit offset lies beyond the normal remembered set range, it is in
+  // the extra remembered set area of a large object.
+  cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
+  j(below, &fast);
+
+  // We have a large object containing pointers. It must be a FixedArray.
+
+  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+  // extra remembered set after the large object.
+
+  // Load the array length into 'scratch'.
+  movl(scratch,
+       Operand(page_start,
+               Page::kObjectStartOffset + FixedArray::kLengthOffset));
+  Register array_length = scratch;
+
+  // Extra remembered set starts right after the large object (a FixedArray), at
+  // page_start + kObjectStartOffset + objectSize
+  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+  // Add the delta between the end of the normal RSet and the start of the
+  // extra RSet to 'page_start', so that addressing the bit using
+  // 'pointer_offset' hits the extra RSet words.
+  lea(page_start,
+      Operand(page_start, array_length, times_pointer_size,
+              Page::kObjectStartOffset + FixedArray::kHeaderSize
+              - Page::kRSetEndOffset));
+
+  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+  // to limit code size. We should probably evaluate this decision by
+  // measuring the performance of an equivalent implementation using
+  // "simpler" instructions
+  bind(&fast);
+  bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
 }
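
[Editor's note] The fast path restored above is address arithmetic over the page: mask the object pointer down to its page start, turn the slot address into a word index within that page, and set that bit in the remembered set. The sketch below restates the computation in plain C++; the constant values and the hypothetical addresses are assumptions for illustration, and the large-object "extra remembered set" adjustment is omitted.

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for the V8 constants named in the diff; the concrete
// values are assumptions for this sketch, not taken from the revision.
constexpr uintptr_t kPageSize          = 8 * 1024;
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;  // Page::kPageAlignmentMask
constexpr unsigned  kPointerSizeLog2   = 3;              // 8-byte pointers on x64

// Bit index the fast path sets: the page is found by masking the object
// pointer, and the slot's word index inside that page selects the bit that
// bts(Operand(page_start, Page::kRSetOffset), pointer_offset) flips.
inline uintptr_t RememberedSetBitIndex(uintptr_t object, uintptr_t slot) {
  uintptr_t page_start = object & ~kPageAlignmentMask;   // and_(object, ~mask)
  return (slot - page_start) >> kPointerSizeLog2;        // subq(addr, page_start); shr
}

int main() {
  // Example: a slot 0x140 bytes into its page maps to remembered-set bit 40.
  uintptr_t object = 0x7f0000002010;   // hypothetical object address
  uintptr_t slot   = 0x7f0000002140;   // hypothetical field inside the same page
  std::printf("bit index = %lu\n",
              static_cast<unsigned long>(RememberedSetBitIndex(object, slot)));
}
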


-// For page containing |object| mark region covering [object+offset] dirty.
+// Set the remembered set bit for [object+offset].
 // object is the object being stored into, value is the object being stored.
 // If offset is zero, then the smi_index register contains the array index into
 // the elements array represented as a smi. Otherwise it can be used as a
 // scratch register.
 // All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
                                  Register smi_index) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
   ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));

-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
+  // First, check if a remembered set write is even needed. The tests below
+  // catch stores of Smis and stores into young gen (which does not have space
+  // for the remembered set bits).
   Label done;
   JumpIfSmi(value, &done);

   RecordWriteNonSmi(object, offset, value, smi_index);
   bind(&done);

   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors. This clobbering repeats the
   // clobbering done inside RecordWriteNonSmi but it's necessary to
   // avoid having the fast case for smis leave the registers

(... 12 unchanged lines skipped ...)
                                        Register smi_index) {
   Label done;

   if (FLAG_debug_code) {
     Label okay;
     JumpIfNotSmi(object, &okay);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
   }

   // Test that the object address is not in the new space. We cannot
-  // update page dirty marks for new space pages.
+  // set remembered set bits in the new space.
   InNewSpace(object, scratch, equal, &done);

   // The offset is relative to a tagged or untagged HeapObject pointer,
   // so either offset or offset + kHeapObjectTag must be a
   // multiple of kPointerSize.
   ASSERT(IsAligned(offset, kPointerSize) ||
          IsAligned(offset + kHeapObjectTag, kPointerSize));

-  Register dst = smi_index;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
+  // We use optimized write barrier code if the word being written to is not in
+  // a large object page, or is in the first "page" of a large object page.
+  // We make sure that an offset is inside the right limits whether it is
+  // tagged or untagged.
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
+    // Compute the bit offset in the remembered set, leave it in 'scratch'.
+    lea(scratch, Operand(object, offset));
+    ASSERT(is_int32(Page::kPageAlignmentMask));
+    and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+    shr(scratch, Immediate(kPointerSizeLog2));
+
+    // Compute the page address from the heap object pointer, leave it in
+    // 'object' (immediate value is sign extended).
+    and_(object, Immediate(~Page::kPageAlignmentMask));
+
+    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+    // to limit code size. We should probably evaluate this decision by
+    // measuring the performance of an equivalent implementation using
+    // "simpler" instructions
+    bts(Operand(object, Page::kRSetOffset), scratch);
   } else {
-    // array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.
-    SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
-    lea(dst, FieldOperand(object,
-                          index.reg,
-                          index.scale,
-                          FixedArray::kHeaderSize));
+    Register dst = smi_index;
+    if (offset != 0) {
+      lea(dst, Operand(object, offset));
+    } else {
+      // array access: calculate the destination address in the same manner as
+      // KeyedStoreIC::GenerateGeneric.
+      SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
+      lea(dst, FieldOperand(object,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
+    }
+    // If we are already generating a shared stub, not inlining the
+    // record write code isn't going to save us any memory.
+    if (generating_stub()) {
+      RecordWriteHelper(object, dst, scratch);
+    } else {
+      RecordWriteStub stub(object, dst, scratch);
+      CallStub(&stub);
+    }
   }
-  RecordWriteHelper(object, dst, scratch);

   bind(&done);

   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (FLAG_debug_code) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
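
[Editor's note] The restored RecordWrite/RecordWriteNonSmi pair first filters out stores that need no remembered-set update (smi values, objects in new space), uses the inline bts path only when the offset provably stays within a normal page, and otherwise computes the destination slot (directly from the offset, or from the smi index the way KeyedStoreIC::GenerateGeneric does) before falling back to RecordWriteHelper or RecordWriteStub. A minimal, self-contained C++ sketch of that decision logic follows; the constants are illustrative assumptions, only the structure mirrors the diff.

#include <cstdint>

// Which path the restored record-write code takes for a given store.
enum class WriteBarrierPath { kSkippedSmi, kSkippedNewSpace, kInlineBts, kHelperOrStub };

constexpr int kSmiTagMask        = 1;          // low tag bit; smis have tag 0
constexpr int kHeapObjectTag     = 1;          // heap objects are tagged with 1
constexpr int kMaxHeapObjectSize = 8 * 1024;   // assumed: Page::kMaxHeapObjectSize

inline WriteBarrierPath ClassifyRecordWrite(uintptr_t value,
                                            bool object_in_new_space,
                                            int offset) {
  if ((value & kSmiTagMask) == 0) return WriteBarrierPath::kSkippedSmi;   // JumpIfSmi
  if (object_in_new_space) return WriteBarrierPath::kSkippedNewSpace;     // InNewSpace
  // Inline bts only when the slot stays inside the normal page area,
  // whether the offset is tagged or untagged.
  if (offset > 0 && offset < kMaxHeapObjectSize - kHeapObjectTag)
    return WriteBarrierPath::kInlineBts;
  // Otherwise: lea the slot address, then RecordWriteHelper or RecordWriteStub.
  return WriteBarrierPath::kHelperOrStub;
}
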
(... 381 unchanged lines skipped ...)
     movq(dst, src);
   }
   if (power < kSmiShift) {
     sar(dst, Immediate(kSmiShift - power));
   } else if (power > kSmiShift) {
     shl(dst, Immediate(power - kSmiShift));
   }
 }


-void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
-                                                         Register src,
-                                                         int power) {
-  ASSERT((0 <= power) && (power < 32));
-  if (dst.is(src)) {
-    shr(dst, Immediate(power + kSmiShift));
-  } else {
-    UNIMPLEMENTED();  // Not used.
-  }
-}
-
-
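
[Editor's note] The deleted PositiveSmiDivPowerOfTwoToInteger32 relied on the x64 smi representation used by the surrounding code, where the 32-bit payload sits in the upper half of the 64-bit word (the same kSmiShift that the sar/shl above adjust by). One logical right shift by power + kSmiShift therefore both untags a non-negative smi and divides it by 2^power. A small C++ restatement, assuming kSmiShift == 32, with a worked example:

#include <cstdint>

constexpr int kSmiShift = 32;  // assumed: x64 smi payload lives in the upper 32 bits

// Equivalent of the removed helper: one shift untags and divides by 2^power
// (valid for non-negative smis, as the original ASSERT also required).
constexpr uint64_t PositiveSmiDivPow2ToInt32(uint64_t smi, int power) {
  return smi >> (power + kSmiShift);   // shr(dst, Immediate(power + kSmiShift))
}

// Worked example: the smi encoding of 40 is 40 << 32; dividing by 2^3 gives 5.
static_assert(PositiveSmiDivPow2ToInt32(40ULL << kSmiShift, 3) == 5,
              "smi divide-by-power-of-two example");
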
 Condition MacroAssembler::CheckSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
   testb(src, Immediate(kSmiTagMask));
   return zero;
 }


 Condition MacroAssembler::CheckPositiveSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
   movq(kScratchRegister, src);
(... 1943 unchanged lines skipped ...)
                      scratch2,
                      scratch3,
                      gc_required,
                      TAG_OBJECT);

   // Set the map, length and hash field.
   LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   Integer32ToSmi(scratch1, length);
   movq(FieldOperand(result, String::kLengthOffset), scratch1);
-  movq(FieldOperand(result, String::kHashFieldOffset),
+  movl(FieldOperand(result, String::kHashFieldOffset),
        Immediate(String::kEmptyHashField));
 }
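
[Editor's note] The only change to the string allocators here and in AllocateAsciiString below is movq -> movl for the hash field: it is a 32-bit field, so a 32-bit store suffices, whereas a 64-bit store would also write the four bytes that follow it. Purely as an illustration (the real field order and sizes live in objects.h, which this patch does not show), the layout assumption looks like:

#include <cstdint>

// Hypothetical sketch of the header fields initialized above; offsets and
// sizes are assumptions for illustration, not taken from objects.h.
struct SeqStringHeaderSketch {
  uint64_t map;         // HeapObject::kMapOffset   -> written with movq
  uint64_t length_smi;  // String::kLengthOffset    -> written with movq (smi)
  uint32_t hash_field;  // String::kHashFieldOffset -> 32 bits, hence movl
};
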


 void MacroAssembler::AllocateAsciiString(Register result,
                                          Register length,
                                          Register scratch1,
                                          Register scratch2,
                                          Register scratch3,
                                          Label* gc_required) {
(... 17 unchanged lines skipped ...)
                      scratch2,
                      scratch3,
                      gc_required,
                      TAG_OBJECT);

   // Set the map, length and hash field.
   LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   Integer32ToSmi(scratch1, length);
   movq(FieldOperand(result, String::kLengthOffset), scratch1);
-  movq(FieldOperand(result, String::kHashFieldOffset),
+  movl(FieldOperand(result, String::kHashFieldOffset),
        Immediate(String::kEmptyHashField));
 }


 void MacroAssembler::AllocateConsString(Register result,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* gc_required) {
   // Allocate heap number in new space.
   AllocateInNewSpace(ConsString::kSize,
(... 110 unchanged lines skipped ...)
   CPU::FlushICache(address_, size_);

   // Check that the code was patched as expected.
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64
