Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(526)

Side by Side Diff: src/x64/macro-assembler-x64.cc

Issue 1858002: Port inlined version of swap primitive for sorting from ia32 to x64.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/x64/macro-assembler-x64.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
65 cmpq(with, kScratchRegister); 65 cmpq(with, kScratchRegister);
66 } 66 }
67 67
68 68
// Emits a stack-overflow check: compares rsp against the stack-limit root
// (Heap::kStackLimitRootIndex) and jumps to on_stack_overflow when rsp is
// below the limit.  NOTE(review): CompareRoot presumably clobbers
// kScratchRegister (as the visible CompareRoot tail above does) — confirm
// at call sites.
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
  CompareRoot(rsp, Heap::kStackLimitRootIndex);
  j(below, on_stack_overflow);
}
73 73
74 74
// Sets, in the page's remembered set (RSet), the bit corresponding to the
// pointer slot 'addr' inside 'object'.  Handles both the normal in-page RSet
// and the extra RSet that follows a large FixedArray.  All three registers
// are clobbered: 'object' becomes the page start, 'addr' the pointer offset,
// and 'scratch' holds the array length on the large-object path.
void MacroAssembler::RecordWriteHelper(Register object,
                                       Register addr,
                                       Register scratch) {
  Label fast;

  // Compute the page start address from the heap object pointer, and reuse
  // the 'object' register for it.
  ASSERT(is_int32(~Page::kPageAlignmentMask));
  and_(object,
       Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
  Register page_start = object;

  // Compute the bit addr in the remembered set/index of the pointer in the
  // page. Reuse 'addr' as pointer_offset.
  subq(addr, page_start);
  shr(addr, Immediate(kPointerSizeLog2));
  Register pointer_offset = addr;

  // If the bit offset lies beyond the normal remembered set range, it is in
  // the extra remembered set area of a large object.
  cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
  j(less, &fast);

  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
  // extra remembered set after the large object.

  // Load the array length into 'scratch'.
  // NOTE(review): loaded with movl, i.e. as a 32-bit value — presumably the
  // length is stored as an untagged int32 here; confirm against
  // FixedArray::kLengthOffset's representation in this V8 revision.
  movl(scratch,
       Operand(page_start,
               Page::kObjectStartOffset + FixedArray::kLengthOffset));
  Register array_length = scratch;

  // Extra remembered set starts right after the large object (a FixedArray),
  // at page_start + kObjectStartOffset + objectSize
  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
  // Add the delta between the end of the normal RSet and the start of the
  // extra RSet to 'page_start', so that addressing the bit using
  // 'pointer_offset' hits the extra RSet words.
  lea(page_start,
      Operand(page_start, array_length, times_pointer_size,
              Page::kObjectStartOffset + FixedArray::kHeaderSize
                  - Page::kRSetEndOffset));

  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
  // to limit code size. We should probably evaluate this decision by
  // measuring the performance of an equivalent implementation using
  // "simpler" instructions
  bind(&fast);
  bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}
168 125
169 126
170 void MacroAssembler::InNewSpace(Register object, 127 void MacroAssembler::InNewSpace(Register object,
171 Register scratch, 128 Register scratch,
172 Condition cc, 129 Condition cc,
173 Label* branch) { 130 Label* branch) {
174 ASSERT(cc == equal || cc == not_equal); 131 ASSERT(cc == equal || cc == not_equal);
175 if (!scratch.is(object)) { 132 if (!scratch.is(object)) {
176 movq(scratch, object); 133 movq(scratch, object);
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after
272 // KeyedStoreIC::GenerateGeneric. 229 // KeyedStoreIC::GenerateGeneric.
273 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); 230 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
274 lea(dst, Operand(object, 231 lea(dst, Operand(object,
275 index.reg, 232 index.reg,
276 index.scale, 233 index.scale,
277 FixedArray::kHeaderSize - kHeapObjectTag)); 234 FixedArray::kHeaderSize - kHeapObjectTag));
278 } 235 }
279 // If we are already generating a shared stub, not inlining the 236 // If we are already generating a shared stub, not inlining the
280 // record write code isn't going to save us any memory. 237 // record write code isn't going to save us any memory.
281 if (generating_stub()) { 238 if (generating_stub()) {
282 RecordWriteHelper(this, object, dst, scratch); 239 RecordWriteHelper(object, dst, scratch);
283 } else { 240 } else {
284 RecordWriteStub stub(object, dst, scratch); 241 RecordWriteStub stub(object, dst, scratch);
285 CallStub(&stub); 242 CallStub(&stub);
286 } 243 }
287 } 244 }
288 245
289 bind(&done); 246 bind(&done);
290 247
291 // Clobber all input registers when running with the debug-code flag 248 // Clobber all input registers when running with the debug-code flag
292 // turned on to provoke errors. 249 // turned on to provoke errors.
(...skipping 2419 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Finishes a code-patching session: flushes the instruction cache over the
// patched region so the CPU sees the new code, and (in debug builds) checks
// that exactly 'size_' bytes were emitted.
CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  // Reloc writer still at its initial position (end of buffer + kGap) —
  // presumably meaning no relocation information was emitted while patching;
  // confirm against Assembler's reloc_info_writer initialization.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
2720 2677
2721 } } // namespace v8::internal 2678 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/x64/macro-assembler-x64.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698