Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(634)

Side by Side Diff: src/arm/codegen-arm.cc

Issue 12920009: Use generated Neon version of MemCopy() on ARM, if platform supports it. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 7 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after
107 OS::ProtectCode(buffer, actual_size); 107 OS::ProtectCode(buffer, actual_size);
108 108
109 #if !defined(USE_SIMULATOR) 109 #if !defined(USE_SIMULATOR)
110 return FUNCTION_CAST<UnaryMathFunction>(buffer); 110 return FUNCTION_CAST<UnaryMathFunction>(buffer);
111 #else 111 #else
112 fast_exp_arm_machine_code = buffer; 112 fast_exp_arm_machine_code = buffer;
113 return &fast_exp_simulator; 113 return &fast_exp_simulator;
114 #endif 114 #endif
115 } 115 }
116 116
// Portable fallback for OS::MemCopy(): delegates straight to the C
// library's memcpy(). Used when the specialized NEON copy routine
// cannot be generated (no executable memory, or NEON unsupported).
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}
120
121 // Based on Bionic's memcpy.s
hans 2013/04/02 12:34:29 Please end each comment with a period, here and below.
Nike 2013/04/03 15:04:06 Done.
// Generates a NEON-accelerated memcpy routine in executable memory at
// runtime and returns a pointer to it. Falls back to MemCopyWrapper
// (plain libc memcpy) when executable memory cannot be allocated or the
// CPU does not support NEON. Callers must only invoke the generated code
// with size >= OS::kMinComplexMemCopy (checked in debug builds below).
OS::MemCopyFunction CreateMemCopyFunction() {
  size_t actual_size;
  static const int kCacheLineSize = 64;
  static const int kPrefetchDistance = kCacheLineSize * 4;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) return &MemCopyWrapper;
  if (!CpuFeatures::IsSupported(NEON)) return &MemCopyWrapper;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  CpuFeatureScope use_neon(&masm, NEON);
  Label less16, aligned16, aligned8, skip_copy8, skip_copy4,
      fix_remainder, main_loop, has32, less32;

  // ----------- S t a t e -------------
  //  -- r0    : dest
  //  -- r1    : src
  //  -- r2    : count
  // -----------------------------------

  __ push(lr);

  // Start preloading as early as possible.
  __ pld(r1, kCacheLineSize * 0);
  __ pld(r1, kCacheLineSize * 1);

#ifdef DEBUG
  Label check_ok;
  // Do we have at least 16 bytes to copy (needed for the destination
  // alignment code below)?
  ASSERT(OS::kMinComplexMemCopy >= 16);
  __ cmp(r2, Operand(OS::kMinComplexMemCopy));
  __ b(&check_ok, hs);
  __ bkpt(0);
  __ bind(&check_ok);
#endif

  // Align destination to half cache-line for the write-buffer.
  // r3 = (-dest) & 0xf is the number of bytes needed to reach 16-byte
  // alignment; if already aligned, skip ahead.
  __ rsb(r3, r0, Operand(0));
  __ and_(r3, r3, Operand(0xf), SetCC);
  __ b(&aligned16, eq);

  // Copy up to 15 bytes (count in r3).
  __ sub(r2, r2, r3);
  // Shifting r3 left by 31 puts bit 0 of the count into N and bit 1
  // into C, so the conditional loads/stores below copy 1 and/or 2
  // bytes as needed.
  __ mov(ip, Operand(r3, LSL, 31), SetCC);
  __ ldrb(lr, MemOperand(r1, 1, PostIndex), mi);
  __ strb(lr, MemOperand(r0, 1, PostIndex), mi);
  __ ldrb(ip, MemOperand(r1, 1, PostIndex), cs);
  __ ldrb(lr, MemOperand(r1, 1, PostIndex), cs);
  __ strb(ip, MemOperand(r0, 1, PostIndex), cs);
  __ strb(lr, MemOperand(r0, 1, PostIndex), cs);
  // Same trick for bits 2 (C) and 3 (N via ge/mi) of the count.
  __ mov(ip, Operand(r3, LSL, 29), SetCC);
  __ b(&aligned8, ge);
  // Copies 4 bytes, destination 32-bits aligned.
  __ vld4(8, r1, d0, element_0, Writeback);
  __ vst4(8, r0, d0, element_0, Writeback, 32 / 8);
  __ bind(&aligned8);
  __ b(&aligned16, cc);
  // Copies 8 bytes, destination 64-bits aligned.
  __ vld1(8, r1, d0, d0, Writeback);
  __ vst1(8, r0, d0, d0, Writeback, 64 / 8);

  __ bind(&aligned16);
  // Preload immediately the next cache line, which we may need.
  __ pld(r1, kCacheLineSize * 0);
  __ pld(r1, kCacheLineSize * 1);

  // Make sure we have at least 64 bytes to copy.
  __ sub(r2, r2, Operand(64), SetCC);
  __ b(&fix_remainder, lo);

  // Preload all the cache lines we need.
  // NOTE: the number of pld below depends on kPrefetchDistance;
  // ideally we would increase the distance in the main loop to
  // avoid the goofy code below. In practice this doesn't seem to make
  // a big difference.
  __ pld(r1, kCacheLineSize * 2);
  __ pld(r1, kCacheLineSize * 3);
  __ pld(r1, kPrefetchDistance);

  // The main loop copies 64 bytes at a time.
  __ bind(&main_loop);
  __ vld1(8, r1, d0, d3, Writeback);
  __ vld1(8, r1, d4, d7, Writeback);
  __ pld(r1, kPrefetchDistance);
  __ sub(r2, r2, Operand(64), SetCC);
  __ vst1(8, r0, d0, d3, Writeback, 128 / 8);
  __ vst1(8, r0, d4, d7, Writeback, 128 / 8);
  __ b(&main_loop, hs);

  // Fix up the remaining count and make sure we have >= 32 bytes left.
  __ bind(&fix_remainder);
  __ add(r2, r2, Operand(64));
  __ sub(r2, r2, Operand(32), SetCC);
  __ b(&less32, lo);

  // 32 bytes at a time. These cache lines were already preloaded.
  __ bind(&has32);
  __ vld1(8, r1, d0, d3, Writeback);
  __ sub(r2, r2, Operand(32), SetCC);
  __ vst1(8, r0, d0, d3, Writeback, 128 / 8);
  __ b(&has32, hs);

  // Less than 32 bytes left.
  __ bind(&less32);
  __ add(r2, r2, Operand(32));
  __ tst(r2, Operand(0x10));
  __ b(&less16, eq);
  // Copies 16 bytes, 128-bits aligned.
  __ vld1(8, r1, d0, d1, Writeback);
  __ vst1(8, r0, d0, d1, Writeback, 128 / 8);

  // Copy up to 15 bytes (count in r2).
  __ bind(&less16);
  // As above: LSL 29 exposes bits 2 and 3 of the count in the C and N
  // flags to select the conditional 8- and 4-byte copies.
  __ mov(ip, Operand(r2, LSL, 29), SetCC);
  __ b(&skip_copy8, cc);
  __ vld1(8, r1, d0, d0, Writeback);
  __ vst1(8, r0, d0, d0, Writeback);
  __ bind(&skip_copy8);
  __ b(&skip_copy4, ge);
  __ vld4(8, r1, d0, element_0, Writeback);
  __ vst4(8, r0, d0, element_0, Writeback);
  __ bind(&skip_copy4);
  // Final 1-3 bytes, selected via bits 0 and 1 of the count.
  __ mov(ip, Operand(r2, LSL, 31), SetCC);
  __ ldrb(r3, MemOperand(r1, 1, PostIndex), mi);
  __ ldrb(ip, MemOperand(r1, 1, PostIndex), cs);
  __ ldrb(lr, MemOperand(r1, 1, PostIndex), cs);
  __ strb(r3, MemOperand(r0, 1, PostIndex), mi);
  __ strb(ip, MemOperand(r0, 1, PostIndex), cs);
  __ strb(lr, MemOperand(r0, 1, PostIndex), cs);

  __ pop(lr);
  __ bx(lr);

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  // Flush the instruction cache so the generated code is visible to
  // execution, then make the buffer executable and write-protected.
  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}
117 266
118 #undef __ 267 #undef __
119 268
120
121 UnaryMathFunction CreateSqrtFunction() { 269 UnaryMathFunction CreateSqrtFunction() {
122 return &sqrt; 270 return &sqrt;
123 } 271 }
124 272
125 // ------------------------------------------------------------------------- 273 // -------------------------------------------------------------------------
126 // Platform-specific RuntimeCallHelper functions. 274 // Platform-specific RuntimeCallHelper functions.
127 275
128 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { 276 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
129 masm->EnterFrame(StackFrame::INTERNAL); 277 masm->EnterFrame(StackFrame::INTERNAL);
130 ASSERT(!masm->has_frame()); 278 ASSERT(!masm->has_frame());
(...skipping 568 matching lines...) Expand 10 before | Expand all | Expand 10 after
699 patcher.masm()->add(r0, pc, Operand(-8)); 847 patcher.masm()->add(r0, pc, Operand(-8));
700 patcher.masm()->ldr(pc, MemOperand(pc, -4)); 848 patcher.masm()->ldr(pc, MemOperand(pc, -4));
701 patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start())); 849 patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
702 } 850 }
703 } 851 }
704 852
705 853
706 } } // namespace v8::internal 854 } } // namespace v8::internal
707 855
708 #endif // V8_TARGET_ARCH_ARM 856 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« src/arm/assembler-arm.cc ('K') | « src/arm/assembler-arm.cc ('k') | src/platform.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698