Chromium Code Reviews

Side by Side Diff: src/mips/codegen-mips.cc

Issue 104353002: MIPS: Faster memcpy. (Closed) Base URL: git://github.com/v8/v8.git@bleeding_edge
Patch Set: Rebased Created 7 years ago

// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 92 matching lines...)

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_mips_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}


#if defined(V8_HOST_ARCH_MIPS)
OS::MemCopyUint8Function CreateMemCopyUint8Function(
    OS::MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
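  // Keep the plain C++ stub when serializing: the snapshot presumably must
  // not reference this dynamically generated, out-of-heap code.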
  if (Serializer::enabled()) {
    return stub;
  }

  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  // This code assumes that cache lines are 32 bytes; it will not work
  // correctly if the cache line is larger.
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    Label lastb, unaligned, aligned, chkw,
          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;

    // The size of each prefetch.
    uint32_t pref_chunk = 32;
    // The maximum size of a prefetch; it must not be less than pref_chunk.
    // If the real size of a prefetch is greater than max_pref_size and
    // the kPrefHintPrepareForStore hint is used, the code will not work
    // correctly.
    uint32_t max_pref_size = 128;
    ASSERT(pref_chunk < max_pref_size);

    // pref_limit is set based on the fact that we never use an offset
    // greater than 5 on a store pref and that a single pref can
    // never be larger than max_pref_size.
    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
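    // With the values above, pref_limit = (5 * 32) + 128 = 288: no store
    // prefetch is issued once a0 is within 288 bytes of the copy's end.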
    int32_t pref_hint_load = kPrefHintLoadStreamed;
    int32_t pref_hint_store = kPrefHintPrepareForStore;
    uint32_t loadstore_chunk = 4;

    // The initial prefetches may fetch bytes that are before the buffer being
    // copied. Start copies with an offset of 4 to avoid this situation when
    // using kPrefHintPrepareForStore.
    ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
           pref_chunk * 4 >= max_pref_size);

    // If the size is less than 8, go to lastb. Regardless of size,
    // copy the dst pointer to v0 for the return value.
    __ slti(t2, a2, 2 * loadstore_chunk);
    __ bne(t2, zero_reg, &lastb);
    __ mov(v0, a0);  // In delay slot.

    // If src and dst have different alignments, go to unaligned; if they
    // have the same alignment (but are not actually aligned) do a partial
    // load/store to make them aligned. If they are both already aligned
    // we can start copying at aligned.
    __ xor_(t8, a1, a0);
    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
    __ bne(t8, zero_reg, &unaligned);
    __ subu(a3, zero_reg, a0);  // In delay slot.

    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &aligned);  // Already aligned.
    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.

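    // The lwr/swr pair below moves the a3 (1..3) head bytes with a single
    // partial-word load and store, leaving both pointers word-aligned.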
    __ lwr(t8, MemOperand(a1));
    __ addu(a1, a1, a3);
    __ swr(t8, MemOperand(a0));
    __ addu(a0, a0, a3);

    // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
    // count how many bytes we have to copy after all the 64 byte chunks are
    // copied and a3 to the dst pointer after all the 64 byte chunks have been
    // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&aligned);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &chkw);  // Less than 64?
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.

    // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
    // in this case a0+x must not be past the "t0-32" address. This means:
    // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
    // x=64 the last "safe" a0 address is "t0-96". In the current version we
    // use "pref hint, 128(a0)", so "t0-160" is the limit.
    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(t0, a0, a2);  // t0 is the "past the end" address.
      __ Subu(t9, t0, pref_limit);  // t9 is the "last safe pref" address.
    }
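    // While a0 <= t9 it is still safe to prefetch with PrepareForStore;
    // once a0 passes t9 the loop below branches to skip_pref instead.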

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }
    __ bind(&loop16w);
    __ lw(t0, MemOperand(a1));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
      __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&skip_pref);
    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));

    __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have src and dest word-aligned but less than 64 bytes to go.
    // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
    // down to chk1w to handle the tail end of the copy.
    __ bind(&chkw);
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ andi(t8, a2, 0x1f);
    __ beq(a2, t8, &chk1w);  // Less than 32?
    __ nop();  // In delay slot.
    __ lw(t0, MemOperand(a1));
    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Here we have less than 32 bytes to copy. Set up for a loop to copy
    // one word at a time. Set a2 to count how many bytes we have to copy
    // after all the word chunks are copied and a3 to the dst pointer after
    // all the word chunks have been copied. We will loop, incrementing a0
    // and a1 until a0 equals a3.
    __ bind(&chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &lastb);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&wordCopy_loop);
    __ lw(t3, MemOperand(a1));
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &wordCopy_loop);
    __ sw(t3, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
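    // (The -1 word offset compensates for a0 having already been advanced
    // past the word being stored.)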

    __ bind(&lastb);
    __ Branch(&leave, le, a2, Operand(zero_reg));
    __ addu(a3, a0, a2);

    __ bind(&lastbloop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &lastbloop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ bind(&leave);
    __ jr(ra);
    __ nop();

    // Unaligned case. Only the dst gets aligned, so we need to do partial
    // loads of the source followed by normal stores to the dst (once we
    // have aligned the destination).
    __ bind(&unaligned);
    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0.
    __ beq(a3, zero_reg, &ua_chk16w);
    __ subu(a2, a2, a3);  // In delay slot.

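    // On little-endian MIPS, lwr fills the low-order bytes of the register
    // from the addressed byte up to the word boundary, and lwl (addressed
    // three bytes later) fills the remaining high-order bytes; together
    // they perform one unaligned word load.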
    __ lwr(v1, MemOperand(a1));
    __ lwl(v1,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ addu(a1, a1, a3);
    __ swr(v1, MemOperand(a0));
    __ addu(a0, a0, a3);

    // Now the dst (but not the source) is aligned. Set a2 to count how many
    // bytes we have to copy after all the 64 byte chunks are copied and a3 to
    // the dst pointer after all the 64 byte chunks have been copied. We will
    // loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&ua_chk16w);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &ua_chkw);
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(t0, a0, a2);
      __ Subu(t9, t0, pref_limit);
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }

    __ bind(&ua_loop16w);
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
    __ lwr(t0, MemOperand(a1));
    __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);
      __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&ua_skip_pref);
    __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
    __ lwl(t0,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t4,
           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t5,
           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t6,
           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t7,
           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
    __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
    __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
    __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
    __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
    __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
    __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
    __ lwl(t0,
           MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t4,
           MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t5,
           MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t6,
           MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t7,
           MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &ua_loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have less than 64 bytes to go. Check for
    // a 32-byte chunk and copy it if there is one. Otherwise jump down to
    // ua_chk1w to handle the tail end of the copy.
    __ bind(&ua_chkw);
    __ Pref(pref_hint_load, MemOperand(a1));
    __ andi(t8, a2, 0x1f);

    __ beq(a2, t8, &ua_chk1w);
    __ nop();  // In delay slot.
    __ lwr(t0, MemOperand(a1));
    __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
    __ lwl(t0,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t4,
           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t5,
           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t6,
           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t7,
           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Less than 32 bytes to copy. Set up for a loop to
    // copy one word at a time.
    __ bind(&ua_chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &ua_smallCopy);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&ua_wordCopy_loop);
    __ lwr(v1, MemOperand(a1));
    __ lwl(v1,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);
    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    // Copy the remaining tail bytes (fewer than one word).
    __ bind(&ua_smallCopy);
    __ beq(a2, zero_reg, &leave);
    __ addu(a3, a0, a2);  // In delay slot.

    __ bind(&ua_smallCopy_loop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &ua_smallCopy_loop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ jr(ra);
    __ nop();
  }
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
#endif
}
#endif

#undef __


UnaryMathFunction CreateSqrtFunction() {
  return &sqrt;
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
(...skipping 550 matching lines...)
    patcher.masm()->nop();  // Pad the empty space.
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS
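
Reviewer note: as a reading aid, here is a minimal C sketch of the copy strategy the stub emits. The sketch and the function name are illustrative, not part of the patch: the real stub additionally prefetches 32-byte cache lines ahead of both pointers, unrolls the 64-byte loop into sixteen explicit load/store pairs, peels one 32-byte chunk before the word loop, and handles mismatched src/dst alignment with lwr/lwl partial-word loads rather than a byte loop.

#include <stddef.h>
#include <stdint.h>

void* MemCopySketch(void* dest, const void* source, size_t n) {
  uint8_t* dst = (uint8_t*)dest;                // a0
  const uint8_t* src = (const uint8_t*)source;  // a1
  if (n >= 8 && (((uintptr_t)dst ^ (uintptr_t)src) & 3) == 0) {
    // Copy 0..3 head bytes so both pointers become word-aligned.
    size_t head = (4 - ((uintptr_t)dst & 3)) & 3;
    n -= head;
    while (head--) *dst++ = *src++;
    // Main loop: 64 bytes (16 words) per iteration.
    while (n >= 64) {
      for (int i = 0; i < 16; i++) {
        ((uint32_t*)dst)[i] = ((const uint32_t*)src)[i];
      }
      dst += 64; src += 64; n -= 64;
    }
    // Word-at-a-time tail (the stub first peels one 32-byte chunk).
    while (n >= 4) {
      *(uint32_t*)dst = *(const uint32_t*)src;
      dst += 4; src += 4; n -= 4;
    }
  }
  // Byte tail; also covers n < 8 and, in this sketch only, the
  // mismatched-alignment case that the stub handles with lwr/lwl.
  while (n--) *dst++ = *src++;
  return dest;
}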