Chromium Code Reviews

Side by Side Diff: src/mips/codegen-mips.cc

Issue 228943009: MIPS: Add big-endian support for MIPS. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Comments addressed Created 6 years, 8 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 49 matching lines...)
60 60
61 { 61 {
62 DoubleRegister input = f12; 62 DoubleRegister input = f12;
63 DoubleRegister result = f0; 63 DoubleRegister result = f0;
64 DoubleRegister double_scratch1 = f4; 64 DoubleRegister double_scratch1 = f4;
65 DoubleRegister double_scratch2 = f6; 65 DoubleRegister double_scratch2 = f6;
66 Register temp1 = t0; 66 Register temp1 = t0;
67 Register temp2 = t1; 67 Register temp2 = t1;
68 Register temp3 = t2; 68 Register temp3 = t2;
69 69
70 if (!IsMipsSoftFloatABI) { 70 __ MovFromFloatParameter(input);
71 // Input value is in f12 anyway, nothing to do.
72 } else {
73 __ Move(input, a0, a1);
74 }
75 __ Push(temp3, temp2, temp1); 71 __ Push(temp3, temp2, temp1);
76 MathExpGenerator::EmitMathExp( 72 MathExpGenerator::EmitMathExp(
77 &masm, input, result, double_scratch1, double_scratch2, 73 &masm, input, result, double_scratch1, double_scratch2,
78 temp1, temp2, temp3); 74 temp1, temp2, temp3);
79 __ Pop(temp3, temp2, temp1); 75 __ Pop(temp3, temp2, temp1);
80 if (!IsMipsSoftFloatABI) { 76 __ MovToFloatResult(result);
81 // Result is already in f0, nothing to do.
82 } else {
83 __ Move(v0, v1, result);
84 }
85 __ Ret(); 77 __ Ret();
86 } 78 }
87 79
88 CodeDesc desc; 80 CodeDesc desc;
89 masm.GetCode(&desc); 81 masm.GetCode(&desc);
90 ASSERT(!RelocInfo::RequiresRelocation(desc)); 82 ASSERT(!RelocInfo::RequiresRelocation(desc));
91 83
92 CPU::FlushICache(buffer, actual_size); 84 CPU::FlushICache(buffer, actual_size);
93 OS::ProtectCode(buffer, actual_size); 85 OS::ProtectCode(buffer, actual_size);
94 86
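Note on the hunk above: the patch drops the explicit IsMipsSoftFloatABI branches in favor of the MovFromFloatParameter / MovToFloatResult macro-assembler helpers, which hide the float-ABI difference. Below is a minimal C++ sketch of the dispatch those helpers encapsulate; the register names are plain variables and the word ordering inside the GPR pair is deliberately glossed over, so treat it as illustration only, not V8 code.

```cpp
#include <cstdint>
#include <cstring>

// Illustrative sketch only (not V8's implementation): under the hard-float
// ABI a double parameter already sits in the FPU argument register (f12);
// under soft-float it arrives as two 32-bit GPR halves (a0/a1) and has to
// be reassembled.  Which GPR holds which half depends on the ABI and byte
// order and is glossed over here.
constexpr bool kIsMipsSoftFloatABI = false;  // assumption for this demo

double FromFloatParameter(double f12, uint32_t a0, uint32_t a1) {
  if (!kIsMipsSoftFloatABI) return f12;      // value is already in f12
  uint32_t words[2] = {a0, a1};              // GPR pair as a memory image
  double d;
  std::memcpy(&d, words, sizeof d);          // reassemble the double
  return d;
}
```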
(...skipping 65 matching lines...)
160 // we can start copying at aligned. 152 // we can start copying at aligned.
161 __ xor_(t8, a1, a0); 153 __ xor_(t8, a1, a0);
162 __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement. 154 __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
163 __ bne(t8, zero_reg, &unaligned); 155 __ bne(t8, zero_reg, &unaligned);
164 __ subu(a3, zero_reg, a0); // In delay slot. 156 __ subu(a3, zero_reg, a0); // In delay slot.
165 157
166 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. 158 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
167 __ beq(a3, zero_reg, &aligned); // Already aligned. 159 __ beq(a3, zero_reg, &aligned); // Already aligned.
168 __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count. 160 __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
169 161
170 __ lwr(t8, MemOperand(a1)); 162 if (kArchEndian == kLittle) {
171 __ addu(a1, a1, a3); 163 __ lwr(t8, MemOperand(a1));
172 __ swr(t8, MemOperand(a0)); 164 __ addu(a1, a1, a3);
173 __ addu(a0, a0, a3); 165 __ swr(t8, MemOperand(a0));
174 166 __ addu(a0, a0, a3);
167 } else {
168 __ lwl(t8, MemOperand(a1));
169 __ addu(a1, a1, a3);
170 __ swl(t8, MemOperand(a0));
171 __ addu(a0, a0, a3);
172 }
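For readers unfamiliar with the partial-word instructions: this hunk copies the few bytes needed to bring dst/src onto a word boundary, and whether lwr/swr or lwl/swl reaches those head bytes flips with the byte order, which is what the new kArchEndian branch selects. A portable C++ sketch of the same head-alignment step (byte-wise, hence endian-neutral; names are illustrative):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch of the head-alignment copy: move just enough bytes that dst and src
// land on a word boundary before the word loop starts.  Assumes dst and src
// have the same misalignment, which the stub has already checked above.
void CopyAlignmentHead(uint8_t*& dst, const uint8_t*& src, size_t& count) {
  const size_t chunk = sizeof(uint32_t);              // loadstore_chunk
  size_t head = (chunk - reinterpret_cast<uintptr_t>(dst) % chunk) % chunk;
  if (head > count) head = count;
  std::memcpy(dst, src, head);                        // the partial-word copy
  dst += head;                                        // a0 += a3
  src += head;                                        // a1 += a3
  count -= head;                                      // a2 -= a3
}
```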
175 // Now dst/src are both aligned to (word) aligned addresses. Set a2 to 173 // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
176 // count how many bytes we have to copy after all the 64 byte chunks are 174 // count how many bytes we have to copy after all the 64 byte chunks are
177 // copied and a3 to the dst pointer after all the 64 byte chunks have been 175 // copied and a3 to the dst pointer after all the 64 byte chunks have been
178 // copied. We will loop, incrementing a0 and a1 until a0 equals a3. 176 // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
179 __ bind(&aligned); 177 __ bind(&aligned);
180 __ andi(t8, a2, 0x3f); 178 __ andi(t8, a2, 0x3f);
181 __ beq(a2, t8, &chkw); // Less than 64? 179 __ beq(a2, t8, &chkw); // Less than 64?
182 __ subu(a3, a2, t8); // In delay slot. 180 __ subu(a3, a2, t8); // In delay slot.
183 __ addu(a3, a0, a3); // Now a3 is the final dst after loop. 181 __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
184 182
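The two instructions after &aligned are plain remainder arithmetic: t8 keeps the bytes that will be left once all whole 64-byte chunks are copied, and a3 becomes the destination address at which the chunk loop must stop. A small sketch of that arithmetic (plain C++, no machine registers):

```cpp
#include <cstdint>

// Loop-bound arithmetic at &aligned: tail_bytes is what remains after all
// 64-byte chunks (andi t8, a2, 0x3f); loop_end is the destination address at
// which the chunk loop terminates (a3 = a0 + (a2 - t8)).
struct ChunkBounds { uint32_t tail_bytes; uintptr_t loop_end; };

ChunkBounds ComputeChunkBounds(uintptr_t dst /*a0*/, uint32_t count /*a2*/) {
  uint32_t tail = count & 0x3f;
  return { tail, dst + (count - tail) };   // loop runs while a0 != loop_end
}
```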
(...skipping 131 matching lines...)
316 __ nop(); 314 __ nop();
317 315
318 // Unaligned case. Only the dst gets aligned so we need to do partial 316 // Unaligned case. Only the dst gets aligned so we need to do partial
319 // loads of the source followed by normal stores to the dst (once we 317 // loads of the source followed by normal stores to the dst (once we
320 // have aligned the destination). 318 // have aligned the destination).
321 __ bind(&unaligned); 319 __ bind(&unaligned);
322 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. 320 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
323 __ beq(a3, zero_reg, &ua_chk16w); 321 __ beq(a3, zero_reg, &ua_chk16w);
324 __ subu(a2, a2, a3); // In delay slot. 322 __ subu(a2, a2, a3); // In delay slot.
325 323
326 __ lwr(v1, MemOperand(a1)); 324 if (kArchEndian == kLittle) {
327 __ lwl(v1, 325 __ lwr(v1, MemOperand(a1));
328 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); 326 __ lwl(v1,
329 __ addu(a1, a1, a3); 327 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
330 __ swr(v1, MemOperand(a0)); 328 __ addu(a1, a1, a3);
331 __ addu(a0, a0, a3); 329 __ swr(v1, MemOperand(a0));
330 __ addu(a0, a0, a3);
331 } else {
332 __ lwl(v1, MemOperand(a1));
333 __ lwr(v1,
334 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
335 __ addu(a1, a1, a3);
336 __ swl(v1, MemOperand(a0));
337 __ addu(a0, a0, a3);
338 }
332 339
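In the unaligned path only the destination gets aligned, so each source word is read with a partial-load pair: one instruction covers the low-addressed bytes, its counterpart (issued with the offset_minus_one operand, i.e. at offset + 3) covers the rest, and which of lwr/lwl plays which role swaps with the byte order. Functionally the pair is simply an unaligned 32-bit load, as in this portable sketch:

```cpp
#include <cstdint>
#include <cstring>

// Portable equivalent of the lwr/lwl pair (lwl/lwr on big-endian): read a
// full 32-bit word from an address that need not be word-aligned.
uint32_t LoadUnalignedWord(const uint8_t* addr) {
  uint32_t word;
  std::memcpy(&word, addr, sizeof word);   // byte-exact, endian-neutral
  return word;
}
```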
333 // Now the dst (but not the source) is aligned. Set a2 to count how many 340 // Now the dst (but not the source) is aligned. Set a2 to count how many
334 // bytes we have to copy after all the 64 byte chunks are copied and a3 to 341 // bytes we have to copy after all the 64 byte chunks are copied and a3 to
335 // the dst pointer after all the 64 byte chunks have been copied. We will 342 // the dst pointer after all the 64 byte chunks have been copied. We will
336 // loop, incrementing a0 and a1 until a0 equals a3. 343 // loop, incrementing a0 and a1 until a0 equals a3.
337 __ bind(&ua_chk16w); 344 __ bind(&ua_chk16w);
338 __ andi(t8, a2, 0x3f); 345 __ andi(t8, a2, 0x3f);
339 __ beq(a2, t8, &ua_chkw); 346 __ beq(a2, t8, &ua_chkw);
340 __ subu(a3, a2, t8); // In delay slot. 347 __ subu(a3, a2, t8); // In delay slot.
341 __ addu(a3, a0, a3); 348 __ addu(a3, a0, a3);
342 349
343 if (pref_hint_store == kPrefHintPrepareForStore) { 350 if (pref_hint_store == kPrefHintPrepareForStore) {
344 __ addu(t0, a0, a2); 351 __ addu(t0, a0, a2);
345 __ Subu(t9, t0, pref_limit); 352 __ Subu(t9, t0, pref_limit);
346 } 353 }
347 354
348 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); 355 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
349 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); 356 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
350 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); 357 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
351 358
352 if (pref_hint_store != kPrefHintPrepareForStore) { 359 if (pref_hint_store != kPrefHintPrepareForStore) {
353 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); 360 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
354 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); 361 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
355 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); 362 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
356 } 363 }
357 364
358 __ bind(&ua_loop16w); 365 __ bind(&ua_loop16w);
359 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); 366 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
360 __ lwr(t0, MemOperand(a1)); 367 if (kArchEndian == kLittle) {
361 __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); 368 __ lwr(t0, MemOperand(a1));
362 __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); 369 __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
370 __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
363 371
364 if (pref_hint_store == kPrefHintPrepareForStore) { 372 if (pref_hint_store == kPrefHintPrepareForStore) {
365 __ sltu(v1, t9, a0); 373 __ sltu(v1, t9, a0);
366 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); 374 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
375 }
376 __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
377
378 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
379 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
380
381 __ bind(&ua_skip_pref);
382 __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
383 __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
384 __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
385 __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
386 __ lwl(t0,
387 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
388 __ lwl(t1,
389 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
390 __ lwl(t2,
391 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
392 __ lwl(t3,
393 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
394 __ lwl(t4,
395 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
396 __ lwl(t5,
397 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
398 __ lwl(t6,
399 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
400 __ lwl(t7,
401 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
402 } else {
403 __ lwl(t0, MemOperand(a1));
404 __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
405 __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
406
407 if (pref_hint_store == kPrefHintPrepareForStore) {
408 __ sltu(v1, t9, a0);
409 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
410 }
411 __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
412
413 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
414 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
415
416 __ bind(&ua_skip_pref);
417 __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
418 __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
419 __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
420 __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
421 __ lwr(t0,
422 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
423 __ lwr(t1,
424 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
425 __ lwr(t2,
426 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
427 __ lwr(t3,
428 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
429 __ lwr(t4,
430 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
431 __ lwr(t5,
432 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
433 __ lwr(t6,
434 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
435 __ lwr(t7,
436 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
367 } 437 }
368 __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
369
370 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
371 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
372
373 __ bind(&ua_skip_pref);
374 __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
375 __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
376 __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
377 __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
378 __ lwl(t0,
379 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
380 __ lwl(t1,
381 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
382 __ lwl(t2,
383 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
384 __ lwl(t3,
385 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
386 __ lwl(t4,
387 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
388 __ lwl(t5,
389 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
390 __ lwl(t6,
391 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
392 __ lwl(t7,
393 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
394 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); 438 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
395 __ sw(t0, MemOperand(a0)); 439 __ sw(t0, MemOperand(a0));
396 __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); 440 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
397 __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); 441 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
398 __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); 442 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
399 __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); 443 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
400 __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); 444 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
401 __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); 445 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
402 __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); 446 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
403 __ lwr(t0, MemOperand(a1, 8, loadstore_chunk)); 447 if (kArchEndian == kLittle) {
404 __ lwr(t1, MemOperand(a1, 9, loadstore_chunk)); 448 __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
405 __ lwr(t2, MemOperand(a1, 10, loadstore_chunk)); 449 __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
406 __ lwr(t3, MemOperand(a1, 11, loadstore_chunk)); 450 __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
407 __ lwr(t4, MemOperand(a1, 12, loadstore_chunk)); 451 __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
408 __ lwr(t5, MemOperand(a1, 13, loadstore_chunk)); 452 __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
409 __ lwr(t6, MemOperand(a1, 14, loadstore_chunk)); 453 __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
410 __ lwr(t7, MemOperand(a1, 15, loadstore_chunk)); 454 __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
411 __ lwl(t0, 455 __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
412 MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); 456 __ lwl(t0,
413 __ lwl(t1, 457 MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
414 MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); 458 __ lwl(t1,
415 __ lwl(t2, 459 MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
416 MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); 460 __ lwl(t2,
417 __ lwl(t3, 461 MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
418 MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one)); 462 __ lwl(t3,
419 __ lwl(t4, 463 MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
420 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); 464 __ lwl(t4,
421 __ lwl(t5, 465 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
422 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); 466 __ lwl(t5,
423 __ lwl(t6, 467 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
424 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); 468 __ lwl(t6,
425 __ lwl(t7, 469 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
426 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); 470 __ lwl(t7,
471 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
472 } else {
473 __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
474 __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
475 __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
476 __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
477 __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
478 __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
479 __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
480 __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
481 __ lwr(t0,
482 MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
483 __ lwr(t1,
484 MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
485 __ lwr(t2,
486 MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
487 __ lwr(t3,
488 MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
489 __ lwr(t4,
490 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
491 __ lwr(t5,
492 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
493 __ lwr(t6,
494 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
495 __ lwr(t7,
496 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
497 }
427 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); 498 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
428 __ sw(t0, MemOperand(a0, 8, loadstore_chunk)); 499 __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
429 __ sw(t1, MemOperand(a0, 9, loadstore_chunk)); 500 __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
430 __ sw(t2, MemOperand(a0, 10, loadstore_chunk)); 501 __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
431 __ sw(t3, MemOperand(a0, 11, loadstore_chunk)); 502 __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
432 __ sw(t4, MemOperand(a0, 12, loadstore_chunk)); 503 __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
433 __ sw(t5, MemOperand(a0, 13, loadstore_chunk)); 504 __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
434 __ sw(t6, MemOperand(a0, 14, loadstore_chunk)); 505 __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
435 __ sw(t7, MemOperand(a0, 15, loadstore_chunk)); 506 __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
436 __ addiu(a0, a0, 16 * loadstore_chunk); 507 __ addiu(a0, a0, 16 * loadstore_chunk);
437 __ bne(a0, a3, &ua_loop16w); 508 __ bne(a0, a3, &ua_loop16w);
438 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. 509 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
439 __ mov(a2, t8); 510 __ mov(a2, t8);
440 511
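The ua_loop16w body above moves 64 bytes per iteration and keeps Pref hints running several chunks ahead of the source and, unless preparing for store, the destination. The shape of that loop, with compiler prefetch builtins standing in for the Pref instruction and illustrative distances rather than the stub's tuned pref_chunk values:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch of the 64-byte main loop: copy in fixed chunks and prefetch a few
// chunks ahead.  __builtin_prefetch is a GCC/Clang builtin used here purely
// for illustration; the stub issues MIPS Pref hints instead.
void ChunkedCopy(uint8_t* dst, const uint8_t* src, size_t n) {
  const size_t kChunk = 64;
  size_t bulk = n - (n & (kChunk - 1));
  for (size_t i = 0; i < bulk; i += kChunk) {
    __builtin_prefetch(src + i + 3 * kChunk, /*rw=*/0);  // read ahead
    __builtin_prefetch(dst + i + 4 * kChunk, /*rw=*/1);  // write ahead
    std::memcpy(dst + i, src + i, kChunk);
  }
  std::memcpy(dst + bulk, src + bulk, n - bulk);         // leftover tail
}
```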
441 // Here, less than 64 bytes remain. Check for 512 // Here, less than 64 bytes remain. Check for
442 // a 32 byte chunk and copy if there is one. Otherwise jump down to 513 // a 32 byte chunk and copy if there is one. Otherwise jump down to
443 // ua_chk1w to handle the tail end of the copy. 514 // ua_chk1w to handle the tail end of the copy.
444 __ bind(&ua_chkw); 515 __ bind(&ua_chkw);
445 __ Pref(pref_hint_load, MemOperand(a1)); 516 __ Pref(pref_hint_load, MemOperand(a1));
446 __ andi(t8, a2, 0x1f); 517 __ andi(t8, a2, 0x1f);
447 518
448 __ beq(a2, t8, &ua_chk1w); 519 __ beq(a2, t8, &ua_chk1w);
449 __ nop(); // In delay slot. 520 __ nop(); // In delay slot.
450 __ lwr(t0, MemOperand(a1)); 521 if (kArchEndian == kLittle) {
451 __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); 522 __ lwr(t0, MemOperand(a1));
452 __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); 523 __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
453 __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); 524 __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
454 __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); 525 __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
455 __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); 526 __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
456 __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); 527 __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
457 __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); 528 __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
458 __ lwl(t0, 529 __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
459 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); 530 __ lwl(t0,
460 __ lwl(t1, 531 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
461 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); 532 __ lwl(t1,
462 __ lwl(t2, 533 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
463 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); 534 __ lwl(t2,
464 __ lwl(t3, 535 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
465 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); 536 __ lwl(t3,
466 __ lwl(t4, 537 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
467 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); 538 __ lwl(t4,
468 __ lwl(t5, 539 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
469 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); 540 __ lwl(t5,
470 __ lwl(t6, 541 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
471 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); 542 __ lwl(t6,
472 __ lwl(t7, 543 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
473 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); 544 __ lwl(t7,
545 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
546 } else {
547 __ lwl(t0, MemOperand(a1));
548 __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
549 __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
550 __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
551 __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
552 __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
553 __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
554 __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
555 __ lwr(t0,
556 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
557 __ lwr(t1,
558 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
559 __ lwr(t2,
560 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
561 __ lwr(t3,
562 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
563 __ lwr(t4,
564 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
565 __ lwr(t5,
566 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
567 __ lwr(t6,
568 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
569 __ lwr(t7,
570 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
571 }
474 __ addiu(a1, a1, 8 * loadstore_chunk); 572 __ addiu(a1, a1, 8 * loadstore_chunk);
475 __ sw(t0, MemOperand(a0)); 573 __ sw(t0, MemOperand(a0));
476 __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); 574 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
477 __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); 575 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
478 __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); 576 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
479 __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); 577 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
480 __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); 578 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
481 __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); 579 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
482 __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); 580 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
483 __ addiu(a0, a0, 8 * loadstore_chunk); 581 __ addiu(a0, a0, 8 * loadstore_chunk);
484 582
485 // Less than 32 bytes to copy. Set up for a loop to 583 // Less than 32 bytes to copy. Set up for a loop to
486 // copy one word at a time. 584 // copy one word at a time.
487 __ bind(&ua_chk1w); 585 __ bind(&ua_chk1w);
488 __ andi(a2, t8, loadstore_chunk - 1); 586 __ andi(a2, t8, loadstore_chunk - 1);
489 __ beq(a2, t8, &ua_smallCopy); 587 __ beq(a2, t8, &ua_smallCopy);
490 __ subu(a3, t8, a2); // In delay slot. 588 __ subu(a3, t8, a2); // In delay slot.
491 __ addu(a3, a0, a3); 589 __ addu(a3, a0, a3);
492 590
493 __ bind(&ua_wordCopy_loop); 591 __ bind(&ua_wordCopy_loop);
494 __ lwr(v1, MemOperand(a1)); 592 if (kArchEndian == kLittle) {
495 __ lwl(v1, 593 __ lwr(v1, MemOperand(a1));
496 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); 594 __ lwl(v1,
595 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
596 } else {
597 __ lwl(v1, MemOperand(a1));
598 __ lwr(v1,
599 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
600 }
497 __ addiu(a0, a0, loadstore_chunk); 601 __ addiu(a0, a0, loadstore_chunk);
498 __ addiu(a1, a1, loadstore_chunk); 602 __ addiu(a1, a1, loadstore_chunk);
499 __ bne(a0, a3, &ua_wordCopy_loop); 603 __ bne(a0, a3, &ua_wordCopy_loop);
500 __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. 604 __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
501 605
502 // Copy the last 8 bytes. 606 // Copy the last 8 bytes.
503 __ bind(&ua_smallCopy); 607 __ bind(&ua_smallCopy);
504 __ beq(a2, zero_reg, &leave); 608 __ beq(a2, zero_reg, &leave);
505 __ addu(a3, a0, a2); // In delay slot. 609 __ addu(a3, a0, a2); // In delay slot.
506 610
(...skipping 208 matching lines...)
715 819
716 // Hole found, store the-hole NaN. 820 // Hole found, store the-hole NaN.
717 __ bind(&convert_hole); 821 __ bind(&convert_hole);
718 if (FLAG_debug_code) { 822 if (FLAG_debug_code) {
719 // Restore a "smi-untagged" heap object. 823 // Restore a "smi-untagged" heap object.
720 __ SmiTag(t5); 824 __ SmiTag(t5);
721 __ Or(t5, t5, Operand(1)); 825 __ Or(t5, t5, Operand(1));
722 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 826 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
723 __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5)); 827 __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
724 } 828 }
725 __ sw(t0, MemOperand(t3)); // mantissa 829 __ sw(t0, MemOperand(t3, Register::kMantissaOffset)); // mantissa
726 __ sw(t1, MemOperand(t3, kIntSize)); // exponent 830 __ sw(t1, MemOperand(t3, Register::kExponentOffset)); // exponent
727 __ Addu(t3, t3, kDoubleSize); 831 __ Addu(t3, t3, kDoubleSize);
728 832
729 __ bind(&entry); 833 __ bind(&entry);
730 __ Branch(&loop, lt, t3, Operand(t2)); 834 __ Branch(&loop, lt, t3, Operand(t2));
731 835
732 __ pop(ra); 836 __ pop(ra);
733 __ bind(&done); 837 __ bind(&done);
734 } 838 }
735 839
736 840
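The two stores above are the core of the endian fix in this function: the hole NaN is written as two 32-bit words, and the word carrying the sign/exponent sits at byte offset 4 on little-endian MIPS but at offset 0 on big-endian, which is what Register::kMantissaOffset / Register::kExponentOffset encode. A small standalone check of that layout (assumes IEEE-754 doubles, which V8 requires anyway):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Shows why the two sw offsets must differ by endianness: for 1.0 the
// sign/exponent word is 0x3FF00000 and the mantissa word is 0, so whichever
// half of the in-memory double holds 0x3FF00000 is the exponent word.
int main() {
  double d = 1.0;
  uint32_t words[2];
  std::memcpy(words, &d, sizeof d);
  int exponent_offset = (words[0] == 0x3FF00000u) ? 0 : 4;  // 4 on LE, 0 on BE
  std::printf("exponent word at +%d, mantissa word at +%d\n",
              exponent_offset, 4 - exponent_offset);
}
```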
(...skipping 29 matching lines...)
766 __ sll(a0, t1, 1); 870 __ sll(a0, t1, 1);
767 __ Addu(a0, a0, FixedDoubleArray::kHeaderSize); 871 __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
768 __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS); 872 __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
769 // t2: destination FixedArray, not tagged as heap object 873 // t2: destination FixedArray, not tagged as heap object
770 // Set destination FixedDoubleArray's length and map. 874 // Set destination FixedDoubleArray's length and map.
771 __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex); 875 __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
772 __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset)); 876 __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
773 __ sw(t5, MemOperand(t2, HeapObject::kMapOffset)); 877 __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
774 878
775 // Prepare for conversion loop. 879 // Prepare for conversion loop.
776 __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4)); 880 __ Addu(t0, t0, Operand(
881 FixedDoubleArray::kHeaderSize - kHeapObjectTag
882 + Register::kExponentOffset));
777 __ Addu(a3, t2, Operand(FixedArray::kHeaderSize)); 883 __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
778 __ Addu(t2, t2, Operand(kHeapObjectTag)); 884 __ Addu(t2, t2, Operand(kHeapObjectTag));
779 __ sll(t1, t1, 1); 885 __ sll(t1, t1, 1);
780 __ Addu(t1, a3, t1); 886 __ Addu(t1, a3, t1);
781 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex); 887 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
782 __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex); 888 __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
783 // Using offsetted addresses. 889 // Using offsetted addresses.
784 // a3: begin of destination FixedArray element fields, not tagged 890 // a3: begin of destination FixedArray element fields, not tagged
785 // t0: begin of source FixedDoubleArray element fields, not tagged, +4 891 // t0: begin of source FixedDoubleArray element fields, not tagged,
892 // points to the exponent
786 // t1: end of destination FixedArray, not tagged 893 // t1: end of destination FixedArray, not tagged
787 // t2: destination FixedArray 894 // t2: destination FixedArray
788 // t3: the-hole pointer 895 // t3: the-hole pointer
789 // t5: heap number map 896 // t5: heap number map
790 __ Branch(&entry); 897 __ Branch(&entry);
791 898
792 // Call into runtime if GC is required. 899 // Call into runtime if GC is required.
793 __ bind(&gc_required); 900 __ bind(&gc_required);
794 __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit()); 901 __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
795 902
796 __ Branch(fail); 903 __ Branch(fail);
797 904
798 __ bind(&loop); 905 __ bind(&loop);
799 __ lw(a1, MemOperand(t0)); 906 __ lw(a1, MemOperand(t0));
800 __ Addu(t0, t0, kDoubleSize); 907 __ Addu(t0, t0, kDoubleSize);
801 // a1: current element's upper 32 bit 908 // a1: current element's upper 32 bit
802 // t0: address of next element's upper 32 bit 909 // t0: address of next element's upper 32 bit
803 __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32)); 910 __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
804 911
805 // Non-hole double, copy value into a heap number. 912 // Non-hole double, copy value into a heap number.
806 __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required); 913 __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
807 // a2: new heap number 914 // a2: new heap number
808 __ lw(a0, MemOperand(t0, -12)); 915 // Load mantissa of current element; t0 points to the exponent of the next element.
916 __ lw(a0, MemOperand(t0, (Register::kMantissaOffset
917 - Register::kExponentOffset - kDoubleSize)));
809 __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset)); 918 __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
810 __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset)); 919 __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
811 __ mov(a0, a3); 920 __ mov(a0, a3);
812 __ sw(a2, MemOperand(a3)); 921 __ sw(a2, MemOperand(a3));
813 __ Addu(a3, a3, kIntSize); 922 __ Addu(a3, a3, kIntSize);
814 __ RecordWrite(t2, 923 __ RecordWrite(t2,
815 a0, 924 a0,
816 a2, 925 a2,
817 kRAHasBeenSaved, 926 kRAHasBeenSaved,
818 kDontSaveFPRegs, 927 kDontSaveFPRegs,
(...skipping 184 matching lines...)
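In the double-to-object direction above, the loop advances t0 past the whole element right after loading its exponent word, so the later mantissa load uses the relative offset kMantissaOffset - kExponentOffset - kDoubleSize; on little-endian that evaluates to the old hard-coded -12, on big-endian to -4. A tiny sketch of that arithmetic:

```cpp
// Offset of the mantissa word relative to t0 once t0 already points at the
// exponent word of the *next* element (t0 was advanced by kDoubleSize).
constexpr int kDoubleSizeSketch = 8;

constexpr int MantissaLoadOffset(int mantissa_offset, int exponent_offset) {
  return mantissa_offset - exponent_offset - kDoubleSizeSketch;
}

static_assert(MantissaLoadOffset(0, 4) == -12, "little-endian layout");
static_assert(MantissaLoadOffset(4, 0) == -4,  "big-endian layout");
```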
1003 __ Move(double_scratch2, 1); 1112 __ Move(double_scratch2, 1);
1004 __ add_d(result, result, double_scratch2); 1113 __ add_d(result, result, double_scratch2);
1005 __ srl(temp1, temp2, 11); 1114 __ srl(temp1, temp2, 11);
1006 __ Ext(temp2, temp2, 0, 11); 1115 __ Ext(temp2, temp2, 0, 11);
1007 __ Addu(temp1, temp1, Operand(0x3ff)); 1116 __ Addu(temp1, temp1, Operand(0x3ff));
1008 1117
1009 // Must not call ExpConstant() after overwriting temp3! 1118 // Must not call ExpConstant() after overwriting temp3!
1010 __ li(temp3, Operand(ExternalReference::math_exp_log_table())); 1119 __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
1011 __ sll(at, temp2, 3); 1120 __ sll(at, temp2, 3);
1012 __ Addu(temp3, temp3, Operand(at)); 1121 __ Addu(temp3, temp3, Operand(at));
1013 __ lw(temp2, MemOperand(temp3, 0)); 1122 __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
1014 __ lw(temp3, MemOperand(temp3, kPointerSize)); 1123 __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
1015 // The first word loaded is in the lower-numbered register. 1124 // The first word loaded is in the lower-numbered register.
1016 if (temp2.code() < temp3.code()) { 1125 if (temp2.code() < temp3.code()) {
1017 __ sll(at, temp1, 20); 1126 __ sll(at, temp1, 20);
1018 __ Or(temp1, temp3, at); 1127 __ Or(temp1, temp3, at);
1019 __ Move(double_scratch1, temp2, temp1); 1128 __ Move(double_scratch1, temp2, temp1);
1020 } else { 1129 } else {
1021 __ sll(at, temp1, 20); 1130 __ sll(at, temp1, 20);
1022 __ Or(temp1, temp2, at); 1131 __ Or(temp1, temp2, at);
1023 __ Move(double_scratch1, temp3, temp1); 1132 __ Move(double_scratch1, temp3, temp1);
1024 } 1133 }
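The hunk above is the table lookup at the end of EmitMathExp: the two 32-bit halves of a table entry are now loaded with the endian-aware offsets, the biased exponent computed earlier is shifted into bits 20..30 and OR-ed into the high half, and the pair is moved into an FPU register (the branch on register codes only decides which GPR feeds the low half of the FPU pair). The bit manipulation, in a hedged standalone form that assumes IEEE-754 doubles:

```cpp
#include <cstdint>
#include <cstring>

// Sketch of the scaling step: OR a biased exponent (shifted to bits 20..30)
// into the high word of a table entry and reinterpret the pair as a double.
// Mirrors the sll(temp1, 20) / Or / Move sequence; FPU register-pair ordering
// is handled by the stub and not modelled here.
double ScaleTableEntry(uint32_t mantissa_word, uint32_t exponent_word,
                       uint32_t biased_exponent) {
  uint32_t high = exponent_word | (biased_exponent << 20);
  uint64_t bits = (static_cast<uint64_t>(high) << 32) | mantissa_word;
  double d;
  std::memcpy(&d, &bits, sizeof d);   // same host byte order for both types
  return d;
}
```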
(...skipping 88 matching lines...)
1113 patcher.masm()->nop(); // Pad the empty space. 1222 patcher.masm()->nop(); // Pad the empty space.
1114 } 1223 }
1115 } 1224 }
1116 1225
1117 1226
1118 #undef __ 1227 #undef __
1119 1228
1120 } } // namespace v8::internal 1229 } } // namespace v8::internal
1121 1230
1122 #endif // V8_TARGET_ARCH_MIPS 1231 #endif // V8_TARGET_ARCH_MIPS