Chromium Code Reviews

Side by Side Diff: src/mips64/macro-assembler-mips64.cc

Issue 1133163005: MIPS64: Enable shorten-64-to-32 warning. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix type related to mach_timespec. Created 5 years, 6 months ago
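Patch context: Clang's -Wshorten-64-to-32 warning flags every implicit truncation of a 64-bit value to a 32-bit parameter, so the immediates below gain an explicit static_cast<int32_t> after their range has already been checked (is_int16/is_uint16). A minimal sketch of the pattern, assuming hypothetical names (fits_int16, addiu_stub, EmitAddImmediate) that are not part of V8:

    #include <cstdint>

    // Range check equivalent to the one the macro-assembler performs before
    // emitting a 16-bit immediate instruction form.
    static bool fits_int16(int64_t value) {
      return value >= -32768 && value <= 32767;
    }

    // Stand-in for an assembler instruction that only accepts a 32-bit immediate.
    static void addiu_stub(int32_t imm) { (void)imm; }

    void EmitAddImmediate(int64_t imm64) {
      if (fits_int16(imm64)) {
        // addiu_stub(imm64);                     // -Wshorten-64-to-32: implicit int64 -> int32
        addiu_stub(static_cast<int32_t>(imm64));  // explicit cast: narrowing is deliberate and range-checked
      }
    }

The cast does not change the emitted value; it only records that the narrowing is intentional, which is what silences the warning.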
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <limits.h> // For LONG_MIN, LONG_MAX. 5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6 6
7 #include "src/v8.h" 7 #include "src/v8.h"
8 8
9 #if V8_TARGET_ARCH_MIPS64 9 #if V8_TARGET_ARCH_MIPS64
10 10
(...skipping 604 matching lines...)
615 615
616 616
617 // --------------------------------------------------------------------------- 617 // ---------------------------------------------------------------------------
618 // Instruction macros. 618 // Instruction macros.
619 619
620 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { 620 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
621 if (rt.is_reg()) { 621 if (rt.is_reg()) {
622 addu(rd, rs, rt.rm()); 622 addu(rd, rs, rt.rm());
623 } else { 623 } else {
624 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 624 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
625 addiu(rd, rs, rt.imm64_); 625 addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
626 } else { 626 } else {
627 // li handles the relocation. 627 // li handles the relocation.
628 DCHECK(!rs.is(at)); 628 DCHECK(!rs.is(at));
629 li(at, rt); 629 li(at, rt);
630 addu(rd, rs, at); 630 addu(rd, rs, at);
631 } 631 }
632 } 632 }
633 } 633 }
634 634
635 635
636 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) { 636 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
637 if (rt.is_reg()) { 637 if (rt.is_reg()) {
638 daddu(rd, rs, rt.rm()); 638 daddu(rd, rs, rt.rm());
639 } else { 639 } else {
640 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 640 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
641 daddiu(rd, rs, rt.imm64_); 641 daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
642 } else { 642 } else {
643 // li handles the relocation. 643 // li handles the relocation.
644 DCHECK(!rs.is(at)); 644 DCHECK(!rs.is(at));
645 li(at, rt); 645 li(at, rt);
646 daddu(rd, rs, at); 646 daddu(rd, rs, at);
647 } 647 }
648 } 648 }
649 } 649 }
650 650
651 651
652 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { 652 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
653 if (rt.is_reg()) { 653 if (rt.is_reg()) {
654 subu(rd, rs, rt.rm()); 654 subu(rd, rs, rt.rm());
655 } else { 655 } else {
656 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 656 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
657 addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm). 657 addiu(rd, rs, static_cast<int32_t>(
658 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
658 } else { 659 } else {
659 // li handles the relocation. 660 // li handles the relocation.
660 DCHECK(!rs.is(at)); 661 DCHECK(!rs.is(at));
661 li(at, rt); 662 li(at, rt);
662 subu(rd, rs, at); 663 subu(rd, rs, at);
663 } 664 }
664 } 665 }
665 } 666 }
666 667
667 668
668 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { 669 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
669 if (rt.is_reg()) { 670 if (rt.is_reg()) {
670 dsubu(rd, rs, rt.rm()); 671 dsubu(rd, rs, rt.rm());
671 } else { 672 } else {
672 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 673 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
673 daddiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm). 674 daddiu(rd, rs,
675 static_cast<int32_t>(
676 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
674 } else { 677 } else {
675 // li handles the relocation. 678 // li handles the relocation.
676 DCHECK(!rs.is(at)); 679 DCHECK(!rs.is(at));
677 li(at, rt); 680 li(at, rt);
678 dsubu(rd, rs, at); 681 dsubu(rd, rs, at);
679 } 682 }
680 } 683 }
681 } 684 }
682 685
683 686
(...skipping 376 matching lines...)
1060 } 1063 }
1061 } 1064 }
1062 } 1065 }
1063 1066
1064 1067
1065 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { 1068 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1066 if (rt.is_reg()) { 1069 if (rt.is_reg()) {
1067 and_(rd, rs, rt.rm()); 1070 and_(rd, rs, rt.rm());
1068 } else { 1071 } else {
1069 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 1072 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1070 andi(rd, rs, rt.imm64_); 1073 andi(rd, rs, static_cast<int32_t>(rt.imm64_));
1071 } else { 1074 } else {
1072 // li handles the relocation. 1075 // li handles the relocation.
1073 DCHECK(!rs.is(at)); 1076 DCHECK(!rs.is(at));
1074 li(at, rt); 1077 li(at, rt);
1075 and_(rd, rs, at); 1078 and_(rd, rs, at);
1076 } 1079 }
1077 } 1080 }
1078 } 1081 }
1079 1082
1080 1083
1081 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { 1084 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1082 if (rt.is_reg()) { 1085 if (rt.is_reg()) {
1083 or_(rd, rs, rt.rm()); 1086 or_(rd, rs, rt.rm());
1084 } else { 1087 } else {
1085 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 1088 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1086 ori(rd, rs, rt.imm64_); 1089 ori(rd, rs, static_cast<int32_t>(rt.imm64_));
1087 } else { 1090 } else {
1088 // li handles the relocation. 1091 // li handles the relocation.
1089 DCHECK(!rs.is(at)); 1092 DCHECK(!rs.is(at));
1090 li(at, rt); 1093 li(at, rt);
1091 or_(rd, rs, at); 1094 or_(rd, rs, at);
1092 } 1095 }
1093 } 1096 }
1094 } 1097 }
1095 1098
1096 1099
1097 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { 1100 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1098 if (rt.is_reg()) { 1101 if (rt.is_reg()) {
1099 xor_(rd, rs, rt.rm()); 1102 xor_(rd, rs, rt.rm());
1100 } else { 1103 } else {
1101 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 1104 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1102 xori(rd, rs, rt.imm64_); 1105 xori(rd, rs, static_cast<int32_t>(rt.imm64_));
1103 } else { 1106 } else {
1104 // li handles the relocation. 1107 // li handles the relocation.
1105 DCHECK(!rs.is(at)); 1108 DCHECK(!rs.is(at));
1106 li(at, rt); 1109 li(at, rt);
1107 xor_(rd, rs, at); 1110 xor_(rd, rs, at);
1108 } 1111 }
1109 } 1112 }
1110 } 1113 }
1111 1114
1112 1115
(...skipping 16 matching lines...)
1129 li(at, -1); 1132 li(at, -1);
1130 xor_(rs, rt.rm(), at); 1133 xor_(rs, rt.rm(), at);
1131 } 1134 }
1132 1135
1133 1136
1134 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { 1137 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1135 if (rt.is_reg()) { 1138 if (rt.is_reg()) {
1136 slt(rd, rs, rt.rm()); 1139 slt(rd, rs, rt.rm());
1137 } else { 1140 } else {
1138 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 1141 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1139 slti(rd, rs, rt.imm64_); 1142 slti(rd, rs, static_cast<int32_t>(rt.imm64_));
1140 } else { 1143 } else {
1141 // li handles the relocation. 1144 // li handles the relocation.
1142 DCHECK(!rs.is(at)); 1145 DCHECK(!rs.is(at));
1143 li(at, rt); 1146 li(at, rt);
1144 slt(rd, rs, at); 1147 slt(rd, rs, at);
1145 } 1148 }
1146 } 1149 }
1147 } 1150 }
1148 1151
1149 1152
1150 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { 1153 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1151 if (rt.is_reg()) { 1154 if (rt.is_reg()) {
1152 sltu(rd, rs, rt.rm()); 1155 sltu(rd, rs, rt.rm());
1153 } else { 1156 } else {
1154 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 1157 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1155 sltiu(rd, rs, rt.imm64_); 1158 sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
1156 } else { 1159 } else {
1157 // li handles the relocation. 1160 // li handles the relocation.
1158 DCHECK(!rs.is(at)); 1161 DCHECK(!rs.is(at));
1159 li(at, rt); 1162 li(at, rt);
1160 sltu(rd, rs, at); 1163 sltu(rd, rs, at);
1161 } 1164 }
1162 } 1165 }
1163 } 1166 }
1164 1167
1165 1168
(...skipping 1158 matching lines...)
2324 r2 = scratch; 2327 r2 = scratch;
2325 li(r2, rt); 2328 li(r2, rt);
2326 slt(scratch, r2, rs); 2329 slt(scratch, r2, rs);
2327 bne(scratch, zero_reg, offset); 2330 bne(scratch, zero_reg, offset);
2328 } 2331 }
2329 break; 2332 break;
2330 case greater_equal: 2333 case greater_equal:
2331 if (rt.imm64_ == 0) { 2334 if (rt.imm64_ == 0) {
2332 bgez(rs, offset); 2335 bgez(rs, offset);
2333 } else if (is_int16(rt.imm64_)) { 2336 } else if (is_int16(rt.imm64_)) {
2334 slti(scratch, rs, rt.imm64_); 2337 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2335 beq(scratch, zero_reg, offset); 2338 beq(scratch, zero_reg, offset);
2336 } else { 2339 } else {
2337 r2 = scratch; 2340 r2 = scratch;
2338 li(r2, rt); 2341 li(r2, rt);
2339 slt(scratch, rs, r2); 2342 slt(scratch, rs, r2);
2340 beq(scratch, zero_reg, offset); 2343 beq(scratch, zero_reg, offset);
2341 } 2344 }
2342 break; 2345 break;
2343 case less: 2346 case less:
2344 if (rt.imm64_ == 0) { 2347 if (rt.imm64_ == 0) {
2345 bltz(rs, offset); 2348 bltz(rs, offset);
2346 } else if (is_int16(rt.imm64_)) { 2349 } else if (is_int16(rt.imm64_)) {
2347 slti(scratch, rs, rt.imm64_); 2350 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2348 bne(scratch, zero_reg, offset); 2351 bne(scratch, zero_reg, offset);
2349 } else { 2352 } else {
2350 r2 = scratch; 2353 r2 = scratch;
2351 li(r2, rt); 2354 li(r2, rt);
2352 slt(scratch, rs, r2); 2355 slt(scratch, rs, r2);
2353 bne(scratch, zero_reg, offset); 2356 bne(scratch, zero_reg, offset);
2354 } 2357 }
2355 break; 2358 break;
2356 case less_equal: 2359 case less_equal:
2357 if (rt.imm64_ == 0) { 2360 if (rt.imm64_ == 0) {
(...skipping 13 matching lines...)
2371 r2 = scratch; 2374 r2 = scratch;
2372 li(r2, rt); 2375 li(r2, rt);
2373 sltu(scratch, r2, rs); 2376 sltu(scratch, r2, rs);
2374 bne(scratch, zero_reg, offset); 2377 bne(scratch, zero_reg, offset);
2375 } 2378 }
2376 break; 2379 break;
2377 case Ugreater_equal: 2380 case Ugreater_equal:
2378 if (rt.imm64_ == 0) { 2381 if (rt.imm64_ == 0) {
2379 b(offset); 2382 b(offset);
2380 } else if (is_int16(rt.imm64_)) { 2383 } else if (is_int16(rt.imm64_)) {
2381 sltiu(scratch, rs, rt.imm64_); 2384 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2382 beq(scratch, zero_reg, offset); 2385 beq(scratch, zero_reg, offset);
2383 } else { 2386 } else {
2384 r2 = scratch; 2387 r2 = scratch;
2385 li(r2, rt); 2388 li(r2, rt);
2386 sltu(scratch, rs, r2); 2389 sltu(scratch, rs, r2);
2387 beq(scratch, zero_reg, offset); 2390 beq(scratch, zero_reg, offset);
2388 } 2391 }
2389 break; 2392 break;
2390 case Uless: 2393 case Uless:
2391 if (rt.imm64_ == 0) { 2394 if (rt.imm64_ == 0) {
2392 // No code needs to be emitted. 2395 // No code needs to be emitted.
2393 return; 2396 return;
2394 } else if (is_int16(rt.imm64_)) { 2397 } else if (is_int16(rt.imm64_)) {
2395 sltiu(scratch, rs, rt.imm64_); 2398 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2396 bne(scratch, zero_reg, offset); 2399 bne(scratch, zero_reg, offset);
2397 } else { 2400 } else {
2398 r2 = scratch; 2401 r2 = scratch;
2399 li(r2, rt); 2402 li(r2, rt);
2400 sltu(scratch, rs, r2); 2403 sltu(scratch, rs, r2);
2401 bne(scratch, zero_reg, offset); 2404 bne(scratch, zero_reg, offset);
2402 } 2405 }
2403 break; 2406 break;
2404 case Uless_equal: 2407 case Uless_equal:
2405 if (rt.imm64_ == 0) { 2408 if (rt.imm64_ == 0) {
(...skipping 185 matching lines...)
2591 slt(scratch, r2, rs); 2594 slt(scratch, r2, rs);
2592 offset = shifted_branch_offset(L, false); 2595 offset = shifted_branch_offset(L, false);
2593 bne(scratch, zero_reg, offset); 2596 bne(scratch, zero_reg, offset);
2594 } 2597 }
2595 break; 2598 break;
2596 case greater_equal: 2599 case greater_equal:
2597 if (rt.imm64_ == 0) { 2600 if (rt.imm64_ == 0) {
2598 offset = shifted_branch_offset(L, false); 2601 offset = shifted_branch_offset(L, false);
2599 bgez(rs, offset); 2602 bgez(rs, offset);
2600 } else if (is_int16(rt.imm64_)) { 2603 } else if (is_int16(rt.imm64_)) {
2601 slti(scratch, rs, rt.imm64_); 2604 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2602 offset = shifted_branch_offset(L, false); 2605 offset = shifted_branch_offset(L, false);
2603 beq(scratch, zero_reg, offset); 2606 beq(scratch, zero_reg, offset);
2604 } else { 2607 } else {
2605 DCHECK(!scratch.is(rs)); 2608 DCHECK(!scratch.is(rs));
2606 r2 = scratch; 2609 r2 = scratch;
2607 li(r2, rt); 2610 li(r2, rt);
2608 slt(scratch, rs, r2); 2611 slt(scratch, rs, r2);
2609 offset = shifted_branch_offset(L, false); 2612 offset = shifted_branch_offset(L, false);
2610 beq(scratch, zero_reg, offset); 2613 beq(scratch, zero_reg, offset);
2611 } 2614 }
2612 break; 2615 break;
2613 case less: 2616 case less:
2614 if (rt.imm64_ == 0) { 2617 if (rt.imm64_ == 0) {
2615 offset = shifted_branch_offset(L, false); 2618 offset = shifted_branch_offset(L, false);
2616 bltz(rs, offset); 2619 bltz(rs, offset);
2617 } else if (is_int16(rt.imm64_)) { 2620 } else if (is_int16(rt.imm64_)) {
2618 slti(scratch, rs, rt.imm64_); 2621 slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
2619 offset = shifted_branch_offset(L, false); 2622 offset = shifted_branch_offset(L, false);
2620 bne(scratch, zero_reg, offset); 2623 bne(scratch, zero_reg, offset);
2621 } else { 2624 } else {
2622 DCHECK(!scratch.is(rs)); 2625 DCHECK(!scratch.is(rs));
2623 r2 = scratch; 2626 r2 = scratch;
2624 li(r2, rt); 2627 li(r2, rt);
2625 slt(scratch, rs, r2); 2628 slt(scratch, rs, r2);
2626 offset = shifted_branch_offset(L, false); 2629 offset = shifted_branch_offset(L, false);
2627 bne(scratch, zero_reg, offset); 2630 bne(scratch, zero_reg, offset);
2628 } 2631 }
(...skipping 23 matching lines...)
2652 sltu(scratch, r2, rs); 2655 sltu(scratch, r2, rs);
2653 offset = shifted_branch_offset(L, false); 2656 offset = shifted_branch_offset(L, false);
2654 bne(scratch, zero_reg, offset); 2657 bne(scratch, zero_reg, offset);
2655 } 2658 }
2656 break; 2659 break;
2657 case Ugreater_equal: 2660 case Ugreater_equal:
2658 if (rt.imm64_ == 0) { 2661 if (rt.imm64_ == 0) {
2659 offset = shifted_branch_offset(L, false); 2662 offset = shifted_branch_offset(L, false);
2660 b(offset); 2663 b(offset);
2661 } else if (is_int16(rt.imm64_)) { 2664 } else if (is_int16(rt.imm64_)) {
2662 sltiu(scratch, rs, rt.imm64_); 2665 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2663 offset = shifted_branch_offset(L, false); 2666 offset = shifted_branch_offset(L, false);
2664 beq(scratch, zero_reg, offset); 2667 beq(scratch, zero_reg, offset);
2665 } else { 2668 } else {
2666 DCHECK(!scratch.is(rs)); 2669 DCHECK(!scratch.is(rs));
2667 r2 = scratch; 2670 r2 = scratch;
2668 li(r2, rt); 2671 li(r2, rt);
2669 sltu(scratch, rs, r2); 2672 sltu(scratch, rs, r2);
2670 offset = shifted_branch_offset(L, false); 2673 offset = shifted_branch_offset(L, false);
2671 beq(scratch, zero_reg, offset); 2674 beq(scratch, zero_reg, offset);
2672 } 2675 }
2673 break; 2676 break;
2674 case Uless: 2677 case Uless:
2675 if (rt.imm64_ == 0) { 2678 if (rt.imm64_ == 0) {
2676 // No code needs to be emitted. 2679 // No code needs to be emitted.
2677 return; 2680 return;
2678 } else if (is_int16(rt.imm64_)) { 2681 } else if (is_int16(rt.imm64_)) {
2679 sltiu(scratch, rs, rt.imm64_); 2682 sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
2680 offset = shifted_branch_offset(L, false); 2683 offset = shifted_branch_offset(L, false);
2681 bne(scratch, zero_reg, offset); 2684 bne(scratch, zero_reg, offset);
2682 } else { 2685 } else {
2683 DCHECK(!scratch.is(rs)); 2686 DCHECK(!scratch.is(rs));
2684 r2 = scratch; 2687 r2 = scratch;
2685 li(r2, rt); 2688 li(r2, rt);
2686 sltu(scratch, rs, r2); 2689 sltu(scratch, rs, r2);
2687 offset = shifted_branch_offset(L, false); 2690 offset = shifted_branch_offset(L, false);
2688 bne(scratch, zero_reg, offset); 2691 bne(scratch, zero_reg, offset);
2689 } 2692 }
(...skipping 759 matching lines...)
3449 ld(t9, MemOperand(topaddr, kPointerSize)); 3452 ld(t9, MemOperand(topaddr, kPointerSize));
3450 } else { 3453 } else {
3451 if (emit_debug_code()) { 3454 if (emit_debug_code()) {
3452 // Assert that result actually contains top on entry. t9 is used 3455 // Assert that result actually contains top on entry. t9 is used
3453 // immediately below so this use of t9 does not cause difference with 3456 // immediately below so this use of t9 does not cause difference with
3454 // respect to register content between debug and release mode. 3457 // respect to register content between debug and release mode.
3455 ld(t9, MemOperand(topaddr)); 3458 ld(t9, MemOperand(topaddr));
3456 Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); 3459 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3457 } 3460 }
3458 // Load allocation limit into t9. Result already contains allocation top. 3461 // Load allocation limit into t9. Result already contains allocation top.
3459 ld(t9, MemOperand(topaddr, limit - top)); 3462 ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
3460 } 3463 }
3461 3464
3462 DCHECK(kPointerSize == kDoubleSize); 3465 DCHECK(kPointerSize == kDoubleSize);
3463 if (emit_debug_code()) { 3466 if (emit_debug_code()) {
3464 And(at, result, Operand(kDoubleAlignmentMask)); 3467 And(at, result, Operand(kDoubleAlignmentMask));
3465 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); 3468 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3466 } 3469 }
3467 3470
3468 // Calculate new top and bail out if new space is exhausted. Use result 3471 // Calculate new top and bail out if new space is exhausted. Use result
3469 // to calculate the new top. 3472 // to calculate the new top.
(...skipping 55 matching lines...)
3525 ld(t9, MemOperand(topaddr, kPointerSize)); 3528 ld(t9, MemOperand(topaddr, kPointerSize));
3526 } else { 3529 } else {
3527 if (emit_debug_code()) { 3530 if (emit_debug_code()) {
3528 // Assert that result actually contains top on entry. t9 is used 3531 // Assert that result actually contains top on entry. t9 is used
3529 // immediately below so this use of t9 does not cause difference with 3532 // immediately below so this use of t9 does not cause difference with
3530 // respect to register content between debug and release mode. 3533 // respect to register content between debug and release mode.
3531 ld(t9, MemOperand(topaddr)); 3534 ld(t9, MemOperand(topaddr));
3532 Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); 3535 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3533 } 3536 }
3534 // Load allocation limit into t9. Result already contains allocation top. 3537 // Load allocation limit into t9. Result already contains allocation top.
3535 ld(t9, MemOperand(topaddr, limit - top)); 3538 ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
3536 } 3539 }
3537 3540
3538 DCHECK(kPointerSize == kDoubleSize); 3541 DCHECK(kPointerSize == kDoubleSize);
3539 if (emit_debug_code()) { 3542 if (emit_debug_code()) {
3540 And(at, result, Operand(kDoubleAlignmentMask)); 3543 And(at, result, Operand(kDoubleAlignmentMask));
3541 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); 3544 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3542 } 3545 }
3543 3546
3544 // Calculate new top and bail out if new space is exhausted. Use result 3547 // Calculate new top and bail out if new space is exhausted. Use result
3545 // to calculate the new top. Object size may be in words so a shift is 3548 // to calculate the new top. Object size may be in words so a shift is
(...skipping 907 matching lines...)
4453 4456
4454 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left, 4457 void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
4455 const Operand& right, 4458 const Operand& right,
4456 Register overflow_dst, 4459 Register overflow_dst,
4457 Register scratch) { 4460 Register scratch) {
4458 if (right.is_reg()) { 4461 if (right.is_reg()) {
4459 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch); 4462 AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4460 } else { 4463 } else {
4461 if (dst.is(left)) { 4464 if (dst.is(left)) {
4462 mov(scratch, left); // Preserve left. 4465 mov(scratch, left); // Preserve left.
4463 daddiu(dst, left, right.immediate()); // Left is overwritten. 4466 daddiu(dst, left,
4467 static_cast<int32_t>(right.immediate())); // Left is overwritten.
4464 xor_(scratch, dst, scratch); // Original left. 4468 xor_(scratch, dst, scratch); // Original left.
4465 // Load right since xori takes uint16 as immediate. 4469 // Load right since xori takes uint16 as immediate.
4466 daddiu(t9, zero_reg, right.immediate()); 4470 daddiu(t9, zero_reg, static_cast<int32_t>(right.immediate()));
4467 xor_(overflow_dst, dst, t9); 4471 xor_(overflow_dst, dst, t9);
4468 and_(overflow_dst, overflow_dst, scratch); 4472 and_(overflow_dst, overflow_dst, scratch);
4469 } else { 4473 } else {
4470 daddiu(dst, left, right.immediate()); 4474 daddiu(dst, left, static_cast<int32_t>(right.immediate()));
4471 xor_(overflow_dst, dst, left); 4475 xor_(overflow_dst, dst, left);
4472 // Load right since xori takes uint16 as immediate. 4476 // Load right since xori takes uint16 as immediate.
4473 daddiu(t9, zero_reg, right.immediate()); 4477 daddiu(t9, zero_reg, static_cast<int32_t>(right.immediate()));
4474 xor_(scratch, dst, t9); 4478 xor_(scratch, dst, t9);
4475 and_(overflow_dst, scratch, overflow_dst); 4479 and_(overflow_dst, scratch, overflow_dst);
4476 } 4480 }
4477 } 4481 }
4478 } 4482 }
4479 4483
4480 4484
4481 void MacroAssembler::AdduAndCheckForOverflow(Register dst, 4485 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4482 Register left, 4486 Register left,
4483 Register right, 4487 Register right,
(...skipping 38 matching lines...)
4522 4526
4523 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left, 4527 void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
4524 const Operand& right, 4528 const Operand& right,
4525 Register overflow_dst, 4529 Register overflow_dst,
4526 Register scratch) { 4530 Register scratch) {
4527 if (right.is_reg()) { 4531 if (right.is_reg()) {
4528 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch); 4532 SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
4529 } else { 4533 } else {
4530 if (dst.is(left)) { 4534 if (dst.is(left)) {
4531 mov(scratch, left); // Preserve left. 4535 mov(scratch, left); // Preserve left.
4532 daddiu(dst, left, -(right.immediate())); // Left is overwritten. 4536 daddiu(dst, left,
4537 static_cast<int32_t>(-right.immediate())); // Left is overwritten.
4533 xor_(overflow_dst, dst, scratch); // scratch is original left. 4538 xor_(overflow_dst, dst, scratch); // scratch is original left.
4534 // Load right since xori takes uint16 as immediate. 4539 // Load right since xori takes uint16 as immediate.
4535 daddiu(t9, zero_reg, right.immediate()); 4540 daddiu(t9, zero_reg, static_cast<int32_t>(right.immediate()));
4536 xor_(scratch, scratch, t9); // scratch is original left. 4541 xor_(scratch, scratch, t9); // scratch is original left.
4537 and_(overflow_dst, scratch, overflow_dst); 4542 and_(overflow_dst, scratch, overflow_dst);
4538 } else { 4543 } else {
4539 daddiu(dst, left, -(right.immediate())); 4544 daddiu(dst, left, static_cast<int32_t>(-right.immediate()));
4540 xor_(overflow_dst, dst, left); 4545 xor_(overflow_dst, dst, left);
4541 // Load right since xori takes uint16 as immediate. 4546 // Load right since xori takes uint16 as immediate.
4542 daddiu(t9, zero_reg, right.immediate()); 4547 daddiu(t9, zero_reg, static_cast<int32_t>(right.immediate()));
4543 xor_(scratch, left, t9); 4548 xor_(scratch, left, t9);
4544 and_(overflow_dst, scratch, overflow_dst); 4549 and_(overflow_dst, scratch, overflow_dst);
4545 } 4550 }
4546 } 4551 }
4547 } 4552 }
4548 4553
4549 4554
4550 void MacroAssembler::SubuAndCheckForOverflow(Register dst, 4555 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4551 Register left, 4556 Register left,
4552 Register right, 4557 Register right,
(...skipping 280 matching lines...)
4833 Label* no_map_match) { 4838 Label* no_map_match) {
4834 // Load the global or builtins object from the current context. 4839 // Load the global or builtins object from the current context.
4835 ld(scratch, 4840 ld(scratch,
4836 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 4841 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4837 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); 4842 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4838 4843
4839 // Check that the function's map is the same as the expected cached map. 4844 // Check that the function's map is the same as the expected cached map.
4840 ld(scratch, 4845 ld(scratch,
4841 MemOperand(scratch, 4846 MemOperand(scratch,
4842 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); 4847 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4843 size_t offset = expected_kind * kPointerSize + 4848 int offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
4844 FixedArrayBase::kHeaderSize;
4845 ld(at, FieldMemOperand(scratch, offset)); 4849 ld(at, FieldMemOperand(scratch, offset));
4846 Branch(no_map_match, ne, map_in_out, Operand(at)); 4850 Branch(no_map_match, ne, map_in_out, Operand(at));
4847 4851
4848 // Use the transitioned cached map. 4852 // Use the transitioned cached map.
4849 offset = transitioned_kind * kPointerSize + 4853 offset = transitioned_kind * kPointerSize +
4850 FixedArrayBase::kHeaderSize; 4854 FixedArrayBase::kHeaderSize;
4851 ld(map_in_out, FieldMemOperand(scratch, offset)); 4855 ld(map_in_out, FieldMemOperand(scratch, offset));
4852 } 4856 }
4853 4857
4854 4858
(...skipping 1375 matching lines...)
6230 if (mag.shift > 0) sra(result, result, mag.shift); 6234 if (mag.shift > 0) sra(result, result, mag.shift);
6231 srl(at, dividend, 31); 6235 srl(at, dividend, 31);
6232 Addu(result, result, Operand(at)); 6236 Addu(result, result, Operand(at));
6233 } 6237 }
6234 6238
6235 6239
6236 } // namespace internal 6240 } // namespace internal
6237 } // namespace v8 6241 } // namespace v8
6238 6242
6239 #endif // V8_TARGET_ARCH_MIPS64 6243 #endif // V8_TARGET_ARCH_MIPS64