Chromium Code Reviews

Unified Diff: src/mips/macro-assembler-mips.cc

Issue 7888004: MIPS: pre-crankshaft updates to macro-assembler and related files. (2/3) (Closed)
Patch Set: removed code-patching, cache-flushing per review comments. Created 9 years, 3 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 101 matching lines...)
   sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
 }
 
 
 // Push and pop all registers that can hold pointers.
 void MacroAssembler::PushSafepointRegisters() {
   // Safepoints expect a block of kNumSafepointRegisters values on the
   // stack, so adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   ASSERT(num_unsaved >= 0);
-  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  if (num_unsaved > 0) {
+    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
   MultiPush(kSafepointSavedRegisters);
 }
 
 
 void MacroAssembler::PopSafepointRegisters() {
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   MultiPop(kSafepointSavedRegisters);
-  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+  if (num_unsaved > 0) {
+    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
 }
 
 
 void MacroAssembler::PushSafepointRegistersAndDoubles() {
   PushSafepointRegisters();
   Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
   for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
     FPURegister reg = FPURegister::FromAllocationIndex(i);
     sdc1(reg, MemOperand(sp, i * kDoubleSize));
   }
(...skipping 32 matching lines...)
   return kSafepointRegisterStackIndexMap[reg_code];
 }
 
 
 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
 }
 
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  UNIMPLEMENTED_MIPS();
   // General purpose registers are pushed last on the stack.
   int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
   return MemOperand(sp, doubles_size + register_offset);
 }
 
 
-
-
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
                                 Label* branch) {
   ASSERT(cc == eq || cc == ne);
   And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
   Branch(branch, cc, scratch,
          Operand(ExternalReference::new_space_start(isolate())));
 }
 
(...skipping 498 matching lines...)
     ori(rd, rd, (j.imm32_ & kImm16Mask));
   }
 }
 
 
 void MacroAssembler::MultiPush(RegList regs) {
   int16_t num_to_push = NumberOfBitsSet(regs);
   int16_t stack_offset = num_to_push * kPointerSize;
 
   Subu(sp, sp, Operand(stack_offset));
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kPointerSize;
       sw(ToRegister(i), MemOperand(sp, stack_offset));
     }
   }
 }
 
 
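The loop-bound fix in MultiPush (and in MultiPopReversed below) repairs an off-by-one: the old loop started at i = kNumRegisters, so it tested a bit one past the end of the register file (an out-of-range shift for a 32-entry file) and never visited register 0. A minimal standalone sketch of the corrected bitmask walk, assuming kNumRegisters == 32:

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t RegList;
    const int kNumRegisters = 32;  // assumed, as on MIPS32

    // Walk a register bitmask from the highest register down to register 0,
    // mirroring the corrected MultiPush loop bounds.
    void WalkRegList(RegList regs) {
      for (int i = kNumRegisters - 1; i >= 0; i--) {  // old: i = kNumRegisters; i > 0
        if ((regs & (1u << i)) != 0) {
          printf("push register %d\n", i);
        }
      }
    }

    int main() {
      WalkRegList((1u << 0) | (1u << 31));  // both extremes are now visited
      return 0;
    }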
 void MacroAssembler::MultiPushReversed(RegList regs) {
   int16_t num_to_push = NumberOfBitsSet(regs);
(...skipping 18 matching lines...)
       stack_offset += kPointerSize;
     }
   }
   addiu(sp, sp, stack_offset);
 }
 
 
 void MacroAssembler::MultiPopReversed(RegList regs) {
   int16_t stack_offset = 0;
 
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, stack_offset));
       stack_offset += kPointerSize;
     }
   }
   addiu(sp, sp, stack_offset);
 }
 
 
 void MacroAssembler::MultiPushFPU(RegList regs) {
(...skipping 173 matching lines...)
                                 Register rs,
                                 FPURegister scratch) {
   ASSERT(!fd.is(scratch));
   ASSERT(!rs.is(at));
 
   // Load 2^31 into scratch as its float representation.
   li(at, 0x41E00000);
   mtc1(at, FPURegister::from_code(scratch.code() + 1));
   mtc1(zero_reg, scratch);
   // Test if scratch > fd.
-  c(OLT, D, fd, scratch);
-
+  // If fd < 2^31 we can convert it normally.
   Label simple_convert;
-  // If fd < 2^31 we can convert it normally.
-  bc1t(&simple_convert);
+  BranchF(&simple_convert, NULL, lt, fd, scratch);
 
   // First we subtract 2^31 from fd, then trunc it to rs
   // and add 2^31 to rs.
   sub_d(scratch, fd, scratch);
   trunc_w_d(scratch, scratch);
   mfc1(rs, scratch);
   Or(rs, rs, 1 << 31);
 
   Label done;
   Branch(&done);
   // Simple conversion.
   bind(&simple_convert);
   trunc_w_d(scratch, fd);
   mfc1(rs, scratch);
 
   bind(&done);
 }
 
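A note on the magic number above: 0x41E00000 is the high IEEE-754 word of the double 2^31 (the low word is zero), so the mtc1 pair materializes 2147483648.0 in the scratch register. A standalone sketch of the unsigned-truncation trick, under the assumption that inputs in [2^31, 2^32) are handled by subtracting 2^31, truncating, and OR-ing bit 31 back in:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Build a double from its two 32-bit halves, as the mtc1 pair does.
    double DoubleFromWords(uint32_t hi, uint32_t lo) {
      uint64_t bits = ((uint64_t)hi << 32) | lo;
      double d;
      memcpy(&d, &bits, sizeof(d));
      return d;
    }

    // Software model of Trunc_uw_d for non-negative inputs.
    uint32_t TruncUwD(double fd) {
      double two31 = DoubleFromWords(0x41E00000, 0);  // 2147483648.0
      if (fd < two31) return (uint32_t)(int32_t)fd;   // simple conversion
      return (uint32_t)(int32_t)(fd - two31) | 0x80000000u;
    }

    int main() {
      printf("%.1f\n", DoubleFromWords(0x41E00000, 0));  // 2147483648.0
      printf("%u\n", TruncUwD(3000000000.0));            // 3000000000
      return 0;
    }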
 
+void MacroAssembler::BranchF(Label* target,
+                             Label* nan,
+                             Condition cc,
+                             FPURegister cmp1,
+                             FPURegister cmp2,
+                             BranchDelaySlot bd) {
+  if (cc == al) {
+    Branch(bd, target);
+    return;
+  }
+
+  ASSERT(nan || target);
+  // Check for unordered (NaN) cases.
+  if (nan) {
+    c(UN, D, cmp1, cmp2);
+    bc1t(nan);
+  }
+
+  if (target) {
+    // Here NaN cases were either handled by this function or are assumed to
+    // have been handled by the caller.
+    // Unsigned conditions are treated as their signed counterpart.
+    switch (cc) {
+      case Uless:
+      case less:
+        c(OLT, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case Ugreater:
+      case greater:
+        c(ULE, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      case Ugreater_equal:
+      case greater_equal:
+        c(ULT, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      case Uless_equal:
+      case less_equal:
+        c(OLE, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case eq:
+        c(EQ, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case ne:
+        c(EQ, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      default:
+        CHECK(0);
+    };
+  }
+
+  if (bd == PROTECT) {
+    nop();
+  }
+}
+
+
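BranchF only has ordered less-than/less-equal (OLT, OLE) and unordered (ULT, ULE) predicates to work with, so greater and greater_equal are synthesized by testing the complementary unordered predicate and branching on false (bc1f). That choice is what keeps NaN out of the taken path: ULE/ULT are true for unordered operands, so bc1f falls through. A small sketch of the identities involved, assuming IEEE comparison semantics:

    #include <cassert>
    #include <cmath>

    // c(ULE, D, a, b) in the code above: true if unordered or a <= b.
    bool ule(double a, double b) { return std::isnan(a) || std::isnan(b) || a <= b; }
    bool ult(double a, double b) { return std::isnan(a) || std::isnan(b) || a < b; }

    // "greater" takes the branch when ULE is false (bc1f): exactly a > b,
    // and never taken when either operand is NaN.
    bool branch_greater(double a, double b) { return !ule(a, b); }
    bool branch_greater_equal(double a, double b) { return !ult(a, b); }

    int main() {
      assert(branch_greater(2.0, 1.0));
      assert(!branch_greater(NAN, 1.0));       // NaN never compares greater
      assert(branch_greater_equal(1.0, 1.0));
      assert(!branch_greater_equal(1.0, NAN));
      return 0;
    }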
+void MacroAssembler::Move(FPURegister dst, double imm) {
+  ASSERT(CpuFeatures::IsEnabled(FPU));
+  static const DoubleRepresentation minus_zero(-0.0);
+  static const DoubleRepresentation zero(0.0);
+  DoubleRepresentation value(imm);
+  // Handle special values first.
+  bool force_load = dst.is(kDoubleRegZero);
+  if (value.bits == zero.bits && !force_load) {
+    mov_d(dst, kDoubleRegZero);
+  } else if (value.bits == minus_zero.bits && !force_load) {
+    neg_d(dst, kDoubleRegZero);
+  } else {
+    uint32_t lo, hi;
+    DoubleAsTwoUInt32(imm, &lo, &hi);
+    // Move the low part of the double into the lower of the corresponding
+    // FPU register of the FPU register pair.
+    if (lo != 0) {
+      li(at, Operand(lo));
+      mtc1(at, dst);
+    } else {
+      mtc1(zero_reg, dst);
+    }
+    // Move the high part of the double into the higher of the corresponding
+    // FPU register of the FPU register pair.
+    if (hi != 0) {
+      li(at, Operand(hi));
+      mtc1(at, dst.high());
+    } else {
+      mtc1(zero_reg, dst.high());
+    }
+  }
+}
+
+
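Move above relies on DoubleAsTwoUInt32 to split the immediate into the two 32-bit words that go into the FPU register pair; zero halves are loaded from zero_reg instead of materializing a constant in at. A sketch of that split (it is the word order within the 64-bit bit pattern, not memory endianness, that matters here):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Split a double into the low and high 32-bit words of its bit pattern.
    void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      *lo = (uint32_t)(bits & 0xFFFFFFFFu);
      *hi = (uint32_t)(bits >> 32);
    }

    int main() {
      uint32_t lo, hi;
      DoubleAsTwoUInt32(1.0, &lo, &hi);
      printf("hi=0x%08X lo=0x%08X\n", hi, lo);  // hi=0x3FF00000 lo=0x00000000
      DoubleAsTwoUInt32(-0.0, &lo, &hi);        // only the sign bit set
      printf("hi=0x%08X lo=0x%08X\n", hi, lo);  // hi=0x80000000 lo=0x00000000
      return 0;
    }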
 // Tries to get a signed int32 out of a double precision floating point heap
 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
 // 32bits signed integer range.
 // This method implementation differs from the ARM version for performance
 // reasons.
 void MacroAssembler::ConvertToInt32(Register source,
                                     Register dest,
                                     Register scratch,
                                     Register scratch2,
                                     FPURegister double_scratch,
(...skipping 78 matching lines...)
     // Trick to check sign bit (msb) held in dest, count leading zero.
     // 0 indicates negative, save negative version with conditional move.
     clz(dest, dest);
     movz(scratch, scratch2, dest);
     mov(dest, scratch);
   }
   bind(&done);
 }
 
 
+void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
+                                     FPURegister result,
+                                     DoubleRegister double_input,
+                                     Register scratch1,
+                                     Register except_flag,
+                                     CheckForInexactConversion check_inexact) {
+  ASSERT(CpuFeatures::IsSupported(FPU));
+  CpuFeatures::Scope scope(FPU);
+
+  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
+
+  if (check_inexact == kDontCheckForInexactConversion) {
+    // Ignore inexact exceptions.
+    except_mask &= ~kFCSRInexactFlagMask;
+  }
+
+  // Save FCSR.
+  cfc1(scratch1, FCSR);
+  // Disable FPU exceptions.
+  ctc1(zero_reg, FCSR);
+
+  // Do operation based on rounding mode.
+  switch (rounding_mode) {
+    case kRoundToNearest:
+      round_w_d(result, double_input);
+      break;
+    case kRoundToZero:
+      trunc_w_d(result, double_input);
+      break;
+    case kRoundToPlusInf:
+      ceil_w_d(result, double_input);
+      break;
+    case kRoundToMinusInf:
+      floor_w_d(result, double_input);
+      break;
+  }  // End of switch-statement.
+
+  // Retrieve FCSR.
+  cfc1(except_flag, FCSR);
+  // Restore FCSR.
+  ctc1(scratch1, FCSR);
+
+  // Check for fpu exceptions.
+  And(except_flag, except_flag, Operand(except_mask));
+}
+
+
 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                  Register input_high,
                                                  Register input_low,
                                                  Register scratch) {
   Label done, normal_exponent, restore_sign;
   // Extract the biased exponent in result.
   Ext(result,
       input_high,
       HeapNumber::kExponentShift,
       HeapNumber::kExponentBits);
(...skipping 66 matching lines...)
   Subu(result, zero_reg, input_high);
   movz(result, input_high, scratch);
   bind(&done);
 }
 
 
 void MacroAssembler::EmitECMATruncate(Register result,
                                       FPURegister double_input,
                                       FPURegister single_scratch,
                                       Register scratch,
-                                      Register input_high,
-                                      Register input_low) {
+                                      Register scratch2,
+                                      Register scratch3) {
   CpuFeatures::Scope scope(FPU);
-  ASSERT(!input_high.is(result));
-  ASSERT(!input_low.is(result));
-  ASSERT(!input_low.is(input_high));
+  ASSERT(!scratch2.is(result));
+  ASSERT(!scratch3.is(result));
+  ASSERT(!scratch3.is(scratch2));
   ASSERT(!scratch.is(result) &&
-         !scratch.is(input_high) &&
-         !scratch.is(input_low));
+         !scratch.is(scratch2) &&
+         !scratch.is(scratch3));
   ASSERT(!single_scratch.is(double_input));
 
   Label done;
   Label manual;
 
   // Clear cumulative exception flags and save the FCSR.
-  Register scratch2 = input_high;
   cfc1(scratch2, FCSR);
   ctc1(zero_reg, FCSR);
   // Try a conversion to a signed integer.
   trunc_w_d(single_scratch, double_input);
   mfc1(result, single_scratch);
   // Retrieve and restore the FCSR.
   cfc1(scratch, FCSR);
   ctc1(scratch2, FCSR);
   // Check for overflow and NaNs.
   And(scratch,
       scratch,
       kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
   // If we had no exceptions we are done.
   Branch(&done, eq, scratch, Operand(zero_reg));
 
   // Load the double value and perform a manual truncation.
+  Register input_high = scratch2;
+  Register input_low = scratch3;
   Move(input_low, input_high, double_input);
   EmitOutOfInt32RangeTruncate(result,
                               input_high,
                               input_low,
                               scratch);
   bind(&done);
 }
 
 
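EmitECMATruncate implements ECMA-262 ToInt32: the fast path is a hardware trunc_w_d, and the FCSR check detects when the input was NaN or out of signed 32-bit range, in which case the result must instead be computed modulo 2^32 from the raw bits by EmitOutOfInt32RangeTruncate. A behavioural sketch of the contract the two paths implement together:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // ECMA-262 ToInt32: NaN/Infinity become 0, everything else is truncated
    // toward zero and folded modulo 2^32 into the signed 32-bit range.
    int32_t EcmaToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      double t = std::trunc(d);
      double m = std::fmod(t, 4294967296.0);  // keeps the sign of t
      if (m < 0) m += 4294967296.0;
      return (int32_t)(uint32_t)m;
    }

    int main() {
      printf("%d\n", (int)EcmaToInt32(3.7));               // 3
      printf("%d\n", (int)EcmaToInt32(-3.7));              // -3
      printf("%d\n", (int)EcmaToInt32(4294967296.0 + 5));  // 5 (wraps mod 2^32)
      printf("%d\n", (int)EcmaToInt32(NAN));               // 0
      return 0;
    }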
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
(...skipping 11 matching lines...)
 
 
 // Emulated conditional branches do not emit a nop in the branch delay slot.
 //
 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
     (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
 
 
-bool MacroAssembler::UseAbsoluteCodePointers() {
-  if (is_trampoline_emitted()) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
-
 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
   BranchShort(offset, bdslot);
 }
 
 
 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                             const Operand& rt,
                             BranchDelaySlot bdslot) {
   BranchShort(offset, cond, rs, rt, bdslot);
 }
 
 
 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Jr(L, bdslot);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, bdslot);
+    } else {
+      Jr(L, bdslot);
+    }
   } else {
-    BranchShort(L, bdslot);
+    if (is_trampoline_emitted()) {
+      Jr(L, bdslot);
+    } else {
+      BranchShort(L, bdslot);
+    }
   }
 }
 
 
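The rewrite above replaces UseAbsoluteCodePointers with an explicit case split: a bound label's distance can be measured directly with is_near, while for a still-unbound forward label the only available signal is whether a trampoline pool has been emitted, which indicates the function has already outgrown short-branch range. A sketch of the selection logic, with the two predicates modeled as plain flags:

    #include <cstdio>

    enum BranchKind { kShortBranch, kLongJump };

    // Mirrors the new Branch()/BranchAndLink() selection. The flags stand in
    // for Label::is_bound(), is_near(L) and is_trampoline_emitted().
    BranchKind SelectBranch(bool label_bound, bool label_near,
                            bool trampoline_emitted) {
      if (label_bound) {
        return label_near ? kShortBranch : kLongJump;
      }
      return trampoline_emitted ? kLongJump : kShortBranch;
    }

    int main() {
      printf("%d\n", SelectBranch(true, false, false));  // 1: bound but far away
      printf("%d\n", SelectBranch(false, false, true));  // 1: unbound, pool emitted
      return 0;
    }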
 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                             const Operand& rt,
                             BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Label skip;
-    Condition neg_cond = NegateCondition(cond);
-    BranchShort(&skip, neg_cond, rs, rt);
-    Jr(L, bdslot);
-    bind(&skip);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jr(L, bdslot);
+      bind(&skip);
+    }
   } else {
-    BranchShort(L, cond, rs, rt, bdslot);
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchShort(L, cond, rs, rt, bdslot);
+    }
   }
 }
 
 
1262 b(offset); 1416 b(offset);
1263 1417
1264 // Emit a nop in the branch delay slot if required. 1418 // Emit a nop in the branch delay slot if required.
1265 if (bdslot == PROTECT) 1419 if (bdslot == PROTECT)
1266 nop(); 1420 nop();
1267 } 1421 }
1268 1422
1269 1423
1270 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, 1424 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1271 const Operand& rt, 1425 const Operand& rt,
1272 BranchDelaySlot bdslot) { 1426 BranchDelaySlot bdslot) {
1273 BRANCH_ARGS_CHECK(cond, rs, rt); 1427 BRANCH_ARGS_CHECK(cond, rs, rt);
1274 ASSERT(!rs.is(zero_reg)); 1428 ASSERT(!rs.is(zero_reg));
1275 Register r2 = no_reg; 1429 Register r2 = no_reg;
1276 Register scratch = at; 1430 Register scratch = at;
1277 1431
1278 if (rt.is_reg()) { 1432 if (rt.is_reg()) {
1279 // We don't want any other register but scratch clobbered. 1433 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1280 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_)); 1434 // rt.
1281 r2 = rt.rm_; 1435 r2 = rt.rm_;
1282 switch (cond) { 1436 switch (cond) {
1283 case cc_always: 1437 case cc_always:
1284 b(offset); 1438 b(offset);
1285 break; 1439 break;
1286 case eq: 1440 case eq:
1287 beq(rs, r2, offset); 1441 beq(rs, r2, offset);
1288 break; 1442 break;
1289 case ne: 1443 case ne:
1290 bne(rs, r2, offset); 1444 bne(rs, r2, offset);
(...skipping 481 matching lines...)
 
 
 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
                                    const Operand& rt,
                                    BranchDelaySlot bdslot) {
   BranchAndLinkShort(offset, cond, rs, rt, bdslot);
 }
 
 
 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Jalr(L, bdslot);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, bdslot);
+    } else {
+      Jalr(L, bdslot);
+    }
   } else {
-    BranchAndLinkShort(L, bdslot);
+    if (is_trampoline_emitted()) {
+      Jalr(L, bdslot);
+    } else {
+      BranchAndLinkShort(L, bdslot);
+    }
   }
 }
 
 
 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                    const Operand& rt,
                                    BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Label skip;
-    Condition neg_cond = NegateCondition(cond);
-    BranchShort(&skip, neg_cond, rs, rt);
-    Jalr(L, bdslot);
-    bind(&skip);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    }
   } else {
-    BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    }
   }
 }
 
 
 // We need to use a bgezal or bltzal, but they can't be used directly with the
 // slt instructions. We could use sub or add instead but we would miss overflow
 // cases, so we keep slt and add an intermediate third instruction.
 void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                         BranchDelaySlot bdslot) {
   bal(offset);
(...skipping 1356 matching lines...)
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                  call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
     SetCallKind(t1, call_kind);
     Call(code);
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     SetCallKind(t1, call_kind);
     Jump(code);
   }
   // Continue here if InvokePrologue does handle the invocation due to
   // mismatched parameter counts.
   bind(&done);
 }
 
(...skipping 51 matching lines...)
   ASSERT(function->is_compiled());
 
   // Get the function and setup the context.
   li(a1, Operand(Handle<JSFunction>(function)));
   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
   // Invoke the cached code.
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
   if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
   } else {
     InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
   }
 }
 
 
 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                           Register map,
                                           Register scratch,
                                           Label* fail) {
(...skipping 296 matching lines...)
 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
                                              Register left,
                                              Register right,
                                              Register overflow_dst,
                                              Register scratch) {
   ASSERT(!dst.is(overflow_dst));
   ASSERT(!dst.is(scratch));
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-  ASSERT(!left.is(right));
+
+  if (left.is(right) && dst.is(left)) {
+    ASSERT(!dst.is(t9));
+    ASSERT(!scratch.is(t9));
+    ASSERT(!left.is(t9));
+    ASSERT(!right.is(t9));
+    ASSERT(!overflow_dst.is(t9));
+    mov(t9, right);
+    right = t9;
+  }
 
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
     addu(dst, left, right);  // Left is overwritten.
     xor_(scratch, dst, scratch);  // Original left.
     xor_(overflow_dst, dst, right);
     and_(overflow_dst, overflow_dst, scratch);
   } else if (dst.is(right)) {
     mov(scratch, right);  // Preserve right.
     addu(dst, left, right);  // Right is overwritten.
(...skipping 12 matching lines...)
 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
                                              Register left,
                                              Register right,
                                              Register overflow_dst,
                                              Register scratch) {
   ASSERT(!dst.is(overflow_dst));
   ASSERT(!dst.is(scratch));
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-  ASSERT(!left.is(right));
   ASSERT(!scratch.is(left));
   ASSERT(!scratch.is(right));
 
+  // This happens with some crankshaft code. Since Subu works fine if
+  // left == right, let's not make that restriction here.
+  if (left.is(right)) {
+    mov(dst, zero_reg);
+    mov(overflow_dst, zero_reg);
+    return;
+  }
+
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
     subu(dst, left, right);  // Left is overwritten.
     xor_(overflow_dst, dst, scratch);  // scratch is original left.
     xor_(scratch, scratch, right);  // scratch is original left.
     and_(overflow_dst, scratch, overflow_dst);
   } else if (dst.is(right)) {
     mov(scratch, right);  // Preserve right.
     subu(dst, left, right);  // Right is overwritten.
     xor_(overflow_dst, dst, left);
(...skipping 622 matching lines...)
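Both overflow helpers use the classic sign-bit identity: addition overflows exactly when both operands share a sign and the result's sign differs, i.e. (dst ^ left) & (dst ^ right) is negative; subtraction overflows when the operands' signs differ and the result's sign differs from the minuend's, matching the xor_/and_ sequences above. A sketch verifying both identities:

    #include <cassert>
    #include <cstdint>

    // Addu overflow: negative iff (sum^a) and (sum^b) are both negative.
    bool AddOverflows(int32_t a, int32_t b) {
      int32_t sum = (int32_t)((uint32_t)a + (uint32_t)b);  // wrap-around add
      return ((sum ^ a) & (sum ^ b)) < 0;
    }

    // Subu overflow: (diff^a) & (a^b) is negative, as the helper computes.
    bool SubOverflows(int32_t a, int32_t b) {
      int32_t diff = (int32_t)((uint32_t)a - (uint32_t)b);
      return ((diff ^ a) & (a ^ b)) < 0;
    }

    int main() {
      assert(AddOverflows(INT32_MAX, 1));
      assert(!AddOverflows(-5, 3));
      assert(SubOverflows(INT32_MIN, 1));
      assert(!SubOverflows(7, 7));  // left == right: the new early-out case
      return 0;
    }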
   int kFlatAsciiStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   int kFlatAsciiStringTag = ASCII_STRING_TYPE;
   And(scratch, type, Operand(kFlatAsciiStringMask));
   Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
 }
 
 
 static const int kRegisterPassedArguments = 4;
 
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
+  num_reg_arguments += 2 * num_double_arguments;
+
+  // Up to four simple arguments are passed in registers a0..a3.
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  stack_passed_words += kCArgSlotCount;
+  return stack_passed_words;
+}
+
+
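Under the MIPS O32 calling convention a double argument occupies two words, the first four words travel in a0..a3, and kCArgSlotCount home slots are reserved on the stack even when unused. A worked example of the computation, with kCArgSlotCount == 4 assumed as the O32 value:

    #include <cstdio>

    const int kRegisterPassedArguments = 4;
    const int kCArgSlotCount = 4;  // assumed O32 reserved argument slots

    // Same computation as MacroAssembler::CalculateStackPassedWords.
    int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) {
      int stack_passed_words = 0;
      num_reg_arguments += 2 * num_double_arguments;  // each double is 2 words
      if (num_reg_arguments > kRegisterPassedArguments) {
        stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
      }
      stack_passed_words += kCArgSlotCount;
      return stack_passed_words;
    }

    int main() {
      // 3 int args + 2 doubles: 3 + 4 = 7 argument words, 3 spill to the
      // stack, plus the 4 reserved slots => 7.
      printf("%d\n", CalculateStackPassedWords(3, 2));  // 7
      return 0;
    }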
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
   // Up to four simple arguments are passed in registers a0..a3.
   // Those four arguments must have reserved argument slots on the stack for
   // mips, even though those argument slots are not normally used.
   // Remaining arguments are pushed on the stack, above (higher address than)
   // the argument slots.
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                 0 : num_arguments - kRegisterPassedArguments) +
-                                kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
   if (frame_alignment > kPointerSize) {
     // Make stack end at alignment and make room for num_arguments - 4 words
     // and the original value of sp.
     mov(scratch, sp);
     Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
     ASSERT(IsPowerOf2(frame_alignment));
     And(sp, sp, Operand(-frame_alignment));
     sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
     Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
   }
 }
 
 
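When the platform needs more than word alignment, the code above saves the caller's sp in a slot just past the argument words, then rounds the new sp down with And(sp, sp, -frame_alignment); negating a power of two yields an all-ones mask above the low bits. A sketch of the pointer arithmetic, assuming 32-bit pointers and an alignment of 8:

    #include <cstdint>
    #include <cstdio>

    // Model of the sp adjustment in PrepareCallCFunction when
    // frame_alignment > kPointerSize. Returns the new, aligned sp.
    uintptr_t AlignStack(uintptr_t sp, int stack_passed_words, int frame_alignment,
                         uintptr_t* saved_sp_slot_offset) {
      const int kPointerSize = 4;  // assumed 32-bit target
      uintptr_t new_sp = sp - (stack_passed_words + 1) * kPointerSize;
      new_sp &= (uintptr_t)-(intptr_t)frame_alignment;  // round down to alignment
      // The original sp lands just above the argument words so the epilogue
      // in CallCFunctionHelper can reload it after the call.
      *saved_sp_slot_offset = stack_passed_words * kPointerSize;
      return new_sp;
    }

    int main() {
      uintptr_t slot;
      uintptr_t sp = AlignStack(0x7FFF1234, 6, 8, &slot);
      printf("0x%lX (saved sp at offset %lu)\n",
             (unsigned long)sp, (unsigned long)slot);  // 0x7FFF1218 (offset 24)
      return 0;
    }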
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(no_reg,
+                      function,
+                      t8,
+                      num_reg_arguments,
+                      num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   Register scratch,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function,
+                      ExternalReference::the_hole_value_location(isolate()),
+                      scratch,
+                      num_reg_arguments,
+                      num_double_arguments);
+}
+
+
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(no_reg, function, t8, num_arguments);
+  CallCFunction(function, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
                                    Register scratch,
                                    int num_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_arguments);
+  CallCFunction(function, scratch, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
                                          ExternalReference function_reference,
                                          Register scratch,
-                                         int num_arguments) {
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
   // The argument slots are presumed to have been set up by
   // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
 
 #if defined(V8_HOST_ARCH_MIPS)
   if (emit_debug_code()) {
     int frame_alignment = OS::ActivationFrameAlignment();
     int frame_alignment_mask = frame_alignment - 1;
(...skipping 17 matching lines...)
   if (function.is(no_reg)) {
     function = t9;
     li(function, Operand(function_reference));
   } else if (!function.is(t9)) {
     mov(t9, function);
     function = t9;
   }
 
   Call(function);
 
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                 0 : num_arguments - kRegisterPassedArguments) +
-                                kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
 
   if (OS::ActivationFrameAlignment() > kPointerSize) {
     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
     Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
   }
 }
 
 
 #undef BRANCH_ARGS_CHECK
 
 
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   lw(descriptors,
      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
   Label not_smi;
   JumpIfNotSmi(descriptors, &not_smi);
   li(descriptors, Operand(FACTORY->empty_descriptor_array()));
   bind(&not_smi);
 }
 
 
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  ASSERT(!output_reg.is(input_reg));
+  Label done;
+  li(output_reg, Operand(255));
+  // Normal branch: nop in delay slot.
+  Branch(&done, gt, input_reg, Operand(output_reg));
+  // Use delay slot in this branch.
+  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+  mov(output_reg, zero_reg);  // In delay slot.
+  mov(output_reg, input_reg);  // Value is in range 0..255.
+  bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  Move(temp_double_reg, 0.0);
+  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+  // Double value is less than zero, NaN or Inf, return 0.
+  mov(result_reg, zero_reg);
+  Branch(&done);
+
+  // Double value is >= 255, return 255.
+  bind(&above_zero);
+  Move(temp_double_reg, 255.0);
+  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+  li(result_reg, Operand(255));
+  Branch(&done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+  round_w_d(temp_double_reg, input_reg);
+  mfc1(result_reg, temp_double_reg);
+  bind(&done);
+}
+
+
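Both clamp helpers implement pixel-store semantics: integers saturate to [0, 255], and doubles additionally send NaN (and anything not above zero) to 0 and round before clamping. A reference sketch of the same behaviour; note that round_w_d follows the FCSR rounding mode (ties-to-even by default), which std::lround only approximates:

    #include <cmath>
    #include <cstdio>

    // Integer clamp, as in ClampUint8.
    int ClampUint8(int x) {
      if (x < 0) return 0;
      if (x > 255) return 255;
      return x;
    }

    // Double clamp, as in ClampDoubleToUint8: NaN, -0.0 and negatives go to 0,
    // values above 255 saturate, in-range values are rounded to nearest.
    int ClampDoubleToUint8(double d) {
      if (!(d > 0.0)) return 0;  // catches NaN as well as non-positives
      if (d > 255.0) return 255;
      return (int)std::lround(d);
    }

    int main() {
      printf("%d %d %d\n", ClampUint8(-3), ClampUint8(42), ClampUint8(300));
      printf("%d %d\n", ClampDoubleToUint8(NAN), ClampDoubleToUint8(254.6));  // 0 255
      return 0;
    }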
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
       masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
(...skipping 37 matching lines...)
          opcode == BGTZL);
   opcode = (cond == eq) ? BEQ : BNE;
   instr = (instr & ~kOpcodeMask) | opcode;
   masm_.emit(instr);
 }
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS