OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/division-by-constant.h" | 10 #include "src/base/division-by-constant.h" |
(...skipping 12 matching lines...) |
23 : Assembler(arg_isolate, buffer, size), | 23 : Assembler(arg_isolate, buffer, size), |
24 generating_stub_(false), | 24 generating_stub_(false), |
25 has_frame_(false), | 25 has_frame_(false), |
26 has_double_zero_reg_set_(false) { | 26 has_double_zero_reg_set_(false) { |
27 if (create_code_object == CodeObjectRequired::kYes) { | 27 if (create_code_object == CodeObjectRequired::kYes) { |
28 code_object_ = | 28 code_object_ = |
29 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate()); | 29 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate()); |
30 } | 30 } |
31 } | 31 } |
32 | 32 |
33 | |
34 void MacroAssembler::Load(Register dst, | 33 void MacroAssembler::Load(Register dst, |
35 const MemOperand& src, | 34 const MemOperand& src, |
36 Representation r) { | 35 Representation r) { |
37 DCHECK(!r.IsDouble()); | 36 DCHECK(!r.IsDouble()); |
38 if (r.IsInteger8()) { | 37 if (r.IsInteger8()) { |
39 lb(dst, src); | 38 lb(dst, src); |
40 } else if (r.IsUInteger8()) { | 39 } else if (r.IsUInteger8()) { |
41 lbu(dst, src); | 40 lbu(dst, src); |
42 } else if (r.IsInteger16()) { | 41 } else if (r.IsInteger16()) { |
43 lh(dst, src); | 42 lh(dst, src); |
(...skipping 16 matching lines...) |
60 } else { | 59 } else { |
61 if (r.IsHeapObject()) { | 60 if (r.IsHeapObject()) { |
62 AssertNotSmi(src); | 61 AssertNotSmi(src); |
63 } else if (r.IsSmi()) { | 62 } else if (r.IsSmi()) { |
64 AssertSmi(src); | 63 AssertSmi(src); |
65 } | 64 } |
66 sw(src, dst); | 65 sw(src, dst); |
67 } | 66 } |
68 } | 67 } |
69 | 68 |
70 | |
71 void MacroAssembler::LoadRoot(Register destination, | 69 void MacroAssembler::LoadRoot(Register destination, |
72 Heap::RootListIndex index) { | 70 Heap::RootListIndex index) { |
73 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); | 71 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); |
74 } | 72 } |
75 | 73 |
76 | 74 |
77 void MacroAssembler::LoadRoot(Register destination, | 75 void MacroAssembler::LoadRoot(Register destination, |
78 Heap::RootListIndex index, | 76 Heap::RootListIndex index, |
79 Condition cond, | 77 Condition cond, |
80 Register src1, const Operand& src2) { | 78 Register src1, const Operand& src2) { |
(...skipping 1103 matching lines...) |
1184 Register tmp = rd.is(rt) ? scratch : rd; | 1182 Register tmp = rd.is(rt) ? scratch : rd; |
1185 DCHECK(!tmp.is(rt)); | 1183 DCHECK(!tmp.is(rt)); |
1186 sll(tmp, rs, sa); | 1184 sll(tmp, rs, sa); |
1187 Addu(rd, rt, tmp); | 1185 Addu(rd, rt, tmp); |
1188 } | 1186 } |
1189 } | 1187 } |
1190 | 1188 |
1191 | 1189 |
1192 // ------------Pseudo-instructions------------- | 1190 // ------------Pseudo-instructions------------- |
1193 | 1191 |
| 1192 // Byte swap (reverse the byte order of the word in a register). |
| 1193 void MacroAssembler::ByteSwapSigned(Register reg, int operand_size) { |
| 1194 DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4); |
| 1195 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| 1196 if (operand_size == 2) { |
| 1197 seh(reg, reg); |
| 1198 } else if (operand_size == 1) { |
| 1199 seb(reg, reg); |
| 1200 } |
| 1201 // No need to do any preparation if operand_size is 4 |
| 1202 |
| 1203 wsbh(reg, reg); |
| 1204 rotr(reg, reg, 16); |
| 1205 } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { |
| 1206 if (operand_size == 1) { |
| 1207 sll(reg, reg, 24); |
| 1208 sra(reg, reg, 24); |
| 1209 } else if (operand_size == 2) { |
| 1210 sll(reg, reg, 16); |
| 1211 sra(reg, reg, 16); |
| 1212 } |
| 1213 // No need to do any preparation if operand_size is 4 |
| 1214 |
| 1215 Register tmp = t0; |
| 1216 Register tmp2 = t1; |
| 1217 |
| 1218 andi(tmp2, reg, 0xFF); |
| 1219 sll(tmp2, tmp2, 24); |
| 1220 or_(tmp, zero_reg, tmp2); |
| 1221 |
| 1222 andi(tmp2, reg, 0xFF00); |
| 1223 sll(tmp2, tmp2, 8); |
| 1224 or_(tmp, tmp, tmp2); |
| 1225 |
| 1226 srl(reg, reg, 8); |
| 1227 andi(tmp2, reg, 0xFF00); |
| 1228 or_(tmp, tmp, tmp2); |
| 1229 |
| 1230 srl(reg, reg, 16); |
| 1231 andi(tmp2, reg, 0xFF); |
| 1232 or_(tmp, tmp, tmp2); |
| 1233 |
| 1234 or_(reg, tmp, zero_reg); |
| 1235 } |
| 1236 } |
| 1237 |
| 1238 void MacroAssembler::ByteSwapUnsigned(Register reg, int operand_size) { |
| 1239 DCHECK(operand_size == 1 || operand_size == 2); |
| 1240 |
| 1241 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { |
| 1242 if (operand_size == 1) { |
| 1243 andi(reg, reg, 0xFF); |
| 1244 } else { |
| 1245 andi(reg, reg, 0xFFFF); |
| 1246 } |
| 1247 // operand_size == 4 is not allowed here; see the DCHECK above. |
| 1248 |
| 1249 wsbh(reg, reg); |
| 1250 rotr(reg, reg, 16); |
| 1251 } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { |
| 1252 if (operand_size == 1) { |
| 1253 sll(reg, reg, 24); |
| 1254 } else { |
| 1255 Register tmp = t0; |
| 1256 |
| 1257 andi(tmp, reg, 0xFF00); |
| 1258 sll(reg, reg, 24); |
| 1259 sll(tmp, tmp, 8); |
| 1260 or_(reg, tmp, reg); |
| 1261 } |
| 1262 } |
| 1263 } |
| 1264 |
1194 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { | 1265 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { |
1195 DCHECK(!rd.is(at)); | 1266 DCHECK(!rd.is(at)); |
1196 DCHECK(!rs.rm().is(at)); | 1267 DCHECK(!rs.rm().is(at)); |
1197 if (IsMipsArchVariant(kMips32r6)) { | 1268 if (IsMipsArchVariant(kMips32r6)) { |
1198 lw(rd, rs); | 1269 lw(rd, rs); |
1199 } else { | 1270 } else { |
1200 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | 1271 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
1201 IsMipsArchVariant(kLoongson)); | 1272 IsMipsArchVariant(kLoongson)); |
1202 if (is_int16(rs.offset() + kMipsLwrOffset) && | 1273 if (is_int16(rs.offset() + kMipsLwrOffset) && |
1203 is_int16(rs.offset() + kMipsLwlOffset)) { | 1274 is_int16(rs.offset() + kMipsLwlOffset)) { |
(...skipping 5591 matching lines...) |
6795 if (mag.shift > 0) sra(result, result, mag.shift); | 6866 if (mag.shift > 0) sra(result, result, mag.shift); |
6796 srl(at, dividend, 31); | 6867 srl(at, dividend, 31); |
6797 Addu(result, result, Operand(at)); | 6868 Addu(result, result, Operand(at)); |
6798 } | 6869 } |
6799 | 6870 |
6800 | 6871 |
6801 } // namespace internal | 6872 } // namespace internal |
6802 } // namespace v8 | 6873 } // namespace v8 |
6803 | 6874 |
6804 #endif // V8_TARGET_ARCH_MIPS | 6875 #endif // V8_TARGET_ARCH_MIPS |
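
A minimal host-side sketch of the semantics the new ByteSwapSigned / ByteSwapUnsigned
macros appear to implement, useful for cross-checking the kMips32r1/kLoongson
shift-and-mask sequence against the wsbh/rotr path. The helper names below
(BSwap32, RefByteSwapSigned, RefByteSwapUnsigned) are illustrative only and are
not part of the V8 tree.

  #include <stdint.h>

  // Reverse the byte order of a 32-bit word.
  static uint32_t BSwap32(uint32_t v) {
    return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
           ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
  }

  // Mirrors ByteSwapSigned: sign-extend a 1-, 2- or 4-byte operand, then swap.
  static uint32_t RefByteSwapSigned(uint32_t reg, int operand_size) {
    if (operand_size == 1) {
      reg = static_cast<uint32_t>(static_cast<int8_t>(reg));
    } else if (operand_size == 2) {
      reg = static_cast<uint32_t>(static_cast<int16_t>(reg));
    }
    return BSwap32(reg);
  }

  // Mirrors ByteSwapUnsigned: zero-extend a 1- or 2-byte operand, then swap.
  static uint32_t RefByteSwapUnsigned(uint32_t reg, int operand_size) {
    reg &= (operand_size == 1) ? 0xFFu : 0xFFFFu;
    return BSwap32(reg);
  }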