Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(32)

Side by Side Diff: src/compiler/mips64/code-generator-mips64.cc

Issue 732403002: MIPS64: Add turbofan support for mips64. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Addressed comments and code cleanup. Created 6 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/compiler/mips64/OWNERS ('k') | src/compiler/mips64/instruction-codes-mips64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/code-generator.h" 5 #include "src/compiler/code-generator.h"
6 #include "src/compiler/code-generator-impl.h" 6 #include "src/compiler/code-generator-impl.h"
7 #include "src/compiler/gap-resolver.h" 7 #include "src/compiler/gap-resolver.h"
8 #include "src/compiler/node-matchers.h" 8 #include "src/compiler/node-matchers.h"
9 #include "src/compiler/node-properties-inl.h" 9 #include "src/compiler/node-properties-inl.h"
10 #include "src/mips/macro-assembler-mips.h" 10 #include "src/mips/macro-assembler-mips.h"
11 #include "src/scopes.h" 11 #include "src/scopes.h"
12 12
13 namespace v8 { 13 namespace v8 {
14 namespace internal { 14 namespace internal {
15 namespace compiler { 15 namespace compiler {
16 16
17 #define __ masm()-> 17 #define __ masm()->
18 18
19 19
20 // TODO(plind): Possibly avoid using these lithium names. 20 // TODO(plind): Possibly avoid using these lithium names.
21 #define kScratchReg kLithiumScratchReg 21 #define kScratchReg kLithiumScratchReg
22 #define kCompareReg kLithiumScratchReg2 22 #define kScratchReg2 kLithiumScratchReg2
23 #define kScratchDoubleReg kLithiumScratchDouble 23 #define kScratchDoubleReg kLithiumScratchDouble
24 24
25 25
26 // TODO(plind): consider renaming these macros. 26 // TODO(plind): consider renaming these macros.
27 #define TRACE_MSG(msg) \ 27 #define TRACE_MSG(msg) \
28 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ 28 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
29 __LINE__) 29 __LINE__)
30 30
31 #define TRACE_UNIMPL() \ 31 #define TRACE_UNIMPL() \
32 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \ 32 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
(...skipping 18 matching lines...) Expand all
51 // Single (Float) and Double register namespace is the same on MIPS, 51 // Single (Float) and Double register namespace is the same on MIPS,
52 // both are typedefs of FPURegister. 52 // both are typedefs of FPURegister.
53 return ToDoubleRegister(op); 53 return ToDoubleRegister(op);
54 } 54 }
55 55
56 Operand InputImmediate(int index) { 56 Operand InputImmediate(int index) {
57 Constant constant = ToConstant(instr_->InputAt(index)); 57 Constant constant = ToConstant(instr_->InputAt(index));
58 switch (constant.type()) { 58 switch (constant.type()) {
59 case Constant::kInt32: 59 case Constant::kInt32:
60 return Operand(constant.ToInt32()); 60 return Operand(constant.ToInt32());
61 case Constant::kInt64:
62 return Operand(constant.ToInt64());
61 case Constant::kFloat32: 63 case Constant::kFloat32:
62 return Operand( 64 return Operand(
63 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED)); 65 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
64 case Constant::kFloat64: 66 case Constant::kFloat64:
65 return Operand( 67 return Operand(
66 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED)); 68 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
67 case Constant::kInt64:
68 case Constant::kExternalReference: 69 case Constant::kExternalReference:
69 case Constant::kHeapObject: 70 case Constant::kHeapObject:
70 // TODO(plind): Maybe we should handle ExtRef & HeapObj here? 71 // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
71 // maybe not done on arm due to const pool ?? 72 // maybe not done on arm due to const pool ??
72 break; 73 break;
73 } 74 }
74 UNREACHABLE(); 75 UNREACHABLE();
75 return Operand(zero_reg); 76 return Operand(zero_reg);
76 } 77 }
77 78
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
126 MipsOperandConverter i(this, instr); 127 MipsOperandConverter i(this, instr);
127 InstructionCode opcode = instr->opcode(); 128 InstructionCode opcode = instr->opcode();
128 129
129 switch (ArchOpcodeField::decode(opcode)) { 130 switch (ArchOpcodeField::decode(opcode)) {
130 case kArchCallCodeObject: { 131 case kArchCallCodeObject: {
131 EnsureSpaceForLazyDeopt(); 132 EnsureSpaceForLazyDeopt();
132 if (instr->InputAt(0)->IsImmediate()) { 133 if (instr->InputAt(0)->IsImmediate()) {
133 __ Call(Handle<Code>::cast(i.InputHeapObject(0)), 134 __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
134 RelocInfo::CODE_TARGET); 135 RelocInfo::CODE_TARGET);
135 } else { 136 } else {
136 __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag); 137 __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
137 __ Call(at); 138 __ Call(at);
138 } 139 }
139 AddSafepointAndDeopt(instr); 140 AddSafepointAndDeopt(instr);
140 break; 141 break;
141 } 142 }
142 case kArchCallJSFunction: { 143 case kArchCallJSFunction: {
143 EnsureSpaceForLazyDeopt(); 144 EnsureSpaceForLazyDeopt();
144 Register func = i.InputRegister(0); 145 Register func = i.InputRegister(0);
145 if (FLAG_debug_code) { 146 if (FLAG_debug_code) {
146 // Check the function's context matches the context argument. 147 // Check the function's context matches the context argument.
147 __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); 148 __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
148 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg)); 149 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
149 } 150 }
150 151
151 __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); 152 __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
152 __ Call(at); 153 __ Call(at);
153 AddSafepointAndDeopt(instr); 154 AddSafepointAndDeopt(instr);
154 break; 155 break;
155 } 156 }
156 case kArchJmp: 157 case kArchJmp:
157 __ Branch(GetLabel(i.InputRpo(0))); 158 __ Branch(GetLabel(i.InputRpo(0)));
158 break; 159 break;
159 case kArchNop: 160 case kArchNop:
160 // don't emit code for nops. 161 // don't emit code for nops.
161 break; 162 break;
162 case kArchRet: 163 case kArchRet:
163 AssembleReturn(); 164 AssembleReturn();
164 break; 165 break;
165 case kArchStackPointer: 166 case kArchStackPointer:
166 __ mov(i.OutputRegister(), sp); 167 __ mov(i.OutputRegister(), sp);
167 break; 168 break;
168 case kArchTruncateDoubleToI: 169 case kArchTruncateDoubleToI:
169 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0)); 170 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
170 break; 171 break;
171 case kMipsAdd: 172 case kMips64Add:
172 __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 173 __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
173 break; 174 break;
174 case kMipsAddOvf: 175 case kMips64Dadd:
175 __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), 176 __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
176 i.InputOperand(1), kCompareReg, kScratchReg);
177 break; 177 break;
178 case kMipsSub: 178 case kMips64Sub:
179 __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 179 __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
180 break; 180 break;
181 case kMipsSubOvf: 181 case kMips64Dsub:
182 __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), 182 __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
183 i.InputOperand(1), kCompareReg, kScratchReg);
184 break; 183 break;
185 case kMipsMul: 184 case kMips64Mul:
186 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 185 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
187 break; 186 break;
188 case kMipsMulHigh: 187 case kMips64MulHigh:
189 __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 188 __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
190 break; 189 break;
191 case kMipsMulHighU: 190 case kMips64MulHighU:
192 __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 191 __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
193 break; 192 break;
194 case kMipsDiv: 193 case kMips64Div:
195 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 194 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
196 break; 195 break;
197 case kMipsDivU: 196 case kMips64DivU:
198 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 197 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
199 break; 198 break;
200 case kMipsMod: 199 case kMips64Mod:
201 __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 200 __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
202 break; 201 break;
203 case kMipsModU: 202 case kMips64ModU:
204 __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 203 __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
205 break; 204 break;
206 case kMipsAnd: 205 case kMips64Dmul:
206 __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
207 break;
208 case kMips64Ddiv:
209 __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
210 break;
211 case kMips64DdivU:
212 __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
213 break;
214 case kMips64Dmod:
215 __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
216 break;
217 case kMips64DmodU:
218 __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
219 break;
207 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 220 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
208 break; 221 break;
209 case kMipsOr: 222 case kMips64And:
223 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
224 break;
225 case kMips64Or:
210 __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 226 __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
211 break; 227 break;
212 case kMipsXor: 228 case kMips64Xor:
213 __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 229 __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
214 break; 230 break;
215 case kMipsShl: 231 case kMips64Shl:
216 if (instr->InputAt(1)->IsRegister()) { 232 if (instr->InputAt(1)->IsRegister()) {
217 __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 233 __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
218 } else { 234 } else {
219 int32_t imm = i.InputOperand(1).immediate(); 235 int32_t imm = i.InputOperand(1).immediate();
220 __ sll(i.OutputRegister(), i.InputRegister(0), imm); 236 __ sll(i.OutputRegister(), i.InputRegister(0), imm);
221 } 237 }
222 break; 238 break;
223 case kMipsShr: 239 case kMips64Shr:
224 if (instr->InputAt(1)->IsRegister()) { 240 if (instr->InputAt(1)->IsRegister()) {
225 __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 241 __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
226 } else { 242 } else {
227 int32_t imm = i.InputOperand(1).immediate(); 243 int32_t imm = i.InputOperand(1).immediate();
228 __ srl(i.OutputRegister(), i.InputRegister(0), imm); 244 __ srl(i.OutputRegister(), i.InputRegister(0), imm);
229 } 245 }
230 break; 246 break;
231 case kMipsSar: 247 case kMips64Sar:
232 if (instr->InputAt(1)->IsRegister()) { 248 if (instr->InputAt(1)->IsRegister()) {
233 __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 249 __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
234 } else { 250 } else {
235 int32_t imm = i.InputOperand(1).immediate(); 251 int32_t imm = i.InputOperand(1).immediate();
236 __ sra(i.OutputRegister(), i.InputRegister(0), imm); 252 __ sra(i.OutputRegister(), i.InputRegister(0), imm);
237 } 253 }
238 break; 254 break;
239 case kMipsRor: 255 case kMips64Ext:
256 __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
257 i.InputInt8(2));
258 break;
259 case kMips64Dext:
260 __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
261 i.InputInt8(2));
262 break;
263 case kMips64Dshl:
264 if (instr->InputAt(1)->IsRegister()) {
265 __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
266 } else {
267 int32_t imm = i.InputOperand(1).immediate();
268 if (imm < 32) {
269 __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
270 } else {
271 __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
272 }
273 }
274 break;
275 case kMips64Dshr:
276 if (instr->InputAt(1)->IsRegister()) {
277 __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
278 } else {
279 int32_t imm = i.InputOperand(1).immediate();
280 if (imm < 32) {
281 __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
282 } else {
283 __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
284 }
285 }
286 break;
287 case kMips64Dsar:
288 if (instr->InputAt(1)->IsRegister()) {
289 __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
290 } else {
291 int32_t imm = i.InputOperand(1).immediate();
292 if (imm < 32) {
293 __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
294 } else {
295 __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
296 }
297 }
298 break;
299 case kMips64Ror:
240 __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 300 __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
241 break; 301 break;
242 case kMipsTst: 302 case kMips64Dror:
243 // Pseudo-instruction used for tst/branch. No opcode emitted here. 303 __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
244 break; 304 break;
245 case kMipsCmp: 305 case kMips64Tst:
306 case kMips64Tst32:
246 // Pseudo-instruction used for cmp/branch. No opcode emitted here. 307 // Pseudo-instruction used for cmp/branch. No opcode emitted here.
247 break; 308 break;
248 case kMipsMov: 309 case kMips64Cmp:
310 case kMips64Cmp32:
311 // Pseudo-instruction used for cmp/branch. No opcode emitted here.
312 break;
313 case kMips64Mov:
249 // TODO(plind): Should we combine mov/li like this, or use separate instr? 314 // TODO(plind): Should we combine mov/li like this, or use separate instr?
250 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType 315 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
251 if (HasRegisterInput(instr, 0)) { 316 if (HasRegisterInput(instr, 0)) {
252 __ mov(i.OutputRegister(), i.InputRegister(0)); 317 __ mov(i.OutputRegister(), i.InputRegister(0));
253 } else { 318 } else {
254 __ li(i.OutputRegister(), i.InputOperand(0)); 319 __ li(i.OutputRegister(), i.InputOperand(0));
255 } 320 }
256 break; 321 break;
257 322
258 case kMipsCmpD: 323 case kMips64CmpD:
259 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here. 324 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
260 break; 325 break;
261 case kMipsAddD: 326 case kMips64AddD:
262 // TODO(plind): add special case: combine mult & add. 327 // TODO(plind): add special case: combine mult & add.
263 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 328 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
264 i.InputDoubleRegister(1)); 329 i.InputDoubleRegister(1));
265 break; 330 break;
266 case kMipsSubD: 331 case kMips64SubD:
267 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 332 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
268 i.InputDoubleRegister(1)); 333 i.InputDoubleRegister(1));
269 break; 334 break;
270 case kMipsMulD: 335 case kMips64MulD:
271 // TODO(plind): add special case: right op is -1.0, see arm port. 336 // TODO(plind): add special case: right op is -1.0, see arm port.
272 __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 337 __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
273 i.InputDoubleRegister(1)); 338 i.InputDoubleRegister(1));
274 break; 339 break;
275 case kMipsDivD: 340 case kMips64DivD:
276 __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 341 __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
277 i.InputDoubleRegister(1)); 342 i.InputDoubleRegister(1));
278 break; 343 break;
279 case kMipsModD: { 344 case kMips64ModD: {
280 // TODO(bmeurer): We should really get rid of this special instruction, 345 // TODO(bmeurer): We should really get rid of this special instruction,
281 // and generate a CallAddress instruction instead. 346 // and generate a CallAddress instruction instead.
282 FrameScope scope(masm(), StackFrame::MANUAL); 347 FrameScope scope(masm(), StackFrame::MANUAL);
283 __ PrepareCallCFunction(0, 2, kScratchReg); 348 __ PrepareCallCFunction(0, 2, kScratchReg);
284 __ MovToFloatParameters(i.InputDoubleRegister(0), 349 __ MovToFloatParameters(i.InputDoubleRegister(0),
285 i.InputDoubleRegister(1)); 350 i.InputDoubleRegister(1));
286 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), 351 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
287 0, 2); 352 0, 2);
288 // Move the result in the double result register. 353 // Move the result in the double result register.
289 __ MovFromFloatResult(i.OutputDoubleRegister()); 354 __ MovFromFloatResult(i.OutputDoubleRegister());
290 break; 355 break;
291 } 356 }
292 case kMipsSqrtD: { 357 case kMips64FloorD: {
358 __ floor_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
359 __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
360 break;
361 }
362 case kMips64CeilD: {
363 __ ceil_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
364 __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
365 break;
366 }
367 case kMips64RoundTruncateD: {
368 __ trunc_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
369 __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
370 break;
371 }
372 case kMips64SqrtD: {
293 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 373 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
294 break; 374 break;
295 } 375 }
296 case kMipsCvtSD: { 376 case kMips64CvtSD: {
297 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); 377 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
298 break; 378 break;
299 } 379 }
300 case kMipsCvtDS: { 380 case kMips64CvtDS: {
301 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); 381 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
302 break; 382 break;
303 } 383 }
304 case kMipsCvtDW: { 384 case kMips64CvtDW: {
305 FPURegister scratch = kScratchDoubleReg; 385 FPURegister scratch = kScratchDoubleReg;
306 __ mtc1(i.InputRegister(0), scratch); 386 __ mtc1(i.InputRegister(0), scratch);
307 __ cvt_d_w(i.OutputDoubleRegister(), scratch); 387 __ cvt_d_w(i.OutputDoubleRegister(), scratch);
308 break; 388 break;
309 } 389 }
310 case kMipsCvtDUw: { 390 case kMips64CvtDUw: {
311 FPURegister scratch = kScratchDoubleReg; 391 FPURegister scratch = kScratchDoubleReg;
312 __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch); 392 __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
313 break; 393 break;
314 } 394 }
315 case kMipsTruncWD: { 395 case kMips64TruncWD: {
316 FPURegister scratch = kScratchDoubleReg; 396 FPURegister scratch = kScratchDoubleReg;
317 // Other arches use round to zero here, so we follow. 397 // Other arches use round to zero here, so we follow.
318 __ trunc_w_d(scratch, i.InputDoubleRegister(0)); 398 __ trunc_w_d(scratch, i.InputDoubleRegister(0));
319 __ mfc1(i.OutputRegister(), scratch); 399 __ mfc1(i.OutputRegister(), scratch);
320 break; 400 break;
321 } 401 }
322 case kMipsTruncUwD: { 402 case kMips64TruncUwD: {
323 FPURegister scratch = kScratchDoubleReg; 403 FPURegister scratch = kScratchDoubleReg;
324 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function. 404 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
325 __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch); 405 __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
326 break; 406 break;
327 } 407 }
328 // ... more basic instructions ... 408 // ... more basic instructions ...
329 409
330 case kMipsLbu: 410 case kMips64Lbu:
331 __ lbu(i.OutputRegister(), i.MemoryOperand()); 411 __ lbu(i.OutputRegister(), i.MemoryOperand());
332 break; 412 break;
333 case kMipsLb: 413 case kMips64Lb:
334 __ lb(i.OutputRegister(), i.MemoryOperand()); 414 __ lb(i.OutputRegister(), i.MemoryOperand());
335 break; 415 break;
336 case kMipsSb: 416 case kMips64Sb:
337 __ sb(i.InputRegister(2), i.MemoryOperand()); 417 __ sb(i.InputRegister(2), i.MemoryOperand());
338 break; 418 break;
339 case kMipsLhu: 419 case kMips64Lhu:
340 __ lhu(i.OutputRegister(), i.MemoryOperand()); 420 __ lhu(i.OutputRegister(), i.MemoryOperand());
341 break; 421 break;
342 case kMipsLh: 422 case kMips64Lh:
343 __ lh(i.OutputRegister(), i.MemoryOperand()); 423 __ lh(i.OutputRegister(), i.MemoryOperand());
344 break; 424 break;
345 case kMipsSh: 425 case kMips64Sh:
346 __ sh(i.InputRegister(2), i.MemoryOperand()); 426 __ sh(i.InputRegister(2), i.MemoryOperand());
347 break; 427 break;
348 case kMipsLw: 428 case kMips64Lw:
349 __ lw(i.OutputRegister(), i.MemoryOperand()); 429 __ lw(i.OutputRegister(), i.MemoryOperand());
350 break; 430 break;
351 case kMipsSw: 431 case kMips64Ld:
432 __ ld(i.OutputRegister(), i.MemoryOperand());
433 break;
434 case kMips64Sw:
352 __ sw(i.InputRegister(2), i.MemoryOperand()); 435 __ sw(i.InputRegister(2), i.MemoryOperand());
353 break; 436 break;
354 case kMipsLwc1: { 437 case kMips64Sd:
438 __ sd(i.InputRegister(2), i.MemoryOperand());
439 break;
440 case kMips64Lwc1: {
355 __ lwc1(i.OutputSingleRegister(), i.MemoryOperand()); 441 __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
356 break; 442 break;
357 } 443 }
358 case kMipsSwc1: { 444 case kMips64Swc1: {
359 int index = 0; 445 int index = 0;
360 MemOperand operand = i.MemoryOperand(&index); 446 MemOperand operand = i.MemoryOperand(&index);
361 __ swc1(i.InputSingleRegister(index), operand); 447 __ swc1(i.InputSingleRegister(index), operand);
362 break; 448 break;
363 } 449 }
364 case kMipsLdc1: 450 case kMips64Ldc1:
365 __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand()); 451 __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
366 break; 452 break;
367 case kMipsSdc1: 453 case kMips64Sdc1:
368 __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand()); 454 __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
369 break; 455 break;
370 case kMipsPush: 456 case kMips64Push:
371 __ Push(i.InputRegister(0)); 457 __ Push(i.InputRegister(0));
372 break; 458 break;
373 case kMipsStoreWriteBarrier: 459 case kMips64StoreWriteBarrier:
374 Register object = i.InputRegister(0); 460 Register object = i.InputRegister(0);
375 Register index = i.InputRegister(1); 461 Register index = i.InputRegister(1);
376 Register value = i.InputRegister(2); 462 Register value = i.InputRegister(2);
377 __ addu(index, object, index); 463 __ daddu(index, object, index);
378 __ sw(value, MemOperand(index)); 464 __ sd(value, MemOperand(index));
379 SaveFPRegsMode mode = 465 SaveFPRegsMode mode =
380 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; 466 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
381 RAStatus ra_status = kRAHasNotBeenSaved; 467 RAStatus ra_status = kRAHasNotBeenSaved;
382 __ RecordWrite(object, index, value, ra_status, mode); 468 __ RecordWrite(object, index, value, ra_status, mode);
383 break; 469 break;
384 } 470 }
385 } 471 }
386 472
387 473
388 #define UNSUPPORTED_COND(opcode, condition) \ 474 #define UNSUPPORTED_COND(opcode, condition) \
(...skipping 20 matching lines...) Expand all
409 495
410 // MIPS does not have condition code flags, so compare and branch are 496 // MIPS does not have condition code flags, so compare and branch are
411 // implemented differently than on the other arch's. The compare operations 497 // implemented differently than on the other arch's. The compare operations
412 // emit mips psuedo-instructions, which are handled here by branch 498 // emit mips psuedo-instructions, which are handled here by branch
413 // instructions that do the actual comparison. Essential that the input 499 // instructions that do the actual comparison. Essential that the input
414 // registers to compare psuedo-op are not modified before this branch op, as 500 // registers to compare psuedo-op are not modified before this branch op, as
415 // they are tested here. 501 // they are tested here.
416 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were 502 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
417 // not separated by other instructions. 503 // not separated by other instructions.
418 504
419 if (instr->arch_opcode() == kMipsTst) { 505 if (instr->arch_opcode() == kMips64Tst) {
420 switch (condition) { 506 switch (condition) {
421 case kNotEqual: 507 case kNotEqual:
422 cc = ne; 508 cc = ne;
423 break; 509 break;
424 case kEqual: 510 case kEqual:
425 cc = eq; 511 cc = eq;
426 break; 512 break;
427 default: 513 default:
428 UNSUPPORTED_COND(kMipsTst, condition); 514 UNSUPPORTED_COND(kMips64Tst, condition);
429 break; 515 break;
430 } 516 }
431 __ And(at, i.InputRegister(0), i.InputOperand(1)); 517 __ And(at, i.InputRegister(0), i.InputOperand(1));
432 __ Branch(tlabel, cc, at, Operand(zero_reg)); 518 __ Branch(tlabel, cc, at, Operand(zero_reg));
433 519 } else if (instr->arch_opcode() == kMips64Tst32) {
434 } else if (instr->arch_opcode() == kMipsAddOvf || 520 switch (condition) {
435 instr->arch_opcode() == kMipsSubOvf) { 521 case kNotEqual:
436 // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow. 522 cc = ne;
523 break;
524 case kEqual:
525 cc = eq;
526 break;
527 default:
528 UNSUPPORTED_COND(kMips64Tst32, condition);
529 break;
530 }
531 // Zero-extend the registers, since on MIPS64 only a 64-bit operand
532 // branch-and-compare op. is available.
533 // This is a disadvantage when performing 32-bit operations on MIPS64.
534 // Try to force the Word64 representation to be preferred globally in the
535 // front-end for MIPS64, even for Word32.
536 __ And(at, i.InputRegister(0), i.InputOperand(1));
537 __ Dext(at, at, 0, 32);
538 __ Branch(tlabel, cc, at, Operand(zero_reg));
539 } else if (instr->arch_opcode() == kMips64Dadd ||
540 instr->arch_opcode() == kMips64Dsub) {
437 switch (condition) { 541 switch (condition) {
438 case kOverflow: 542 case kOverflow:
439 cc = lt; 543 cc = ne;
440 break; 544 break;
441 case kNotOverflow: 545 case kNotOverflow:
442 cc = ge; 546 cc = eq;
443 break; 547 break;
444 default: 548 default:
445 UNSUPPORTED_COND(kMipsAddOvf, condition); 549 UNSUPPORTED_COND(kMips64Dadd, condition);
446 break; 550 break;
447 } 551 }
448 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
449 552
450 } else if (instr->arch_opcode() == kMipsCmp) { 553 __ dsra32(kScratchReg, i.OutputRegister(), 0);
554 __ sra(at, i.OutputRegister(), 31);
555 __ Branch(tlabel, cc, at, Operand(kScratchReg));
556 } else if (instr->arch_opcode() == kMips64Cmp) {
451 switch (condition) { 557 switch (condition) {
452 case kEqual: 558 case kEqual:
453 cc = eq; 559 cc = eq;
454 break; 560 break;
455 case kNotEqual: 561 case kNotEqual:
456 cc = ne; 562 cc = ne;
457 break; 563 break;
458 case kSignedLessThan: 564 case kSignedLessThan:
459 cc = lt; 565 cc = lt;
460 break; 566 break;
(...skipping 12 matching lines...) Expand all
473 case kUnsignedGreaterThanOrEqual: 579 case kUnsignedGreaterThanOrEqual:
474 cc = hs; 580 cc = hs;
475 break; 581 break;
476 case kUnsignedLessThanOrEqual: 582 case kUnsignedLessThanOrEqual:
477 cc = ls; 583 cc = ls;
478 break; 584 break;
479 case kUnsignedGreaterThan: 585 case kUnsignedGreaterThan:
480 cc = hi; 586 cc = hi;
481 break; 587 break;
482 default: 588 default:
483 UNSUPPORTED_COND(kMipsCmp, condition); 589 UNSUPPORTED_COND(kMips64Cmp, condition);
484 break; 590 break;
485 } 591 }
486 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); 592 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
487 593
488 if (!fallthru) __ Branch(flabel); // no fallthru to flabel. 594 if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
489 __ bind(&done); 595 __ bind(&done);
490 596
491 } else if (instr->arch_opcode() == kMipsCmpD) { 597 } else if (instr->arch_opcode() == kMips64Cmp32) {
598 switch (condition) {
599 case kEqual:
600 cc = eq;
601 break;
602 case kNotEqual:
603 cc = ne;
604 break;
605 case kSignedLessThan:
606 cc = lt;
607 break;
608 case kSignedGreaterThanOrEqual:
609 cc = ge;
610 break;
611 case kSignedLessThanOrEqual:
612 cc = le;
613 break;
614 case kSignedGreaterThan:
615 cc = gt;
616 break;
617 case kUnsignedLessThan:
618 cc = lo;
619 break;
620 case kUnsignedGreaterThanOrEqual:
621 cc = hs;
622 break;
623 case kUnsignedLessThanOrEqual:
624 cc = ls;
625 break;
626 case kUnsignedGreaterThan:
627 cc = hi;
628 break;
629 default:
630 UNSUPPORTED_COND(kMips64Cmp32, condition);
631 break;
632 }
633
634 switch (condition) {
635 case kEqual:
636 case kNotEqual:
637 case kSignedLessThan:
638 case kSignedGreaterThanOrEqual:
639 case kSignedLessThanOrEqual:
640 case kSignedGreaterThan:
641 // Sign-extend the registers, since on MIPS64 only a 64-bit operand
642 // branch-and-compare op. is available.
643 __ sll(i.InputRegister(0), i.InputRegister(0), 0);
644 if (instr->InputAt(1)->IsRegister()) {
645 __ sll(i.InputRegister(1), i.InputRegister(1), 0);
646 }
647 break;
648 case kUnsignedLessThan:
649 case kUnsignedGreaterThanOrEqual:
650 case kUnsignedLessThanOrEqual:
651 case kUnsignedGreaterThan:
652 // Zero-extend the registers, since on MIPS64 only a 64-bit operand
653 // branch-and-compare op. is available.
654 __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
655 if (instr->InputAt(1)->IsRegister()) {
656 __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
657 }
658 break;
659 default:
660 UNSUPPORTED_COND(kMips64Cmp, condition);
661 break;
662 }
663 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
664
665 if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
666 __ bind(&done);
667 } else if (instr->arch_opcode() == kMips64CmpD) {
492 // TODO(dusmil) optimize unordered checks to use less instructions 668 // TODO(dusmil) optimize unordered checks to use less instructions
493 // even if we have to unfold BranchF macro. 669 // even if we have to unfold BranchF macro.
494 Label* nan = flabel; 670 Label* nan = flabel;
495 switch (condition) { 671 switch (condition) {
496 case kUnorderedEqual: 672 case kUnorderedEqual:
497 cc = eq; 673 cc = eq;
498 break; 674 break;
499 case kUnorderedNotEqual: 675 case kUnorderedNotEqual:
500 cc = ne; 676 cc = ne;
501 nan = tlabel; 677 nan = tlabel;
502 break; 678 break;
503 case kUnorderedLessThan: 679 case kUnorderedLessThan:
504 cc = lt; 680 cc = lt;
505 break; 681 break;
506 case kUnorderedGreaterThanOrEqual: 682 case kUnorderedGreaterThanOrEqual:
507 cc = ge; 683 cc = ge;
508 nan = tlabel; 684 nan = tlabel;
509 break; 685 break;
510 case kUnorderedLessThanOrEqual: 686 case kUnorderedLessThanOrEqual:
511 cc = le; 687 cc = le;
512 break; 688 break;
513 case kUnorderedGreaterThan: 689 case kUnorderedGreaterThan:
514 cc = gt; 690 cc = gt;
515 nan = tlabel; 691 nan = tlabel;
516 break; 692 break;
517 default: 693 default:
518 UNSUPPORTED_COND(kMipsCmpD, condition); 694 UNSUPPORTED_COND(kMips64CmpD, condition);
519 break; 695 break;
520 } 696 }
521 __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0), 697 __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
522 i.InputDoubleRegister(1)); 698 i.InputDoubleRegister(1));
523 699
524 if (!fallthru) __ Branch(flabel); // no fallthru to flabel. 700 if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
525 __ bind(&done); 701 __ bind(&done);
526 702
527 } else { 703 } else {
528 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", 704 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
(...skipping 11 matching lines...) Expand all
540 716
541 // Materialize a full 32-bit 1 or 0 value. The result register is always the 717 // Materialize a full 32-bit 1 or 0 value. The result register is always the
542 // last output of the instruction. 718 // last output of the instruction.
543 Label false_value; 719 Label false_value;
544 DCHECK_NE(0, instr->OutputCount()); 720 DCHECK_NE(0, instr->OutputCount());
545 Register result = i.OutputRegister(instr->OutputCount() - 1); 721 Register result = i.OutputRegister(instr->OutputCount() - 1);
546 Condition cc = kNoCondition; 722 Condition cc = kNoCondition;
547 723
548 // MIPS does not have condition code flags, so compare and branch are 724 // MIPS does not have condition code flags, so compare and branch are
549 // implemented differently than on the other arch's. The compare operations 725 // implemented differently than on the other arch's. The compare operations
550 // emit mips psuedo-instructions, which are checked and handled here. 726 // emit mips pseudo-instructions, which are checked and handled here.
551 727
552 // For materializations, we use delay slot to set the result true, and 728 // For materializations, we use delay slot to set the result true, and
553 // in the false case, where we fall thru the branch, we reset the result 729 // in the false case, where we fall through the branch, we reset the result
554 // false. 730 // false.
555 731
556 // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were 732 if (instr->arch_opcode() == kMips64Tst) {
557 // not separated by other instructions.
558 if (instr->arch_opcode() == kMipsTst) {
559 switch (condition) { 733 switch (condition) {
560 case kNotEqual: 734 case kNotEqual:
561 cc = ne; 735 cc = ne;
562 break; 736 break;
563 case kEqual: 737 case kEqual:
564 cc = eq; 738 cc = eq;
565 break; 739 break;
566 default: 740 default:
567 UNSUPPORTED_COND(kMipsTst, condition); 741 UNSUPPORTED_COND(kMips64Tst, condition);
568 break; 742 break;
569 } 743 }
570 __ And(at, i.InputRegister(0), i.InputOperand(1)); 744 __ And(at, i.InputRegister(0), i.InputOperand(1));
571 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg)); 745 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
572 __ li(result, Operand(1)); // In delay slot. 746 __ li(result, Operand(1)); // In delay slot.
573 747 } else if (instr->arch_opcode() == kMips64Tst32) {
574 } else if (instr->arch_opcode() == kMipsAddOvf || 748 switch (condition) {
575 instr->arch_opcode() == kMipsSubOvf) { 749 case kNotEqual:
576 // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow. 750 cc = ne;
751 break;
752 case kEqual:
753 cc = eq;
754 break;
755 default:
756 UNSUPPORTED_COND(kMips64Tst, condition);
757 break;
758 }
759 // Zero-extend register on MIPS64 only 64-bit operand
760 // branch and compare op. is available.
761 __ And(at, i.InputRegister(0), i.InputOperand(1));
762 __ Dext(at, at, 0, 32);
763 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
764 __ li(result, Operand(1)); // In delay slot.
765 } else if (instr->arch_opcode() == kMips64Dadd ||
766 instr->arch_opcode() == kMips64Dsub) {
577 switch (condition) { 767 switch (condition) {
578 case kOverflow: 768 case kOverflow:
579 cc = lt; 769 cc = ne;
580 break; 770 break;
581 case kNotOverflow: 771 case kNotOverflow:
582 cc = ge; 772 cc = eq;
583 break; 773 break;
584 default: 774 default:
585 UNSUPPORTED_COND(kMipsAddOvf, condition); 775 UNSUPPORTED_COND(kMips64DAdd, condition);
586 break; 776 break;
587 } 777 }
588 __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg)); 778 __ dsra32(kScratchReg, i.OutputRegister(), 0);
779 __ sra(at, i.OutputRegister(), 31);
780 __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
589 __ li(result, Operand(1)); // In delay slot. 781 __ li(result, Operand(1)); // In delay slot.
590 782 } else if (instr->arch_opcode() == kMips64Cmp) {
591
592 } else if (instr->arch_opcode() == kMipsCmp) {
593 Register left = i.InputRegister(0); 783 Register left = i.InputRegister(0);
594 Operand right = i.InputOperand(1); 784 Operand right = i.InputOperand(1);
595 switch (condition) { 785 switch (condition) {
596 case kEqual: 786 case kEqual:
597 cc = eq; 787 cc = eq;
598 break; 788 break;
599 case kNotEqual: 789 case kNotEqual:
600 cc = ne; 790 cc = ne;
601 break; 791 break;
602 case kSignedLessThan: 792 case kSignedLessThan:
(...skipping 14 matching lines...) Expand all
617 case kUnsignedGreaterThanOrEqual: 807 case kUnsignedGreaterThanOrEqual:
618 cc = hs; 808 cc = hs;
619 break; 809 break;
620 case kUnsignedLessThanOrEqual: 810 case kUnsignedLessThanOrEqual:
621 cc = ls; 811 cc = ls;
622 break; 812 break;
623 case kUnsignedGreaterThan: 813 case kUnsignedGreaterThan:
624 cc = hi; 814 cc = hi;
625 break; 815 break;
626 default: 816 default:
627 UNSUPPORTED_COND(kMipsCmp, condition); 817 UNSUPPORTED_COND(kMips64Cmp, condition);
628 break; 818 break;
629 } 819 }
630 __ Branch(USE_DELAY_SLOT, &done, cc, left, right); 820 __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
631 __ li(result, Operand(1)); // In delay slot. 821 __ li(result, Operand(1)); // In delay slot.
822 } else if (instr->arch_opcode() == kMips64Cmp32) {
823 Register left = i.InputRegister(0);
824 Operand right = i.InputOperand(1);
825 switch (condition) {
826 case kEqual:
827 cc = eq;
828 break;
829 case kNotEqual:
830 cc = ne;
831 break;
832 case kSignedLessThan:
833 cc = lt;
834 break;
835 case kSignedGreaterThanOrEqual:
836 cc = ge;
837 break;
838 case kSignedLessThanOrEqual:
839 cc = le;
840 break;
841 case kSignedGreaterThan:
842 cc = gt;
843 break;
844 case kUnsignedLessThan:
845 cc = lo;
846 break;
847 case kUnsignedGreaterThanOrEqual:
848 cc = hs;
849 break;
850 case kUnsignedLessThanOrEqual:
851 cc = ls;
852 break;
853 case kUnsignedGreaterThan:
854 cc = hi;
855 break;
856 default:
857 UNSUPPORTED_COND(kMips64Cmp, condition);
858 break;
859 }
632 860
633 } else if (instr->arch_opcode() == kMipsCmpD) { 861 switch (condition) {
862 case kEqual:
863 case kNotEqual:
864 case kSignedLessThan:
865 case kSignedGreaterThanOrEqual:
866 case kSignedLessThanOrEqual:
867 case kSignedGreaterThan:
868 // Sign-extend registers on MIPS64 only 64-bit operand
869 // branch and compare op. is available.
870 __ sll(left, left, 0);
871 if (instr->InputAt(1)->IsRegister()) {
872 __ sll(i.InputRegister(1), i.InputRegister(1), 0);
873 }
874 break;
875 case kUnsignedLessThan:
876 case kUnsignedGreaterThanOrEqual:
877 case kUnsignedLessThanOrEqual:
878 case kUnsignedGreaterThan:
879 // Zero-extend registers on MIPS64 only 64-bit operand
880 // branch and compare op. is available.
881 __ Dext(left, left, 0, 32);
882 if (instr->InputAt(1)->IsRegister()) {
883 __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
884 }
885 break;
886 default:
887 UNSUPPORTED_COND(kMips64Cmp32, condition);
888 break;
889 }
890 __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
891 __ li(result, Operand(1)); // In delay slot.
892 } else if (instr->arch_opcode() == kMips64CmpD) {
634 FPURegister left = i.InputDoubleRegister(0); 893 FPURegister left = i.InputDoubleRegister(0);
635 FPURegister right = i.InputDoubleRegister(1); 894 FPURegister right = i.InputDoubleRegister(1);
636 // TODO(plind): Provide NaN-testing macro-asm function without need for 895 // TODO(plind): Provide NaN-testing macro-asm function without need for
637 // BranchF. 896 // BranchF.
638 FPURegister dummy1 = f0; 897 FPURegister dummy1 = f0;
639 FPURegister dummy2 = f2; 898 FPURegister dummy2 = f2;
640 switch (condition) { 899 switch (condition) {
641 case kUnorderedEqual: 900 case kUnorderedEqual:
642 // TODO(plind): improve the NaN testing throughout this function. 901 // TODO(plind): improve the NaN testing throughout this function.
643 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2); 902 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
(...skipping 16 matching lines...) Expand all
660 case kUnorderedLessThanOrEqual: 919 case kUnorderedLessThanOrEqual:
661 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2); 920 __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
662 cc = le; 921 cc = le;
663 break; 922 break;
664 case kUnorderedGreaterThan: 923 case kUnorderedGreaterThan:
665 __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2); 924 __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
666 __ li(result, Operand(1)); // In delay slot - returns 1 on NaN. 925 __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
667 cc = gt; 926 cc = gt;
668 break; 927 break;
669 default: 928 default:
670 UNSUPPORTED_COND(kMipsCmp, condition); 929 UNSUPPORTED_COND(kMips64Cmp, condition);
671 break; 930 break;
672 } 931 }
673 __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right); 932 __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
674 __ li(result, Operand(1)); // In delay slot - branch taken returns 1. 933 __ li(result, Operand(1)); // In delay slot - branch taken returns 1.
675 // Fall-thru (branch not taken) returns 0. 934 // Fall-thru (branch not taken) returns 0.
676 935
677 } else { 936 } else {
678 PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n", 937 PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
679 instr->arch_opcode()); 938 instr->arch_opcode());
680 TRACE_UNIMPL(); 939 TRACE_UNIMPL();
681 UNIMPLEMENTED(); 940 UNIMPLEMENTED();
682 } 941 }
683 // Fallthru case is the false materialization. 942 // Fallthru case is the false materialization.
684 __ bind(&false_value); 943 __ bind(&false_value);
685 __ li(result, Operand(0)); 944 __ li(result, Operand(static_cast<int64_t>(0)));
686 __ bind(&done); 945 __ bind(&done);
687 } 946 }
688 947
689 948
690 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) { 949 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
691 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( 950 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
692 isolate(), deoptimization_id, Deoptimizer::LAZY); 951 isolate(), deoptimization_id, Deoptimizer::LAZY);
693 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY); 952 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
694 } 953 }
695 954
(...skipping 12 matching lines...) Expand all
708 register_save_area_size += kPointerSize; 967 register_save_area_size += kPointerSize;
709 } 968 }
710 frame()->SetRegisterSaveAreaSize(register_save_area_size); 969 frame()->SetRegisterSaveAreaSize(register_save_area_size);
711 __ MultiPush(saves); 970 __ MultiPush(saves);
712 } 971 }
713 } else if (descriptor->IsJSFunctionCall()) { 972 } else if (descriptor->IsJSFunctionCall()) {
714 CompilationInfo* info = this->info(); 973 CompilationInfo* info = this->info();
715 __ Prologue(info->IsCodePreAgingActive()); 974 __ Prologue(info->IsCodePreAgingActive());
716 frame()->SetRegisterSaveAreaSize( 975 frame()->SetRegisterSaveAreaSize(
717 StandardFrameConstants::kFixedFrameSizeFromFp); 976 StandardFrameConstants::kFixedFrameSizeFromFp);
977
978 // Sloppy mode functions and builtins need to replace the receiver with the
979 // global proxy when called as functions (without an explicit receiver
980 // object).
981 // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
982 if (info->strict_mode() == SLOPPY && !info->is_native()) {
983 Label ok;
984 // +2 for return address and saved frame pointer.
985 int receiver_slot = info->scope()->num_parameters() + 2;
986 __ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
987 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
988 __ Branch(&ok, ne, a2, Operand(at));
989
990 __ ld(a2, GlobalObjectOperand());
991 __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
992 __ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
993 __ bind(&ok);
994 }
718 } else { 995 } else {
719 __ StubPrologue(); 996 __ StubPrologue();
720 frame()->SetRegisterSaveAreaSize( 997 frame()->SetRegisterSaveAreaSize(
721 StandardFrameConstants::kFixedFrameSizeFromFp); 998 StandardFrameConstants::kFixedFrameSizeFromFp);
722 } 999 }
723 int stack_slots = frame()->GetSpillSlotCount(); 1000 int stack_slots = frame()->GetSpillSlotCount();
724 if (stack_slots > 0) { 1001 if (stack_slots > 0) {
725 __ Subu(sp, sp, Operand(stack_slots * kPointerSize)); 1002 __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
726 } 1003 }
727 } 1004 }
728 1005
729 1006
730 void CodeGenerator::AssembleReturn() { 1007 void CodeGenerator::AssembleReturn() {
731 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); 1008 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
732 if (descriptor->kind() == CallDescriptor::kCallAddress) { 1009 if (descriptor->kind() == CallDescriptor::kCallAddress) {
733 if (frame()->GetRegisterSaveAreaSize() > 0) { 1010 if (frame()->GetRegisterSaveAreaSize() > 0) {
734 // Remove this frame's spill slots first. 1011 // Remove this frame's spill slots first.
735 int stack_slots = frame()->GetSpillSlotCount(); 1012 int stack_slots = frame()->GetSpillSlotCount();
736 if (stack_slots > 0) { 1013 if (stack_slots > 0) {
737 __ Addu(sp, sp, Operand(stack_slots * kPointerSize)); 1014 __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
738 } 1015 }
739 // Restore registers. 1016 // Restore registers.
740 const RegList saves = descriptor->CalleeSavedRegisters(); 1017 const RegList saves = descriptor->CalleeSavedRegisters();
741 if (saves != 0) { 1018 if (saves != 0) {
742 __ MultiPop(saves); 1019 __ MultiPop(saves);
743 } 1020 }
744 } 1021 }
745 __ mov(sp, fp); 1022 __ mov(sp, fp);
746 __ Pop(ra, fp); 1023 __ Pop(ra, fp);
747 __ Ret(); 1024 __ Ret();
(...skipping 12 matching lines...) Expand all
760 InstructionOperand* destination) { 1037 InstructionOperand* destination) {
761 MipsOperandConverter g(this, NULL); 1038 MipsOperandConverter g(this, NULL);
762 // Dispatch on the source and destination operand kinds. Not all 1039 // Dispatch on the source and destination operand kinds. Not all
763 // combinations are possible. 1040 // combinations are possible.
764 if (source->IsRegister()) { 1041 if (source->IsRegister()) {
765 DCHECK(destination->IsRegister() || destination->IsStackSlot()); 1042 DCHECK(destination->IsRegister() || destination->IsStackSlot());
766 Register src = g.ToRegister(source); 1043 Register src = g.ToRegister(source);
767 if (destination->IsRegister()) { 1044 if (destination->IsRegister()) {
768 __ mov(g.ToRegister(destination), src); 1045 __ mov(g.ToRegister(destination), src);
769 } else { 1046 } else {
770 __ sw(src, g.ToMemOperand(destination)); 1047 __ sd(src, g.ToMemOperand(destination));
771 } 1048 }
772 } else if (source->IsStackSlot()) { 1049 } else if (source->IsStackSlot()) {
773 DCHECK(destination->IsRegister() || destination->IsStackSlot()); 1050 DCHECK(destination->IsRegister() || destination->IsStackSlot());
774 MemOperand src = g.ToMemOperand(source); 1051 MemOperand src = g.ToMemOperand(source);
775 if (destination->IsRegister()) { 1052 if (destination->IsRegister()) {
776 __ lw(g.ToRegister(destination), src); 1053 __ ld(g.ToRegister(destination), src);
777 } else { 1054 } else {
778 Register temp = kScratchReg; 1055 Register temp = kScratchReg;
779 __ lw(temp, src); 1056 __ ld(temp, src);
780 __ sw(temp, g.ToMemOperand(destination)); 1057 __ sd(temp, g.ToMemOperand(destination));
781 } 1058 }
782 } else if (source->IsConstant()) { 1059 } else if (source->IsConstant()) {
783 Constant src = g.ToConstant(source); 1060 Constant src = g.ToConstant(source);
784 if (destination->IsRegister() || destination->IsStackSlot()) { 1061 if (destination->IsRegister() || destination->IsStackSlot()) {
785 Register dst = 1062 Register dst =
786 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg; 1063 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
787 switch (src.type()) { 1064 switch (src.type()) {
788 case Constant::kInt32: 1065 case Constant::kInt32:
789 __ li(dst, Operand(src.ToInt32())); 1066 __ li(dst, Operand(src.ToInt32()));
790 break; 1067 break;
791 case Constant::kFloat32: 1068 case Constant::kFloat32:
792 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED)); 1069 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
793 break; 1070 break;
794 case Constant::kInt64: 1071 case Constant::kInt64:
795 UNREACHABLE(); 1072 __ li(dst, Operand(src.ToInt64()));
796 break; 1073 break;
797 case Constant::kFloat64: 1074 case Constant::kFloat64:
798 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED)); 1075 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
799 break; 1076 break;
800 case Constant::kExternalReference: 1077 case Constant::kExternalReference:
801 __ li(dst, Operand(src.ToExternalReference())); 1078 __ li(dst, Operand(src.ToExternalReference()));
802 break; 1079 break;
803 case Constant::kHeapObject: 1080 case Constant::kHeapObject:
804 __ li(dst, src.ToHeapObject()); 1081 __ li(dst, src.ToHeapObject());
805 break; 1082 break;
806 } 1083 }
807 if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination)); 1084 if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
808 } else if (src.type() == Constant::kFloat32) { 1085 } else if (src.type() == Constant::kFloat32) {
809 FPURegister dst = destination->IsDoubleRegister() 1086 FPURegister dst = destination->IsDoubleRegister()
810 ? g.ToDoubleRegister(destination) 1087 ? g.ToDoubleRegister(destination)
811 : kScratchDoubleReg.low(); 1088 : kScratchDoubleReg.low();
812 // TODO(turbofan): Can we do better here? 1089 // TODO(turbofan): Can we do better here?
813 __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32()))); 1090 __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
814 __ mtc1(at, dst); 1091 __ mtc1(at, dst);
815 if (destination->IsDoubleStackSlot()) { 1092 if (destination->IsDoubleStackSlot()) {
816 __ swc1(dst, g.ToMemOperand(destination)); 1093 __ swc1(dst, g.ToMemOperand(destination));
817 } 1094 }
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
861 Register src = g.ToRegister(source); 1138 Register src = g.ToRegister(source);
862 if (destination->IsRegister()) { 1139 if (destination->IsRegister()) {
863 Register dst = g.ToRegister(destination); 1140 Register dst = g.ToRegister(destination);
864 __ Move(temp, src); 1141 __ Move(temp, src);
865 __ Move(src, dst); 1142 __ Move(src, dst);
866 __ Move(dst, temp); 1143 __ Move(dst, temp);
867 } else { 1144 } else {
868 DCHECK(destination->IsStackSlot()); 1145 DCHECK(destination->IsStackSlot());
869 MemOperand dst = g.ToMemOperand(destination); 1146 MemOperand dst = g.ToMemOperand(destination);
870 __ mov(temp, src); 1147 __ mov(temp, src);
871 __ lw(src, dst); 1148 __ ld(src, dst);
872 __ sw(temp, dst); 1149 __ sd(temp, dst);
873 } 1150 }
874 } else if (source->IsStackSlot()) { 1151 } else if (source->IsStackSlot()) {
875 DCHECK(destination->IsStackSlot()); 1152 DCHECK(destination->IsStackSlot());
876 Register temp_0 = kScratchReg; 1153 Register temp_0 = kScratchReg;
877 Register temp_1 = kCompareReg; 1154 Register temp_1 = kScratchReg2;
878 MemOperand src = g.ToMemOperand(source); 1155 MemOperand src = g.ToMemOperand(source);
879 MemOperand dst = g.ToMemOperand(destination); 1156 MemOperand dst = g.ToMemOperand(destination);
880 __ lw(temp_0, src); 1157 __ ld(temp_0, src);
881 __ lw(temp_1, dst); 1158 __ ld(temp_1, dst);
882 __ sw(temp_0, dst); 1159 __ sd(temp_0, dst);
883 __ sw(temp_1, src); 1160 __ sd(temp_1, src);
884 } else if (source->IsDoubleRegister()) { 1161 } else if (source->IsDoubleRegister()) {
885 FPURegister temp = kScratchDoubleReg; 1162 FPURegister temp = kScratchDoubleReg;
886 FPURegister src = g.ToDoubleRegister(source); 1163 FPURegister src = g.ToDoubleRegister(source);
887 if (destination->IsDoubleRegister()) { 1164 if (destination->IsDoubleRegister()) {
888 FPURegister dst = g.ToDoubleRegister(destination); 1165 FPURegister dst = g.ToDoubleRegister(destination);
889 __ Move(temp, src); 1166 __ Move(temp, src);
890 __ Move(src, dst); 1167 __ Move(src, dst);
891 __ Move(dst, temp); 1168 __ Move(dst, temp);
892 } else { 1169 } else {
893 DCHECK(destination->IsDoubleStackSlot()); 1170 DCHECK(destination->IsDoubleStackSlot());
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
943 } 1220 }
944 } 1221 }
945 MarkLazyDeoptSite(); 1222 MarkLazyDeoptSite();
946 } 1223 }
947 1224
948 #undef __ 1225 #undef __
949 1226
950 } // namespace compiler 1227 } // namespace compiler
951 } // namespace internal 1228 } // namespace internal
952 } // namespace v8 1229 } // namespace v8
OLDNEW
« no previous file with comments | « src/compiler/mips64/OWNERS ('k') | src/compiler/mips64/instruction-codes-mips64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698