OLD | NEW |
| (Empty) |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | |
2 // for details. All rights reserved. Use of this source code is governed by a | |
3 // BSD-style license that can be found in the LICENSE file. | |
4 | |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. | |
6 #if defined(TARGET_ARCH_MIPS) | |
7 | |
8 #include "vm/intermediate_language.h" | |
9 | |
10 #include "vm/compiler.h" | |
11 #include "vm/dart_entry.h" | |
12 #include "vm/flow_graph.h" | |
13 #include "vm/flow_graph_compiler.h" | |
14 #include "vm/flow_graph_range_analysis.h" | |
15 #include "vm/instructions.h" | |
16 #include "vm/locations.h" | |
17 #include "vm/object_store.h" | |
18 #include "vm/parser.h" | |
19 #include "vm/simulator.h" | |
20 #include "vm/stack_frame.h" | |
21 #include "vm/stub_code.h" | |
22 #include "vm/symbols.h" | |
23 | |
24 #define __ compiler->assembler()-> | |
25 #define Z (compiler->zone()) | |
26 | |
27 namespace dart { | |
28 | |
29 // Generic summary for call instructions that have all arguments pushed | |
30 // on the stack and return the result in a fixed register V0. | |
31 LocationSummary* Instruction::MakeCallSummary(Zone* zone) { | |
32 LocationSummary* result = | |
33 new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); | |
34 result->set_out(0, Location::RegisterLocation(V0)); | |
35 return result; | |
36 } | |
37 | |
38 | |
39 LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone, | |
40 bool opt) const { | |
41 const intptr_t kNumInputs = 1; | |
42 const intptr_t kNumTemps = 0; | |
43 LocationSummary* locs = new (zone) | |
44 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
45 locs->set_in(0, Location::AnyOrConstant(value())); | |
46 return locs; | |
47 } | |
48 | |
49 | |
50 void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
51 // In SSA mode, we need an explicit push. Nothing to do in non-SSA mode | |
52 // where PushArgument is handled by BindInstr::EmitNativeCode. | |
53 __ Comment("PushArgumentInstr"); | |
54 if (compiler->is_optimizing()) { | |
55 Location value = locs()->in(0); | |
56 if (value.IsRegister()) { | |
57 __ Push(value.reg()); | |
58 } else if (value.IsConstant()) { | |
59 __ PushObject(value.constant()); | |
60 } else { | |
61 ASSERT(value.IsStackSlot()); | |
62 const intptr_t value_offset = value.ToStackSlotOffset(); | |
63 __ LoadFromOffset(TMP, FP, value_offset); | |
64 __ Push(TMP); | |
65 } | |
66 } | |
67 } | |
68 | |
69 | |
70 LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
71 const intptr_t kNumInputs = 1; | |
72 const intptr_t kNumTemps = 0; | |
73 LocationSummary* locs = new (zone) | |
74 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
75 locs->set_in(0, Location::RegisterLocation(V0)); | |
76 return locs; | |
77 } | |
78 | |
79 | |
80 // Attempt optimized compilation at return instruction instead of at the entry. | |
81 // The entry needs to be patchable, no inlined objects are allowed in the area | |
82 // that will be overwritten by the patch instructions: a branch macro sequence. | |
83 void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
84 __ Comment("ReturnInstr"); | |
85 Register result = locs()->in(0).reg(); | |
86 ASSERT(result == V0); | |
87 | |
88 if (compiler->intrinsic_mode()) { | |
89 // Intrinsics don't have a frame. | |
90 __ Ret(); | |
91 return; | |
92 } | |
93 | |
94 #if defined(DEBUG) | |
95 Label stack_ok; | |
96 __ Comment("Stack Check"); | |
97 const intptr_t fp_sp_dist = | |
98 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; | |
99 ASSERT(fp_sp_dist <= 0); | |
100 __ subu(CMPRES1, SP, FP); | |
101 | |
102 __ BranchEqual(CMPRES1, Immediate(fp_sp_dist), &stack_ok); | |
103 __ break_(0); | |
104 | |
105 __ Bind(&stack_ok); | |
106 #endif | |
107 __ LeaveDartFrameAndReturn(); | |
108 } | |
109 | |
110 | |
111 static Condition NegateCondition(Condition condition) { | |
112 switch (condition.rel_op()) { | |
113 case AL: | |
114 condition.set_rel_op(NV); | |
115 break; | |
116 case NV: | |
117 condition.set_rel_op(AL); | |
118 break; | |
119 case EQ: | |
120 condition.set_rel_op(NE); | |
121 break; | |
122 case NE: | |
123 condition.set_rel_op(EQ); | |
124 break; | |
125 case LT: | |
126 condition.set_rel_op(GE); | |
127 break; | |
128 case LE: | |
129 condition.set_rel_op(GT); | |
130 break; | |
131 case GT: | |
132 condition.set_rel_op(LE); | |
133 break; | |
134 case GE: | |
135 condition.set_rel_op(LT); | |
136 break; | |
137 case ULT: | |
138 condition.set_rel_op(UGE); | |
139 break; | |
140 case ULE: | |
141 condition.set_rel_op(UGT); | |
142 break; | |
143 case UGT: | |
144 condition.set_rel_op(ULE); | |
145 break; | |
146 case UGE: | |
147 condition.set_rel_op(ULT); | |
148 break; | |
149 default: | |
150 UNREACHABLE(); | |
151 } | |
152 return condition; | |
153 } | |
154 | |
155 | |
156 LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone, | |
157 bool opt) const { | |
158 comparison()->InitializeLocationSummary(zone, opt); | |
159 return comparison()->locs(); | |
160 } | |
161 | |
162 | |
163 void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
164 const Register result = locs()->out(0).reg(); | |
165 | |
166 intptr_t true_value = if_true_; | |
167 intptr_t false_value = if_false_; | |
168 bool swapped = false; | |
169 if (true_value == 0) { | |
170 // Swap values so that false_value is zero. | |
171 intptr_t temp = true_value; | |
172 true_value = false_value; | |
173 false_value = temp; | |
174 swapped = true; | |
175 } | |
176 | |
177 // Initialize result with the true value. | |
178 __ LoadImmediate(result, Smi::RawValue(true_value)); | |
179 | |
180 // Emit comparison code. This must not overwrite the result register. | |
181 BranchLabels labels = {NULL, NULL, NULL}; // Emit branch-free code. | |
182 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels); | |
183 if (swapped) { | |
184 true_condition = NegateCondition(true_condition); | |
185 } | |
186 | |
187 // Evaluate condition and provide result in CMPRES1. | |
188 Register left = true_condition.left(); | |
189 Register right = true_condition.right(); | |
190 bool zero_is_false = true; // Zero in CMPRES1 indicates a false condition. | |
191 switch (true_condition.rel_op()) { | |
192 case AL: | |
193 return; // Result holds true_value. | |
194 case NV: | |
195 __ LoadImmediate(result, false_value); | |
196 return; | |
197 case EQ: | |
198 zero_is_false = false; | |
199 // fall through. | |
200 case NE: { | |
201 if (left == IMM) { | |
202 __ XorImmediate(CMPRES1, right, true_condition.imm()); | |
203 } else if (right == IMM) { | |
204 __ XorImmediate(CMPRES1, left, true_condition.imm()); | |
205 } else { | |
206 __ xor_(CMPRES1, left, right); | |
207 } | |
208 break; | |
209 } | |
210 case GE: | |
211 zero_is_false = false; | |
212 // fall through. | |
213 case LT: { | |
214 if (left == IMM) { | |
215 __ slti(CMPRES1, right, Immediate(true_condition.imm() + 1)); | |
216 zero_is_false = !zero_is_false; | |
217 } else if (right == IMM) { | |
218 __ slti(CMPRES1, left, Immediate(true_condition.imm())); | |
219 } else { | |
220 __ slt(CMPRES1, left, right); | |
221 } | |
222 break; | |
223 } | |
224 case LE: | |
225 zero_is_false = false; | |
226 // fall through. | |
227 case GT: { | |
228 if (left == IMM) { | |
229 __ slti(CMPRES1, right, Immediate(true_condition.imm())); | |
230 } else if (right == IMM) { | |
231 __ slti(CMPRES1, left, Immediate(true_condition.imm() + 1)); | |
232 zero_is_false = !zero_is_false; | |
233 } else { | |
234 __ slt(CMPRES1, right, left); | |
235 } | |
236 break; | |
237 } | |
238 case UGE: | |
239 zero_is_false = false; | |
240 // fall through. | |
241 case ULT: { | |
242 ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. | |
243 __ sltu(CMPRES1, left, right); | |
244 break; | |
245 } | |
246 case ULE: | |
247 zero_is_false = false; | |
248 // fall through. | |
249 case UGT: { | |
250 ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. | |
251 __ sltu(CMPRES1, right, left); | |
252 break; | |
253 } | |
254 default: | |
255 UNREACHABLE(); | |
256 } | |
257 | |
258 // CMPRES1 is the evaluated condition, zero or non-zero, as specified by the | |
259 // flag zero_is_false. | |
260 Register false_value_reg; | |
261 if (false_value == 0) { | |
262 false_value_reg = ZR; | |
263 } else { | |
264 __ LoadImmediate(CMPRES2, Smi::RawValue(false_value)); | |
265 false_value_reg = CMPRES2; | |
266 } | |
267 if (zero_is_false) { | |
268 __ movz(result, false_value_reg, CMPRES1); | |
269 } else { | |
270 __ movn(result, false_value_reg, CMPRES1); | |
271 } | |
272 } | |
273 | |
274 | |
275 LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone, | |
276 bool opt) const { | |
277 const intptr_t kNumInputs = 1; | |
278 const intptr_t kNumTemps = 0; | |
279 LocationSummary* summary = new (zone) | |
280 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
281 summary->set_in(0, Location::RegisterLocation(T0)); // Function. | |
282 summary->set_out(0, Location::RegisterLocation(V0)); | |
283 return summary; | |
284 } | |
285 | |
286 | |
287 void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
288 // Load arguments descriptor in S4. | |
289 const intptr_t argument_count = ArgumentCount(); // Includes type args. | |
290 const Array& arguments_descriptor = | |
291 Array::ZoneHandle(Z, GetArgumentsDescriptor()); | |
292 __ LoadObject(S4, arguments_descriptor); | |
293 | |
294 // Load closure function code in T2. | |
295 // S4: arguments descriptor array. | |
296 // S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value). | |
297 ASSERT(locs()->in(0).reg() == T0); | |
298 __ LoadImmediate(S5, 0); | |
299 __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); | |
300 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); | |
301 __ jalr(T2); | |
302 compiler->RecordSafepoint(locs()); | |
303 compiler->EmitCatchEntryState(); | |
304 // Marks either the continuation point in unoptimized code or the | |
305 // deoptimization point in optimized code, after call. | |
306 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id()); | |
307 if (compiler->is_optimizing()) { | |
308 compiler->AddDeoptIndexAtCall(deopt_id_after); | |
309 } | |
310 // Add deoptimization continuation point after the call and before the | |
311 // arguments are removed. | |
312 // In optimized code this descriptor is needed for exception handling. | |
313 compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, | |
314 token_pos()); | |
315 __ Drop(argument_count); | |
316 } | |
317 | |
318 | |
319 LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone, | |
320 bool opt) const { | |
321 return LocationSummary::Make(zone, 0, Location::RequiresRegister(), | |
322 LocationSummary::kNoCall); | |
323 } | |
324 | |
325 | |
326 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
327 __ Comment("LoadLocalInstr"); | |
328 Register result = locs()->out(0).reg(); | |
329 __ LoadFromOffset(result, FP, local().index() * kWordSize); | |
330 } | |
331 | |
332 | |
333 LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone, | |
334 bool opt) const { | |
335 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(), | |
336 LocationSummary::kNoCall); | |
337 } | |
338 | |
339 | |
340 void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
341 __ Comment("StoreLocalInstr"); | |
342 Register value = locs()->in(0).reg(); | |
343 Register result = locs()->out(0).reg(); | |
344 ASSERT(result == value); // Assert that register assignment is correct. | |
345 __ StoreToOffset(value, FP, local().index() * kWordSize); | |
346 } | |
347 | |
348 | |
349 LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone, | |
350 bool opt) const { | |
351 return LocationSummary::Make(zone, 0, Location::RequiresRegister(), | |
352 LocationSummary::kNoCall); | |
353 } | |
354 | |
355 | |
356 void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
357 // The register allocator drops constant definitions that have no uses. | |
358 if (!locs()->out(0).IsInvalid()) { | |
359 __ Comment("ConstantInstr"); | |
360 Register result = locs()->out(0).reg(); | |
361 __ LoadObject(result, value()); | |
362 } | |
363 } | |
364 | |
365 | |
366 LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone, | |
367 bool opt) const { | |
368 const intptr_t kNumInputs = 0; | |
369 const intptr_t kNumTemps = (representation_ == kUnboxedInt32) ? 0 : 1; | |
370 LocationSummary* locs = new (zone) | |
371 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
372 if (representation_ == kUnboxedInt32) { | |
373 locs->set_out(0, Location::RequiresRegister()); | |
374 } else { | |
375 ASSERT(representation_ == kUnboxedDouble); | |
376 locs->set_out(0, Location::RequiresFpuRegister()); | |
377 } | |
378 if (kNumTemps > 0) { | |
379 locs->set_temp(0, Location::RequiresRegister()); | |
380 } | |
381 return locs; | |
382 } | |
383 | |
384 | |
385 void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
386 // The register allocator drops constant definitions that have no uses. | |
387 if (!locs()->out(0).IsInvalid()) { | |
388 switch (representation_) { | |
389 case kUnboxedDouble: { | |
390 ASSERT(value().IsDouble()); | |
391 const Register const_value = locs()->temp(0).reg(); | |
392 const DRegister result = locs()->out(0).fpu_reg(); | |
393 __ LoadObject(const_value, value()); | |
394 __ LoadDFromOffset(result, const_value, | |
395 Double::value_offset() - kHeapObjectTag); | |
396 break; | |
397 } | |
398 | |
399 case kUnboxedInt32: | |
400 __ LoadImmediate(locs()->out(0).reg(), Smi::Cast(value()).Value()); | |
401 break; | |
402 | |
403 default: | |
404 UNREACHABLE(); | |
405 } | |
406 } | |
407 } | |
408 | |
409 | |
410 LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone, | |
411 bool opt) const { | |
412 const intptr_t kNumInputs = 3; | |
413 const intptr_t kNumTemps = 0; | |
414 LocationSummary* summary = new (zone) | |
415 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
416 summary->set_in(0, Location::RegisterLocation(A0)); // Value. | |
417 summary->set_in(1, Location::RegisterLocation(A1)); // Instant. type args. | |
418 summary->set_in(2, Location::RegisterLocation(A2)); // Function type args. | |
419 summary->set_out(0, Location::RegisterLocation(A0)); | |
420 return summary; | |
421 } | |
422 | |
423 | |
424 LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone, | |
425 bool opt) const { | |
426 const intptr_t kNumInputs = 1; | |
427 const intptr_t kNumTemps = 0; | |
428 LocationSummary* locs = new (zone) | |
429 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
430 locs->set_in(0, Location::RegisterLocation(A0)); | |
431 locs->set_out(0, Location::RegisterLocation(A0)); | |
432 return locs; | |
433 } | |
434 | |
435 | |
436 static void EmitAssertBoolean(Register reg, | |
437 TokenPosition token_pos, | |
438 intptr_t deopt_id, | |
439 LocationSummary* locs, | |
440 FlowGraphCompiler* compiler) { | |
441 // Check that the type of the value is allowed in conditional context. | |
442 // Call the runtime if the object is not bool::true or bool::false. | |
443 ASSERT(locs->always_calls()); | |
444 Label done; | |
445 | |
446 if (Isolate::Current()->type_checks()) { | |
447 __ BranchEqual(reg, Bool::True(), &done); | |
448 __ BranchEqual(reg, Bool::False(), &done); | |
449 } else { | |
450 ASSERT(Isolate::Current()->asserts()); | |
451 __ BranchNotEqual(reg, Object::null_instance(), &done); | |
452 } | |
453 | |
454 __ Push(reg); // Push the source object. | |
455 compiler->GenerateRuntimeCall(token_pos, deopt_id, | |
456 kNonBoolTypeErrorRuntimeEntry, 1, locs); | |
457 // We should never return here. | |
458 __ break_(0); | |
459 __ Bind(&done); | |
460 } | |
461 | |
462 | |
463 void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
464 Register obj = locs()->in(0).reg(); | |
465 Register result = locs()->out(0).reg(); | |
466 | |
467 __ Comment("AssertBooleanInstr"); | |
468 EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler); | |
469 ASSERT(obj == result); | |
470 } | |
471 | |
472 | |
473 LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone, | |
474 bool opt) const { | |
475 const intptr_t kNumInputs = 2; | |
476 if (operation_cid() == kMintCid) { | |
477 const intptr_t kNumTemps = 0; | |
478 LocationSummary* locs = new (zone) | |
479 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
480 locs->set_in(0, Location::Pair(Location::RequiresRegister(), | |
481 Location::RequiresRegister())); | |
482 locs->set_in(1, Location::Pair(Location::RequiresRegister(), | |
483 Location::RequiresRegister())); | |
484 locs->set_out(0, Location::RequiresRegister()); | |
485 return locs; | |
486 } | |
487 if (operation_cid() == kDoubleCid) { | |
488 const intptr_t kNumTemps = 0; | |
489 LocationSummary* locs = new (zone) | |
490 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
491 locs->set_in(0, Location::RequiresFpuRegister()); | |
492 locs->set_in(1, Location::RequiresFpuRegister()); | |
493 locs->set_out(0, Location::RequiresRegister()); | |
494 return locs; | |
495 } | |
496 if (operation_cid() == kSmiCid) { | |
497 const intptr_t kNumTemps = 0; | |
498 LocationSummary* locs = new (zone) | |
499 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
500 locs->set_in(0, Location::RegisterOrConstant(left())); | |
501 // Only one input can be a constant operand. The case of two constant | |
502 // operands should be handled by constant propagation. | |
503 locs->set_in(1, locs->in(0).IsConstant() | |
504 ? Location::RequiresRegister() | |
505 : Location::RegisterOrConstant(right())); | |
506 locs->set_out(0, Location::RequiresRegister()); | |
507 return locs; | |
508 } | |
509 UNREACHABLE(); | |
510 return NULL; | |
511 } | |
512 | |
513 | |
514 static void LoadValueCid(FlowGraphCompiler* compiler, | |
515 Register value_cid_reg, | |
516 Register value_reg, | |
517 Label* value_is_smi = NULL) { | |
518 __ Comment("LoadValueCid"); | |
519 Label done; | |
520 if (value_is_smi == NULL) { | |
521 __ LoadImmediate(value_cid_reg, kSmiCid); | |
522 } | |
523 __ andi(CMPRES1, value_reg, Immediate(kSmiTagMask)); | |
524 if (value_is_smi == NULL) { | |
525 __ beq(CMPRES1, ZR, &done); | |
526 } else { | |
527 __ beq(CMPRES1, ZR, value_is_smi); | |
528 } | |
529 __ LoadClassId(value_cid_reg, value_reg); | |
530 __ Bind(&done); | |
531 } | |
532 | |
533 | |
534 static RelationOperator TokenKindToIntRelOp(Token::Kind kind) { | |
535 switch (kind) { | |
536 case Token::kEQ: | |
537 return EQ; | |
538 case Token::kNE: | |
539 return NE; | |
540 case Token::kLT: | |
541 return LT; | |
542 case Token::kGT: | |
543 return GT; | |
544 case Token::kLTE: | |
545 return LE; | |
546 case Token::kGTE: | |
547 return GE; | |
548 default: | |
549 UNREACHABLE(); | |
550 return NV; | |
551 } | |
552 } | |
553 | |
554 | |
555 static RelationOperator TokenKindToUintRelOp(Token::Kind kind) { | |
556 switch (kind) { | |
557 case Token::kEQ: | |
558 return EQ; | |
559 case Token::kNE: | |
560 return NE; | |
561 case Token::kLT: | |
562 return ULT; | |
563 case Token::kGT: | |
564 return UGT; | |
565 case Token::kLTE: | |
566 return ULE; | |
567 case Token::kGTE: | |
568 return UGE; | |
569 default: | |
570 UNREACHABLE(); | |
571 return NV; | |
572 } | |
573 } | |
574 | |
575 | |
576 // The comparison code to emit is specified by true_condition. | |
577 static void EmitBranchOnCondition(FlowGraphCompiler* compiler, | |
578 Condition true_condition, | |
579 BranchLabels labels) { | |
580 __ Comment("ControlInstruction::EmitBranchOnCondition"); | |
581 if (labels.fall_through == labels.false_label) { | |
582 // If the next block is the false successor, fall through to it. | |
583 __ BranchOnCondition(true_condition, labels.true_label); | |
584 } else { | |
585 // If the next block is not the false successor, branch to it. | |
586 Condition false_condition = NegateCondition(true_condition); | |
587 __ BranchOnCondition(false_condition, labels.false_label); | |
588 // Fall through or jump to the true successor. | |
589 if (labels.fall_through != labels.true_label) { | |
590 __ b(labels.true_label); | |
591 } | |
592 } | |
593 } | |
594 | |
595 | |
596 static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler, | |
597 const LocationSummary& locs, | |
598 Token::Kind kind) { | |
599 __ Comment("EmitSmiComparisonOp"); | |
600 const Location left = locs.in(0); | |
601 const Location right = locs.in(1); | |
602 ASSERT(!left.IsConstant() || !right.IsConstant()); | |
603 ASSERT(left.IsRegister() || left.IsConstant()); | |
604 ASSERT(right.IsRegister() || right.IsConstant()); | |
605 | |
606 int16_t imm = 0; | |
607 const Register left_reg = | |
608 left.IsRegister() ? left.reg() : __ LoadConditionOperand( | |
609 CMPRES1, left.constant(), &imm); | |
610 const Register right_reg = | |
611 right.IsRegister() ? right.reg() : __ LoadConditionOperand( | |
612 CMPRES2, right.constant(), &imm); | |
613 return Condition(left_reg, right_reg, TokenKindToIntRelOp(kind), imm); | |
614 } | |
615 | |
616 | |
617 static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler, | |
618 const LocationSummary& locs, | |
619 Token::Kind kind, | |
620 BranchLabels labels) { | |
621 __ Comment("EmitUnboxedMintEqualityOp"); | |
622 ASSERT(Token::IsEqualityOperator(kind)); | |
623 PairLocation* left_pair = locs.in(0).AsPairLocation(); | |
624 Register left_lo = left_pair->At(0).reg(); | |
625 Register left_hi = left_pair->At(1).reg(); | |
626 PairLocation* right_pair = locs.in(1).AsPairLocation(); | |
627 Register right_lo = right_pair->At(0).reg(); | |
628 Register right_hi = right_pair->At(1).reg(); | |
629 | |
630 if (labels.false_label == NULL) { | |
631 // Generate branch-free code. | |
632 __ xor_(CMPRES1, left_lo, right_lo); | |
633 __ xor_(AT, left_hi, right_hi); | |
634 __ or_(CMPRES1, CMPRES1, AT); | |
635 return Condition(CMPRES1, ZR, TokenKindToUintRelOp(kind)); | |
636 } else { | |
637 if (kind == Token::kEQ) { | |
638 __ bne(left_hi, right_hi, labels.false_label); | |
639 } else { | |
640 ASSERT(kind == Token::kNE); | |
641 __ bne(left_hi, right_hi, labels.true_label); | |
642 } | |
643 return Condition(left_lo, right_lo, TokenKindToUintRelOp(kind)); | |
644 } | |
645 } | |
646 | |
647 | |
648 static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler, | |
649 const LocationSummary& locs, | |
650 Token::Kind kind, | |
651 BranchLabels labels) { | |
652 __ Comment("EmitUnboxedMintComparisonOp"); | |
653 PairLocation* left_pair = locs.in(0).AsPairLocation(); | |
654 Register left_lo = left_pair->At(0).reg(); | |
655 Register left_hi = left_pair->At(1).reg(); | |
656 PairLocation* right_pair = locs.in(1).AsPairLocation(); | |
657 Register right_lo = right_pair->At(0).reg(); | |
658 Register right_hi = right_pair->At(1).reg(); | |
659 | |
660 if (labels.false_label == NULL) { | |
661 // Generate branch-free code (except for skipping the lower words compare). | |
662 // Result in CMPRES1, CMPRES2, so that CMPRES1 op CMPRES2 === left op right. | |
663 Label done; | |
664 // Compare upper halves first. | |
665 __ slt(CMPRES1, right_hi, left_hi); | |
666 __ slt(CMPRES2, left_hi, right_hi); | |
667 // If higher words aren't equal, skip comparing lower words. | |
668 __ bne(CMPRES1, CMPRES2, &done); | |
669 | |
670 __ sltu(CMPRES1, right_lo, left_lo); | |
671 __ sltu(CMPRES2, left_lo, right_lo); | |
672 __ Bind(&done); | |
673 return Condition(CMPRES1, CMPRES2, TokenKindToUintRelOp(kind)); | |
674 } else { | |
675 switch (kind) { | |
676 case Token::kLT: | |
677 case Token::kLTE: { | |
678 __ slt(AT, left_hi, right_hi); | |
679 __ bne(AT, ZR, labels.true_label); | |
680 __ delay_slot()->slt(AT, right_hi, left_hi); | |
681 __ bne(AT, ZR, labels.false_label); | |
682 break; | |
683 } | |
684 case Token::kGT: | |
685 case Token::kGTE: { | |
686 __ slt(AT, left_hi, right_hi); | |
687 __ bne(AT, ZR, labels.false_label); | |
688 __ delay_slot()->slt(AT, right_hi, left_hi); | |
689 __ bne(AT, ZR, labels.true_label); | |
690 break; | |
691 } | |
692 default: | |
693 UNREACHABLE(); | |
694 } | |
695 return Condition(left_lo, right_lo, TokenKindToUintRelOp(kind)); | |
696 } | |
697 } | |
698 | |
699 | |
700 static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler, | |
701 const LocationSummary& locs, | |
702 Token::Kind kind, | |
703 BranchLabels labels) { | |
704 DRegister left = locs.in(0).fpu_reg(); | |
705 DRegister right = locs.in(1).fpu_reg(); | |
706 | |
707 __ Comment("DoubleComparisonOp(left=%d, right=%d)", left, right); | |
708 | |
709 __ cund(left, right); | |
710 Label* nan_label = | |
711 (kind == Token::kNE) ? labels.true_label : labels.false_label; | |
712 __ bc1t(nan_label); | |
713 | |
714 switch (kind) { | |
715 case Token::kEQ: | |
716 __ ceqd(left, right); | |
717 break; | |
718 case Token::kNE: | |
719 __ ceqd(left, right); | |
720 break; | |
721 case Token::kLT: | |
722 __ coltd(left, right); | |
723 break; | |
724 case Token::kLTE: | |
725 __ coled(left, right); | |
726 break; | |
727 case Token::kGT: | |
728 __ coltd(right, left); | |
729 break; | |
730 case Token::kGTE: | |
731 __ coled(right, left); | |
732 break; | |
733 default: { | |
734 // We should only be passing the above conditions to this function. | |
735 UNREACHABLE(); | |
736 break; | |
737 } | |
738 } | |
739 | |
740 if (labels.false_label == NULL) { | |
741 // Generate branch-free code and return result in condition. | |
742 __ LoadImmediate(CMPRES1, 1); | |
743 if (kind == Token::kNE) { | |
744 __ movf(CMPRES1, ZR); | |
745 } else { | |
746 __ movt(CMPRES1, ZR); | |
747 } | |
748 return Condition(CMPRES1, ZR, EQ); | |
749 } else { | |
750 if (labels.fall_through == labels.false_label) { | |
751 if (kind == Token::kNE) { | |
752 __ bc1f(labels.true_label); | |
753 } else { | |
754 __ bc1t(labels.true_label); | |
755 } | |
756 // Since we already branched on true, return the never true condition. | |
757 return Condition(CMPRES1, CMPRES2, NV); | |
758 } else { | |
759 if (kind == Token::kNE) { | |
760 __ bc1t(labels.false_label); | |
761 } else { | |
762 __ bc1f(labels.false_label); | |
763 } | |
764 // Since we already branched on false, return the always true condition. | |
765 return Condition(CMPRES1, CMPRES2, AL); | |
766 } | |
767 } | |
768 } | |
769 | |
770 | |
771 Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
772 BranchLabels labels) { | |
773 if (operation_cid() == kSmiCid) { | |
774 return EmitSmiComparisonOp(compiler, *locs(), kind()); | |
775 } else if (operation_cid() == kMintCid) { | |
776 return EmitUnboxedMintEqualityOp(compiler, *locs(), kind(), labels); | |
777 } else { | |
778 ASSERT(operation_cid() == kDoubleCid); | |
779 return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels); | |
780 } | |
781 } | |
782 | |
783 | |
784 void EqualityCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
785 ASSERT((kind() == Token::kNE) || (kind() == Token::kEQ)); | |
786 __ Comment("EqualityCompareInstr"); | |
787 | |
788 Label is_true, is_false; | |
789 BranchLabels labels = {&is_true, &is_false, &is_false}; | |
790 Condition true_condition = EmitComparisonCode(compiler, labels); | |
791 EmitBranchOnCondition(compiler, true_condition, labels); | |
792 | |
793 Register result = locs()->out(0).reg(); | |
794 Label done; | |
795 __ Bind(&is_false); | |
796 __ LoadObject(result, Bool::False()); | |
797 __ b(&done); | |
798 __ Bind(&is_true); | |
799 __ LoadObject(result, Bool::True()); | |
800 __ Bind(&done); | |
801 } | |
802 | |
803 | |
804 void EqualityCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
805 BranchInstr* branch) { | |
806 __ Comment("EqualityCompareInstr::EmitBranchCode"); | |
807 ASSERT((kind() == Token::kNE) || (kind() == Token::kEQ)); | |
808 | |
809 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
810 Condition true_condition = EmitComparisonCode(compiler, labels); | |
811 EmitBranchOnCondition(compiler, true_condition, labels); | |
812 } | |
813 | |
814 | |
815 LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
816 const intptr_t kNumInputs = 2; | |
817 const intptr_t kNumTemps = 0; | |
818 LocationSummary* locs = new (zone) | |
819 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
820 locs->set_in(0, Location::RequiresRegister()); | |
821 // Only one input can be a constant operand. The case of two constant | |
822 // operands should be handled by constant propagation. | |
823 locs->set_in(1, Location::RegisterOrConstant(right())); | |
824 return locs; | |
825 } | |
826 | |
827 | |
828 Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
829 BranchLabels labels) { | |
830 Register left = locs()->in(0).reg(); | |
831 Location right = locs()->in(1); | |
832 if (right.IsConstant()) { | |
833 ASSERT(right.constant().IsSmi()); | |
834 const int32_t imm = reinterpret_cast<int32_t>(right.constant().raw()); | |
835 __ AndImmediate(CMPRES1, left, imm); | |
836 } else { | |
837 __ and_(CMPRES1, left, right.reg()); | |
838 } | |
839 return Condition(CMPRES1, ZR, (kind() == Token::kNE) ? NE : EQ); | |
840 } | |
841 | |
842 | |
843 void TestSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
844 // Never emitted outside of the BranchInstr. | |
845 UNREACHABLE(); | |
846 } | |
847 | |
848 | |
849 void TestSmiInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
850 BranchInstr* branch) { | |
851 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
852 Condition true_condition = EmitComparisonCode(compiler, labels); | |
853 EmitBranchOnCondition(compiler, true_condition, labels); | |
854 } | |
855 | |
856 | |
857 LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone, | |
858 bool opt) const { | |
859 const intptr_t kNumInputs = 1; | |
860 const intptr_t kNumTemps = 1; | |
861 LocationSummary* locs = new (zone) | |
862 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
863 locs->set_in(0, Location::RequiresRegister()); | |
864 locs->set_temp(0, Location::RequiresRegister()); | |
865 locs->set_out(0, Location::RequiresRegister()); | |
866 return locs; | |
867 } | |
868 | |
869 | |
870 Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
871 BranchLabels labels) { | |
872 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT)); | |
873 Register val_reg = locs()->in(0).reg(); | |
874 Register cid_reg = locs()->temp(0).reg(); | |
875 | |
876 Label* deopt = | |
877 CanDeoptimize() | |
878 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids, | |
879 licm_hoisted_ ? ICData::kHoisted : 0) | |
880 : NULL; | |
881 | |
882 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; | |
883 const ZoneGrowableArray<intptr_t>& data = cid_results(); | |
884 ASSERT(data[0] == kSmiCid); | |
885 bool result = data[1] == true_result; | |
886 __ andi(CMPRES1, val_reg, Immediate(kSmiTagMask)); | |
887 __ beq(CMPRES1, ZR, result ? labels.true_label : labels.false_label); | |
888 | |
889 __ LoadClassId(cid_reg, val_reg); | |
890 for (intptr_t i = 2; i < data.length(); i += 2) { | |
891 const intptr_t test_cid = data[i]; | |
892 ASSERT(test_cid != kSmiCid); | |
893 result = data[i + 1] == true_result; | |
894 __ BranchEqual(cid_reg, Immediate(test_cid), | |
895 result ? labels.true_label : labels.false_label); | |
896 } | |
897 // No match found, deoptimize or default action. | |
898 if (deopt == NULL) { | |
899 // If the cid is not in the list, jump to the opposite label from the cids | |
900 // that are in the list. These must be all the same (see asserts in the | |
901 // constructor). | |
902 Label* target = result ? labels.false_label : labels.true_label; | |
903 if (target != labels.fall_through) { | |
904 __ b(target); | |
905 } | |
906 } else { | |
907 __ b(deopt); | |
908 } | |
909 // Dummy result as the last instruction is a jump or fall through. | |
910 return Condition(CMPRES1, ZR, AL); | |
911 } | |
912 | |
913 | |
914 void TestCidsInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
915 BranchInstr* branch) { | |
916 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
917 EmitComparisonCode(compiler, labels); | |
918 } | |
919 | |
920 | |
921 void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
922 Register result_reg = locs()->out(0).reg(); | |
923 Label is_true, is_false, done; | |
924 BranchLabels labels = {&is_true, &is_false, &is_false}; | |
925 EmitComparisonCode(compiler, labels); | |
926 __ Bind(&is_false); | |
927 __ LoadObject(result_reg, Bool::False()); | |
928 __ b(&done); | |
929 __ Bind(&is_true); | |
930 __ LoadObject(result_reg, Bool::True()); | |
931 __ Bind(&done); | |
932 } | |
933 | |
934 | |
935 LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone, | |
936 bool opt) const { | |
937 const intptr_t kNumInputs = 2; | |
938 const intptr_t kNumTemps = 0; | |
939 if (operation_cid() == kMintCid) { | |
940 const intptr_t kNumTemps = 0; | |
941 LocationSummary* locs = new (zone) | |
942 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
943 locs->set_in(0, Location::Pair(Location::RequiresRegister(), | |
944 Location::RequiresRegister())); | |
945 locs->set_in(1, Location::Pair(Location::RequiresRegister(), | |
946 Location::RequiresRegister())); | |
947 locs->set_out(0, Location::RequiresRegister()); | |
948 return locs; | |
949 } | |
950 if (operation_cid() == kDoubleCid) { | |
951 LocationSummary* summary = new (zone) | |
952 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
953 summary->set_in(0, Location::RequiresFpuRegister()); | |
954 summary->set_in(1, Location::RequiresFpuRegister()); | |
955 summary->set_out(0, Location::RequiresRegister()); | |
956 return summary; | |
957 } | |
958 ASSERT(operation_cid() == kSmiCid); | |
959 LocationSummary* summary = new (zone) | |
960 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
961 summary->set_in(0, Location::RegisterOrConstant(left())); | |
962 // Only one input can be a constant operand. The case of two constant | |
963 // operands should be handled by constant propagation. | |
964 summary->set_in(1, summary->in(0).IsConstant() | |
965 ? Location::RequiresRegister() | |
966 : Location::RegisterOrConstant(right())); | |
967 summary->set_out(0, Location::RequiresRegister()); | |
968 return summary; | |
969 } | |
970 | |
971 | |
972 Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
973 BranchLabels labels) { | |
974 if (operation_cid() == kSmiCid) { | |
975 return EmitSmiComparisonOp(compiler, *locs(), kind()); | |
976 } else if (operation_cid() == kMintCid) { | |
977 return EmitUnboxedMintComparisonOp(compiler, *locs(), kind(), labels); | |
978 } else { | |
979 ASSERT(operation_cid() == kDoubleCid); | |
980 return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels); | |
981 } | |
982 } | |
983 | |
984 | |
985 void RelationalOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
986 __ Comment("RelationalOpInstr"); | |
987 | |
988 Label is_true, is_false; | |
989 BranchLabels labels = {&is_true, &is_false, &is_false}; | |
990 Condition true_condition = EmitComparisonCode(compiler, labels); | |
991 EmitBranchOnCondition(compiler, true_condition, labels); | |
992 | |
993 Register result = locs()->out(0).reg(); | |
994 Label done; | |
995 __ Bind(&is_false); | |
996 __ LoadObject(result, Bool::False()); | |
997 __ b(&done); | |
998 __ Bind(&is_true); | |
999 __ LoadObject(result, Bool::True()); | |
1000 __ Bind(&done); | |
1001 } | |
1002 | |
1003 | |
1004 void RelationalOpInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
1005 BranchInstr* branch) { | |
1006 __ Comment("RelationalOpInstr"); | |
1007 | |
1008 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
1009 Condition true_condition = EmitComparisonCode(compiler, labels); | |
1010 EmitBranchOnCondition(compiler, true_condition, labels); | |
1011 } | |
1012 | |
1013 | |
1014 LocationSummary* NativeCallInstr::MakeLocationSummary(Zone* zone, | |
1015 bool opt) const { | |
1016 return MakeCallSummary(zone); | |
1017 } | |
1018 | |
1019 | |
1020 void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1021 SetupNative(); | |
1022 __ Comment("NativeCallInstr"); | |
1023 Register result = locs()->out(0).reg(); | |
1024 | |
1025 // Push the result place holder initialized to NULL. | |
1026 __ PushObject(Object::null_object()); | |
1027 // Pass a pointer to the first argument in A2. | |
1028 if (!function().HasOptionalParameters()) { | |
1029 __ AddImmediate( | |
1030 A2, FP, (kParamEndSlotFromFp + function().NumParameters()) * kWordSize); | |
1031 } else { | |
1032 __ AddImmediate(A2, FP, kFirstLocalSlotFromFp * kWordSize); | |
1033 } | |
1034 // Compute the effective address. When running under the simulator, | |
1035 // this is a redirection address that forces the simulator to call | |
1036 // into the runtime system. | |
1037 uword entry; | |
1038 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function()); | |
1039 const StubEntry* stub_entry; | |
1040 if (link_lazily()) { | |
1041 stub_entry = StubCode::CallBootstrapNative_entry(); | |
1042 entry = NativeEntry::LinkNativeCallEntry(); | |
1043 } else { | |
1044 entry = reinterpret_cast<uword>(native_c_function()); | |
1045 if (is_bootstrap_native()) { | |
1046 stub_entry = StubCode::CallBootstrapNative_entry(); | |
1047 #if defined(USING_SIMULATOR) | |
1048 entry = Simulator::RedirectExternalReference( | |
1049 entry, Simulator::kBootstrapNativeCall, NativeEntry::kNumArguments); | |
1050 #endif | |
1051 } else if (is_auto_scope()) { | |
1052 // In the case of non bootstrap native methods the CallNativeCFunction | |
1053 // stub generates the redirection address when running under the simulator | |
1054 // and hence we do not change 'entry' here. | |
1055 stub_entry = StubCode::CallAutoScopeNative_entry(); | |
1056 } else { | |
1057 // In the case of non bootstrap native methods the CallNativeCFunction | |
1058 // stub generates the redirection address when running under the simulator | |
1059 // and hence we do not change 'entry' here. | |
1060 stub_entry = StubCode::CallNoScopeNative_entry(); | |
1061 } | |
1062 } | |
1063 __ LoadImmediate(A1, argc_tag); | |
1064 ExternalLabel label(entry); | |
1065 __ LoadNativeEntry(T5, &label, kNotPatchable); | |
1066 if (link_lazily()) { | |
1067 compiler->GeneratePatchableCall(token_pos(), *stub_entry, | |
1068 RawPcDescriptors::kOther, locs()); | |
1069 } else { | |
1070 compiler->GenerateCall(token_pos(), *stub_entry, RawPcDescriptors::kOther, | |
1071 locs()); | |
1072 } | |
1073 __ Pop(result); | |
1074 } | |
1075 | |
1076 | |
1077 LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary( | |
1078 Zone* zone, | |
1079 bool opt) const { | |
1080 const intptr_t kNumInputs = 1; | |
1081 // TODO(fschneider): Allow immediate operands for the char code. | |
1082 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), | |
1083 LocationSummary::kNoCall); | |
1084 } | |
1085 | |
1086 | |
1087 void OneByteStringFromCharCodeInstr::EmitNativeCode( | |
1088 FlowGraphCompiler* compiler) { | |
1089 ASSERT(compiler->is_optimizing()); | |
1090 Register char_code = locs()->in(0).reg(); | |
1091 Register result = locs()->out(0).reg(); | |
1092 | |
1093 __ lw(result, Address(THR, Thread::predefined_symbols_address_offset())); | |
1094 __ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize); | |
1095 __ sll(TMP, char_code, 1); // Char code is a smi. | |
1096 __ addu(TMP, TMP, result); | |
1097 __ lw(result, Address(TMP)); | |
1098 } | |
1099 | |
1100 | |
1101 LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone, | |
1102 bool opt) const { | |
1103 const intptr_t kNumInputs = 1; | |
1104 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), | |
1105 LocationSummary::kNoCall); | |
1106 } | |
1107 | |
1108 | |
1109 void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1110 __ Comment("StringToCharCodeInstr"); | |
1111 | |
1112 ASSERT(cid_ == kOneByteStringCid); | |
1113 Register str = locs()->in(0).reg(); | |
1114 Register result = locs()->out(0).reg(); | |
1115 ASSERT(str != result); | |
1116 Label done; | |
1117 __ lw(result, FieldAddress(str, String::length_offset())); | |
1118 __ BranchNotEqual(result, Immediate(Smi::RawValue(1)), &done); | |
1119 __ delay_slot()->addiu(result, ZR, Immediate(Smi::RawValue(-1))); | |
1120 __ lbu(result, FieldAddress(str, OneByteString::data_offset())); | |
1121 __ SmiTag(result); | |
1122 __ Bind(&done); | |
1123 } | |
1124 | |
1125 | |
1126 LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone, | |
1127 bool opt) const { | |
1128 const intptr_t kNumInputs = 1; | |
1129 const intptr_t kNumTemps = 0; | |
1130 LocationSummary* summary = new (zone) | |
1131 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
1132 summary->set_in(0, Location::RegisterLocation(A0)); | |
1133 summary->set_out(0, Location::RegisterLocation(V0)); | |
1134 return summary; | |
1135 } | |
1136 | |
1137 | |
1138 void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1139 Register array = locs()->in(0).reg(); | |
1140 __ Push(array); | |
1141 const int kTypeArgsLen = 0; | |
1142 const int kNumberOfArguments = 1; | |
1143 const Array& kNoArgumentNames = Object::null_array(); | |
1144 ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kNoArgumentNames); | |
1145 compiler->GenerateStaticCall(deopt_id(), token_pos(), CallFunction(), | |
1146 args_info, locs(), ICData::Handle()); | |
1147 ASSERT(locs()->out(0).reg() == V0); | |
1148 } | |
1149 | |
1150 | |
1151 LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone, | |
1152 bool opt) const { | |
1153 const intptr_t kNumInputs = 1; | |
1154 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), | |
1155 LocationSummary::kNoCall); | |
1156 } | |
1157 | |
1158 | |
1159 void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1160 Register obj = locs()->in(0).reg(); | |
1161 Register result = locs()->out(0).reg(); | |
1162 if (object()->definition()->representation() == kUntagged) { | |
1163 __ LoadFromOffset(result, obj, offset()); | |
1164 } else { | |
1165 ASSERT(object()->definition()->representation() == kTagged); | |
1166 __ LoadFieldFromOffset(result, obj, offset()); | |
1167 } | |
1168 } | |
1169 | |
1170 | |
1171 LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone, | |
1172 bool opt) const { | |
1173 const intptr_t kNumInputs = 1; | |
1174 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), | |
1175 LocationSummary::kNoCall); | |
1176 } | |
1177 | |
1178 | |
1179 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1180 Register object = locs()->in(0).reg(); | |
1181 Register result = locs()->out(0).reg(); | |
1182 const AbstractType& value_type = *this->object()->Type()->ToAbstractType(); | |
1183 if (CompileType::Smi().IsAssignableTo(value_type) || | |
1184 value_type.IsTypeParameter()) { | |
1185 __ LoadTaggedClassIdMayBeSmi(result, object); | |
1186 } else { | |
1187 __ LoadClassId(result, object); | |
1188 __ SmiTag(result); | |
1189 } | |
1190 } | |
1191 | |
1192 | |
1193 CompileType LoadIndexedInstr::ComputeType() const { | |
1194 switch (class_id_) { | |
1195 case kArrayCid: | |
1196 case kImmutableArrayCid: | |
1197 return CompileType::Dynamic(); | |
1198 | |
1199 case kTypedDataFloat32ArrayCid: | |
1200 case kTypedDataFloat64ArrayCid: | |
1201 return CompileType::FromCid(kDoubleCid); | |
1202 case kTypedDataFloat32x4ArrayCid: | |
1203 return CompileType::FromCid(kFloat32x4Cid); | |
1204 case kTypedDataInt32x4ArrayCid: | |
1205 return CompileType::FromCid(kInt32x4Cid); | |
1206 | |
1207 case kTypedDataInt8ArrayCid: | |
1208 case kTypedDataUint8ArrayCid: | |
1209 case kTypedDataUint8ClampedArrayCid: | |
1210 case kExternalTypedDataUint8ArrayCid: | |
1211 case kExternalTypedDataUint8ClampedArrayCid: | |
1212 case kTypedDataInt16ArrayCid: | |
1213 case kTypedDataUint16ArrayCid: | |
1214 case kOneByteStringCid: | |
1215 case kTwoByteStringCid: | |
1216 case kExternalOneByteStringCid: | |
1217 case kExternalTwoByteStringCid: | |
1218 return CompileType::FromCid(kSmiCid); | |
1219 | |
1220 case kTypedDataInt32ArrayCid: | |
1221 case kTypedDataUint32ArrayCid: | |
1222 return CompileType::Int(); | |
1223 | |
1224 default: | |
1225 UNIMPLEMENTED(); | |
1226 return CompileType::Dynamic(); | |
1227 } | |
1228 } | |
1229 | |
1230 | |
1231 Representation LoadIndexedInstr::representation() const { | |
1232 switch (class_id_) { | |
1233 case kArrayCid: | |
1234 case kImmutableArrayCid: | |
1235 case kTypedDataInt8ArrayCid: | |
1236 case kTypedDataUint8ArrayCid: | |
1237 case kTypedDataUint8ClampedArrayCid: | |
1238 case kExternalTypedDataUint8ArrayCid: | |
1239 case kExternalTypedDataUint8ClampedArrayCid: | |
1240 case kTypedDataInt16ArrayCid: | |
1241 case kTypedDataUint16ArrayCid: | |
1242 case kOneByteStringCid: | |
1243 case kTwoByteStringCid: | |
1244 case kExternalOneByteStringCid: | |
1245 case kExternalTwoByteStringCid: | |
1246 return kTagged; | |
1247 case kTypedDataInt32ArrayCid: | |
1248 return kUnboxedInt32; | |
1249 case kTypedDataUint32ArrayCid: | |
1250 return kUnboxedUint32; | |
1251 case kTypedDataFloat32ArrayCid: | |
1252 case kTypedDataFloat64ArrayCid: | |
1253 return kUnboxedDouble; | |
1254 case kTypedDataInt32x4ArrayCid: | |
1255 return kUnboxedInt32x4; | |
1256 case kTypedDataFloat32x4ArrayCid: | |
1257 return kUnboxedFloat32x4; | |
1258 default: | |
1259 UNIMPLEMENTED(); | |
1260 return kTagged; | |
1261 } | |
1262 } | |
1263 | |
1264 | |
1265 static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) { | |
1266 ConstantInstr* constant = value->definition()->AsConstant(); | |
1267 if ((constant == NULL) || !Assembler::IsSafeSmi(constant->value())) { | |
1268 return false; | |
1269 } | |
1270 const int64_t index = Smi::Cast(constant->value()).AsInt64Value(); | |
1271 const intptr_t scale = Instance::ElementSizeFor(cid); | |
1272 const int64_t offset = | |
1273 index * scale + | |
1274 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | |
1275 if (!Utils::IsInt(32, offset)) { | |
1276 return false; | |
1277 } | |
1278 return Address::CanHoldOffset(static_cast<int32_t>(offset)); | |
1279 } | |
1280 | |
1281 | |
1282 LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone, | |
1283 bool opt) const { | |
1284 const intptr_t kNumInputs = 2; | |
1285 const intptr_t kNumTemps = aligned() ? 0 : 1; | |
1286 LocationSummary* locs = new (zone) | |
1287 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
1288 locs->set_in(0, Location::RequiresRegister()); | |
1289 if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { | |
1290 locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); | |
1291 } else { | |
1292 locs->set_in(1, Location::RequiresRegister()); | |
1293 } | |
1294 if ((representation() == kUnboxedDouble) || | |
1295 (representation() == kUnboxedFloat32x4) || | |
1296 (representation() == kUnboxedInt32x4)) { | |
1297 locs->set_out(0, Location::RequiresFpuRegister()); | |
1298 } else { | |
1299 locs->set_out(0, Location::RequiresRegister()); | |
1300 } | |
1301 if (!aligned()) { | |
1302 locs->set_temp(0, Location::RequiresRegister()); | |
1303 } | |
1304 return locs; | |
1305 } | |
1306 | |
1307 | |
1308 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1309 __ Comment("LoadIndexedInstr"); | |
1310 // The array register points to the backing store for external arrays. | |
1311 const Register array = locs()->in(0).reg(); | |
1312 const Location index = locs()->in(1); | |
1313 const Register address = aligned() ? kNoRegister : locs()->temp(0).reg(); | |
1314 | |
1315 Address element_address(kNoRegister); | |
1316 if (aligned()) { | |
1317 element_address = | |
1318 index.IsRegister() | |
1319 ? __ ElementAddressForRegIndex(true, // Load. | |
1320 IsExternal(), class_id(), | |
1321 index_scale(), array, index.reg()) | |
1322 : __ ElementAddressForIntIndex(IsExternal(), class_id(), | |
1323 index_scale(), array, | |
1324 Smi::Cast(index.constant()).Value()); | |
1325 // Warning: element_address may use register TMP as base. | |
1326 } else { | |
1327 if (index.IsRegister()) { | |
1328 __ LoadElementAddressForRegIndex(address, | |
1329 true, // Load. | |
1330 IsExternal(), class_id(), index_scale(), | |
1331 array, index.reg()); | |
1332 } else { | |
1333 __ LoadElementAddressForIntIndex(address, IsExternal(), class_id(), | |
1334 index_scale(), array, | |
1335 Smi::Cast(index.constant()).Value()); | |
1336 } | |
1337 } | |
1338 | |
1339 if ((representation() == kUnboxedDouble) || | |
1340 (representation() == kUnboxedFloat32x4) || | |
1341 (representation() == kUnboxedInt32x4)) { | |
1342 DRegister result = locs()->out(0).fpu_reg(); | |
1343 switch (class_id()) { | |
1344 case kTypedDataFloat32ArrayCid: | |
1345 // Load single precision float. | |
1346 __ lwc1(EvenFRegisterOf(result), element_address); | |
1347 break; | |
1348 case kTypedDataFloat64ArrayCid: | |
1349 __ LoadDFromOffset(result, element_address.base(), | |
1350 element_address.offset()); | |
1351 break; | |
1352 case kTypedDataInt32x4ArrayCid: | |
1353 case kTypedDataFloat32x4ArrayCid: | |
1354 UNIMPLEMENTED(); | |
1355 break; | |
1356 } | |
1357 return; | |
1358 } | |
1359 | |
1360 if ((representation() == kUnboxedUint32) || | |
1361 (representation() == kUnboxedInt32)) { | |
1362 const Register result = locs()->out(0).reg(); | |
1363 switch (class_id()) { | |
1364 case kTypedDataInt32ArrayCid: | |
1365 ASSERT(representation() == kUnboxedInt32); | |
1366 if (aligned()) { | |
1367 __ lw(result, element_address); | |
1368 } else { | |
1369 __ LoadWordUnaligned(result, address, TMP); | |
1370 } | |
1371 break; | |
1372 case kTypedDataUint32ArrayCid: | |
1373 ASSERT(representation() == kUnboxedUint32); | |
1374 if (aligned()) { | |
1375 __ lw(result, element_address); | |
1376 } else { | |
1377 __ LoadWordUnaligned(result, address, TMP); | |
1378 } | |
1379 break; | |
1380 default: | |
1381 UNREACHABLE(); | |
1382 } | |
1383 return; | |
1384 } | |
1385 | |
1386 ASSERT(representation() == kTagged); | |
1387 | |
1388 const Register result = locs()->out(0).reg(); | |
1389 switch (class_id()) { | |
1390 case kTypedDataInt8ArrayCid: | |
1391 ASSERT(index_scale() == 1); | |
1392 __ lb(result, element_address); | |
1393 __ SmiTag(result); | |
1394 break; | |
1395 case kTypedDataUint8ArrayCid: | |
1396 case kTypedDataUint8ClampedArrayCid: | |
1397 case kExternalTypedDataUint8ArrayCid: | |
1398 case kExternalTypedDataUint8ClampedArrayCid: | |
1399 case kOneByteStringCid: | |
1400 case kExternalOneByteStringCid: | |
1401 ASSERT(index_scale() == 1); | |
1402 __ lbu(result, element_address); | |
1403 __ SmiTag(result); | |
1404 break; | |
1405 case kTypedDataInt16ArrayCid: | |
1406 if (aligned()) { | |
1407 __ lh(result, element_address); | |
1408 } else { | |
1409 __ LoadHalfWordUnaligned(result, address, TMP); | |
1410 } | |
1411 __ SmiTag(result); | |
1412 break; | |
1413 case kTypedDataUint16ArrayCid: | |
1414 case kTwoByteStringCid: | |
1415 case kExternalTwoByteStringCid: | |
1416 if (aligned()) { | |
1417 __ lhu(result, element_address); | |
1418 } else { | |
1419 __ LoadHalfWordUnsignedUnaligned(result, address, TMP); | |
1420 } | |
1421 __ SmiTag(result); | |
1422 break; | |
1423 default: | |
1424 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid)); | |
1425 ASSERT(aligned()); | |
1426 __ lw(result, element_address); | |
1427 break; | |
1428 } | |
1429 } | |
1430 | |
1431 | |
1432 LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone, | |
1433 bool opt) const { | |
1434 const intptr_t kNumInputs = 2; | |
1435 const intptr_t kNumTemps = 0; | |
1436 LocationSummary* summary = new (zone) | |
1437 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
1438 summary->set_in(0, Location::RequiresRegister()); | |
1439 summary->set_in(1, Location::RequiresRegister()); | |
1440 | |
1441 // TODO(zerny): Handle mints properly once possible. | |
1442 ASSERT(representation() == kTagged); | |
1443 summary->set_out(0, Location::RequiresRegister()); | |
1444 | |
1445 return summary; | |
1446 } | |
1447 | |
1448 | |
1449 void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1450 // The string register points to the backing store for external strings. | |
1451 const Register str = locs()->in(0).reg(); | |
1452 const Location index = locs()->in(1); | |
1453 | |
1454 Address element_address = __ ElementAddressForRegIndex( | |
1455 true, IsExternal(), class_id(), index_scale(), str, index.reg()); | |
1456 // Warning: element_address may use register TMP as base. | |
1457 | |
1458 ASSERT(representation() == kTagged); | |
1459 Register result = locs()->out(0).reg(); | |
1460 switch (class_id()) { | |
1461 case kOneByteStringCid: | |
1462 case kExternalOneByteStringCid: | |
1463 switch (element_count()) { | |
1464 case 1: | |
1465 __ lbu(result, element_address); | |
1466 break; | |
1467 case 2: | |
1468 __ lhu(result, element_address); | |
1469 break; | |
1470 case 4: // Loading multiple code units is disabled on MIPS. | |
1471 default: | |
1472 UNREACHABLE(); | |
1473 } | |
1474 __ SmiTag(result); | |
1475 break; | |
1476 case kTwoByteStringCid: | |
1477 case kExternalTwoByteStringCid: | |
1478 switch (element_count()) { | |
1479 case 1: | |
1480 __ lhu(result, element_address); | |
1481 break; | |
1482 case 2: // Loading multiple code units is disabled on MIPS. | |
1483 default: | |
1484 UNREACHABLE(); | |
1485 } | |
1486 __ SmiTag(result); | |
1487 break; | |
1488 default: | |
1489 UNREACHABLE(); | |
1490 break; | |
1491 } | |
1492 } | |
1493 | |
1494 | |
1495 Representation StoreIndexedInstr::RequiredInputRepresentation( | |
1496 intptr_t idx) const { | |
1497 // Array can be a Dart object or a pointer to external data. | |
1498 if (idx == 0) return kNoRepresentation; // Flexible input representation. | |
1499 if (idx == 1) return kTagged; // Index is a smi. | |
1500 ASSERT(idx == 2); | |
1501 switch (class_id_) { | |
1502 case kArrayCid: | |
1503 case kOneByteStringCid: | |
1504 case kTypedDataInt8ArrayCid: | |
1505 case kTypedDataUint8ArrayCid: | |
1506 case kExternalTypedDataUint8ArrayCid: | |
1507 case kTypedDataUint8ClampedArrayCid: | |
1508 case kExternalTypedDataUint8ClampedArrayCid: | |
1509 case kTypedDataInt16ArrayCid: | |
1510 case kTypedDataUint16ArrayCid: | |
1511 return kTagged; | |
1512 case kTypedDataInt32ArrayCid: | |
1513 return kUnboxedInt32; | |
1514 case kTypedDataUint32ArrayCid: | |
1515 return kUnboxedUint32; | |
1516 case kTypedDataFloat32ArrayCid: | |
1517 case kTypedDataFloat64ArrayCid: | |
1518 return kUnboxedDouble; | |
1519 case kTypedDataFloat32x4ArrayCid: | |
1520 return kUnboxedFloat32x4; | |
1521 case kTypedDataInt32x4ArrayCid: | |
1522 return kUnboxedInt32x4; | |
1523 default: | |
1524 UNIMPLEMENTED(); | |
1525 return kTagged; | |
1526 } | |
1527 } | |
1528 | |
1529 | |
1530 LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone, | |
1531 bool opt) const { | |
1532 const intptr_t kNumInputs = 3; | |
1533 const intptr_t kNumTemps = aligned() ? 0 : 2; | |
1534 LocationSummary* locs = new (zone) | |
1535 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
1536 locs->set_in(0, Location::RequiresRegister()); | |
1537 if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { | |
1538 locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); | |
1539 } else { | |
1540 locs->set_in(1, Location::WritableRegister()); | |
1541 } | |
1542 switch (class_id()) { | |
1543 case kArrayCid: | |
1544 locs->set_in(2, ShouldEmitStoreBarrier() | |
1545 ? Location::WritableRegister() | |
1546 : Location::RegisterOrConstant(value())); | |
1547 break; | |
1548 case kExternalTypedDataUint8ArrayCid: | |
1549 case kExternalTypedDataUint8ClampedArrayCid: | |
1550 case kTypedDataInt8ArrayCid: | |
1551 case kTypedDataUint8ArrayCid: | |
1552 case kTypedDataUint8ClampedArrayCid: | |
1553 case kOneByteStringCid: | |
1554 case kTypedDataInt16ArrayCid: | |
1555 case kTypedDataUint16ArrayCid: | |
1556 case kTypedDataInt32ArrayCid: | |
1557 case kTypedDataUint32ArrayCid: | |
1558 locs->set_in(2, Location::RequiresRegister()); | |
1559 break; | |
1560 case kTypedDataFloat32ArrayCid: | |
1561 case kTypedDataFloat64ArrayCid: // TODO(srdjan): Support Float64 constants. | |
1562 case kTypedDataInt32x4ArrayCid: | |
1563 case kTypedDataFloat32x4ArrayCid: | |
1564 locs->set_in(2, Location::RequiresFpuRegister()); | |
1565 break; | |
1566 default: | |
1567 UNREACHABLE(); | |
1568 return NULL; | |
1569 } | |
1570 if (!aligned()) { | |
1571 locs->set_temp(0, Location::RequiresRegister()); | |
1572 locs->set_temp(1, Location::RequiresRegister()); | |
1573 } | |
1574 return locs; | |
1575 } | |
1576 | |
1577 | |
1578 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1579 __ Comment("StoreIndexedInstr"); | |
1580 // The array register points to the backing store for external arrays. | |
1581 const Register array = locs()->in(0).reg(); | |
1582 const Location index = locs()->in(1); | |
1583 const Register address = aligned() ? kNoRegister : locs()->temp(0).reg(); | |
1584 const Register scratch = aligned() ? kNoRegister : locs()->temp(1).reg(); | |
1585 | |
1586 Address element_address(kNoRegister); | |
1587 if (aligned()) { | |
1588 element_address = | |
1589 index.IsRegister() | |
1590 ? __ ElementAddressForRegIndex(false, // Store. | |
1591 IsExternal(), class_id(), | |
1592 index_scale(), array, index.reg()) | |
1593 : __ ElementAddressForIntIndex(IsExternal(), class_id(), | |
1594 index_scale(), array, | |
1595 Smi::Cast(index.constant()).Value()); | |
1596 ASSERT(element_address.base() != TMP); // Allowed for load only. | |
1597 } else { | |
1598 if (index.IsRegister()) { | |
1599 __ LoadElementAddressForRegIndex(address, | |
1600 false, // Store. | |
1601 IsExternal(), class_id(), index_scale(), | |
1602 array, index.reg()); | |
1603 } else { | |
1604 __ LoadElementAddressForIntIndex(address, IsExternal(), class_id(), | |
1605 index_scale(), array, | |
1606 Smi::Cast(index.constant()).Value()); | |
1607 } | |
1608 } | |
1609 | |
1610 switch (class_id()) { | |
1611 case kArrayCid: | |
1612 ASSERT(aligned()); | |
1613 if (ShouldEmitStoreBarrier()) { | |
1614 Register value = locs()->in(2).reg(); | |
1615 __ StoreIntoObject(array, element_address, value); | |
1616 } else if (locs()->in(2).IsConstant()) { | |
1617 const Object& constant = locs()->in(2).constant(); | |
1618 __ StoreIntoObjectNoBarrier(array, element_address, constant); | |
1619 } else { | |
1620 Register value = locs()->in(2).reg(); | |
1621 __ StoreIntoObjectNoBarrier(array, element_address, value); | |
1622 } | |
1623 break; | |
1624 case kTypedDataInt8ArrayCid: | |
1625 case kTypedDataUint8ArrayCid: | |
1626 case kExternalTypedDataUint8ArrayCid: | |
1627 case kOneByteStringCid: { | |
1628 ASSERT(aligned()); | |
1629 if (locs()->in(2).IsConstant()) { | |
1630 const Smi& constant = Smi::Cast(locs()->in(2).constant()); | |
1631 __ LoadImmediate(TMP, static_cast<int8_t>(constant.Value())); | |
1632 __ sb(TMP, element_address); | |
1633 } else { | |
1634 Register value = locs()->in(2).reg(); | |
1635 __ SmiUntag(TMP, value); | |
1636 __ sb(TMP, element_address); | |
1637 } | |
1638 break; | |
1639 } | |
1640 case kTypedDataUint8ClampedArrayCid: | |
1641 case kExternalTypedDataUint8ClampedArrayCid: { | |
1642 ASSERT(aligned()); | |
1643 if (locs()->in(2).IsConstant()) { | |
1644 const Smi& constant = Smi::Cast(locs()->in(2).constant()); | |
1645 intptr_t value = constant.Value(); | |
1646 // Clamp to 0x0 or 0xFF respectively. | |
1647 if (value > 0xFF) { | |
1648 value = 0xFF; | |
1649 } else if (value < 0) { | |
1650 value = 0; | |
1651 } | |
1652 __ LoadImmediate(TMP, static_cast<int8_t>(value)); | |
1653 __ sb(TMP, element_address); | |
1654 } else { | |
1655 Register value = locs()->in(2).reg(); | |
1656 Label store_value, bigger, smaller; | |
1657 __ SmiUntag(TMP, value); | |
1658 __ BranchUnsignedLess(TMP, Immediate(0xFF + 1), &store_value); | |
1659 __ LoadImmediate(TMP, 0xFF); | |
1660 __ slti(CMPRES1, value, Immediate(1)); | |
1661 __ movn(TMP, ZR, CMPRES1); | |
1662 __ Bind(&store_value); | |
1663 __ sb(TMP, element_address); | |
1664 } | |
1665 break; | |
1666 } | |
1667 case kTypedDataInt16ArrayCid: | |
1668 case kTypedDataUint16ArrayCid: { | |
1669 Register value = locs()->in(2).reg(); | |
1670 __ SmiUntag(TMP, value); | |
1671 if (aligned()) { | |
1672 __ sh(TMP, element_address); | |
1673 } else { | |
1674 __ StoreHalfWordUnaligned(TMP, address, scratch); | |
1675 } | |
1676 break; | |
1677 } | |
1678 case kTypedDataInt32ArrayCid: | |
1679 case kTypedDataUint32ArrayCid: { | |
1680 if (aligned()) { | |
1681 __ sw(locs()->in(2).reg(), element_address); | |
1682 } else { | |
1683 __ StoreWordUnaligned(locs()->in(2).reg(), address, scratch); | |
1684 } | |
1685 break; | |
1686 } | |
1687 case kTypedDataFloat32ArrayCid: { | |
1688 ASSERT(aligned()); | |
1689 FRegister value = EvenFRegisterOf(locs()->in(2).fpu_reg()); | |
1690 __ swc1(value, element_address); | |
1691 break; | |
1692 } | |
1693 case kTypedDataFloat64ArrayCid: | |
1694 ASSERT(aligned()); | |
1695 __ StoreDToOffset(locs()->in(2).fpu_reg(), element_address.base(), | |
1696 element_address.offset()); | |
1697 break; | |
1698 case kTypedDataInt32x4ArrayCid: | |
1699 case kTypedDataFloat32x4ArrayCid: | |
1700 UNIMPLEMENTED(); | |
1701 break; | |
1702 default: | |
1703 UNREACHABLE(); | |
1704 } | |
1705 } | |
1706 | |
1707 | |
1708 LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone, | |
1709 bool opt) const { | |
1710 const intptr_t kNumInputs = 1; | |
1711 | |
1712 const intptr_t value_cid = value()->Type()->ToCid(); | |
1713 const intptr_t field_cid = field().guarded_cid(); | |
1714 | |
1715 const bool emit_full_guard = !opt || (field_cid == kIllegalCid); | |
1716 const bool needs_value_cid_temp_reg = | |
1717 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid)); | |
1718 const bool needs_field_temp_reg = emit_full_guard; | |
1719 | |
1720 intptr_t num_temps = 0; | |
1721 if (needs_value_cid_temp_reg) { | |
1722 num_temps++; | |
1723 } | |
1724 if (needs_field_temp_reg) { | |
1725 num_temps++; | |
1726 } | |
1727 | |
1728 LocationSummary* summary = new (zone) | |
1729 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall); | |
1730 summary->set_in(0, Location::RequiresRegister()); | |
1731 | |
1732 for (intptr_t i = 0; i < num_temps; i++) { | |
1733 summary->set_temp(i, Location::RequiresRegister()); | |
1734 } | |
1735 | |
1736 return summary; | |
1737 } | |
1738 | |
1739 | |
1740 void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1741 ASSERT(sizeof(classid_t) == kInt16Size); | |
1742 __ Comment("GuardFieldClassInstr"); | |
1743 | |
1744 const intptr_t value_cid = value()->Type()->ToCid(); | |
1745 const intptr_t field_cid = field().guarded_cid(); | |
1746 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid; | |
1747 | |
1748 if (field_cid == kDynamicCid) { | |
1749 if (Compiler::IsBackgroundCompilation()) { | |
1750 // Field state changed while compiling. | |
1751 Compiler::AbortBackgroundCompilation( | |
1752 deopt_id(), | |
1753 "GuardFieldClassInstr: field state changed while compiling"); | |
1754 } | |
1755 ASSERT(!compiler->is_optimizing()); | |
1756 return; // Nothing to emit. | |
1757 } | |
1758 | |
1759 const bool emit_full_guard = | |
1760 !compiler->is_optimizing() || (field_cid == kIllegalCid); | |
1761 | |
1762 const bool needs_value_cid_temp_reg = | |
1763 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid)); | |
1764 | |
1765 const bool needs_field_temp_reg = emit_full_guard; | |
1766 | |
1767 const Register value_reg = locs()->in(0).reg(); | |
1768 | |
1769 const Register value_cid_reg = | |
1770 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister; | |
1771 | |
1772 const Register field_reg = needs_field_temp_reg | |
1773 ? locs()->temp(locs()->temp_count() - 1).reg() | |
1774 : kNoRegister; | |
1775 | |
1776 Label ok, fail_label; | |
1777 | |
1778 Label* deopt = | |
1779 compiler->is_optimizing() | |
1780 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) | |
1781 : NULL; | |
1782 | |
1783 Label* fail = (deopt != NULL) ? deopt : &fail_label; | |
1784 | |
1785 if (emit_full_guard) { | |
1786 __ LoadObject(field_reg, Field::ZoneHandle(field().Original())); | |
1787 | |
1788 FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset()); | |
1789 FieldAddress field_nullability_operand(field_reg, | |
1790 Field::is_nullable_offset()); | |
1791 | |
1792 if (value_cid == kDynamicCid) { | |
1793 LoadValueCid(compiler, value_cid_reg, value_reg); | |
1794 | |
1795 __ lhu(CMPRES1, field_cid_operand); | |
1796 __ beq(value_cid_reg, CMPRES1, &ok); | |
1797 __ lhu(TMP, field_nullability_operand); | |
1798 __ subu(CMPRES1, value_cid_reg, TMP); | |
1799 } else if (value_cid == kNullCid) { | |
1800 __ lhu(TMP, field_nullability_operand); | |
1801 __ LoadImmediate(CMPRES1, value_cid); | |
1802 __ subu(CMPRES1, TMP, CMPRES1); | |
1803 } else { | |
1804 __ lhu(TMP, field_cid_operand); | |
1805 __ LoadImmediate(CMPRES1, value_cid); | |
1806 __ subu(CMPRES1, TMP, CMPRES1); | |
1807 } | |
1808 __ beq(CMPRES1, ZR, &ok); | |
1809 | |
1810 // Check if the tracked state of the guarded field can be initialized | |
1811 // inline. If the field needs length check we fall through to runtime | |
1812 // which is responsible for computing offset of the length field | |
1813 // based on the class id. | |
1814 // Length guard will be emitted separately when needed via GuardFieldLength | |
1815 // instruction after GuardFieldClass. | |
1816 if (!field().needs_length_check()) { | |
1817 // Uninitialized field can be handled inline. Check if the | |
1818 // field is still unitialized. | |
1819 __ lhu(CMPRES1, field_cid_operand); | |
1820 __ BranchNotEqual(CMPRES1, Immediate(kIllegalCid), fail); | |
1821 | |
1822 if (value_cid == kDynamicCid) { | |
1823 __ sh(value_cid_reg, field_cid_operand); | |
1824 __ sh(value_cid_reg, field_nullability_operand); | |
1825 } else { | |
1826 __ LoadImmediate(TMP, value_cid); | |
1827 __ sh(TMP, field_cid_operand); | |
1828 __ sh(TMP, field_nullability_operand); | |
1829 } | |
1830 | |
1831 if (deopt == NULL) { | |
1832 ASSERT(!compiler->is_optimizing()); | |
1833 __ b(&ok); | |
1834 } | |
1835 } | |
1836 | |
1837 if (deopt == NULL) { | |
1838 ASSERT(!compiler->is_optimizing()); | |
1839 __ Bind(fail); | |
1840 | |
1841 __ lhu(CMPRES1, FieldAddress(field_reg, Field::guarded_cid_offset())); | |
1842 __ BranchEqual(CMPRES1, Immediate(kDynamicCid), &ok); | |
1843 | |
1844 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
1845 __ sw(field_reg, Address(SP, 1 * kWordSize)); | |
1846 __ sw(value_reg, Address(SP, 0 * kWordSize)); | |
1847 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); | |
1848 __ Drop(2); // Drop the field and the value. | |
1849 } | |
1850 } else { | |
1851 ASSERT(compiler->is_optimizing()); | |
1852 ASSERT(deopt != NULL); | |
1853 | |
1854 // Field guard class has been initialized and is known. | |
1855 if (value_cid == kDynamicCid) { | |
1856 // Value's class id is not known. | |
1857 __ andi(CMPRES1, value_reg, Immediate(kSmiTagMask)); | |
1858 | |
1859 if (field_cid != kSmiCid) { | |
1860 __ beq(CMPRES1, ZR, fail); | |
1861 __ LoadClassId(value_cid_reg, value_reg); | |
1862 __ LoadImmediate(TMP, field_cid); | |
1863 __ subu(CMPRES1, value_cid_reg, TMP); | |
1864 } | |
1865 | |
1866 if (field().is_nullable() && (field_cid != kNullCid)) { | |
1867 __ beq(CMPRES1, ZR, &ok); | |
1868 if (field_cid != kSmiCid) { | |
1869 __ LoadImmediate(TMP, kNullCid); | |
1870 __ subu(CMPRES1, value_cid_reg, TMP); | |
1871 } else { | |
1872 __ LoadObject(TMP, Object::null_object()); | |
1873 __ subu(CMPRES1, value_reg, TMP); | |
1874 } | |
1875 } | |
1876 | |
1877 __ bne(CMPRES1, ZR, fail); | |
1878 } else { | |
1879 // Both value's and field's class id is known. | |
1880 ASSERT((value_cid != field_cid) && (value_cid != nullability)); | |
1881 __ b(fail); | |
1882 } | |
1883 } | |
1884 __ Bind(&ok); | |
1885 } | |
1886 | |
1887 | |
1888 LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone, | |
1889 bool opt) const { | |
1890 const intptr_t kNumInputs = 1; | |
1891 | |
1892 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) { | |
1893 const intptr_t kNumTemps = 1; | |
1894 LocationSummary* summary = new (zone) | |
1895 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
1896 summary->set_in(0, Location::RequiresRegister()); | |
1897 // We need temporaries for field object. | |
1898 summary->set_temp(0, Location::RequiresRegister()); | |
1899 return summary; | |
1900 } | |
1901 LocationSummary* summary = | |
1902 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); | |
1903 summary->set_in(0, Location::RequiresRegister()); | |
1904 return summary; | |
1905 } | |
1906 | |
1907 | |
1908 void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
1909 if (field().guarded_list_length() == Field::kNoFixedLength) { | |
1910 if (Compiler::IsBackgroundCompilation()) { | |
1911 // Field state changed while compiling. | |
1912 Compiler::AbortBackgroundCompilation( | |
1913 deopt_id(), | |
1914 "GuardFieldLengthInstr: field state changed while compiling"); | |
1915 } | |
1916 ASSERT(!compiler->is_optimizing()); | |
1917 return; // Nothing to emit. | |
1918 } | |
1919 | |
1920 Label* deopt = | |
1921 compiler->is_optimizing() | |
1922 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) | |
1923 : NULL; | |
1924 | |
1925 const Register value_reg = locs()->in(0).reg(); | |
1926 | |
1927 if (!compiler->is_optimizing() || | |
1928 (field().guarded_list_length() == Field::kUnknownFixedLength)) { | |
1929 const Register field_reg = locs()->temp(0).reg(); | |
1930 | |
1931 Label ok; | |
1932 | |
1933 __ LoadObject(field_reg, Field::ZoneHandle(field().Original())); | |
1934 | |
1935 __ lb(CMPRES1, | |
1936 FieldAddress(field_reg, | |
1937 Field::guarded_list_length_in_object_offset_offset())); | |
1938 __ blez(CMPRES1, &ok); | |
1939 | |
1940 __ lw(CMPRES2, | |
1941 FieldAddress(field_reg, Field::guarded_list_length_offset())); | |
1942 | |
1943 // Load the length from the value. GuardFieldClass already verified that | |
1944 // value's class matches guarded class id of the field. | |
1945 // CMPRES1 contains offset already corrected by -kHeapObjectTag that is | |
1946 // why we can use Address instead of FieldAddress. | |
1947 __ addu(TMP, value_reg, CMPRES1); | |
1948 __ lw(TMP, Address(TMP)); | |
1949 | |
1950 if (deopt == NULL) { | |
1951 __ beq(CMPRES2, TMP, &ok); | |
1952 | |
1953 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
1954 __ sw(field_reg, Address(SP, 1 * kWordSize)); | |
1955 __ sw(value_reg, Address(SP, 0 * kWordSize)); | |
1956 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); | |
1957 __ Drop(2); // Drop the field and the value. | |
1958 } else { | |
1959 __ bne(CMPRES2, TMP, deopt); | |
1960 } | |
1961 | |
1962 __ Bind(&ok); | |
1963 } else { | |
1964 ASSERT(compiler->is_optimizing()); | |
1965 ASSERT(field().guarded_list_length() >= 0); | |
1966 ASSERT(field().guarded_list_length_in_object_offset() != | |
1967 Field::kUnknownLengthOffset); | |
1968 | |
1969 __ lw(CMPRES1, | |
1970 FieldAddress(value_reg, | |
1971 field().guarded_list_length_in_object_offset())); | |
1972 __ LoadImmediate(TMP, Smi::RawValue(field().guarded_list_length())); | |
1973 __ bne(CMPRES1, TMP, deopt); | |
1974 } | |
1975 } | |
1976 | |
1977 | |
1978 class BoxAllocationSlowPath : public SlowPathCode { | |
1979 public: | |
1980 BoxAllocationSlowPath(Instruction* instruction, | |
1981 const Class& cls, | |
1982 Register result) | |
1983 : instruction_(instruction), cls_(cls), result_(result) {} | |
1984 | |
1985 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
1986 if (Assembler::EmittingComments()) { | |
1987 __ Comment("%s slow path allocation of %s", instruction_->DebugName(), | |
1988 String::Handle(cls_.ScrubbedName()).ToCString()); | |
1989 } | |
1990 __ Bind(entry_label()); | |
1991 const Code& stub = Code::ZoneHandle( | |
1992 compiler->zone(), StubCode::GetAllocationStubForClass(cls_)); | |
1993 const StubEntry stub_entry(stub); | |
1994 | |
1995 LocationSummary* locs = instruction_->locs(); | |
1996 locs->live_registers()->Remove(Location::RegisterLocation(result_)); | |
1997 | |
1998 compiler->SaveLiveRegisters(locs); | |
1999 compiler->GenerateCall(TokenPosition::kNoSource, // No token position. | |
2000 stub_entry, RawPcDescriptors::kOther, locs); | |
2001 compiler->AddStubCallTarget(stub); | |
2002 if (result_ != V0) { | |
2003 __ mov(result_, V0); | |
2004 } | |
2005 compiler->RestoreLiveRegisters(locs); | |
2006 __ b(exit_label()); | |
2007 } | |
2008 | |
2009 static void Allocate(FlowGraphCompiler* compiler, | |
2010 Instruction* instruction, | |
2011 const Class& cls, | |
2012 Register result, | |
2013 Register temp) { | |
2014 if (compiler->intrinsic_mode()) { | |
2015 __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), result, temp); | |
2016 } else { | |
2017 BoxAllocationSlowPath* slow_path = | |
2018 new BoxAllocationSlowPath(instruction, cls, result); | |
2019 compiler->AddSlowPathCode(slow_path); | |
2020 | |
2021 __ TryAllocate(cls, slow_path->entry_label(), result, temp); | |
2022 __ Bind(slow_path->exit_label()); | |
2023 } | |
2024 } | |
2025 | |
2026 private: | |
2027 Instruction* instruction_; | |
2028 const Class& cls_; | |
2029 const Register result_; | |
2030 }; | |
2031 | |
2032 | |
2033 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone, | |
2034 bool opt) const { | |
2035 const intptr_t kNumInputs = 2; | |
2036 const intptr_t kNumTemps = | |
2037 (IsUnboxedStore() && opt) ? 2 : ((IsPotentialUnboxedStore()) ? 3 : 0); | |
2038 LocationSummary* summary = new (zone) | |
2039 LocationSummary(zone, kNumInputs, kNumTemps, | |
2040 ((IsUnboxedStore() && opt && is_initialization()) || | |
2041 IsPotentialUnboxedStore()) | |
2042 ? LocationSummary::kCallOnSlowPath | |
2043 : LocationSummary::kNoCall); | |
2044 | |
2045 summary->set_in(0, Location::RequiresRegister()); | |
2046 if (IsUnboxedStore() && opt) { | |
2047 summary->set_in(1, Location::RequiresFpuRegister()); | |
2048 summary->set_temp(0, Location::RequiresRegister()); | |
2049 summary->set_temp(1, Location::RequiresRegister()); | |
2050 } else if (IsPotentialUnboxedStore()) { | |
2051 summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister() | |
2052 : Location::RequiresRegister()); | |
2053 summary->set_temp(0, Location::RequiresRegister()); | |
2054 summary->set_temp(1, Location::RequiresRegister()); | |
2055 summary->set_temp(2, opt ? Location::RequiresFpuRegister() | |
2056 : Location::FpuRegisterLocation(D1)); | |
2057 } else { | |
2058 summary->set_in(1, ShouldEmitStoreBarrier() | |
2059 ? Location::WritableRegister() | |
2060 : Location::RegisterOrConstant(value())); | |
2061 } | |
2062 return summary; | |
2063 } | |
2064 | |
2065 | |
2066 static void EnsureMutableBox(FlowGraphCompiler* compiler, | |
2067 StoreInstanceFieldInstr* instruction, | |
2068 Register box_reg, | |
2069 const Class& cls, | |
2070 Register instance_reg, | |
2071 intptr_t offset, | |
2072 Register temp) { | |
2073 Label done; | |
2074 __ lw(box_reg, FieldAddress(instance_reg, offset)); | |
2075 __ BranchNotEqual(box_reg, Object::null_object(), &done); | |
2076 BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp); | |
2077 __ mov(temp, box_reg); | |
2078 __ StoreIntoObjectOffset(instance_reg, offset, temp); | |
2079 __ Bind(&done); | |
2080 } | |
2081 | |
2082 | |
2083 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2084 ASSERT(sizeof(classid_t) == kInt16Size); | |
2085 Label skip_store; | |
2086 | |
2087 Register instance_reg = locs()->in(0).reg(); | |
2088 | |
2089 if (IsUnboxedStore() && compiler->is_optimizing()) { | |
2090 DRegister value = locs()->in(1).fpu_reg(); | |
2091 Register temp = locs()->temp(0).reg(); | |
2092 Register temp2 = locs()->temp(1).reg(); | |
2093 const intptr_t cid = field().UnboxedFieldCid(); | |
2094 | |
2095 if (is_initialization()) { | |
2096 const Class* cls = NULL; | |
2097 switch (cid) { | |
2098 case kDoubleCid: | |
2099 cls = &compiler->double_class(); | |
2100 break; | |
2101 default: | |
2102 UNREACHABLE(); | |
2103 } | |
2104 | |
2105 BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2); | |
2106 __ mov(temp2, temp); | |
2107 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2); | |
2108 } else { | |
2109 __ lw(temp, FieldAddress(instance_reg, offset_in_bytes_)); | |
2110 } | |
2111 switch (cid) { | |
2112 case kDoubleCid: | |
2113 __ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag); | |
2114 break; | |
2115 default: | |
2116 UNREACHABLE(); | |
2117 } | |
2118 return; | |
2119 } | |
2120 | |
2121 if (IsPotentialUnboxedStore()) { | |
2122 Register value_reg = locs()->in(1).reg(); | |
2123 Register temp = locs()->temp(0).reg(); | |
2124 Register temp2 = locs()->temp(1).reg(); | |
2125 DRegister fpu_temp = locs()->temp(2).fpu_reg(); | |
2126 | |
2127 if (ShouldEmitStoreBarrier()) { | |
2128 // Value input is a writable register and should be manually preserved | |
2129 // across allocation slow-path. | |
2130 locs()->live_registers()->Add(locs()->in(1), kTagged); | |
2131 } | |
2132 | |
2133 Label store_pointer; | |
2134 Label store_double; | |
2135 | |
2136 __ LoadObject(temp, Field::ZoneHandle(Z, field().Original())); | |
2137 | |
2138 __ lhu(temp2, FieldAddress(temp, Field::is_nullable_offset())); | |
2139 __ BranchEqual(temp2, Immediate(kNullCid), &store_pointer); | |
2140 | |
2141 __ lbu(temp2, FieldAddress(temp, Field::kind_bits_offset())); | |
2142 __ andi(CMPRES1, temp2, Immediate(1 << Field::kUnboxingCandidateBit)); | |
2143 __ beq(CMPRES1, ZR, &store_pointer); | |
2144 | |
2145 __ lhu(temp2, FieldAddress(temp, Field::guarded_cid_offset())); | |
2146 __ BranchEqual(temp2, Immediate(kDoubleCid), &store_double); | |
2147 | |
2148 // Fall through. | |
2149 __ b(&store_pointer); | |
2150 | |
2151 if (!compiler->is_optimizing()) { | |
2152 locs()->live_registers()->Add(locs()->in(0)); | |
2153 locs()->live_registers()->Add(locs()->in(1)); | |
2154 } | |
2155 | |
2156 { | |
2157 __ Bind(&store_double); | |
2158 EnsureMutableBox(compiler, this, temp, compiler->double_class(), | |
2159 instance_reg, offset_in_bytes_, temp2); | |
2160 __ LoadDFromOffset(fpu_temp, value_reg, | |
2161 Double::value_offset() - kHeapObjectTag); | |
2162 __ StoreDToOffset(fpu_temp, temp, | |
2163 Double::value_offset() - kHeapObjectTag); | |
2164 __ b(&skip_store); | |
2165 } | |
2166 | |
2167 __ Bind(&store_pointer); | |
2168 } | |
2169 | |
2170 if (ShouldEmitStoreBarrier()) { | |
2171 Register value_reg = locs()->in(1).reg(); | |
2172 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, value_reg, | |
2173 CanValueBeSmi()); | |
2174 } else { | |
2175 if (locs()->in(1).IsConstant()) { | |
2176 __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes_, | |
2177 locs()->in(1).constant()); | |
2178 } else { | |
2179 Register value_reg = locs()->in(1).reg(); | |
2180 __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes_, | |
2181 value_reg); | |
2182 } | |
2183 } | |
2184 __ Bind(&skip_store); | |
2185 } | |
2186 | |
2187 | |
2188 LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone, | |
2189 bool opt) const { | |
2190 const intptr_t kNumInputs = 1; | |
2191 const intptr_t kNumTemps = 0; | |
2192 LocationSummary* summary = new (zone) | |
2193 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
2194 summary->set_in(0, Location::RequiresRegister()); | |
2195 summary->set_out(0, Location::RequiresRegister()); | |
2196 return summary; | |
2197 } | |
2198 | |
2199 | |
2200 // When the parser is building an implicit static getter for optimization, | |
2201 // it can generate a function body where deoptimization ids do not line up | |
2202 // with the unoptimized code. | |
2203 // | |
2204 // This is safe only so long as LoadStaticFieldInstr cannot deoptimize. | |
2205 void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2206 __ Comment("LoadStaticFieldInstr"); | |
2207 Register field = locs()->in(0).reg(); | |
2208 Register result = locs()->out(0).reg(); | |
2209 __ LoadFromOffset(result, field, | |
2210 Field::static_value_offset() - kHeapObjectTag); | |
2211 } | |
2212 | |
2213 | |
2214 LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone, | |
2215 bool opt) const { | |
2216 LocationSummary* locs = | |
2217 new (zone) LocationSummary(zone, 1, 1, LocationSummary::kNoCall); | |
2218 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister() | |
2219 : Location::RequiresRegister()); | |
2220 locs->set_temp(0, Location::RequiresRegister()); | |
2221 return locs; | |
2222 } | |
2223 | |
2224 | |
2225 void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2226 __ Comment("StoreStaticFieldInstr"); | |
2227 Register value = locs()->in(0).reg(); | |
2228 Register temp = locs()->temp(0).reg(); | |
2229 | |
2230 __ LoadObject(temp, Field::ZoneHandle(Z, field().Original())); | |
2231 if (this->value()->NeedsStoreBuffer()) { | |
2232 __ StoreIntoObject(temp, FieldAddress(temp, Field::static_value_offset()), | |
2233 value, CanValueBeSmi()); | |
2234 } else { | |
2235 __ StoreIntoObjectNoBarrier( | |
2236 temp, FieldAddress(temp, Field::static_value_offset()), value); | |
2237 } | |
2238 } | |
2239 | |
2240 | |
2241 LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone, | |
2242 bool opt) const { | |
2243 const intptr_t kNumInputs = 3; | |
2244 const intptr_t kNumTemps = 0; | |
2245 LocationSummary* summary = new (zone) | |
2246 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
2247 summary->set_in(0, Location::RegisterLocation(A0)); // Instance. | |
2248 summary->set_in(1, Location::RegisterLocation(A1)); // Instant. type args. | |
2249 summary->set_in(2, Location::RegisterLocation(A2)); // Function type args. | |
2250 summary->set_out(0, Location::RegisterLocation(V0)); | |
2251 return summary; | |
2252 } | |
2253 | |
2254 | |
2255 void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2256 ASSERT(locs()->in(0).reg() == A0); // Value. | |
2257 ASSERT(locs()->in(1).reg() == A1); // Instantiator type arguments. | |
2258 ASSERT(locs()->in(2).reg() == A2); // Function type arguments. | |
2259 | |
2260 __ Comment("InstanceOfInstr"); | |
2261 compiler->GenerateInstanceOf(token_pos(), deopt_id(), type(), locs()); | |
2262 ASSERT(locs()->out(0).reg() == V0); | |
2263 } | |
2264 | |
2265 | |
2266 LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone, | |
2267 bool opt) const { | |
2268 const intptr_t kNumInputs = 2; | |
2269 const intptr_t kNumTemps = 0; | |
2270 LocationSummary* locs = new (zone) | |
2271 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
2272 locs->set_in(0, Location::RegisterLocation(A0)); | |
2273 locs->set_in(1, Location::RegisterLocation(A1)); | |
2274 locs->set_out(0, Location::RegisterLocation(V0)); | |
2275 return locs; | |
2276 } | |
2277 | |
2278 | |
2279 // Inlines array allocation for known constant values. | |
2280 static void InlineArrayAllocation(FlowGraphCompiler* compiler, | |
2281 intptr_t num_elements, | |
2282 Label* slow_path, | |
2283 Label* done) { | |
2284 const int kInlineArraySize = 12; // Same as kInlineInstanceSize. | |
2285 const Register kLengthReg = A1; | |
2286 const Register kElemTypeReg = A0; | |
2287 const intptr_t instance_size = Array::InstanceSize(num_elements); | |
2288 | |
2289 __ TryAllocateArray(kArrayCid, instance_size, slow_path, | |
2290 V0, // instance | |
2291 T1, // end address | |
2292 T2, T3); | |
2293 // V0: new object start as a tagged pointer. | |
2294 // T1: new object end address. | |
2295 | |
2296 // Store the type argument field. | |
2297 __ StoreIntoObjectNoBarrier( | |
2298 V0, FieldAddress(V0, Array::type_arguments_offset()), kElemTypeReg); | |
2299 | |
2300 // Set the length field. | |
2301 __ StoreIntoObjectNoBarrier(V0, FieldAddress(V0, Array::length_offset()), | |
2302 kLengthReg); | |
2303 | |
2304 // Initialize all array elements to raw_null. | |
2305 // V0: new object start as a tagged pointer. | |
2306 // T1: new object end address. | |
2307 // T2: iterator which initially points to the start of the variable | |
2308 // data area to be initialized. | |
2309 // T7: null. | |
2310 if (num_elements > 0) { | |
2311 const intptr_t array_size = instance_size - sizeof(RawArray); | |
2312 __ LoadObject(T7, Object::null_object()); | |
2313 __ AddImmediate(T2, V0, sizeof(RawArray) - kHeapObjectTag); | |
2314 if (array_size < (kInlineArraySize * kWordSize)) { | |
2315 intptr_t current_offset = 0; | |
2316 while (current_offset < array_size) { | |
2317 __ sw(T7, Address(T2, current_offset)); | |
2318 current_offset += kWordSize; | |
2319 } | |
2320 } else { | |
2321 Label init_loop; | |
2322 __ Bind(&init_loop); | |
2323 __ sw(T7, Address(T2, 0)); | |
2324 __ addiu(T2, T2, Immediate(kWordSize)); | |
2325 __ BranchUnsignedLess(T2, T1, &init_loop); | |
2326 } | |
2327 } | |
2328 __ b(done); | |
2329 } | |
2330 | |
2331 | |
2332 void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2333 __ Comment("CreateArrayInstr"); | |
2334 const Register kLengthReg = A1; | |
2335 const Register kElemTypeReg = A0; | |
2336 const Register kResultReg = V0; | |
2337 ASSERT(locs()->in(0).reg() == kElemTypeReg); | |
2338 ASSERT(locs()->in(1).reg() == kLengthReg); | |
2339 | |
2340 Label slow_path, done; | |
2341 if (compiler->is_optimizing() && !FLAG_precompiled_mode && | |
2342 num_elements()->BindsToConstant() && | |
2343 num_elements()->BoundConstant().IsSmi()) { | |
2344 const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value(); | |
2345 if ((length >= 0) && (length <= Array::kMaxElements)) { | |
2346 Label slow_path, done; | |
2347 InlineArrayAllocation(compiler, length, &slow_path, &done); | |
2348 __ Bind(&slow_path); | |
2349 __ PushObject(Object::null_object()); // Make room for the result. | |
2350 __ Push(kLengthReg); // length. | |
2351 __ Push(kElemTypeReg); | |
2352 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
2353 kAllocateArrayRuntimeEntry, 2, locs()); | |
2354 __ Drop(2); | |
2355 __ Pop(kResultReg); | |
2356 __ Bind(&done); | |
2357 return; | |
2358 } | |
2359 } | |
2360 | |
2361 __ Bind(&slow_path); | |
2362 const Code& stub = Code::ZoneHandle(compiler->zone(), | |
2363 StubCode::AllocateArray_entry()->code()); | |
2364 compiler->AddStubCallTarget(stub); | |
2365 compiler->GenerateCallWithDeopt(token_pos(), deopt_id(), | |
2366 *StubCode::AllocateArray_entry(), | |
2367 RawPcDescriptors::kOther, locs()); | |
2368 __ Bind(&done); | |
2369 ASSERT(locs()->out(0).reg() == kResultReg); | |
2370 } | |
2371 | |
2372 | |
2373 LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone, | |
2374 bool opt) const { | |
2375 const intptr_t kNumInputs = 1; | |
2376 const intptr_t kNumTemps = | |
2377 (IsUnboxedLoad() && opt) ? 1 : ((IsPotentialUnboxedLoad()) ? 2 : 0); | |
2378 LocationSummary* locs = new (zone) LocationSummary( | |
2379 zone, kNumInputs, kNumTemps, (opt && !IsPotentialUnboxedLoad()) | |
2380 ? LocationSummary::kNoCall | |
2381 : LocationSummary::kCallOnSlowPath); | |
2382 | |
2383 locs->set_in(0, Location::RequiresRegister()); | |
2384 | |
2385 if (IsUnboxedLoad() && opt) { | |
2386 locs->set_temp(0, Location::RequiresRegister()); | |
2387 } else if (IsPotentialUnboxedLoad()) { | |
2388 locs->set_temp(0, opt ? Location::RequiresFpuRegister() | |
2389 : Location::FpuRegisterLocation(D1)); | |
2390 locs->set_temp(1, Location::RequiresRegister()); | |
2391 } | |
2392 locs->set_out(0, Location::RequiresRegister()); | |
2393 return locs; | |
2394 } | |
2395 | |
2396 | |
2397 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2398 ASSERT(sizeof(classid_t) == kInt16Size); | |
2399 | |
2400 Register instance_reg = locs()->in(0).reg(); | |
2401 if (IsUnboxedLoad() && compiler->is_optimizing()) { | |
2402 DRegister result = locs()->out(0).fpu_reg(); | |
2403 Register temp = locs()->temp(0).reg(); | |
2404 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes()); | |
2405 intptr_t cid = field()->UnboxedFieldCid(); | |
2406 switch (cid) { | |
2407 case kDoubleCid: | |
2408 __ LoadDFromOffset(result, temp, | |
2409 Double::value_offset() - kHeapObjectTag); | |
2410 break; | |
2411 default: | |
2412 UNREACHABLE(); | |
2413 } | |
2414 return; | |
2415 } | |
2416 | |
2417 Label done; | |
2418 Register result_reg = locs()->out(0).reg(); | |
2419 if (IsPotentialUnboxedLoad()) { | |
2420 Register temp = locs()->temp(1).reg(); | |
2421 DRegister value = locs()->temp(0).fpu_reg(); | |
2422 | |
2423 Label load_pointer; | |
2424 Label load_double; | |
2425 | |
2426 __ LoadObject(result_reg, Field::ZoneHandle(field()->Original())); | |
2427 | |
2428 FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset()); | |
2429 FieldAddress field_nullability_operand(result_reg, | |
2430 Field::is_nullable_offset()); | |
2431 | |
2432 __ lhu(temp, field_nullability_operand); | |
2433 __ BranchEqual(temp, Immediate(kNullCid), &load_pointer); | |
2434 | |
2435 __ lhu(temp, field_cid_operand); | |
2436 __ BranchEqual(temp, Immediate(kDoubleCid), &load_double); | |
2437 | |
2438 // Fall through. | |
2439 __ b(&load_pointer); | |
2440 | |
2441 if (!compiler->is_optimizing()) { | |
2442 locs()->live_registers()->Add(locs()->in(0)); | |
2443 } | |
2444 | |
2445 { | |
2446 __ Bind(&load_double); | |
2447 BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(), | |
2448 result_reg, temp); | |
2449 __ lw(temp, FieldAddress(instance_reg, offset_in_bytes())); | |
2450 __ LoadDFromOffset(value, temp, Double::value_offset() - kHeapObjectTag); | |
2451 __ StoreDToOffset(value, result_reg, | |
2452 Double::value_offset() - kHeapObjectTag); | |
2453 __ b(&done); | |
2454 } | |
2455 | |
2456 __ Bind(&load_pointer); | |
2457 } | |
2458 __ LoadFieldFromOffset(result_reg, instance_reg, offset_in_bytes()); | |
2459 __ Bind(&done); | |
2460 } | |
2461 | |
2462 | |
2463 LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone, | |
2464 bool opt) const { | |
2465 const intptr_t kNumInputs = 2; | |
2466 const intptr_t kNumTemps = 0; | |
2467 LocationSummary* locs = new (zone) | |
2468 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
2469 locs->set_in(0, Location::RegisterLocation(T0)); // Instant. type args. | |
2470 locs->set_in(1, Location::RegisterLocation(T1)); // Function type args. | |
2471 locs->set_out(0, Location::RegisterLocation(T0)); | |
2472 return locs; | |
2473 } | |
2474 | |
2475 | |
2476 void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2477 __ Comment("InstantiateTypeInstr"); | |
2478 Register instantiator_type_args_reg = locs()->in(0).reg(); | |
2479 Register function_type_args_reg = locs()->in(1).reg(); | |
2480 Register result_reg = locs()->out(0).reg(); | |
2481 | |
2482 // 'instantiator_type_args_reg' is a TypeArguments object (or null). | |
2483 // 'function_type_args_reg' is a TypeArguments object (or null). | |
2484 // A runtime call to instantiate the type is required. | |
2485 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | |
2486 __ LoadObject(TMP, Object::null_object()); | |
2487 __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result. | |
2488 __ LoadObject(TMP, type()); | |
2489 __ sw(TMP, Address(SP, 2 * kWordSize)); | |
2490 __ sw(instantiator_type_args_reg, Address(SP, 1 * kWordSize)); | |
2491 __ sw(function_type_args_reg, Address(SP, 0 * kWordSize)); | |
2492 | |
2493 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
2494 kInstantiateTypeRuntimeEntry, 3, locs()); | |
2495 // Pop instantiated type. | |
2496 __ lw(result_reg, Address(SP, 3 * kWordSize)); | |
2497 | |
2498 // Drop instantiator and uninstantiated type. | |
2499 __ addiu(SP, SP, Immediate(4 * kWordSize)); | |
2500 } | |
2501 | |
2502 | |
2503 LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary( | |
2504 Zone* zone, | |
2505 bool opt) const { | |
2506 const intptr_t kNumInputs = 2; | |
2507 const intptr_t kNumTemps = 0; | |
2508 LocationSummary* locs = new (zone) | |
2509 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
2510 locs->set_in(0, Location::RegisterLocation(T0)); // Instant. type args. | |
2511 locs->set_in(1, Location::RegisterLocation(T1)); // Function type args. | |
2512 locs->set_out(0, Location::RegisterLocation(T0)); | |
2513 return locs; | |
2514 } | |
2515 | |
2516 | |
2517 void InstantiateTypeArgumentsInstr::EmitNativeCode( | |
2518 FlowGraphCompiler* compiler) { | |
2519 __ Comment("InstantiateTypeArgumentsInstr"); | |
2520 Register instantiator_type_args_reg = locs()->in(0).reg(); | |
2521 Register function_type_args_reg = locs()->in(1).reg(); | |
2522 Register result_reg = locs()->out(0).reg(); | |
2523 ASSERT(instantiator_type_args_reg == T0); | |
2524 ASSERT(instantiator_type_args_reg == result_reg); | |
2525 | |
2526 // 'instantiator_type_args_reg' is a TypeArguments object (or null). | |
2527 // 'function_type_args_reg' is a TypeArguments object (or null). | |
2528 ASSERT(!type_arguments().IsUninstantiatedIdentity() && | |
2529 !type_arguments().CanShareInstantiatorTypeArguments( | |
2530 instantiator_class())); | |
2531 // If both the instantiator and function type arguments are null and if the | |
2532 // type argument vector instantiated from null becomes a vector of dynamic, | |
2533 // then use null as the type arguments. | |
2534 Label type_arguments_instantiated; | |
2535 const intptr_t len = type_arguments().Length(); | |
2536 if (type_arguments().IsRawWhenInstantiatedFromRaw(len)) { | |
2537 Label non_null_type_args; | |
2538 __ BranchNotEqual(instantiator_type_args_reg, Object::null_object(), | |
2539 &non_null_type_args); | |
2540 __ BranchEqual(function_type_args_reg, Object::null_object(), | |
2541 &type_arguments_instantiated); | |
2542 __ Bind(&non_null_type_args); | |
2543 } | |
2544 | |
2545 // Lookup cache before calling runtime. | |
2546 // TODO(regis): Consider moving this into a shared stub to reduce | |
2547 // generated code size. | |
2548 __ LoadObject(T2, type_arguments()); | |
2549 __ lw(T2, FieldAddress(T2, TypeArguments::instantiations_offset())); | |
2550 __ AddImmediate(T2, Array::data_offset() - kHeapObjectTag); | |
2551 // The instantiations cache is initialized with Object::zero_array() and is | |
2552 // therefore guaranteed to contain kNoInstantiator. No length check needed. | |
2553 Label loop, next, found, slow_case; | |
2554 __ Bind(&loop); | |
2555 __ lw(T3, Address(T2, 0 * kWordSize)); // Cached instantiator type args. | |
2556 __ bne(T3, T0, &next); | |
2557 __ lw(T4, Address(T2, 1 * kWordSize)); // Cached function type args. | |
2558 __ beq(T4, T1, &found); | |
2559 __ Bind(&next); | |
2560 __ BranchNotEqual(T3, Immediate(Smi::RawValue(StubCode::kNoInstantiator)), | |
2561 &loop); | |
2562 __ delay_slot()->addiu( | |
2563 T2, T2, Immediate(StubCode::kInstantiationSizeInWords * kWordSize)); | |
2564 __ b(&slow_case); | |
2565 __ Bind(&found); | |
2566 __ lw(T0, Address(T2, 2 * kWordSize)); // Cached instantiated args. | |
2567 __ b(&type_arguments_instantiated); | |
2568 | |
2569 __ Bind(&slow_case); | |
2570 // Instantiate non-null type arguments. | |
2571 // A runtime call to instantiate the type arguments is required. | |
2572 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | |
2573 __ LoadObject(TMP, Object::null_object()); | |
2574 __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result. | |
2575 __ LoadObject(TMP, type_arguments()); | |
2576 __ sw(TMP, Address(SP, 2 * kWordSize)); | |
2577 __ sw(instantiator_type_args_reg, Address(SP, 1 * kWordSize)); | |
2578 __ sw(function_type_args_reg, Address(SP, 0 * kWordSize)); | |
2579 | |
2580 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
2581 kInstantiateTypeArgumentsRuntimeEntry, 3, | |
2582 locs()); | |
2583 // Pop instantiated type arguments. | |
2584 __ lw(result_reg, Address(SP, 3 * kWordSize)); | |
2585 // Drop 2 type argument vectors and uninstantiated type arguments. | |
2586 __ addiu(SP, SP, Immediate(4 * kWordSize)); | |
2587 __ Bind(&type_arguments_instantiated); | |
2588 } | |
2589 | |
2590 | |
2591 LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary( | |
2592 Zone* zone, | |
2593 bool opt) const { | |
2594 ASSERT(opt); | |
2595 const intptr_t kNumInputs = 0; | |
2596 const intptr_t kNumTemps = 3; | |
2597 LocationSummary* locs = new (zone) LocationSummary( | |
2598 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
2599 locs->set_temp(0, Location::RegisterLocation(T1)); | |
2600 locs->set_temp(1, Location::RegisterLocation(T2)); | |
2601 locs->set_temp(2, Location::RegisterLocation(T3)); | |
2602 locs->set_out(0, Location::RegisterLocation(V0)); | |
2603 return locs; | |
2604 } | |
2605 | |
2606 | |
2607 class AllocateContextSlowPath : public SlowPathCode { | |
2608 public: | |
2609 explicit AllocateContextSlowPath( | |
2610 AllocateUninitializedContextInstr* instruction) | |
2611 : instruction_(instruction) {} | |
2612 | |
2613 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
2614 __ Comment("AllocateContextSlowPath"); | |
2615 __ Bind(entry_label()); | |
2616 | |
2617 LocationSummary* locs = instruction_->locs(); | |
2618 locs->live_registers()->Remove(locs->out(0)); | |
2619 | |
2620 compiler->SaveLiveRegisters(locs); | |
2621 | |
2622 __ LoadImmediate(T1, instruction_->num_context_variables()); | |
2623 const Code& stub = Code::ZoneHandle( | |
2624 compiler->zone(), StubCode::AllocateContext_entry()->code()); | |
2625 compiler->AddStubCallTarget(stub); | |
2626 compiler->GenerateCall(instruction_->token_pos(), | |
2627 *StubCode::AllocateContext_entry(), | |
2628 RawPcDescriptors::kOther, locs); | |
2629 ASSERT(instruction_->locs()->out(0).reg() == V0); | |
2630 compiler->RestoreLiveRegisters(instruction_->locs()); | |
2631 __ b(exit_label()); | |
2632 } | |
2633 | |
2634 private: | |
2635 AllocateUninitializedContextInstr* instruction_; | |
2636 }; | |
2637 | |
2638 | |
2639 void AllocateUninitializedContextInstr::EmitNativeCode( | |
2640 FlowGraphCompiler* compiler) { | |
2641 Register temp0 = locs()->temp(0).reg(); | |
2642 Register temp1 = locs()->temp(1).reg(); | |
2643 Register temp2 = locs()->temp(2).reg(); | |
2644 Register result = locs()->out(0).reg(); | |
2645 // Try allocate the object. | |
2646 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this); | |
2647 compiler->AddSlowPathCode(slow_path); | |
2648 intptr_t instance_size = Context::InstanceSize(num_context_variables()); | |
2649 | |
2650 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(), | |
2651 result, // instance | |
2652 temp0, temp1, temp2); | |
2653 | |
2654 // Setup up number of context variables field. | |
2655 __ LoadImmediate(temp0, num_context_variables()); | |
2656 __ sw(temp0, FieldAddress(result, Context::num_variables_offset())); | |
2657 | |
2658 __ Bind(slow_path->exit_label()); | |
2659 } | |
2660 | |
2661 | |
2662 LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone, | |
2663 bool opt) const { | |
2664 const intptr_t kNumInputs = 0; | |
2665 const intptr_t kNumTemps = 1; | |
2666 LocationSummary* locs = new (zone) | |
2667 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
2668 locs->set_temp(0, Location::RegisterLocation(T1)); | |
2669 locs->set_out(0, Location::RegisterLocation(V0)); | |
2670 return locs; | |
2671 } | |
2672 | |
2673 | |
2674 void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2675 ASSERT(locs()->temp(0).reg() == T1); | |
2676 ASSERT(locs()->out(0).reg() == V0); | |
2677 | |
2678 __ Comment("AllocateContextInstr"); | |
2679 __ LoadImmediate(T1, num_context_variables()); | |
2680 compiler->GenerateCall(token_pos(), *StubCode::AllocateContext_entry(), | |
2681 RawPcDescriptors::kOther, locs()); | |
2682 } | |
2683 | |
2684 | |
2685 LocationSummary* InitStaticFieldInstr::MakeLocationSummary(Zone* zone, | |
2686 bool opt) const { | |
2687 const intptr_t kNumInputs = 1; | |
2688 const intptr_t kNumTemps = 1; | |
2689 LocationSummary* locs = new (zone) | |
2690 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
2691 locs->set_in(0, Location::RegisterLocation(T0)); | |
2692 locs->set_temp(0, Location::RegisterLocation(T1)); | |
2693 return locs; | |
2694 } | |
2695 | |
2696 | |
2697 void InitStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2698 Register field = locs()->in(0).reg(); | |
2699 Register temp = locs()->temp(0).reg(); | |
2700 | |
2701 Label call_runtime, no_call; | |
2702 __ Comment("InitStaticFieldInstr"); | |
2703 | |
2704 __ lw(temp, FieldAddress(field, Field::static_value_offset())); | |
2705 __ BranchEqual(temp, Object::sentinel(), &call_runtime); | |
2706 __ BranchNotEqual(temp, Object::transition_sentinel(), &no_call); | |
2707 | |
2708 __ Bind(&call_runtime); | |
2709 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
2710 __ LoadObject(TMP, Object::null_object()); | |
2711 __ sw(TMP, Address(SP, 1 * kWordSize)); // Make room for (unused) result. | |
2712 __ sw(field, Address(SP, 0 * kWordSize)); | |
2713 | |
2714 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
2715 kInitStaticFieldRuntimeEntry, 1, locs()); | |
2716 | |
2717 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Purge argument and result. | |
2718 | |
2719 __ Bind(&no_call); | |
2720 } | |
2721 | |
2722 | |
2723 LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone, | |
2724 bool opt) const { | |
2725 const intptr_t kNumInputs = 1; | |
2726 const intptr_t kNumTemps = 0; | |
2727 LocationSummary* locs = new (zone) | |
2728 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
2729 locs->set_in(0, Location::RegisterLocation(T0)); | |
2730 locs->set_out(0, Location::RegisterLocation(T0)); | |
2731 return locs; | |
2732 } | |
2733 | |
2734 | |
2735 void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2736 Register context_value = locs()->in(0).reg(); | |
2737 Register result = locs()->out(0).reg(); | |
2738 | |
2739 __ Comment("CloneContextInstr"); | |
2740 | |
2741 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
2742 __ LoadObject(TMP, Object::null_object()); // Make room for the result. | |
2743 __ sw(TMP, Address(SP, 1 * kWordSize)); | |
2744 __ sw(context_value, Address(SP, 0 * kWordSize)); | |
2745 | |
2746 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
2747 kCloneContextRuntimeEntry, 1, locs()); | |
2748 __ lw(result, Address(SP, 1 * kWordSize)); // Get result (cloned context). | |
2749 __ addiu(SP, SP, Immediate(2 * kWordSize)); | |
2750 } | |
2751 | |
2752 | |
2753 LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone, | |
2754 bool opt) const { | |
2755 UNREACHABLE(); | |
2756 return NULL; | |
2757 } | |
2758 | |
2759 | |
2760 void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2761 __ Bind(compiler->GetJumpLabel(this)); | |
2762 compiler->AddExceptionHandler(catch_try_index(), try_index(), | |
2763 compiler->assembler()->CodeSize(), | |
2764 handler_token_pos(), is_generated(), | |
2765 catch_handler_types_, needs_stacktrace()); | |
2766 // On lazy deoptimization we patch the optimized code here to enter the | |
2767 // deoptimization stub. | |
2768 const intptr_t deopt_id = Thread::ToDeoptAfter(GetDeoptId()); | |
2769 if (compiler->is_optimizing()) { | |
2770 compiler->AddDeoptIndexAtCall(deopt_id); | |
2771 } else { | |
2772 compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id, | |
2773 TokenPosition::kNoSource); | |
2774 } | |
2775 if (HasParallelMove()) { | |
2776 compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); | |
2777 } | |
2778 // Restore SP from FP as we are coming from a throw and the code for | |
2779 // popping arguments has not been run. | |
2780 const intptr_t fp_sp_dist = | |
2781 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; | |
2782 ASSERT(fp_sp_dist <= 0); | |
2783 __ AddImmediate(SP, FP, fp_sp_dist); | |
2784 | |
2785 // Auxiliary variables introduced by the try catch can be captured if we are | |
2786 // inside a function with yield/resume points. In this case we first need | |
2787 // to restore the context to match the context at entry into the closure. | |
2788 if (should_restore_closure_context()) { | |
2789 const ParsedFunction& parsed_function = compiler->parsed_function(); | |
2790 ASSERT(parsed_function.function().IsClosureFunction()); | |
2791 LocalScope* scope = parsed_function.node_sequence()->scope(); | |
2792 | |
2793 LocalVariable* closure_parameter = scope->VariableAt(0); | |
2794 ASSERT(!closure_parameter->is_captured()); | |
2795 __ LoadFromOffset(CTX, FP, closure_parameter->index() * kWordSize); | |
2796 __ LoadFieldFromOffset(CTX, CTX, Closure::context_offset()); | |
2797 | |
2798 const intptr_t context_index = | |
2799 parsed_function.current_context_var()->index(); | |
2800 __ StoreToOffset(CTX, FP, context_index * kWordSize); | |
2801 } | |
2802 | |
2803 // Initialize exception and stack trace variables. | |
2804 if (exception_var().is_captured()) { | |
2805 ASSERT(stacktrace_var().is_captured()); | |
2806 __ StoreIntoObjectOffset(CTX, | |
2807 Context::variable_offset(exception_var().index()), | |
2808 kExceptionObjectReg); | |
2809 __ StoreIntoObjectOffset(CTX, | |
2810 Context::variable_offset(stacktrace_var().index()), | |
2811 kStackTraceObjectReg); | |
2812 } else { | |
2813 // Restore stack and initialize the two exception variables: | |
2814 // exception and stack trace variables. | |
2815 __ StoreToOffset(kExceptionObjectReg, FP, | |
2816 exception_var().index() * kWordSize); | |
2817 __ StoreToOffset(kStackTraceObjectReg, FP, | |
2818 stacktrace_var().index() * kWordSize); | |
2819 } | |
2820 } | |
2821 | |
2822 | |
2823 LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone, | |
2824 bool opt) const { | |
2825 const intptr_t kNumInputs = 0; | |
2826 const intptr_t kNumTemps = 1; | |
2827 LocationSummary* summary = new (zone) LocationSummary( | |
2828 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
2829 summary->set_temp(0, Location::RequiresRegister()); | |
2830 return summary; | |
2831 } | |
2832 | |
2833 | |
2834 class CheckStackOverflowSlowPath : public SlowPathCode { | |
2835 public: | |
2836 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction) | |
2837 : instruction_(instruction) {} | |
2838 | |
2839 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
2840 if (compiler->isolate()->use_osr() && osr_entry_label()->IsLinked()) { | |
2841 Register value = instruction_->locs()->temp(0).reg(); | |
2842 __ Comment("CheckStackOverflowSlowPathOsr"); | |
2843 __ Bind(osr_entry_label()); | |
2844 __ LoadImmediate(value, Thread::kOsrRequest); | |
2845 __ sw(value, Address(THR, Thread::stack_overflow_flags_offset())); | |
2846 } | |
2847 __ Comment("CheckStackOverflowSlowPath"); | |
2848 __ Bind(entry_label()); | |
2849 compiler->SaveLiveRegisters(instruction_->locs()); | |
2850 // pending_deoptimization_env_ is needed to generate a runtime call that | |
2851 // may throw an exception. | |
2852 ASSERT(compiler->pending_deoptimization_env_ == NULL); | |
2853 Environment* env = compiler->SlowPathEnvironmentFor(instruction_); | |
2854 compiler->pending_deoptimization_env_ = env; | |
2855 compiler->GenerateRuntimeCall( | |
2856 instruction_->token_pos(), instruction_->deopt_id(), | |
2857 kStackOverflowRuntimeEntry, 0, instruction_->locs()); | |
2858 | |
2859 if (compiler->isolate()->use_osr() && !compiler->is_optimizing() && | |
2860 instruction_->in_loop()) { | |
2861 // In unoptimized code, record loop stack checks as possible OSR entries. | |
2862 compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry, | |
2863 instruction_->deopt_id(), | |
2864 TokenPosition::kNoSource); | |
2865 } | |
2866 compiler->pending_deoptimization_env_ = NULL; | |
2867 compiler->RestoreLiveRegisters(instruction_->locs()); | |
2868 __ b(exit_label()); | |
2869 } | |
2870 | |
2871 Label* osr_entry_label() { | |
2872 ASSERT(Isolate::Current()->use_osr()); | |
2873 return &osr_entry_label_; | |
2874 } | |
2875 | |
2876 private: | |
2877 CheckStackOverflowInstr* instruction_; | |
2878 Label osr_entry_label_; | |
2879 }; | |
2880 | |
2881 | |
2882 void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
2883 __ Comment("CheckStackOverflowInstr"); | |
2884 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); | |
2885 compiler->AddSlowPathCode(slow_path); | |
2886 | |
2887 __ lw(CMPRES1, Address(THR, Thread::stack_limit_offset())); | |
2888 __ BranchUnsignedLessEqual(SP, CMPRES1, slow_path->entry_label()); | |
2889 if (compiler->CanOSRFunction() && in_loop()) { | |
2890 Register temp = locs()->temp(0).reg(); | |
2891 // In unoptimized code check the usage counter to trigger OSR at loop | |
2892 // stack checks. Use progressively higher thresholds for more deeply | |
2893 // nested loops to attempt to hit outer loops with OSR when possible. | |
2894 __ LoadObject(temp, compiler->parsed_function().function()); | |
2895 intptr_t threshold = | |
2896 FLAG_optimization_counter_threshold * (loop_depth() + 1); | |
2897 __ lw(temp, FieldAddress(temp, Function::usage_counter_offset())); | |
2898 __ BranchSignedGreaterEqual(temp, Immediate(threshold), | |
2899 slow_path->osr_entry_label()); | |
2900 } | |
2901 if (compiler->ForceSlowPathForStackOverflow()) { | |
2902 __ b(slow_path->entry_label()); | |
2903 } | |
2904 __ Bind(slow_path->exit_label()); | |
2905 } | |
2906 | |
2907 | |
2908 static void EmitSmiShiftLeft(FlowGraphCompiler* compiler, | |
2909 BinarySmiOpInstr* shift_left) { | |
2910 const LocationSummary& locs = *shift_left->locs(); | |
2911 Register left = locs.in(0).reg(); | |
2912 Register result = locs.out(0).reg(); | |
2913 Label* deopt = shift_left->CanDeoptimize() | |
2914 ? compiler->AddDeoptStub(shift_left->deopt_id(), | |
2915 ICData::kDeoptBinarySmiOp) | |
2916 : NULL; | |
2917 | |
2918 __ Comment("EmitSmiShiftLeft"); | |
2919 | |
2920 if (locs.in(1).IsConstant()) { | |
2921 const Object& constant = locs.in(1).constant(); | |
2922 ASSERT(constant.IsSmi()); | |
2923 // Immediate shift operation takes 5 bits for the count. | |
2924 const intptr_t kCountLimit = 0x1F; | |
2925 const intptr_t value = Smi::Cast(constant).Value(); | |
2926 ASSERT((0 < value) && (value < kCountLimit)); | |
2927 if (shift_left->can_overflow()) { | |
2928 // Check for overflow (preserve left). | |
2929 __ sll(TMP, left, value); | |
2930 __ sra(CMPRES1, TMP, value); | |
2931 __ bne(CMPRES1, left, deopt); // Overflow. | |
2932 } | |
2933 // Shift for result now we know there is no overflow. | |
2934 __ sll(result, left, value); | |
2935 return; | |
2936 } | |
2937 | |
2938 // Right (locs.in(1)) is not constant. | |
2939 Register right = locs.in(1).reg(); | |
2940 Range* right_range = shift_left->right_range(); | |
2941 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) { | |
2942 // TODO(srdjan): Implement code below for is_truncating(). | |
2943 // If left is constant, we know the maximal allowed size for right. | |
2944 const Object& obj = shift_left->left()->BoundConstant(); | |
2945 if (obj.IsSmi()) { | |
2946 const intptr_t left_int = Smi::Cast(obj).Value(); | |
2947 if (left_int == 0) { | |
2948 __ bltz(right, deopt); | |
2949 __ mov(result, ZR); | |
2950 return; | |
2951 } | |
2952 const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int); | |
2953 const bool right_needs_check = | |
2954 !RangeUtils::IsWithin(right_range, 0, max_right - 1); | |
2955 if (right_needs_check) { | |
2956 const Immediate& max_right_imm = | |
2957 Immediate(reinterpret_cast<int32_t>(Smi::New(max_right))); | |
2958 __ BranchUnsignedGreaterEqual(right, max_right_imm, deopt); | |
2959 } | |
2960 __ SmiUntag(TMP, right); | |
2961 __ sllv(result, left, TMP); | |
2962 } | |
2963 return; | |
2964 } | |
2965 | |
2966 const bool right_needs_check = | |
2967 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1)); | |
2968 if (!shift_left->can_overflow()) { | |
2969 if (right_needs_check) { | |
2970 if (!RangeUtils::IsPositive(right_range)) { | |
2971 ASSERT(shift_left->CanDeoptimize()); | |
2972 __ bltz(right, deopt); | |
2973 } | |
2974 Label done, is_not_zero; | |
2975 | |
2976 __ sltiu(CMPRES1, right, | |
2977 Immediate(reinterpret_cast<int32_t>(Smi::New(Smi::kBits)))); | |
2978 __ movz(result, ZR, CMPRES1); // result = right >= kBits ? 0 : result. | |
2979 __ sra(TMP, right, kSmiTagSize); | |
2980 __ sllv(TMP, left, TMP); | |
2981 // result = right < kBits ? left << right : result. | |
2982 __ movn(result, TMP, CMPRES1); | |
2983 } else { | |
2984 __ sra(TMP, right, kSmiTagSize); | |
2985 __ sllv(result, left, TMP); | |
2986 } | |
2987 } else { | |
2988 if (right_needs_check) { | |
2989 const Immediate& bits_imm = | |
2990 Immediate(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))); | |
2991 ASSERT(shift_left->CanDeoptimize()); | |
2992 __ BranchUnsignedGreaterEqual(right, bits_imm, deopt); | |
2993 } | |
2994 // Left is not a constant. | |
2995 Register temp = locs.temp(0).reg(); | |
2996 // Check if count too large for handling it inlined. | |
2997 __ SmiUntag(temp, right); | |
2998 // Overflow test (preserve left, right, and temp); | |
2999 __ sllv(CMPRES1, left, temp); | |
3000 __ srav(CMPRES1, CMPRES1, temp); | |
3001 __ bne(CMPRES1, left, deopt); // Overflow. | |
3002 // Shift for result now we know there is no overflow. | |
3003 __ sllv(result, left, temp); | |
3004 } | |
3005 } | |
3006 | |
3007 | |
3008 class CheckedSmiSlowPath : public SlowPathCode { | |
3009 public: | |
3010 CheckedSmiSlowPath(CheckedSmiOpInstr* instruction, intptr_t try_index) | |
3011 : instruction_(instruction), try_index_(try_index) {} | |
3012 | |
3013 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
3014 if (Assembler::EmittingComments()) { | |
3015 __ Comment("slow path smi operation"); | |
3016 } | |
3017 __ Bind(entry_label()); | |
3018 LocationSummary* locs = instruction_->locs(); | |
3019 Register result = locs->out(0).reg(); | |
3020 locs->live_registers()->Remove(Location::RegisterLocation(result)); | |
3021 | |
3022 compiler->SaveLiveRegisters(locs); | |
3023 if (instruction_->env() != NULL) { | |
3024 Environment* env = compiler->SlowPathEnvironmentFor(instruction_); | |
3025 compiler->pending_deoptimization_env_ = env; | |
3026 } | |
3027 __ Push(locs->in(0).reg()); | |
3028 __ Push(locs->in(1).reg()); | |
3029 const String& selector = | |
3030 String::Handle(instruction_->call()->ic_data()->target_name()); | |
3031 const Array& argument_names = | |
3032 Array::Handle(instruction_->call()->ic_data()->arguments_descriptor()); | |
3033 compiler->EmitMegamorphicInstanceCall( | |
3034 selector, argument_names, instruction_->call()->ArgumentCount(), | |
3035 instruction_->call()->deopt_id(), instruction_->call()->token_pos(), | |
3036 locs, try_index_, | |
3037 /* slow_path_argument_count = */ 2); | |
3038 __ mov(result, V0); | |
3039 compiler->RestoreLiveRegisters(locs); | |
3040 __ b(exit_label()); | |
3041 compiler->pending_deoptimization_env_ = NULL; | |
3042 } | |
3043 | |
3044 private: | |
3045 CheckedSmiOpInstr* instruction_; | |
3046 intptr_t try_index_; | |
3047 }; | |
3048 | |
3049 | |
3050 LocationSummary* CheckedSmiOpInstr::MakeLocationSummary(Zone* zone, | |
3051 bool opt) const { | |
3052 const intptr_t kNumInputs = 2; | |
3053 const intptr_t kNumTemps = 0; | |
3054 LocationSummary* summary = new (zone) LocationSummary( | |
3055 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
3056 summary->set_in(0, Location::RequiresRegister()); | |
3057 summary->set_in(1, Location::RequiresRegister()); | |
3058 summary->set_out(0, Location::RequiresRegister()); | |
3059 return summary; | |
3060 } | |
3061 | |
3062 | |
3063 void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3064 CheckedSmiSlowPath* slow_path = | |
3065 new CheckedSmiSlowPath(this, compiler->CurrentTryIndex()); | |
3066 compiler->AddSlowPathCode(slow_path); | |
3067 // Test operands if necessary. | |
3068 Register left = locs()->in(0).reg(); | |
3069 Register right = locs()->in(1).reg(); | |
3070 Register result = locs()->out(0).reg(); | |
3071 intptr_t left_cid = this->left()->Type()->ToCid(); | |
3072 intptr_t right_cid = this->right()->Type()->ToCid(); | |
3073 bool combined_smi_check = false; | |
3074 if (this->left()->definition() == this->right()->definition()) { | |
3075 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); | |
3076 } else if (left_cid == kSmiCid) { | |
3077 __ andi(CMPRES1, right, Immediate(kSmiTagMask)); | |
3078 } else if (right_cid == kSmiCid) { | |
3079 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); | |
3080 } else { | |
3081 combined_smi_check = true; | |
3082 __ or_(result, left, right); | |
3083 __ andi(CMPRES1, result, Immediate(kSmiTagMask)); | |
3084 } | |
3085 __ bne(CMPRES1, ZR, slow_path->entry_label()); | |
3086 switch (op_kind()) { | |
3087 case Token::kADD: | |
3088 __ AdduDetectOverflow(result, left, right, CMPRES1); | |
3089 __ bltz(CMPRES1, slow_path->entry_label()); | |
3090 break; | |
3091 case Token::kSUB: | |
3092 __ SubuDetectOverflow(result, left, right, CMPRES1); | |
3093 __ bltz(CMPRES1, slow_path->entry_label()); | |
3094 break; | |
3095 case Token::kMUL: | |
3096 __ sra(TMP, left, kSmiTagSize); | |
3097 __ mult(TMP, right); | |
3098 __ mflo(result); | |
3099 __ mfhi(CMPRES2); | |
3100 __ sra(CMPRES1, result, 31); | |
3101 __ bne(CMPRES1, CMPRES2, slow_path->entry_label()); | |
3102 break; | |
3103 case Token::kBIT_OR: | |
3104 // Operation part of combined smi check. | |
3105 if (!combined_smi_check) { | |
3106 __ or_(result, left, right); | |
3107 } | |
3108 break; | |
3109 case Token::kBIT_AND: | |
3110 __ and_(result, left, right); | |
3111 break; | |
3112 case Token::kBIT_XOR: | |
3113 __ xor_(result, left, right); | |
3114 break; | |
3115 case Token::kSHL: | |
3116 ASSERT(result != left); | |
3117 ASSERT(result != right); | |
3118 __ BranchUnsignedGreater(right, Immediate(Smi::RawValue(Smi::kBits)), | |
3119 slow_path->entry_label()); | |
3120 // Check for overflow by shifting left and shifting back arithmetically. | |
3121 // If the result is different from the original, there was overflow. | |
3122 __ delay_slot()->SmiUntag(TMP, right); | |
3123 __ sllv(result, left, TMP); | |
3124 __ srav(CMPRES1, result, TMP); | |
3125 __ bne(CMPRES1, left, slow_path->entry_label()); | |
3126 break; | |
3127 case Token::kSHR: | |
3128 __ BranchUnsignedGreater(right, Immediate(Smi::RawValue(Smi::kBits)), | |
3129 slow_path->entry_label()); | |
3130 __ delay_slot()->SmiUntag(result, right); | |
3131 __ SmiUntag(TMP, left); | |
3132 __ srav(result, TMP, result); | |
3133 __ SmiTag(result); | |
3134 break; | |
3135 default: | |
3136 UNIMPLEMENTED(); | |
3137 } | |
3138 __ Bind(slow_path->exit_label()); | |
3139 } | |
3140 | |
3141 | |
3142 class CheckedSmiComparisonSlowPath : public SlowPathCode { | |
3143 public: | |
3144 CheckedSmiComparisonSlowPath(CheckedSmiComparisonInstr* instruction, | |
3145 intptr_t try_index, | |
3146 BranchLabels labels, | |
3147 bool merged) | |
3148 : instruction_(instruction), | |
3149 try_index_(try_index), | |
3150 labels_(labels), | |
3151 merged_(merged) {} | |
3152 | |
3153 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
3154 if (Assembler::EmittingComments()) { | |
3155 __ Comment("slow path smi operation"); | |
3156 } | |
3157 __ Bind(entry_label()); | |
3158 LocationSummary* locs = instruction_->locs(); | |
3159 Register result = merged_ ? locs->temp(0).reg() : locs->out(0).reg(); | |
3160 locs->live_registers()->Remove(Location::RegisterLocation(result)); | |
3161 | |
3162 compiler->SaveLiveRegisters(locs); | |
3163 if (instruction_->env() != NULL) { | |
3164 Environment* env = compiler->SlowPathEnvironmentFor(instruction_); | |
3165 compiler->pending_deoptimization_env_ = env; | |
3166 } | |
3167 __ Push(locs->in(0).reg()); | |
3168 __ Push(locs->in(1).reg()); | |
3169 String& selector = | |
3170 String::Handle(instruction_->call()->ic_data()->target_name()); | |
3171 Array& argument_names = | |
3172 Array::Handle(instruction_->call()->ic_data()->arguments_descriptor()); | |
3173 compiler->EmitMegamorphicInstanceCall( | |
3174 selector, argument_names, instruction_->call()->ArgumentCount(), | |
3175 instruction_->call()->deopt_id(), instruction_->call()->token_pos(), | |
3176 locs, try_index_, | |
3177 /* slow_path_argument_count = */ 2); | |
3178 __ mov(result, V0); | |
3179 compiler->RestoreLiveRegisters(locs); | |
3180 compiler->pending_deoptimization_env_ = NULL; | |
3181 if (merged_) { | |
3182 __ BranchEqual(result, Bool::True(), instruction_->is_negated() | |
3183 ? labels_.false_label | |
3184 : labels_.true_label); | |
3185 __ b(instruction_->is_negated() ? labels_.true_label | |
3186 : labels_.false_label); | |
3187 } else { | |
3188 __ b(exit_label()); | |
3189 } | |
3190 } | |
3191 | |
3192 private: | |
3193 CheckedSmiComparisonInstr* instruction_; | |
3194 intptr_t try_index_; | |
3195 BranchLabels labels_; | |
3196 bool merged_; | |
3197 }; | |
3198 | |
3199 | |
3200 LocationSummary* CheckedSmiComparisonInstr::MakeLocationSummary( | |
3201 Zone* zone, | |
3202 bool opt) const { | |
3203 const intptr_t kNumInputs = 2; | |
3204 const intptr_t kNumTemps = 1; | |
3205 LocationSummary* summary = new (zone) LocationSummary( | |
3206 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
3207 summary->set_in(0, Location::RequiresRegister()); | |
3208 summary->set_in(1, Location::RequiresRegister()); | |
3209 summary->set_temp(0, Location::RequiresRegister()); | |
3210 summary->set_out(0, Location::RequiresRegister()); | |
3211 return summary; | |
3212 } | |
3213 | |
3214 | |
3215 Condition CheckedSmiComparisonInstr::EmitComparisonCode( | |
3216 FlowGraphCompiler* compiler, | |
3217 BranchLabels labels) { | |
3218 return EmitSmiComparisonOp(compiler, *locs(), kind()); | |
3219 } | |
3220 | |
3221 | |
3222 #define EMIT_SMI_CHECK \ | |
3223 Register left = locs()->in(0).reg(); \ | |
3224 Register right = locs()->in(1).reg(); \ | |
3225 Register temp = locs()->temp(0).reg(); \ | |
3226 intptr_t left_cid = this->left()->Type()->ToCid(); \ | |
3227 intptr_t right_cid = this->right()->Type()->ToCid(); \ | |
3228 if (this->left()->definition() == this->right()->definition()) { \ | |
3229 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); \ | |
3230 } else if (left_cid == kSmiCid) { \ | |
3231 __ andi(CMPRES1, right, Immediate(kSmiTagMask)); \ | |
3232 } else if (right_cid == kSmiCid) { \ | |
3233 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); \ | |
3234 } else { \ | |
3235 __ or_(temp, left, right); \ | |
3236 __ andi(CMPRES1, temp, Immediate(kSmiTagMask)); \ | |
3237 } \ | |
3238 __ bne(CMPRES1, ZR, slow_path->entry_label()); | |
3239 | |
3240 | |
3241 void CheckedSmiComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
3242 BranchInstr* branch) { | |
3243 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
3244 CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath( | |
3245 this, compiler->CurrentTryIndex(), labels, | |
3246 /* merged = */ true); | |
3247 compiler->AddSlowPathCode(slow_path); | |
3248 EMIT_SMI_CHECK; | |
3249 Condition true_condition = EmitComparisonCode(compiler, labels); | |
3250 EmitBranchOnCondition(compiler, true_condition, labels); | |
3251 __ Bind(slow_path->exit_label()); | |
3252 } | |
3253 | |
3254 | |
3255 void CheckedSmiComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3256 Label true_label, false_label, done; | |
3257 BranchLabels labels = {&true_label, &false_label, &false_label}; | |
3258 CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath( | |
3259 this, compiler->CurrentTryIndex(), labels, | |
3260 /* merged = */ false); | |
3261 compiler->AddSlowPathCode(slow_path); | |
3262 EMIT_SMI_CHECK; | |
3263 Condition true_condition = EmitComparisonCode(compiler, labels); | |
3264 EmitBranchOnCondition(compiler, true_condition, labels); | |
3265 Register result = locs()->out(0).reg(); | |
3266 __ Bind(&false_label); | |
3267 __ LoadObject(result, Bool::False()); | |
3268 __ b(&done); | |
3269 __ Bind(&true_label); | |
3270 __ LoadObject(result, Bool::True()); | |
3271 __ Bind(&done); | |
3272 __ Bind(slow_path->exit_label()); | |
3273 } | |
3274 | |
3275 | |
3276 LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone, | |
3277 bool opt) const { | |
3278 const intptr_t kNumInputs = 2; | |
3279 const intptr_t kNumTemps = | |
3280 ((op_kind() == Token::kADD) || (op_kind() == Token::kMOD) || | |
3281 (op_kind() == Token::kTRUNCDIV) || | |
3282 (((op_kind() == Token::kSHL) && can_overflow()) || | |
3283 (op_kind() == Token::kSHR))) | |
3284 ? 1 | |
3285 : 0; | |
3286 LocationSummary* summary = new (zone) | |
3287 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
3288 if (op_kind() == Token::kTRUNCDIV) { | |
3289 summary->set_in(0, Location::RequiresRegister()); | |
3290 if (RightIsPowerOfTwoConstant()) { | |
3291 ConstantInstr* right_constant = right()->definition()->AsConstant(); | |
3292 summary->set_in(1, Location::Constant(right_constant)); | |
3293 } else { | |
3294 summary->set_in(1, Location::RequiresRegister()); | |
3295 } | |
3296 summary->set_temp(0, Location::RequiresRegister()); | |
3297 summary->set_out(0, Location::RequiresRegister()); | |
3298 return summary; | |
3299 } | |
3300 if (op_kind() == Token::kMOD) { | |
3301 summary->set_in(0, Location::RequiresRegister()); | |
3302 summary->set_in(1, Location::RequiresRegister()); | |
3303 summary->set_temp(0, Location::RequiresRegister()); | |
3304 summary->set_out(0, Location::RequiresRegister()); | |
3305 return summary; | |
3306 } | |
3307 summary->set_in(0, Location::RequiresRegister()); | |
3308 summary->set_in(1, Location::RegisterOrSmiConstant(right())); | |
3309 if (((op_kind() == Token::kSHL) && can_overflow()) || | |
3310 (op_kind() == Token::kSHR)) { | |
3311 summary->set_temp(0, Location::RequiresRegister()); | |
3312 } else if (op_kind() == Token::kADD) { | |
3313 // Need an extra temp for the overflow detection code. | |
3314 summary->set_temp(0, Location::RequiresRegister()); | |
3315 } | |
3316 // We make use of 3-operand instructions by not requiring result register | |
3317 // to be identical to first input register as on Intel. | |
3318 summary->set_out(0, Location::RequiresRegister()); | |
3319 return summary; | |
3320 } | |
3321 | |
3322 | |
3323 void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3324 __ Comment("BinarySmiOpInstr"); | |
3325 if (op_kind() == Token::kSHL) { | |
3326 EmitSmiShiftLeft(compiler, this); | |
3327 return; | |
3328 } | |
3329 | |
3330 Register left = locs()->in(0).reg(); | |
3331 Register result = locs()->out(0).reg(); | |
3332 Label* deopt = NULL; | |
3333 if (CanDeoptimize()) { | |
3334 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); | |
3335 } | |
3336 | |
3337 if (locs()->in(1).IsConstant()) { | |
3338 const Object& constant = locs()->in(1).constant(); | |
3339 ASSERT(constant.IsSmi()); | |
3340 const int32_t imm = reinterpret_cast<int32_t>(constant.raw()); | |
3341 switch (op_kind()) { | |
3342 case Token::kADD: { | |
3343 if (deopt == NULL) { | |
3344 __ AddImmediate(result, left, imm); | |
3345 } else { | |
3346 Register temp = locs()->temp(0).reg(); | |
3347 __ AddImmediateDetectOverflow(result, left, imm, CMPRES1, temp); | |
3348 __ bltz(CMPRES1, deopt); | |
3349 } | |
3350 break; | |
3351 } | |
3352 case Token::kSUB: { | |
3353 __ Comment("kSUB imm"); | |
3354 if (deopt == NULL) { | |
3355 __ AddImmediate(result, left, -imm); | |
3356 } else { | |
3357 __ SubImmediateDetectOverflow(result, left, imm, CMPRES1); | |
3358 __ bltz(CMPRES1, deopt); | |
3359 } | |
3360 break; | |
3361 } | |
3362 case Token::kMUL: { | |
3363 // Keep left value tagged and untag right value. | |
3364 const intptr_t value = Smi::Cast(constant).Value(); | |
3365 __ LoadImmediate(TMP, value); | |
3366 __ mult(left, TMP); | |
3367 __ mflo(result); | |
3368 if (deopt != NULL) { | |
3369 __ mfhi(CMPRES2); | |
3370 __ sra(CMPRES1, result, 31); | |
3371 __ bne(CMPRES1, CMPRES2, deopt); | |
3372 } | |
3373 break; | |
3374 } | |
3375 case Token::kTRUNCDIV: { | |
3376 const intptr_t value = Smi::Cast(constant).Value(); | |
3377 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value))); | |
3378 const intptr_t shift_count = | |
3379 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize; | |
3380 ASSERT(kSmiTagSize == 1); | |
3381 __ sra(TMP, left, 31); | |
3382 ASSERT(shift_count > 1); // 1, -1 case handled above. | |
3383 Register temp = locs()->temp(0).reg(); | |
3384 __ srl(TMP, TMP, 32 - shift_count); | |
3385 __ addu(temp, left, TMP); | |
3386 ASSERT(shift_count > 0); | |
3387 __ sra(result, temp, shift_count); | |
3388 if (value < 0) { | |
3389 __ subu(result, ZR, result); | |
3390 } | |
3391 __ SmiTag(result); | |
3392 break; | |
3393 } | |
3394 case Token::kBIT_AND: { | |
3395 // No overflow check. | |
3396 __ AndImmediate(result, left, imm); | |
3397 break; | |
3398 } | |
3399 case Token::kBIT_OR: { | |
3400 // No overflow check. | |
3401 __ OrImmediate(result, left, imm); | |
3402 break; | |
3403 } | |
3404 case Token::kBIT_XOR: { | |
3405 // No overflow check. | |
3406 __ XorImmediate(result, left, imm); | |
3407 break; | |
3408 } | |
3409 case Token::kSHR: { | |
3410 // sarl operation masks the count to 5 bits. | |
3411 const intptr_t kCountLimit = 0x1F; | |
3412 const intptr_t value = Smi::Cast(constant).Value(); | |
3413 __ Comment("kSHR"); | |
3414 __ sra(result, left, Utils::Minimum(value + kSmiTagSize, kCountLimit)); | |
3415 __ SmiTag(result); | |
3416 break; | |
3417 } | |
3418 | |
3419 default: | |
3420 UNREACHABLE(); | |
3421 break; | |
3422 } | |
3423 return; | |
3424 } | |
3425 | |
3426 Register right = locs()->in(1).reg(); | |
3427 switch (op_kind()) { | |
3428 case Token::kADD: { | |
3429 if (deopt == NULL) { | |
3430 __ addu(result, left, right); | |
3431 } else { | |
3432 Register temp = locs()->temp(0).reg(); | |
3433 __ AdduDetectOverflow(result, left, right, CMPRES1, temp); | |
3434 __ bltz(CMPRES1, deopt); | |
3435 } | |
3436 break; | |
3437 } | |
3438 case Token::kSUB: { | |
3439 __ Comment("kSUB"); | |
3440 if (deopt == NULL) { | |
3441 __ subu(result, left, right); | |
3442 } else { | |
3443 __ SubuDetectOverflow(result, left, right, CMPRES1); | |
3444 __ bltz(CMPRES1, deopt); | |
3445 } | |
3446 break; | |
3447 } | |
3448 case Token::kMUL: { | |
3449 __ Comment("kMUL"); | |
3450 __ sra(TMP, left, kSmiTagSize); | |
3451 __ mult(TMP, right); | |
3452 __ mflo(result); | |
3453 if (deopt != NULL) { | |
3454 __ mfhi(CMPRES2); | |
3455 __ sra(CMPRES1, result, 31); | |
3456 __ bne(CMPRES1, CMPRES2, deopt); | |
3457 } | |
3458 break; | |
3459 } | |
3460 case Token::kBIT_AND: { | |
3461 // No overflow check. | |
3462 __ and_(result, left, right); | |
3463 break; | |
3464 } | |
3465 case Token::kBIT_OR: { | |
3466 // No overflow check. | |
3467 __ or_(result, left, right); | |
3468 break; | |
3469 } | |
3470 case Token::kBIT_XOR: { | |
3471 // No overflow check. | |
3472 __ xor_(result, left, right); | |
3473 break; | |
3474 } | |
3475 case Token::kTRUNCDIV: { | |
3476 if (RangeUtils::CanBeZero(right_range())) { | |
3477 // Handle divide by zero in runtime. | |
3478 __ beq(right, ZR, deopt); | |
3479 } | |
3480 Register temp = locs()->temp(0).reg(); | |
3481 __ SmiUntag(temp, left); | |
3482 __ SmiUntag(TMP, right); | |
3483 __ div(temp, TMP); | |
3484 __ mflo(result); | |
3485 // Check the corner case of dividing the 'MIN_SMI' with -1, in which | |
3486 // case we cannot tag the result. | |
3487 __ BranchEqual(result, Immediate(0x40000000), deopt); | |
3488 __ SmiTag(result); | |
3489 break; | |
3490 } | |
3491 case Token::kMOD: { | |
3492 if (RangeUtils::CanBeZero(right_range())) { | |
3493 // Handle divide by zero in runtime. | |
3494 __ beq(right, ZR, deopt); | |
3495 } | |
3496 Register temp = locs()->temp(0).reg(); | |
3497 __ SmiUntag(temp, left); | |
3498 __ SmiUntag(TMP, right); | |
3499 __ div(temp, TMP); | |
3500 __ mfhi(result); | |
3501 // res = left % right; | |
3502 // if (res < 0) { | |
3503 // if (right < 0) { | |
3504 // res = res - right; | |
3505 // } else { | |
3506 // res = res + right; | |
3507 // } | |
3508 // } | |
3509 Label done; | |
3510 __ bgez(result, &done); | |
3511 if (RangeUtils::Overlaps(right_range(), -1, 1)) { | |
3512 Label subtract; | |
3513 __ bltz(right, &subtract); | |
3514 __ addu(result, result, TMP); | |
3515 __ b(&done); | |
3516 __ Bind(&subtract); | |
3517 __ subu(result, result, TMP); | |
3518 } else if (right_range()->IsPositive()) { | |
3519 // Right is positive. | |
3520 __ addu(result, result, TMP); | |
3521 } else { | |
3522 // Right is negative. | |
3523 __ subu(result, result, TMP); | |
3524 } | |
3525 __ Bind(&done); | |
3526 __ SmiTag(result); | |
3527 break; | |
3528 } | |
3529 case Token::kSHR: { | |
3530 Register temp = locs()->temp(0).reg(); | |
3531 if (CanDeoptimize()) { | |
3532 __ bltz(right, deopt); | |
3533 } | |
3534 __ SmiUntag(temp, right); | |
3535 // sra operation masks the count to 5 bits. | |
3536 const intptr_t kCountLimit = 0x1F; | |
3537 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) { | |
3538 Label ok; | |
3539 __ BranchSignedLessEqual(temp, Immediate(kCountLimit), &ok); | |
3540 __ LoadImmediate(temp, kCountLimit); | |
3541 __ Bind(&ok); | |
3542 } | |
3543 | |
3544 __ SmiUntag(CMPRES1, left); | |
3545 __ srav(result, CMPRES1, temp); | |
3546 __ SmiTag(result); | |
3547 break; | |
3548 } | |
3549 case Token::kDIV: { | |
3550 // Dispatches to 'Double./'. | |
3551 // TODO(srdjan): Implement as conversion to double and double division. | |
3552 UNREACHABLE(); | |
3553 break; | |
3554 } | |
3555 case Token::kOR: | |
3556 case Token::kAND: { | |
3557 // Flow graph builder has dissected this operation to guarantee correct | |
3558 // behavior (short-circuit evaluation). | |
3559 UNREACHABLE(); | |
3560 break; | |
3561 } | |
3562 default: | |
3563 UNREACHABLE(); | |
3564 break; | |
3565 } | |
3566 } | |
3567 | |
3568 | |
3569 LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone, | |
3570 bool opt) const { | |
3571 intptr_t left_cid = left()->Type()->ToCid(); | |
3572 intptr_t right_cid = right()->Type()->ToCid(); | |
3573 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid)); | |
3574 const intptr_t kNumInputs = 2; | |
3575 const intptr_t kNumTemps = 0; | |
3576 LocationSummary* summary = new (zone) | |
3577 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
3578 summary->set_in(0, Location::RequiresRegister()); | |
3579 summary->set_in(1, Location::RequiresRegister()); | |
3580 return summary; | |
3581 } | |
3582 | |
3583 | |
3584 void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3585 Label* deopt = | |
3586 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp, | |
3587 licm_hoisted_ ? ICData::kHoisted : 0); | |
3588 intptr_t left_cid = left()->Type()->ToCid(); | |
3589 intptr_t right_cid = right()->Type()->ToCid(); | |
3590 Register left = locs()->in(0).reg(); | |
3591 Register right = locs()->in(1).reg(); | |
3592 if (this->left()->definition() == this->right()->definition()) { | |
3593 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); | |
3594 } else if (left_cid == kSmiCid) { | |
3595 __ andi(CMPRES1, right, Immediate(kSmiTagMask)); | |
3596 } else if (right_cid == kSmiCid) { | |
3597 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); | |
3598 } else { | |
3599 __ or_(TMP, left, right); | |
3600 __ andi(CMPRES1, TMP, Immediate(kSmiTagMask)); | |
3601 } | |
3602 __ beq(CMPRES1, ZR, deopt); | |
3603 } | |
3604 | |
3605 | |
3606 LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
3607 const intptr_t kNumInputs = 1; | |
3608 const intptr_t kNumTemps = 1; | |
3609 LocationSummary* summary = new (zone) LocationSummary( | |
3610 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
3611 summary->set_in(0, Location::RequiresFpuRegister()); | |
3612 summary->set_temp(0, Location::RequiresRegister()); | |
3613 summary->set_out(0, Location::RequiresRegister()); | |
3614 return summary; | |
3615 } | |
3616 | |
3617 | |
3618 void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3619 ASSERT(from_representation() == kUnboxedDouble); | |
3620 | |
3621 Register out_reg = locs()->out(0).reg(); | |
3622 DRegister value = locs()->in(0).fpu_reg(); | |
3623 | |
3624 BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(), | |
3625 out_reg, locs()->temp(0).reg()); | |
3626 __ StoreDToOffset(value, out_reg, Double::value_offset() - kHeapObjectTag); | |
3627 } | |
3628 | |
3629 | |
3630 LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
3631 const intptr_t kNumInputs = 1; | |
3632 const intptr_t kNumTemps = 0; | |
3633 LocationSummary* summary = new (zone) | |
3634 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
3635 summary->set_in(0, Location::RequiresRegister()); | |
3636 if (representation() == kUnboxedMint) { | |
3637 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
3638 Location::RequiresRegister())); | |
3639 } else { | |
3640 summary->set_out(0, Location::RequiresFpuRegister()); | |
3641 } | |
3642 return summary; | |
3643 } | |
3644 | |
3645 | |
3646 void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) { | |
3647 const Register box = locs()->in(0).reg(); | |
3648 | |
3649 switch (representation()) { | |
3650 case kUnboxedMint: { | |
3651 PairLocation* result = locs()->out(0).AsPairLocation(); | |
3652 __ LoadFromOffset(result->At(0).reg(), box, | |
3653 ValueOffset() - kHeapObjectTag); | |
3654 __ LoadFromOffset(result->At(1).reg(), box, | |
3655 ValueOffset() - kHeapObjectTag + kWordSize); | |
3656 break; | |
3657 } | |
3658 | |
3659 case kUnboxedDouble: { | |
3660 const DRegister result = locs()->out(0).fpu_reg(); | |
3661 __ LoadDFromOffset(result, box, Double::value_offset() - kHeapObjectTag); | |
3662 break; | |
3663 } | |
3664 | |
3665 case kUnboxedFloat32x4: | |
3666 case kUnboxedFloat64x2: | |
3667 case kUnboxedInt32x4: { | |
3668 UNIMPLEMENTED(); | |
3669 break; | |
3670 } | |
3671 | |
3672 default: | |
3673 UNREACHABLE(); | |
3674 break; | |
3675 } | |
3676 } | |
3677 | |
3678 | |
3679 void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) { | |
3680 const Register box = locs()->in(0).reg(); | |
3681 | |
3682 switch (representation()) { | |
3683 case kUnboxedMint: { | |
3684 PairLocation* result = locs()->out(0).AsPairLocation(); | |
3685 __ SmiUntag(result->At(0).reg(), box); | |
3686 __ sra(result->At(1).reg(), result->At(0).reg(), 31); | |
3687 break; | |
3688 } | |
3689 | |
3690 case kUnboxedDouble: { | |
3691 const DRegister result = locs()->out(0).fpu_reg(); | |
3692 __ SmiUntag(TMP, box); | |
3693 __ mtc1(TMP, STMP1); | |
3694 __ cvtdw(result, STMP1); | |
3695 break; | |
3696 } | |
3697 | |
3698 default: | |
3699 UNREACHABLE(); | |
3700 break; | |
3701 } | |
3702 } | |
3703 | |
3704 | |
3705 void UnboxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3706 const intptr_t value_cid = value()->Type()->ToCid(); | |
3707 const intptr_t box_cid = BoxCid(); | |
3708 | |
3709 if (value_cid == box_cid) { | |
3710 EmitLoadFromBox(compiler); | |
3711 } else if (CanConvertSmi() && (value_cid == kSmiCid)) { | |
3712 EmitSmiConversion(compiler); | |
3713 } else { | |
3714 const Register box = locs()->in(0).reg(); | |
3715 Label* deopt = | |
3716 compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptCheckClass); | |
3717 Label is_smi; | |
3718 | |
3719 if ((value()->Type()->ToNullableCid() == box_cid) && | |
3720 value()->Type()->is_nullable()) { | |
3721 __ BranchEqual(box, Object::null_object(), deopt); | |
3722 } else { | |
3723 __ andi(CMPRES1, box, Immediate(kSmiTagMask)); | |
3724 __ beq(CMPRES1, ZR, CanConvertSmi() ? &is_smi : deopt); | |
3725 __ LoadClassId(CMPRES1, box); | |
3726 __ BranchNotEqual(CMPRES1, Immediate(box_cid), deopt); | |
3727 } | |
3728 | |
3729 EmitLoadFromBox(compiler); | |
3730 | |
3731 if (is_smi.IsLinked()) { | |
3732 Label done; | |
3733 __ b(&done); | |
3734 __ Bind(&is_smi); | |
3735 EmitSmiConversion(compiler); | |
3736 __ Bind(&done); | |
3737 } | |
3738 } | |
3739 } | |
3740 | |
3741 | |
3742 LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone, | |
3743 bool opt) const { | |
3744 ASSERT((from_representation() == kUnboxedInt32) || | |
3745 (from_representation() == kUnboxedUint32)); | |
3746 const intptr_t kNumInputs = 1; | |
3747 const intptr_t kNumTemps = 1; | |
3748 LocationSummary* summary = new (zone) LocationSummary( | |
3749 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
3750 summary->set_in(0, Location::RequiresRegister()); | |
3751 summary->set_temp(0, Location::RequiresRegister()); | |
3752 summary->set_out(0, Location::RequiresRegister()); | |
3753 return summary; | |
3754 } | |
3755 | |
3756 | |
3757 void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3758 Register value = locs()->in(0).reg(); | |
3759 Register out = locs()->out(0).reg(); | |
3760 ASSERT(value != out); | |
3761 | |
3762 __ SmiTag(out, value); | |
3763 if (!ValueFitsSmi()) { | |
3764 Register temp = locs()->temp(0).reg(); | |
3765 Label done; | |
3766 if (from_representation() == kUnboxedInt32) { | |
3767 __ SmiUntag(CMPRES1, out); | |
3768 __ BranchEqual(CMPRES1, value, &done); | |
3769 } else { | |
3770 ASSERT(from_representation() == kUnboxedUint32); | |
3771 __ AndImmediate(CMPRES1, value, 0xC0000000); | |
3772 __ BranchEqual(CMPRES1, ZR, &done); | |
3773 } | |
3774 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out, | |
3775 temp); | |
3776 Register hi; | |
3777 if (from_representation() == kUnboxedInt32) { | |
3778 hi = temp; | |
3779 __ sra(hi, value, kBitsPerWord - 1); | |
3780 } else { | |
3781 ASSERT(from_representation() == kUnboxedUint32); | |
3782 hi = ZR; | |
3783 } | |
3784 __ StoreToOffset(value, out, Mint::value_offset() - kHeapObjectTag); | |
3785 __ StoreToOffset(hi, out, | |
3786 Mint::value_offset() - kHeapObjectTag + kWordSize); | |
3787 __ Bind(&done); | |
3788 } | |
3789 } | |
3790 | |
3791 | |
3792 LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone, | |
3793 bool opt) const { | |
3794 const intptr_t kNumInputs = 1; | |
3795 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1; | |
3796 LocationSummary* summary = new (zone) | |
3797 LocationSummary(zone, kNumInputs, kNumTemps, | |
3798 ValueFitsSmi() ? LocationSummary::kNoCall | |
3799 : LocationSummary::kCallOnSlowPath); | |
3800 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
3801 Location::RequiresRegister())); | |
3802 if (!ValueFitsSmi()) { | |
3803 summary->set_temp(0, Location::RequiresRegister()); | |
3804 } | |
3805 summary->set_out(0, Location::RequiresRegister()); | |
3806 return summary; | |
3807 } | |
3808 | |
3809 | |
3810 void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3811 if (ValueFitsSmi()) { | |
3812 PairLocation* value_pair = locs()->in(0).AsPairLocation(); | |
3813 Register value_lo = value_pair->At(0).reg(); | |
3814 Register out_reg = locs()->out(0).reg(); | |
3815 __ SmiTag(out_reg, value_lo); | |
3816 return; | |
3817 } | |
3818 | |
3819 PairLocation* value_pair = locs()->in(0).AsPairLocation(); | |
3820 Register value_lo = value_pair->At(0).reg(); | |
3821 Register value_hi = value_pair->At(1).reg(); | |
3822 Register tmp = locs()->temp(0).reg(); | |
3823 Register out_reg = locs()->out(0).reg(); | |
3824 | |
3825 Label not_smi, done; | |
3826 __ SmiTag(out_reg, value_lo); | |
3827 __ SmiUntag(tmp, out_reg); | |
3828 __ bne(tmp, value_lo, ¬_smi); | |
3829 __ delay_slot()->sra(tmp, out_reg, 31); | |
3830 __ beq(tmp, value_hi, &done); | |
3831 | |
3832 __ Bind(¬_smi); | |
3833 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), | |
3834 out_reg, tmp); | |
3835 __ StoreToOffset(value_lo, out_reg, Mint::value_offset() - kHeapObjectTag); | |
3836 __ StoreToOffset(value_hi, out_reg, | |
3837 Mint::value_offset() - kHeapObjectTag + kWordSize); | |
3838 __ Bind(&done); | |
3839 } | |
3840 | |
3841 | |
3842 LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone, | |
3843 bool opt) const { | |
3844 ASSERT((representation() == kUnboxedInt32) || | |
3845 (representation() == kUnboxedUint32)); | |
3846 const intptr_t kNumInputs = 1; | |
3847 const intptr_t kNumTemps = 0; | |
3848 LocationSummary* summary = new (zone) | |
3849 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
3850 summary->set_in(0, Location::RequiresRegister()); | |
3851 summary->set_out(0, Location::RequiresRegister()); | |
3852 return summary; | |
3853 } | |
3854 | |
3855 | |
3856 static void LoadInt32FromMint(FlowGraphCompiler* compiler, | |
3857 Register mint, | |
3858 Register result, | |
3859 Label* deopt) { | |
3860 __ LoadFieldFromOffset(result, mint, Mint::value_offset()); | |
3861 if (deopt != NULL) { | |
3862 __ LoadFieldFromOffset(CMPRES1, mint, Mint::value_offset() + kWordSize); | |
3863 __ sra(CMPRES2, result, kBitsPerWord - 1); | |
3864 __ BranchNotEqual(CMPRES1, CMPRES2, deopt); | |
3865 } | |
3866 } | |
3867 | |
3868 | |
3869 void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3870 const intptr_t value_cid = value()->Type()->ToCid(); | |
3871 const Register value = locs()->in(0).reg(); | |
3872 const Register out = locs()->out(0).reg(); | |
3873 Label* deopt = | |
3874 CanDeoptimize() | |
3875 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger) | |
3876 : NULL; | |
3877 Label* out_of_range = !is_truncating() ? deopt : NULL; | |
3878 ASSERT(value != out); | |
3879 | |
3880 if (value_cid == kSmiCid) { | |
3881 __ SmiUntag(out, value); | |
3882 } else if (value_cid == kMintCid) { | |
3883 LoadInt32FromMint(compiler, value, out, out_of_range); | |
3884 } else if (!CanDeoptimize()) { | |
3885 Label done; | |
3886 __ SmiUntag(out, value); | |
3887 __ andi(CMPRES1, value, Immediate(kSmiTagMask)); | |
3888 __ beq(CMPRES1, ZR, &done); | |
3889 LoadInt32FromMint(compiler, value, out, NULL); | |
3890 __ Bind(&done); | |
3891 } else { | |
3892 Label done; | |
3893 __ SmiUntag(out, value); | |
3894 __ andi(CMPRES1, value, Immediate(kSmiTagMask)); | |
3895 __ beq(CMPRES1, ZR, &done); | |
3896 __ LoadClassId(CMPRES1, value); | |
3897 __ BranchNotEqual(CMPRES1, Immediate(kMintCid), deopt); | |
3898 LoadInt32FromMint(compiler, value, out, out_of_range); | |
3899 __ Bind(&done); | |
3900 } | |
3901 } | |
3902 | |
3903 | |
3904 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone, | |
3905 bool opt) const { | |
3906 const intptr_t kNumInputs = 2; | |
3907 const intptr_t kNumTemps = 0; | |
3908 LocationSummary* summary = new (zone) | |
3909 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
3910 summary->set_in(0, Location::RequiresFpuRegister()); | |
3911 summary->set_in(1, Location::RequiresFpuRegister()); | |
3912 summary->set_out(0, Location::RequiresFpuRegister()); | |
3913 return summary; | |
3914 } | |
3915 | |
3916 | |
3917 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
3918 DRegister left = locs()->in(0).fpu_reg(); | |
3919 DRegister right = locs()->in(1).fpu_reg(); | |
3920 DRegister result = locs()->out(0).fpu_reg(); | |
3921 switch (op_kind()) { | |
3922 case Token::kADD: | |
3923 __ addd(result, left, right); | |
3924 break; | |
3925 case Token::kSUB: | |
3926 __ subd(result, left, right); | |
3927 break; | |
3928 case Token::kMUL: | |
3929 __ muld(result, left, right); | |
3930 break; | |
3931 case Token::kDIV: | |
3932 __ divd(result, left, right); | |
3933 break; | |
3934 default: | |
3935 UNREACHABLE(); | |
3936 } | |
3937 } | |
3938 | |
3939 | |
3940 LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone, | |
3941 bool opt) const { | |
3942 const intptr_t kNumInputs = 1; | |
3943 const intptr_t kNumTemps = 0; | |
3944 LocationSummary* summary = new (zone) | |
3945 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
3946 summary->set_in(0, Location::RequiresFpuRegister()); | |
3947 summary->set_out(0, Location::RequiresRegister()); | |
3948 return summary; | |
3949 } | |
3950 | |
3951 | |
3952 Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
3953 BranchLabels labels) { | |
3954 const DRegister value = locs()->in(0).fpu_reg(); | |
3955 const bool is_negated = kind() != Token::kEQ; | |
3956 if (op_kind() == MethodRecognizer::kDouble_getIsNaN) { | |
3957 __ cund(value, value); | |
3958 if (labels.fall_through == labels.true_label) { | |
3959 if (is_negated) { | |
3960 __ bc1t(labels.false_label); | |
3961 } else { | |
3962 __ bc1f(labels.false_label); | |
3963 } | |
3964 } else if (labels.fall_through == labels.false_label) { | |
3965 if (is_negated) { | |
3966 __ bc1f(labels.true_label); | |
3967 } else { | |
3968 __ bc1t(labels.true_label); | |
3969 } | |
3970 } else { | |
3971 if (is_negated) { | |
3972 __ bc1t(labels.false_label); | |
3973 } else { | |
3974 __ bc1f(labels.false_label); | |
3975 } | |
3976 __ b(labels.true_label); | |
3977 } | |
3978 return Condition(); // Unused. | |
3979 } else { | |
3980 ASSERT(op_kind() == MethodRecognizer::kDouble_getIsInfinite); | |
3981 __ mfc1(CMPRES1, EvenFRegisterOf(value)); | |
3982 // If the low word isn't zero, then it isn't infinity. | |
3983 __ bne(CMPRES1, ZR, is_negated ? labels.true_label : labels.false_label); | |
3984 __ mfc1(CMPRES1, OddFRegisterOf(value)); | |
3985 // Mask off the sign bit. | |
3986 __ AndImmediate(CMPRES1, CMPRES1, 0x7FFFFFFF); | |
3987 // Compare with +infinity. | |
3988 __ LoadImmediate(CMPRES2, 0x7FF00000); | |
3989 return Condition(CMPRES1, CMPRES2, is_negated ? NE : EQ); | |
3990 } | |
3991 } | |
3992 | |
3993 void DoubleTestOpInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
3994 BranchInstr* branch) { | |
3995 ASSERT(compiler->is_optimizing()); | |
3996 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
3997 Condition true_condition = EmitComparisonCode(compiler, labels); | |
3998 // Branches for isNaN are emitted in EmitComparisonCode already. | |
3999 if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) { | |
4000 EmitBranchOnCondition(compiler, true_condition, labels); | |
4001 } | |
4002 } | |
4003 | |
4004 | |
4005 void DoubleTestOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4006 Label is_true, is_false; | |
4007 BranchLabels labels = {&is_true, &is_false, &is_false}; | |
4008 Condition true_condition = EmitComparisonCode(compiler, labels); | |
4009 // Branches for isNaN are emitted in EmitComparisonCode already. | |
4010 if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) { | |
4011 EmitBranchOnCondition(compiler, true_condition, labels); | |
4012 } | |
4013 const Register result = locs()->out(0).reg(); | |
4014 Label done; | |
4015 __ Comment("return bool"); | |
4016 __ Bind(&is_false); | |
4017 __ LoadObject(result, Bool::False()); | |
4018 __ b(&done); | |
4019 __ Bind(&is_true); | |
4020 __ LoadObject(result, Bool::True()); | |
4021 __ Bind(&done); | |
4022 } | |
4023 | |
4024 | |
4025 LocationSummary* BinaryFloat32x4OpInstr::MakeLocationSummary(Zone* zone, | |
4026 bool opt) const { | |
4027 UNIMPLEMENTED(); | |
4028 return NULL; | |
4029 } | |
4030 | |
4031 | |
4032 void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4033 UNIMPLEMENTED(); | |
4034 } | |
4035 | |
4036 | |
4037 LocationSummary* BinaryFloat64x2OpInstr::MakeLocationSummary(Zone* zone, | |
4038 bool opt) const { | |
4039 UNIMPLEMENTED(); | |
4040 return NULL; | |
4041 } | |
4042 | |
4043 | |
4044 void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4045 UNIMPLEMENTED(); | |
4046 } | |
4047 | |
4048 | |
4049 LocationSummary* Simd32x4ShuffleInstr::MakeLocationSummary(Zone* zone, | |
4050 bool opt) const { | |
4051 UNIMPLEMENTED(); | |
4052 return NULL; | |
4053 } | |
4054 | |
4055 | |
4056 void Simd32x4ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4057 UNIMPLEMENTED(); | |
4058 } | |
4059 | |
4060 | |
4061 LocationSummary* Simd32x4ShuffleMixInstr::MakeLocationSummary(Zone* zone, | |
4062 bool opt) const { | |
4063 UNIMPLEMENTED(); | |
4064 return NULL; | |
4065 } | |
4066 | |
4067 | |
4068 void Simd32x4ShuffleMixInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4069 UNIMPLEMENTED(); | |
4070 } | |
4071 | |
4072 | |
4073 LocationSummary* Float32x4ConstructorInstr::MakeLocationSummary( | |
4074 Zone* zone, | |
4075 bool opt) const { | |
4076 UNIMPLEMENTED(); | |
4077 return NULL; | |
4078 } | |
4079 | |
4080 | |
4081 void Float32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4082 UNIMPLEMENTED(); | |
4083 } | |
4084 | |
4085 | |
4086 LocationSummary* Float32x4ZeroInstr::MakeLocationSummary(Zone* zone, | |
4087 bool opt) const { | |
4088 UNIMPLEMENTED(); | |
4089 return NULL; | |
4090 } | |
4091 | |
4092 | |
4093 void Float32x4ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4094 UNIMPLEMENTED(); | |
4095 } | |
4096 | |
4097 | |
4098 LocationSummary* Float32x4SplatInstr::MakeLocationSummary(Zone* zone, | |
4099 bool opt) const { | |
4100 UNIMPLEMENTED(); | |
4101 return NULL; | |
4102 } | |
4103 | |
4104 | |
4105 void Float32x4SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4106 UNIMPLEMENTED(); | |
4107 } | |
4108 | |
4109 | |
4110 LocationSummary* Float32x4ComparisonInstr::MakeLocationSummary(Zone* zone, | |
4111 bool opt) const { | |
4112 UNIMPLEMENTED(); | |
4113 return NULL; | |
4114 } | |
4115 | |
4116 | |
4117 void Float32x4ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4118 UNIMPLEMENTED(); | |
4119 } | |
4120 | |
4121 | |
4122 LocationSummary* Float32x4MinMaxInstr::MakeLocationSummary(Zone* zone, | |
4123 bool opt) const { | |
4124 UNIMPLEMENTED(); | |
4125 return NULL; | |
4126 } | |
4127 | |
4128 | |
4129 void Float32x4MinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4130 UNIMPLEMENTED(); | |
4131 } | |
4132 | |
4133 | |
4134 LocationSummary* Float32x4SqrtInstr::MakeLocationSummary(Zone* zone, | |
4135 bool opt) const { | |
4136 UNIMPLEMENTED(); | |
4137 return NULL; | |
4138 } | |
4139 | |
4140 | |
4141 void Float32x4SqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4142 UNIMPLEMENTED(); | |
4143 } | |
4144 | |
4145 | |
4146 LocationSummary* Float32x4ScaleInstr::MakeLocationSummary(Zone* zone, | |
4147 bool opt) const { | |
4148 UNIMPLEMENTED(); | |
4149 return NULL; | |
4150 } | |
4151 | |
4152 | |
4153 void Float32x4ScaleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4154 UNIMPLEMENTED(); | |
4155 } | |
4156 | |
4157 | |
4158 LocationSummary* Float32x4ZeroArgInstr::MakeLocationSummary(Zone* zone, | |
4159 bool opt) const { | |
4160 UNIMPLEMENTED(); | |
4161 return NULL; | |
4162 } | |
4163 | |
4164 | |
4165 void Float32x4ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4166 UNIMPLEMENTED(); | |
4167 } | |
4168 | |
4169 | |
4170 LocationSummary* Float32x4ClampInstr::MakeLocationSummary(Zone* zone, | |
4171 bool opt) const { | |
4172 UNIMPLEMENTED(); | |
4173 return NULL; | |
4174 } | |
4175 | |
4176 | |
4177 void Float32x4ClampInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4178 UNIMPLEMENTED(); | |
4179 } | |
4180 | |
4181 | |
4182 LocationSummary* Float32x4WithInstr::MakeLocationSummary(Zone* zone, | |
4183 bool opt) const { | |
4184 UNIMPLEMENTED(); | |
4185 return NULL; | |
4186 } | |
4187 | |
4188 | |
4189 void Float32x4WithInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4190 UNIMPLEMENTED(); | |
4191 } | |
4192 | |
4193 | |
4194 LocationSummary* Float32x4ToInt32x4Instr::MakeLocationSummary(Zone* zone, | |
4195 bool opt) const { | |
4196 UNIMPLEMENTED(); | |
4197 return NULL; | |
4198 } | |
4199 | |
4200 | |
4201 void Float32x4ToInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4202 UNIMPLEMENTED(); | |
4203 } | |
4204 | |
4205 | |
4206 LocationSummary* Simd64x2ShuffleInstr::MakeLocationSummary(Zone* zone, | |
4207 bool opt) const { | |
4208 UNIMPLEMENTED(); | |
4209 return NULL; | |
4210 } | |
4211 | |
4212 | |
4213 void Simd64x2ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4214 UNIMPLEMENTED(); | |
4215 } | |
4216 | |
4217 | |
4218 LocationSummary* Float64x2ZeroInstr::MakeLocationSummary(Zone* zone, | |
4219 bool opt) const { | |
4220 UNIMPLEMENTED(); | |
4221 return NULL; | |
4222 } | |
4223 | |
4224 | |
4225 void Float64x2ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4226 UNIMPLEMENTED(); | |
4227 } | |
4228 | |
4229 | |
4230 LocationSummary* Float64x2SplatInstr::MakeLocationSummary(Zone* zone, | |
4231 bool opt) const { | |
4232 UNIMPLEMENTED(); | |
4233 return NULL; | |
4234 } | |
4235 | |
4236 | |
4237 void Float64x2SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4238 UNIMPLEMENTED(); | |
4239 } | |
4240 | |
4241 | |
4242 LocationSummary* Float64x2ConstructorInstr::MakeLocationSummary( | |
4243 Zone* zone, | |
4244 bool opt) const { | |
4245 UNIMPLEMENTED(); | |
4246 return NULL; | |
4247 } | |
4248 | |
4249 | |
4250 void Float64x2ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4251 UNIMPLEMENTED(); | |
4252 } | |
4253 | |
4254 | |
4255 LocationSummary* Float64x2ToFloat32x4Instr::MakeLocationSummary( | |
4256 Zone* zone, | |
4257 bool opt) const { | |
4258 UNIMPLEMENTED(); | |
4259 return NULL; | |
4260 } | |
4261 | |
4262 | |
4263 void Float64x2ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4264 UNIMPLEMENTED(); | |
4265 } | |
4266 | |
4267 | |
4268 LocationSummary* Float32x4ToFloat64x2Instr::MakeLocationSummary( | |
4269 Zone* zone, | |
4270 bool opt) const { | |
4271 UNIMPLEMENTED(); | |
4272 return NULL; | |
4273 } | |
4274 | |
4275 | |
4276 void Float32x4ToFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4277 UNIMPLEMENTED(); | |
4278 } | |
4279 | |
4280 | |
4281 LocationSummary* Float64x2ZeroArgInstr::MakeLocationSummary(Zone* zone, | |
4282 bool opt) const { | |
4283 UNIMPLEMENTED(); | |
4284 return NULL; | |
4285 } | |
4286 | |
4287 | |
4288 void Float64x2ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4289 UNIMPLEMENTED(); | |
4290 } | |
4291 | |
4292 | |
4293 LocationSummary* Float64x2OneArgInstr::MakeLocationSummary(Zone* zone, | |
4294 bool opt) const { | |
4295 UNIMPLEMENTED(); | |
4296 return NULL; | |
4297 } | |
4298 | |
4299 | |
4300 void Float64x2OneArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4301 UNIMPLEMENTED(); | |
4302 } | |
4303 | |
4304 | |
4305 LocationSummary* Int32x4ConstructorInstr::MakeLocationSummary(Zone* zone, | |
4306 bool opt) const { | |
4307 UNIMPLEMENTED(); | |
4308 return NULL; | |
4309 } | |
4310 | |
4311 | |
4312 void Int32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4313 UNIMPLEMENTED(); | |
4314 } | |
4315 | |
4316 | |
4317 LocationSummary* Int32x4BoolConstructorInstr::MakeLocationSummary( | |
4318 Zone* zone, | |
4319 bool opt) const { | |
4320 UNIMPLEMENTED(); | |
4321 return NULL; | |
4322 } | |
4323 | |
4324 | |
4325 void Int32x4BoolConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4326 UNIMPLEMENTED(); | |
4327 } | |
4328 | |
4329 | |
4330 LocationSummary* Int32x4GetFlagInstr::MakeLocationSummary(Zone* zone, | |
4331 bool opt) const { | |
4332 UNIMPLEMENTED(); | |
4333 return NULL; | |
4334 } | |
4335 | |
4336 | |
4337 void Int32x4GetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4338 UNIMPLEMENTED(); | |
4339 } | |
4340 | |
4341 | |
4342 LocationSummary* Simd32x4GetSignMaskInstr::MakeLocationSummary(Zone* zone, | |
4343 bool opt) const { | |
4344 UNIMPLEMENTED(); | |
4345 return NULL; | |
4346 } | |
4347 | |
4348 | |
4349 void Simd32x4GetSignMaskInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4350 UNIMPLEMENTED(); | |
4351 } | |
4352 | |
4353 | |
4354 LocationSummary* Int32x4SelectInstr::MakeLocationSummary(Zone* zone, | |
4355 bool opt) const { | |
4356 UNIMPLEMENTED(); | |
4357 return NULL; | |
4358 } | |
4359 | |
4360 | |
4361 void Int32x4SelectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4362 UNIMPLEMENTED(); | |
4363 } | |
4364 | |
4365 | |
4366 LocationSummary* Int32x4SetFlagInstr::MakeLocationSummary(Zone* zone, | |
4367 bool opt) const { | |
4368 UNIMPLEMENTED(); | |
4369 return NULL; | |
4370 } | |
4371 | |
4372 | |
4373 void Int32x4SetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4374 UNIMPLEMENTED(); | |
4375 } | |
4376 | |
4377 | |
4378 LocationSummary* Int32x4ToFloat32x4Instr::MakeLocationSummary(Zone* zone, | |
4379 bool opt) const { | |
4380 UNIMPLEMENTED(); | |
4381 return NULL; | |
4382 } | |
4383 | |
4384 | |
4385 void Int32x4ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4386 UNIMPLEMENTED(); | |
4387 } | |
4388 | |
4389 | |
4390 LocationSummary* BinaryInt32x4OpInstr::MakeLocationSummary(Zone* zone, | |
4391 bool opt) const { | |
4392 UNIMPLEMENTED(); | |
4393 return NULL; | |
4394 } | |
4395 | |
4396 | |
4397 void BinaryInt32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4398 UNIMPLEMENTED(); | |
4399 } | |
4400 | |
4401 | |
4402 LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone, | |
4403 bool opt) const { | |
4404 ASSERT((kind() == MathUnaryInstr::kSqrt) || | |
4405 (kind() == MathUnaryInstr::kDoubleSquare)); | |
4406 const intptr_t kNumInputs = 1; | |
4407 const intptr_t kNumTemps = 0; | |
4408 LocationSummary* summary = new (zone) | |
4409 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4410 summary->set_in(0, Location::RequiresFpuRegister()); | |
4411 summary->set_out(0, Location::RequiresFpuRegister()); | |
4412 return summary; | |
4413 } | |
4414 | |
4415 | |
4416 void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4417 if (kind() == MathUnaryInstr::kSqrt) { | |
4418 __ sqrtd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg()); | |
4419 } else if (kind() == MathUnaryInstr::kDoubleSquare) { | |
4420 DRegister val = locs()->in(0).fpu_reg(); | |
4421 DRegister result = locs()->out(0).fpu_reg(); | |
4422 __ muld(result, val, val); | |
4423 } else { | |
4424 UNREACHABLE(); | |
4425 } | |
4426 } | |
4427 | |
4428 | |
4429 LocationSummary* CaseInsensitiveCompareUC16Instr::MakeLocationSummary( | |
4430 Zone* zone, | |
4431 bool opt) const { | |
4432 const intptr_t kNumTemps = 0; | |
4433 LocationSummary* summary = new (zone) | |
4434 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); | |
4435 summary->set_in(0, Location::RegisterLocation(A0)); | |
4436 summary->set_in(1, Location::RegisterLocation(A1)); | |
4437 summary->set_in(2, Location::RegisterLocation(A2)); | |
4438 summary->set_in(3, Location::RegisterLocation(A3)); | |
4439 summary->set_out(0, Location::RegisterLocation(V0)); | |
4440 return summary; | |
4441 } | |
4442 | |
4443 | |
4444 void CaseInsensitiveCompareUC16Instr::EmitNativeCode( | |
4445 FlowGraphCompiler* compiler) { | |
4446 // Call the function. | |
4447 __ CallRuntime(TargetFunction(), TargetFunction().argument_count()); | |
4448 } | |
4449 | |
4450 | |
4451 LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone, | |
4452 bool opt) const { | |
4453 if (result_cid() == kDoubleCid) { | |
4454 const intptr_t kNumInputs = 2; | |
4455 const intptr_t kNumTemps = 1; | |
4456 LocationSummary* summary = new (zone) | |
4457 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4458 summary->set_in(0, Location::RequiresFpuRegister()); | |
4459 summary->set_in(1, Location::RequiresFpuRegister()); | |
4460 // Reuse the left register so that code can be made shorter. | |
4461 summary->set_out(0, Location::SameAsFirstInput()); | |
4462 summary->set_temp(0, Location::RequiresRegister()); | |
4463 return summary; | |
4464 } | |
4465 ASSERT(result_cid() == kSmiCid); | |
4466 const intptr_t kNumInputs = 2; | |
4467 const intptr_t kNumTemps = 0; | |
4468 LocationSummary* summary = new (zone) | |
4469 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4470 summary->set_in(0, Location::RequiresRegister()); | |
4471 summary->set_in(1, Location::RequiresRegister()); | |
4472 // Reuse the left register so that code can be made shorter. | |
4473 summary->set_out(0, Location::SameAsFirstInput()); | |
4474 return summary; | |
4475 } | |
4476 | |
4477 | |
4478 void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4479 ASSERT((op_kind() == MethodRecognizer::kMathMin) || | |
4480 (op_kind() == MethodRecognizer::kMathMax)); | |
4481 const intptr_t is_min = (op_kind() == MethodRecognizer::kMathMin); | |
4482 if (result_cid() == kDoubleCid) { | |
4483 Label done, returns_nan, are_equal; | |
4484 DRegister left = locs()->in(0).fpu_reg(); | |
4485 DRegister right = locs()->in(1).fpu_reg(); | |
4486 DRegister result = locs()->out(0).fpu_reg(); | |
4487 Register temp = locs()->temp(0).reg(); | |
4488 __ cund(left, right); | |
4489 __ bc1t(&returns_nan); | |
4490 __ ceqd(left, right); | |
4491 __ bc1t(&are_equal); | |
4492 if (is_min) { | |
4493 __ coltd(left, right); | |
4494 } else { | |
4495 __ coltd(right, left); | |
4496 } | |
4497 // TODO(zra): Add conditional moves. | |
4498 ASSERT(left == result); | |
4499 __ bc1t(&done); | |
4500 __ movd(result, right); | |
4501 __ b(&done); | |
4502 | |
4503 __ Bind(&returns_nan); | |
4504 __ LoadImmediate(result, NAN); | |
4505 __ b(&done); | |
4506 | |
4507 __ Bind(&are_equal); | |
4508 Label left_is_negative; | |
4509 // Check for negative zero: -0.0 is equal 0.0 but min or max must return | |
4510 // -0.0 or 0.0 respectively. | |
4511 // Check for negative left value (get the sign bit): | |
4512 // - min -> left is negative ? left : right. | |
4513 // - max -> left is negative ? right : left | |
4514 // Check the sign bit. | |
4515 __ mfc1(temp, OddFRegisterOf(left)); // Moves bits 32...63 of left to temp. | |
4516 if (is_min) { | |
4517 ASSERT(left == result); | |
4518 __ bltz(temp, &done); // Left is negative. | |
4519 } else { | |
4520 __ bgez(temp, &done); // Left is positive. | |
4521 } | |
4522 __ movd(result, right); | |
4523 __ Bind(&done); | |
4524 return; | |
4525 } | |
4526 | |
4527 Label done; | |
4528 ASSERT(result_cid() == kSmiCid); | |
4529 Register left = locs()->in(0).reg(); | |
4530 Register right = locs()->in(1).reg(); | |
4531 Register result = locs()->out(0).reg(); | |
4532 ASSERT(result == left); | |
4533 if (is_min) { | |
4534 __ BranchSignedLessEqual(left, right, &done); | |
4535 } else { | |
4536 __ BranchSignedGreaterEqual(left, right, &done); | |
4537 } | |
4538 __ mov(result, right); | |
4539 __ Bind(&done); | |
4540 } | |
4541 | |
4542 | |
4543 LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone, | |
4544 bool opt) const { | |
4545 const intptr_t kNumInputs = 1; | |
4546 const intptr_t kNumTemps = 0; | |
4547 LocationSummary* summary = new (zone) | |
4548 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4549 summary->set_in(0, Location::RequiresRegister()); | |
4550 // We make use of 3-operand instructions by not requiring result register | |
4551 // to be identical to first input register as on Intel. | |
4552 summary->set_out(0, Location::RequiresRegister()); | |
4553 return summary; | |
4554 } | |
4555 | |
4556 | |
4557 void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4558 Register value = locs()->in(0).reg(); | |
4559 Register result = locs()->out(0).reg(); | |
4560 switch (op_kind()) { | |
4561 case Token::kNEGATE: { | |
4562 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp); | |
4563 __ SubuDetectOverflow(result, ZR, value, CMPRES1); | |
4564 __ bltz(CMPRES1, deopt); | |
4565 break; | |
4566 } | |
4567 case Token::kBIT_NOT: | |
4568 __ nor(result, value, ZR); | |
4569 __ addiu(result, result, Immediate(-1)); // Remove inverted smi-tag. | |
4570 break; | |
4571 default: | |
4572 UNREACHABLE(); | |
4573 } | |
4574 } | |
4575 | |
4576 | |
4577 LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone, | |
4578 bool opt) const { | |
4579 const intptr_t kNumInputs = 1; | |
4580 const intptr_t kNumTemps = 0; | |
4581 LocationSummary* summary = new (zone) | |
4582 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4583 summary->set_in(0, Location::RequiresFpuRegister()); | |
4584 summary->set_out(0, Location::RequiresFpuRegister()); | |
4585 return summary; | |
4586 } | |
4587 | |
4588 | |
4589 void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4590 FpuRegister result = locs()->out(0).fpu_reg(); | |
4591 FpuRegister value = locs()->in(0).fpu_reg(); | |
4592 __ negd(result, value); | |
4593 } | |
4594 | |
4595 | |
4596 LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone, | |
4597 bool opt) const { | |
4598 const intptr_t kNumInputs = 1; | |
4599 const intptr_t kNumTemps = 0; | |
4600 LocationSummary* result = new (zone) | |
4601 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4602 result->set_in(0, Location::RequiresRegister()); | |
4603 result->set_out(0, Location::RequiresFpuRegister()); | |
4604 return result; | |
4605 } | |
4606 | |
4607 | |
4608 void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4609 Register value = locs()->in(0).reg(); | |
4610 FpuRegister result = locs()->out(0).fpu_reg(); | |
4611 __ mtc1(value, STMP1); | |
4612 __ cvtdw(result, STMP1); | |
4613 } | |
4614 | |
4615 | |
4616 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone, | |
4617 bool opt) const { | |
4618 const intptr_t kNumInputs = 1; | |
4619 const intptr_t kNumTemps = 0; | |
4620 LocationSummary* result = new (zone) | |
4621 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4622 result->set_in(0, Location::RequiresRegister()); | |
4623 result->set_out(0, Location::RequiresFpuRegister()); | |
4624 return result; | |
4625 } | |
4626 | |
4627 | |
4628 void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4629 Register value = locs()->in(0).reg(); | |
4630 FpuRegister result = locs()->out(0).fpu_reg(); | |
4631 __ SmiUntag(TMP, value); | |
4632 __ mtc1(TMP, STMP1); | |
4633 __ cvtdw(result, STMP1); | |
4634 } | |
4635 | |
4636 | |
4637 LocationSummary* MintToDoubleInstr::MakeLocationSummary(Zone* zone, | |
4638 bool opt) const { | |
4639 UNIMPLEMENTED(); | |
4640 return NULL; | |
4641 } | |
4642 | |
4643 | |
4644 void MintToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4645 UNIMPLEMENTED(); | |
4646 } | |
4647 | |
4648 | |
4649 LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone, | |
4650 bool opt) const { | |
4651 const intptr_t kNumInputs = 1; | |
4652 const intptr_t kNumTemps = 0; | |
4653 LocationSummary* result = new (zone) | |
4654 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
4655 result->set_in(0, Location::RegisterLocation(T1)); | |
4656 result->set_out(0, Location::RegisterLocation(V0)); | |
4657 return result; | |
4658 } | |
4659 | |
4660 | |
4661 void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4662 Register result = locs()->out(0).reg(); | |
4663 Register value_obj = locs()->in(0).reg(); | |
4664 ASSERT(result == V0); | |
4665 ASSERT(result != value_obj); | |
4666 __ LoadDFromOffset(DTMP, value_obj, Double::value_offset() - kHeapObjectTag); | |
4667 __ truncwd(STMP1, DTMP); | |
4668 __ mfc1(result, STMP1); | |
4669 | |
4670 // Overflow is signaled with minint. | |
4671 Label do_call, done; | |
4672 // Check for overflow and that it fits into Smi. | |
4673 __ LoadImmediate(TMP, 0xC0000000); | |
4674 __ subu(CMPRES1, result, TMP); | |
4675 __ bltz(CMPRES1, &do_call); | |
4676 __ SmiTag(result); | |
4677 __ b(&done); | |
4678 __ Bind(&do_call); | |
4679 __ Push(value_obj); | |
4680 ASSERT(instance_call()->HasICData()); | |
4681 const ICData& ic_data = *instance_call()->ic_data(); | |
4682 ASSERT(ic_data.NumberOfChecksIs(1)); | |
4683 const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0)); | |
4684 const int kTypeArgsLen = 0; | |
4685 const int kNumberOfArguments = 1; | |
4686 const Array& kNoArgumentNames = Object::null_array(); | |
4687 ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kNoArgumentNames); | |
4688 compiler->GenerateStaticCall(deopt_id(), instance_call()->token_pos(), target, | |
4689 args_info, locs(), ICData::Handle()); | |
4690 __ Bind(&done); | |
4691 } | |
4692 | |
4693 | |
4694 LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone, | |
4695 bool opt) const { | |
4696 const intptr_t kNumInputs = 1; | |
4697 const intptr_t kNumTemps = 0; | |
4698 LocationSummary* result = new (zone) | |
4699 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4700 result->set_in(0, Location::RequiresFpuRegister()); | |
4701 result->set_out(0, Location::RequiresRegister()); | |
4702 return result; | |
4703 } | |
4704 | |
4705 | |
4706 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4707 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi); | |
4708 Register result = locs()->out(0).reg(); | |
4709 DRegister value = locs()->in(0).fpu_reg(); | |
4710 __ truncwd(STMP1, value); | |
4711 __ mfc1(result, STMP1); | |
4712 | |
4713 // Check for overflow and that it fits into Smi. | |
4714 __ LoadImmediate(TMP, 0xC0000000); | |
4715 __ subu(CMPRES1, result, TMP); | |
4716 __ bltz(CMPRES1, deopt); | |
4717 __ SmiTag(result); | |
4718 } | |
4719 | |
4720 | |
4721 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone, | |
4722 bool opt) const { | |
4723 UNIMPLEMENTED(); | |
4724 return NULL; | |
4725 } | |
4726 | |
4727 | |
4728 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4729 UNIMPLEMENTED(); | |
4730 } | |
4731 | |
4732 | |
4733 LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone, | |
4734 bool opt) const { | |
4735 const intptr_t kNumInputs = 1; | |
4736 const intptr_t kNumTemps = 0; | |
4737 LocationSummary* result = new (zone) | |
4738 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4739 result->set_in(0, Location::RequiresFpuRegister()); | |
4740 result->set_out(0, Location::SameAsFirstInput()); | |
4741 return result; | |
4742 } | |
4743 | |
4744 | |
4745 void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4746 DRegister value = locs()->in(0).fpu_reg(); | |
4747 FRegister result = EvenFRegisterOf(locs()->out(0).fpu_reg()); | |
4748 __ cvtsd(result, value); | |
4749 } | |
4750 | |
4751 | |
4752 LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone, | |
4753 bool opt) const { | |
4754 const intptr_t kNumInputs = 1; | |
4755 const intptr_t kNumTemps = 0; | |
4756 LocationSummary* result = new (zone) | |
4757 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4758 result->set_in(0, Location::RequiresFpuRegister()); | |
4759 result->set_out(0, Location::SameAsFirstInput()); | |
4760 return result; | |
4761 } | |
4762 | |
4763 | |
4764 void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4765 FRegister value = EvenFRegisterOf(locs()->in(0).fpu_reg()); | |
4766 DRegister result = locs()->out(0).fpu_reg(); | |
4767 __ cvtds(result, value); | |
4768 } | |
4769 | |
4770 | |
4771 LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone, | |
4772 bool opt) const { | |
4773 // Calling convention on MIPS uses D6 and D7 to pass the first two | |
4774 // double arguments. | |
4775 ASSERT((InputCount() == 1) || (InputCount() == 2)); | |
4776 const intptr_t kNumTemps = 0; | |
4777 LocationSummary* result = new (zone) | |
4778 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); | |
4779 result->set_in(0, Location::FpuRegisterLocation(D6)); | |
4780 if (InputCount() == 2) { | |
4781 result->set_in(1, Location::FpuRegisterLocation(D7)); | |
4782 } | |
4783 result->set_out(0, Location::FpuRegisterLocation(D0)); | |
4784 return result; | |
4785 } | |
4786 | |
4787 | |
4788 // Pseudo code: | |
4789 // if (exponent == 0.0) return 1.0; | |
4790 // // Speed up simple cases. | |
4791 // if (exponent == 1.0) return base; | |
4792 // if (exponent == 2.0) return base * base; | |
4793 // if (exponent == 3.0) return base * base * base; | |
4794 // if (base == 1.0) return 1.0; | |
4795 // if (base.isNaN || exponent.isNaN) { | |
4796 // return double.NAN; | |
4797 // } | |
4798 // if (base != -Infinity && exponent == 0.5) { | |
4799 // if (base == 0.0) return 0.0; | |
4800 // return sqrt(value); | |
4801 // } | |
4802 // TODO(srdjan): Move into a stub? | |
4803 static void InvokeDoublePow(FlowGraphCompiler* compiler, | |
4804 InvokeMathCFunctionInstr* instr) { | |
4805 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow); | |
4806 const intptr_t kInputCount = 2; | |
4807 ASSERT(instr->InputCount() == kInputCount); | |
4808 LocationSummary* locs = instr->locs(); | |
4809 | |
4810 DRegister base = locs->in(0).fpu_reg(); | |
4811 DRegister exp = locs->in(1).fpu_reg(); | |
4812 DRegister result = locs->out(0).fpu_reg(); | |
4813 | |
4814 Label check_base, skip_call; | |
4815 __ LoadImmediate(DTMP, 0.0); | |
4816 __ LoadImmediate(result, 1.0); | |
4817 // exponent == 0.0 -> return 1.0; | |
4818 __ cund(exp, exp); | |
4819 __ bc1t(&check_base); // NaN -> check base. | |
4820 __ ceqd(exp, DTMP); | |
4821 __ bc1t(&skip_call); // exp is 0.0, result is 1.0. | |
4822 | |
4823 // exponent == 1.0 ? | |
4824 __ ceqd(exp, result); | |
4825 Label return_base; | |
4826 __ bc1t(&return_base); | |
4827 // exponent == 2.0 ? | |
4828 __ LoadImmediate(DTMP, 2.0); | |
4829 __ ceqd(exp, DTMP); | |
4830 Label return_base_times_2; | |
4831 __ bc1t(&return_base_times_2); | |
4832 // exponent == 3.0 ? | |
4833 __ LoadImmediate(DTMP, 3.0); | |
4834 __ ceqd(exp, DTMP); | |
4835 __ bc1f(&check_base); | |
4836 | |
4837 // base_times_3. | |
4838 __ muld(result, base, base); | |
4839 __ muld(result, result, base); | |
4840 __ b(&skip_call); | |
4841 | |
4842 __ Bind(&return_base); | |
4843 __ movd(result, base); | |
4844 __ b(&skip_call); | |
4845 | |
4846 __ Bind(&return_base_times_2); | |
4847 __ muld(result, base, base); | |
4848 __ b(&skip_call); | |
4849 | |
4850 __ Bind(&check_base); | |
4851 // Note: 'exp' could be NaN. | |
4852 // base == 1.0 -> return 1.0; | |
4853 __ cund(base, base); | |
4854 Label return_nan; | |
4855 __ bc1t(&return_nan); | |
4856 __ ceqd(base, result); | |
4857 __ bc1t(&skip_call); // base and result are 1.0. | |
4858 | |
4859 __ cund(exp, exp); | |
4860 Label try_sqrt; | |
4861 __ bc1f(&try_sqrt); // Neither 'exp' nor 'base' are NaN. | |
4862 | |
4863 __ Bind(&return_nan); | |
4864 __ LoadImmediate(result, NAN); | |
4865 __ b(&skip_call); | |
4866 | |
4867 __ Bind(&try_sqrt); | |
4868 // Before calling pow, check if we could use sqrt instead of pow. | |
4869 __ LoadImmediate(result, kNegInfinity); | |
4870 // base == -Infinity -> call pow; | |
4871 __ ceqd(base, result); | |
4872 Label do_pow; | |
4873 __ bc1t(&do_pow); | |
4874 | |
4875 // exponent == 0.5 ? | |
4876 __ LoadImmediate(result, 0.5); | |
4877 __ ceqd(exp, result); | |
4878 __ bc1f(&do_pow); | |
4879 | |
4880 // base == 0 -> return 0; | |
4881 __ LoadImmediate(DTMP, 0.0); | |
4882 __ ceqd(base, DTMP); | |
4883 Label return_zero; | |
4884 __ bc1t(&return_zero); | |
4885 | |
4886 __ sqrtd(result, base); | |
4887 __ b(&skip_call); | |
4888 | |
4889 __ Bind(&return_zero); | |
4890 __ movd(result, DTMP); | |
4891 __ b(&skip_call); | |
4892 | |
4893 __ Bind(&do_pow); | |
4894 | |
4895 // double values are passed and returned in vfp registers. | |
4896 __ CallRuntime(instr->TargetFunction(), kInputCount); | |
4897 __ Bind(&skip_call); | |
4898 } | |
4899 | |
4900 | |
4901 void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4902 // For pow-function return NaN if exponent is NaN. | |
4903 if (recognized_kind() == MethodRecognizer::kMathDoublePow) { | |
4904 InvokeDoublePow(compiler, this); | |
4905 return; | |
4906 } | |
4907 // double values are passed and returned in vfp registers. | |
4908 __ CallRuntime(TargetFunction(), InputCount()); | |
4909 } | |
4910 | |
4911 | |
4912 LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone, | |
4913 bool opt) const { | |
4914 // Only use this instruction in optimized code. | |
4915 ASSERT(opt); | |
4916 const intptr_t kNumInputs = 1; | |
4917 LocationSummary* summary = | |
4918 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); | |
4919 if (representation() == kUnboxedDouble) { | |
4920 if (index() == 0) { | |
4921 summary->set_in( | |
4922 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any())); | |
4923 } else { | |
4924 ASSERT(index() == 1); | |
4925 summary->set_in( | |
4926 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister())); | |
4927 } | |
4928 summary->set_out(0, Location::RequiresFpuRegister()); | |
4929 } else { | |
4930 ASSERT(representation() == kTagged); | |
4931 if (index() == 0) { | |
4932 summary->set_in( | |
4933 0, Location::Pair(Location::RequiresRegister(), Location::Any())); | |
4934 } else { | |
4935 ASSERT(index() == 1); | |
4936 summary->set_in( | |
4937 0, Location::Pair(Location::Any(), Location::RequiresRegister())); | |
4938 } | |
4939 summary->set_out(0, Location::RequiresRegister()); | |
4940 } | |
4941 return summary; | |
4942 } | |
4943 | |
4944 | |
4945 void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4946 ASSERT(locs()->in(0).IsPairLocation()); | |
4947 PairLocation* pair = locs()->in(0).AsPairLocation(); | |
4948 Location in_loc = pair->At(index()); | |
4949 if (representation() == kUnboxedDouble) { | |
4950 DRegister out = locs()->out(0).fpu_reg(); | |
4951 DRegister in = in_loc.fpu_reg(); | |
4952 __ movd(out, in); | |
4953 } else { | |
4954 ASSERT(representation() == kTagged); | |
4955 Register out = locs()->out(0).reg(); | |
4956 Register in = in_loc.reg(); | |
4957 __ mov(out, in); | |
4958 } | |
4959 } | |
4960 | |
4961 | |
4962 LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone, | |
4963 bool opt) const { | |
4964 const intptr_t kNumInputs = 2; | |
4965 const intptr_t kNumTemps = 1; | |
4966 LocationSummary* summary = new (zone) | |
4967 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
4968 summary->set_in(0, Location::RequiresRegister()); | |
4969 summary->set_in(1, Location::RequiresRegister()); | |
4970 summary->set_temp(0, Location::RequiresRegister()); | |
4971 // Output is a pair of registers. | |
4972 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
4973 Location::RequiresRegister())); | |
4974 return summary; | |
4975 } | |
4976 | |
4977 | |
4978 void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
4979 ASSERT(CanDeoptimize()); | |
4980 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); | |
4981 Register left = locs()->in(0).reg(); | |
4982 Register right = locs()->in(1).reg(); | |
4983 Register temp = locs()->temp(0).reg(); | |
4984 ASSERT(locs()->out(0).IsPairLocation()); | |
4985 PairLocation* pair = locs()->out(0).AsPairLocation(); | |
4986 Register result_div = pair->At(0).reg(); | |
4987 Register result_mod = pair->At(1).reg(); | |
4988 if (RangeUtils::CanBeZero(divisor_range())) { | |
4989 // Handle divide by zero in runtime. | |
4990 __ beq(right, ZR, deopt); | |
4991 } | |
4992 __ SmiUntag(temp, left); | |
4993 __ SmiUntag(TMP, right); | |
4994 __ div(temp, TMP); | |
4995 __ mflo(result_div); | |
4996 __ mfhi(result_mod); | |
4997 // Check the corner case of dividing the 'MIN_SMI' with -1, in which | |
4998 // case we cannot tag the result. | |
4999 __ BranchEqual(result_div, Immediate(0x40000000), deopt); | |
5000 // res = left % right; | |
5001 // if (res < 0) { | |
5002 // if (right < 0) { | |
5003 // res = res - right; | |
5004 // } else { | |
5005 // res = res + right; | |
5006 // } | |
5007 // } | |
5008 Label done; | |
5009 __ bgez(result_mod, &done); | |
5010 if (RangeUtils::Overlaps(divisor_range(), -1, 1)) { | |
5011 Label subtract; | |
5012 __ bltz(right, &subtract); | |
5013 __ addu(result_mod, result_mod, TMP); | |
5014 __ b(&done); | |
5015 __ Bind(&subtract); | |
5016 __ subu(result_mod, result_mod, TMP); | |
5017 } else if (divisor_range()->IsPositive()) { | |
5018 // Right is positive. | |
5019 __ addu(result_mod, result_mod, TMP); | |
5020 } else { | |
5021 // Right is negative. | |
5022 __ subu(result_mod, result_mod, TMP); | |
5023 } | |
5024 __ Bind(&done); | |
5025 | |
5026 __ SmiTag(result_div); | |
5027 __ SmiTag(result_mod); | |
5028 } | |
5029 | |
5030 | |
5031 LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary( | |
5032 Zone* zone, | |
5033 bool opt) const { | |
5034 return MakeCallSummary(zone); | |
5035 } | |
5036 | |
5037 | |
5038 LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
5039 comparison()->InitializeLocationSummary(zone, opt); | |
5040 // Branches don't produce a result. | |
5041 comparison()->locs()->set_out(0, Location::NoLocation()); | |
5042 return comparison()->locs(); | |
5043 } | |
5044 | |
5045 | |
5046 void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5047 __ Comment("BranchInstr"); | |
5048 comparison()->EmitBranchCode(compiler, this); | |
5049 } | |
5050 | |
5051 | |
5052 LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone, | |
5053 bool opt) const { | |
5054 const intptr_t kNumInputs = 1; | |
5055 const bool need_mask_temp = IsBitTest(); | |
5056 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0; | |
5057 LocationSummary* summary = new (zone) | |
5058 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5059 summary->set_in(0, Location::RequiresRegister()); | |
5060 if (!IsNullCheck()) { | |
5061 summary->set_temp(0, Location::RequiresRegister()); | |
5062 if (need_mask_temp) { | |
5063 summary->set_temp(1, Location::RequiresRegister()); | |
5064 } | |
5065 } | |
5066 return summary; | |
5067 } | |
5068 | |
5069 | |
5070 void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler, Label* deopt) { | |
5071 if (IsDeoptIfNull()) { | |
5072 __ BranchEqual(locs()->in(0).reg(), Object::null_object(), deopt); | |
5073 } else { | |
5074 ASSERT(IsDeoptIfNotNull()); | |
5075 __ BranchNotEqual(locs()->in(0).reg(), Object::null_object(), deopt); | |
5076 } | |
5077 } | |
5078 | |
5079 | |
5080 void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler, | |
5081 intptr_t min, | |
5082 intptr_t max, | |
5083 intptr_t mask, | |
5084 Label* deopt) { | |
5085 Register biased_cid = locs()->temp(0).reg(); | |
5086 __ LoadImmediate(TMP, min); | |
5087 __ subu(biased_cid, biased_cid, TMP); | |
5088 __ LoadImmediate(TMP, max - min); | |
5089 __ BranchUnsignedGreater(biased_cid, TMP, deopt); | |
5090 | |
5091 Register bit_reg = locs()->temp(1).reg(); | |
5092 __ LoadImmediate(bit_reg, 1); | |
5093 __ sllv(bit_reg, bit_reg, biased_cid); | |
5094 __ AndImmediate(bit_reg, bit_reg, mask); | |
5095 __ beq(bit_reg, ZR, deopt); | |
5096 } | |
5097 | |
5098 | |
5099 int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler, | |
5100 int bias, | |
5101 intptr_t cid_start, | |
5102 intptr_t cid_end, | |
5103 bool is_last, | |
5104 Label* is_ok, | |
5105 Label* deopt, | |
5106 bool use_near_jump) { | |
5107 Register biased_cid = locs()->temp(0).reg(); | |
5108 if (cid_start == cid_end) { | |
5109 __ LoadImmediate(TMP, cid_start - bias); | |
5110 if (is_last) { | |
5111 __ bne(biased_cid, TMP, deopt); | |
5112 } else { | |
5113 __ beq(biased_cid, TMP, is_ok); | |
5114 } | |
5115 } else { | |
5116 // For class ID ranges use a subtract followed by an unsigned | |
5117 // comparison to check both ends of the ranges with one comparison. | |
5118 __ AddImmediate(biased_cid, biased_cid, bias - cid_start); | |
5119 bias = cid_start; | |
5120 // TODO(erikcorry): We should use sltiu instead of the temporary TMP if | |
5121 // the range is small enough. | |
5122 __ LoadImmediate(TMP, cid_end - cid_start); | |
5123 // Reverse comparison so we get 1 if biased_cid > tmp ie cid is out of | |
5124 // range. | |
5125 __ sltu(TMP, TMP, biased_cid); | |
5126 if (is_last) { | |
5127 __ bne(TMP, ZR, deopt); | |
5128 } else { | |
5129 __ beq(TMP, ZR, is_ok); | |
5130 } | |
5131 } | |
5132 return bias; | |
5133 } | |
5134 | |
5135 | |
5136 LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone, | |
5137 bool opt) const { | |
5138 const intptr_t kNumInputs = 1; | |
5139 const intptr_t kNumTemps = 0; | |
5140 LocationSummary* summary = new (zone) | |
5141 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5142 summary->set_in(0, Location::RequiresRegister()); | |
5143 return summary; | |
5144 } | |
5145 | |
5146 | |
5147 void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5148 __ Comment("CheckSmiInstr"); | |
5149 Register value = locs()->in(0).reg(); | |
5150 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi, | |
5151 licm_hoisted_ ? ICData::kHoisted : 0); | |
5152 __ BranchIfNotSmi(value, deopt); | |
5153 } | |
5154 | |
5155 | |
5156 LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone, | |
5157 bool opt) const { | |
5158 const intptr_t kNumInputs = 1; | |
5159 const intptr_t kNumTemps = 0; | |
5160 LocationSummary* summary = new (zone) | |
5161 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5162 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister() | |
5163 : Location::WritableRegister()); | |
5164 | |
5165 return summary; | |
5166 } | |
5167 | |
5168 | |
5169 void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5170 Register value = locs()->in(0).reg(); | |
5171 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass); | |
5172 if (cids_.IsSingleCid()) { | |
5173 __ BranchNotEqual(value, Immediate(Smi::RawValue(cids_.cid_start)), deopt); | |
5174 } else { | |
5175 __ AddImmediate(value, value, -Smi::RawValue(cids_.cid_start)); | |
5176 // TODO(erikcorry): We should use sltiu instead of the temporary TMP if | |
5177 // the range is small enough. | |
5178 __ LoadImmediate(TMP, cids_.Extent()); | |
5179 // Reverse comparison so we get 1 if biased_cid > tmp ie cid is out of | |
5180 // range. | |
5181 __ sltu(TMP, TMP, value); | |
5182 __ bne(TMP, ZR, deopt); | |
5183 } | |
5184 } | |
5185 | |
5186 | |
5187 LocationSummary* GenericCheckBoundInstr::MakeLocationSummary(Zone* zone, | |
5188 bool opt) const { | |
5189 const intptr_t kNumInputs = 2; | |
5190 const intptr_t kNumTemps = 0; | |
5191 LocationSummary* locs = new (zone) LocationSummary( | |
5192 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
5193 locs->set_in(kLengthPos, Location::RequiresRegister()); | |
5194 locs->set_in(kIndexPos, Location::RequiresRegister()); | |
5195 return locs; | |
5196 } | |
5197 | |
5198 | |
5199 class RangeErrorSlowPath : public SlowPathCode { | |
5200 public: | |
5201 RangeErrorSlowPath(GenericCheckBoundInstr* instruction, intptr_t try_index) | |
5202 : instruction_(instruction), try_index_(try_index) {} | |
5203 | |
5204 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
5205 if (Assembler::EmittingComments()) { | |
5206 __ Comment("slow path check bound operation"); | |
5207 } | |
5208 __ Bind(entry_label()); | |
5209 LocationSummary* locs = instruction_->locs(); | |
5210 compiler->SaveLiveRegisters(locs); | |
5211 __ Push(locs->in(0).reg()); | |
5212 __ Push(locs->in(1).reg()); | |
5213 __ CallRuntime(kRangeErrorRuntimeEntry, 2); | |
5214 compiler->AddDescriptor( | |
5215 RawPcDescriptors::kOther, compiler->assembler()->CodeSize(), | |
5216 instruction_->deopt_id(), instruction_->token_pos(), try_index_); | |
5217 Environment* env = compiler->SlowPathEnvironmentFor(instruction_); | |
5218 compiler->EmitCatchEntryState(env, try_index_); | |
5219 __ break_(0); | |
5220 } | |
5221 | |
5222 private: | |
5223 GenericCheckBoundInstr* instruction_; | |
5224 intptr_t try_index_; | |
5225 }; | |
5226 | |
5227 | |
5228 void GenericCheckBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5229 RangeErrorSlowPath* slow_path = | |
5230 new RangeErrorSlowPath(this, compiler->CurrentTryIndex()); | |
5231 compiler->AddSlowPathCode(slow_path); | |
5232 | |
5233 Location length_loc = locs()->in(kLengthPos); | |
5234 Location index_loc = locs()->in(kIndexPos); | |
5235 Register length = length_loc.reg(); | |
5236 Register index = index_loc.reg(); | |
5237 const intptr_t index_cid = this->index()->Type()->ToCid(); | |
5238 if (index_cid != kSmiCid) { | |
5239 __ BranchIfNotSmi(index, slow_path->entry_label()); | |
5240 } | |
5241 __ BranchUnsignedGreaterEqual(index, length, slow_path->entry_label()); | |
5242 } | |
5243 | |
5244 | |
5245 LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone, | |
5246 bool opt) const { | |
5247 const intptr_t kNumInputs = 2; | |
5248 const intptr_t kNumTemps = 0; | |
5249 LocationSummary* locs = new (zone) | |
5250 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5251 locs->set_in(kLengthPos, Location::RegisterOrSmiConstant(length())); | |
5252 locs->set_in(kIndexPos, Location::RegisterOrSmiConstant(index())); | |
5253 return locs; | |
5254 } | |
5255 | |
5256 | |
5257 void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5258 uint32_t flags = generalized_ ? ICData::kGeneralized : 0; | |
5259 flags |= licm_hoisted_ ? ICData::kHoisted : 0; | |
5260 Label* deopt = | |
5261 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags); | |
5262 | |
5263 Location length_loc = locs()->in(kLengthPos); | |
5264 Location index_loc = locs()->in(kIndexPos); | |
5265 | |
5266 if (length_loc.IsConstant() && index_loc.IsConstant()) { | |
5267 ASSERT((Smi::Cast(length_loc.constant()).Value() <= | |
5268 Smi::Cast(index_loc.constant()).Value()) || | |
5269 (Smi::Cast(index_loc.constant()).Value() < 0)); | |
5270 // Unconditionally deoptimize for constant bounds checks because they | |
5271 // only occur only when index is out-of-bounds. | |
5272 __ b(deopt); | |
5273 return; | |
5274 } | |
5275 | |
5276 const intptr_t index_cid = index()->Type()->ToCid(); | |
5277 if (index_loc.IsConstant()) { | |
5278 Register length = length_loc.reg(); | |
5279 const Smi& index = Smi::Cast(index_loc.constant()); | |
5280 __ BranchUnsignedLessEqual( | |
5281 length, Immediate(reinterpret_cast<int32_t>(index.raw())), deopt); | |
5282 } else if (length_loc.IsConstant()) { | |
5283 const Smi& length = Smi::Cast(length_loc.constant()); | |
5284 Register index = index_loc.reg(); | |
5285 if (index_cid != kSmiCid) { | |
5286 __ BranchIfNotSmi(index, deopt); | |
5287 } | |
5288 if (length.Value() == Smi::kMaxValue) { | |
5289 __ BranchSignedLess(index, Immediate(0), deopt); | |
5290 } else { | |
5291 __ BranchUnsignedGreaterEqual( | |
5292 index, Immediate(reinterpret_cast<int32_t>(length.raw())), deopt); | |
5293 } | |
5294 } else { | |
5295 Register length = length_loc.reg(); | |
5296 Register index = index_loc.reg(); | |
5297 if (index_cid != kSmiCid) { | |
5298 __ BranchIfNotSmi(index, deopt); | |
5299 } | |
5300 __ BranchUnsignedGreaterEqual(index, length, deopt); | |
5301 } | |
5302 } | |
5303 | |
5304 LocationSummary* BinaryMintOpInstr::MakeLocationSummary(Zone* zone, | |
5305 bool opt) const { | |
5306 const intptr_t kNumInputs = 2; | |
5307 const intptr_t kNumTemps = 0; | |
5308 LocationSummary* summary = new (zone) | |
5309 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5310 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
5311 Location::RequiresRegister())); | |
5312 summary->set_in(1, Location::Pair(Location::RequiresRegister(), | |
5313 Location::RequiresRegister())); | |
5314 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
5315 Location::RequiresRegister())); | |
5316 return summary; | |
5317 } | |
5318 | |
5319 | |
5320 void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5321 PairLocation* left_pair = locs()->in(0).AsPairLocation(); | |
5322 Register left_lo = left_pair->At(0).reg(); | |
5323 Register left_hi = left_pair->At(1).reg(); | |
5324 PairLocation* right_pair = locs()->in(1).AsPairLocation(); | |
5325 Register right_lo = right_pair->At(0).reg(); | |
5326 Register right_hi = right_pair->At(1).reg(); | |
5327 PairLocation* out_pair = locs()->out(0).AsPairLocation(); | |
5328 Register out_lo = out_pair->At(0).reg(); | |
5329 Register out_hi = out_pair->At(1).reg(); | |
5330 | |
5331 Label* deopt = NULL; | |
5332 if (CanDeoptimize()) { | |
5333 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); | |
5334 } | |
5335 switch (op_kind()) { | |
5336 case Token::kBIT_AND: { | |
5337 __ and_(out_lo, left_lo, right_lo); | |
5338 __ and_(out_hi, left_hi, right_hi); | |
5339 break; | |
5340 } | |
5341 case Token::kBIT_OR: { | |
5342 __ or_(out_lo, left_lo, right_lo); | |
5343 __ or_(out_hi, left_hi, right_hi); | |
5344 break; | |
5345 } | |
5346 case Token::kBIT_XOR: { | |
5347 __ xor_(out_lo, left_lo, right_lo); | |
5348 __ xor_(out_hi, left_hi, right_hi); | |
5349 break; | |
5350 } | |
5351 case Token::kADD: | |
5352 case Token::kSUB: { | |
5353 if (op_kind() == Token::kADD) { | |
5354 __ addu(out_lo, left_lo, right_lo); | |
5355 __ sltu(TMP, out_lo, left_lo); // TMP = carry of left_lo + right_lo. | |
5356 __ addu(out_hi, left_hi, right_hi); | |
5357 __ addu(out_hi, out_hi, TMP); | |
5358 if (can_overflow()) { | |
5359 __ xor_(CMPRES1, out_hi, left_hi); | |
5360 __ xor_(TMP, out_hi, right_hi); | |
5361 __ and_(CMPRES1, TMP, CMPRES1); | |
5362 __ bltz(CMPRES1, deopt); | |
5363 } | |
5364 } else { | |
5365 __ subu(out_lo, left_lo, right_lo); | |
5366 __ sltu(TMP, left_lo, out_lo); // TMP = borrow of left_lo - right_lo. | |
5367 __ subu(out_hi, left_hi, right_hi); | |
5368 __ subu(out_hi, out_hi, TMP); | |
5369 if (can_overflow()) { | |
5370 __ xor_(CMPRES1, out_hi, left_hi); | |
5371 __ xor_(TMP, left_hi, right_hi); | |
5372 __ and_(CMPRES1, TMP, CMPRES1); | |
5373 __ bltz(CMPRES1, deopt); | |
5374 } | |
5375 } | |
5376 break; | |
5377 } | |
5378 case Token::kMUL: { | |
5379 // The product of two signed 32-bit integers fits in a signed 64-bit | |
5380 // result without causing overflow. | |
5381 // We deopt on larger inputs. | |
5382 // TODO(regis): Range analysis may eliminate the deopt check. | |
5383 __ sra(CMPRES1, left_lo, 31); | |
5384 __ bne(CMPRES1, left_hi, deopt); | |
5385 __ delay_slot()->sra(CMPRES2, right_lo, 31); | |
5386 __ bne(CMPRES2, right_hi, deopt); | |
5387 __ delay_slot()->mult(left_lo, right_lo); | |
5388 __ mflo(out_lo); | |
5389 __ mfhi(out_hi); | |
5390 break; | |
5391 } | |
5392 default: | |
5393 UNREACHABLE(); | |
5394 } | |
5395 } | |
5396 | |
5397 | |
5398 LocationSummary* ShiftMintOpInstr::MakeLocationSummary(Zone* zone, | |
5399 bool opt) const { | |
5400 const intptr_t kNumInputs = 2; | |
5401 const intptr_t kNumTemps = 0; | |
5402 LocationSummary* summary = new (zone) | |
5403 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5404 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
5405 Location::RequiresRegister())); | |
5406 summary->set_in(1, Location::WritableRegisterOrSmiConstant(right())); | |
5407 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
5408 Location::RequiresRegister())); | |
5409 return summary; | |
5410 } | |
5411 | |
5412 | |
5413 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5414 PairLocation* left_pair = locs()->in(0).AsPairLocation(); | |
5415 Register left_lo = left_pair->At(0).reg(); | |
5416 Register left_hi = left_pair->At(1).reg(); | |
5417 PairLocation* out_pair = locs()->out(0).AsPairLocation(); | |
5418 Register out_lo = out_pair->At(0).reg(); | |
5419 Register out_hi = out_pair->At(1).reg(); | |
5420 | |
5421 Label* deopt = NULL; | |
5422 if (CanDeoptimize()) { | |
5423 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); | |
5424 } | |
5425 if (locs()->in(1).IsConstant()) { | |
5426 // Code for a constant shift amount. | |
5427 ASSERT(locs()->in(1).constant().IsSmi()); | |
5428 const int32_t shift = | |
5429 reinterpret_cast<int32_t>(locs()->in(1).constant().raw()) >> 1; | |
5430 switch (op_kind()) { | |
5431 case Token::kSHR: { | |
5432 if (shift < 32) { | |
5433 __ sll(out_lo, left_hi, 32 - shift); | |
5434 __ srl(TMP, left_lo, shift); | |
5435 __ or_(out_lo, out_lo, TMP); | |
5436 __ sra(out_hi, left_hi, shift); | |
5437 } else { | |
5438 if (shift == 32) { | |
5439 __ mov(out_lo, left_hi); | |
5440 } else if (shift < 64) { | |
5441 __ sra(out_lo, left_hi, shift - 32); | |
5442 } else { | |
5443 __ sra(out_lo, left_hi, 31); | |
5444 } | |
5445 __ sra(out_hi, left_hi, 31); | |
5446 } | |
5447 break; | |
5448 } | |
5449 case Token::kSHL: { | |
5450 ASSERT(shift < 64); | |
5451 if (shift < 32) { | |
5452 __ srl(out_hi, left_lo, 32 - shift); | |
5453 __ sll(TMP, left_hi, shift); | |
5454 __ or_(out_hi, out_hi, TMP); | |
5455 __ sll(out_lo, left_lo, shift); | |
5456 } else { | |
5457 __ sll(out_hi, left_lo, shift - 32); | |
5458 __ mov(out_lo, ZR); | |
5459 } | |
5460 // Check for overflow. | |
5461 if (can_overflow()) { | |
5462 // Compare high word from input with shifted high word from output. | |
5463 // Overflow if they aren't equal. | |
5464 // If shift > 32, also compare low word from input with high word from | |
5465 // output shifted back shift - 32. | |
5466 if (shift > 32) { | |
5467 __ sra(TMP, out_hi, shift - 32); | |
5468 __ bne(left_lo, TMP, deopt); | |
5469 __ delay_slot()->sra(TMP, out_hi, 31); | |
5470 } else if (shift == 32) { | |
5471 __ sra(TMP, out_hi, 31); | |
5472 } else { | |
5473 __ sra(TMP, out_hi, shift); | |
5474 } | |
5475 __ bne(left_hi, TMP, deopt); | |
5476 } | |
5477 break; | |
5478 } | |
5479 default: | |
5480 UNREACHABLE(); | |
5481 } | |
5482 } else { | |
5483 // Code for a variable shift amount. | |
5484 Register shift = locs()->in(1).reg(); | |
5485 | |
5486 // Code below assumes shift amount is not 0 (cannot shift by 32 - 0). | |
5487 Label non_zero_shift, done; | |
5488 __ bne(shift, ZR, &non_zero_shift); | |
5489 __ delay_slot()->mov(out_lo, left_lo); | |
5490 __ b(&done); | |
5491 __ delay_slot()->mov(out_hi, left_hi); | |
5492 __ Bind(&non_zero_shift); | |
5493 | |
5494 // Deopt if shift is larger than 63 or less than 0. | |
5495 if (has_shift_count_check()) { | |
5496 __ sltiu(CMPRES1, shift, Immediate(2 * (kMintShiftCountLimit + 1))); | |
5497 __ beq(CMPRES1, ZR, deopt); | |
5498 // Untag shift count. | |
5499 __ delay_slot()->SmiUntag(shift); | |
5500 } else { | |
5501 // Untag shift count. | |
5502 __ SmiUntag(shift); | |
5503 } | |
5504 | |
5505 switch (op_kind()) { | |
5506 case Token::kSHR: { | |
5507 Label large_shift; | |
5508 __ sltiu(CMPRES1, shift, Immediate(32)); | |
5509 __ beq(CMPRES1, ZR, &large_shift); | |
5510 | |
5511 // 0 < shift < 32. | |
5512 __ delay_slot()->ori(TMP, ZR, Immediate(32)); | |
5513 __ subu(TMP, TMP, shift); // TMP = 32 - shift; 0 < TMP <= 31. | |
5514 __ sllv(out_lo, left_hi, TMP); | |
5515 __ srlv(TMP, left_lo, shift); | |
5516 __ or_(out_lo, out_lo, TMP); | |
5517 __ b(&done); | |
5518 __ delay_slot()->srav(out_hi, left_hi, shift); | |
5519 | |
5520 // shift >= 32. | |
5521 __ Bind(&large_shift); | |
5522 __ sra(out_hi, left_hi, 31); | |
5523 __ srav(out_lo, left_hi, shift); // Only 5 low bits of shift used. | |
5524 | |
5525 break; | |
5526 } | |
5527 case Token::kSHL: { | |
5528 Label large_shift; | |
5529 __ sltiu(CMPRES1, shift, Immediate(32)); | |
5530 __ beq(CMPRES1, ZR, &large_shift); | |
5531 | |
5532 // 0 < shift < 32. | |
5533 __ delay_slot()->ori(TMP, ZR, Immediate(32)); | |
5534 __ subu(TMP, TMP, shift); // TMP = 32 - shift; 0 < TMP <= 31. | |
5535 __ srlv(out_hi, left_lo, TMP); | |
5536 __ sllv(TMP, left_hi, shift); | |
5537 __ or_(out_hi, out_hi, TMP); | |
5538 // Check for overflow. | |
5539 if (can_overflow()) { | |
5540 // Compare high word from input with shifted high word from output. | |
5541 __ srav(TMP, out_hi, shift); | |
5542 __ beq(TMP, left_hi, &done); | |
5543 __ delay_slot()->sllv(out_lo, left_lo, shift); | |
5544 __ b(deopt); | |
5545 } else { | |
5546 __ b(&done); | |
5547 __ delay_slot()->sllv(out_lo, left_lo, shift); | |
5548 } | |
5549 | |
5550 // shift >= 32. | |
5551 __ Bind(&large_shift); | |
5552 __ sllv(out_hi, left_lo, shift); // Only 5 low bits of shift used. | |
5553 // Check for overflow. | |
5554 if (can_overflow()) { | |
5555 // Compare low word from input with shifted high word from output and | |
5556 // high word from input to sign of output. | |
5557 // Overflow if they aren't equal. | |
5558 __ srav(TMP, out_hi, shift); | |
5559 __ bne(TMP, left_lo, deopt); | |
5560 __ delay_slot()->sra(TMP, out_hi, 31); | |
5561 __ bne(TMP, left_hi, deopt); | |
5562 __ delay_slot()->mov(out_lo, ZR); | |
5563 } else { | |
5564 __ mov(out_lo, ZR); | |
5565 } | |
5566 break; | |
5567 } | |
5568 default: | |
5569 UNREACHABLE(); | |
5570 } | |
5571 __ Bind(&done); | |
5572 } | |
5573 } | |
5574 | |
5575 | |
5576 LocationSummary* UnaryMintOpInstr::MakeLocationSummary(Zone* zone, | |
5577 bool opt) const { | |
5578 const intptr_t kNumInputs = 1; | |
5579 const intptr_t kNumTemps = 0; | |
5580 LocationSummary* summary = new (zone) | |
5581 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5582 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
5583 Location::RequiresRegister())); | |
5584 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
5585 Location::RequiresRegister())); | |
5586 return summary; | |
5587 } | |
5588 | |
5589 | |
5590 void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5591 ASSERT(op_kind() == Token::kBIT_NOT); | |
5592 PairLocation* left_pair = locs()->in(0).AsPairLocation(); | |
5593 Register left_lo = left_pair->At(0).reg(); | |
5594 Register left_hi = left_pair->At(1).reg(); | |
5595 | |
5596 PairLocation* out_pair = locs()->out(0).AsPairLocation(); | |
5597 Register out_lo = out_pair->At(0).reg(); | |
5598 Register out_hi = out_pair->At(1).reg(); | |
5599 | |
5600 __ nor(out_lo, ZR, left_lo); | |
5601 __ nor(out_hi, ZR, left_hi); | |
5602 } | |
5603 | |
5604 | |
5605 CompileType BinaryUint32OpInstr::ComputeType() const { | |
5606 return CompileType::Int(); | |
5607 } | |
5608 | |
5609 | |
5610 CompileType ShiftUint32OpInstr::ComputeType() const { | |
5611 return CompileType::Int(); | |
5612 } | |
5613 | |
5614 | |
5615 CompileType UnaryUint32OpInstr::ComputeType() const { | |
5616 return CompileType::Int(); | |
5617 } | |
5618 | |
5619 | |
5620 LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone, | |
5621 bool opt) const { | |
5622 const intptr_t kNumInputs = 2; | |
5623 const intptr_t kNumTemps = 0; | |
5624 LocationSummary* summary = new (zone) | |
5625 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5626 summary->set_in(0, Location::RequiresRegister()); | |
5627 summary->set_in(1, Location::RequiresRegister()); | |
5628 summary->set_out(0, Location::RequiresRegister()); | |
5629 return summary; | |
5630 } | |
5631 | |
5632 | |
5633 void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5634 Register left = locs()->in(0).reg(); | |
5635 Register right = locs()->in(1).reg(); | |
5636 Register out = locs()->out(0).reg(); | |
5637 ASSERT(out != left); | |
5638 switch (op_kind()) { | |
5639 case Token::kBIT_AND: | |
5640 __ and_(out, left, right); | |
5641 break; | |
5642 case Token::kBIT_OR: | |
5643 __ or_(out, left, right); | |
5644 break; | |
5645 case Token::kBIT_XOR: | |
5646 __ xor_(out, left, right); | |
5647 break; | |
5648 case Token::kADD: | |
5649 __ addu(out, left, right); | |
5650 break; | |
5651 case Token::kSUB: | |
5652 __ subu(out, left, right); | |
5653 break; | |
5654 case Token::kMUL: | |
5655 __ multu(left, right); | |
5656 __ mflo(out); | |
5657 break; | |
5658 default: | |
5659 UNREACHABLE(); | |
5660 } | |
5661 } | |
5662 | |
5663 | |
5664 LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone, | |
5665 bool opt) const { | |
5666 const intptr_t kNumInputs = 2; | |
5667 const intptr_t kNumTemps = 1; | |
5668 LocationSummary* summary = new (zone) | |
5669 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5670 summary->set_in(0, Location::RequiresRegister()); | |
5671 summary->set_in(1, Location::RegisterOrSmiConstant(right())); | |
5672 summary->set_temp(0, Location::RequiresRegister()); | |
5673 summary->set_out(0, Location::RequiresRegister()); | |
5674 return summary; | |
5675 } | |
5676 | |
5677 | |
5678 void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5679 const intptr_t kShifterLimit = 31; | |
5680 | |
5681 Register left = locs()->in(0).reg(); | |
5682 Register out = locs()->out(0).reg(); | |
5683 Register temp = locs()->temp(0).reg(); | |
5684 | |
5685 ASSERT(left != out); | |
5686 | |
5687 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); | |
5688 | |
5689 if (locs()->in(1).IsConstant()) { | |
5690 // Shifter is constant. | |
5691 | |
5692 const Object& constant = locs()->in(1).constant(); | |
5693 ASSERT(constant.IsSmi()); | |
5694 const intptr_t shift_value = Smi::Cast(constant).Value(); | |
5695 | |
5696 // Do the shift: (shift_value > 0) && (shift_value <= kShifterLimit). | |
5697 switch (op_kind()) { | |
5698 case Token::kSHR: | |
5699 __ srl(out, left, shift_value); | |
5700 break; | |
5701 case Token::kSHL: | |
5702 __ sll(out, left, shift_value); | |
5703 break; | |
5704 default: | |
5705 UNREACHABLE(); | |
5706 } | |
5707 return; | |
5708 } | |
5709 | |
5710 // Non constant shift value. | |
5711 Register shifter = locs()->in(1).reg(); | |
5712 | |
5713 __ SmiUntag(temp, shifter); | |
5714 // If shift value is < 0, deoptimize. | |
5715 __ bltz(temp, deopt); | |
5716 __ delay_slot()->mov(out, left); | |
5717 __ sltiu(CMPRES1, temp, Immediate(kShifterLimit + 1)); | |
5718 __ movz(out, ZR, CMPRES1); // out = shift > kShifterLimit ? 0 : left. | |
5719 // Do the shift % 32. | |
5720 switch (op_kind()) { | |
5721 case Token::kSHR: | |
5722 __ srlv(out, out, temp); | |
5723 break; | |
5724 case Token::kSHL: | |
5725 __ sllv(out, out, temp); | |
5726 break; | |
5727 default: | |
5728 UNREACHABLE(); | |
5729 } | |
5730 } | |
5731 | |
5732 | |
5733 LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone, | |
5734 bool opt) const { | |
5735 const intptr_t kNumInputs = 1; | |
5736 const intptr_t kNumTemps = 0; | |
5737 LocationSummary* summary = new (zone) | |
5738 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5739 summary->set_in(0, Location::RequiresRegister()); | |
5740 summary->set_out(0, Location::RequiresRegister()); | |
5741 return summary; | |
5742 } | |
5743 | |
5744 | |
5745 void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5746 Register left = locs()->in(0).reg(); | |
5747 Register out = locs()->out(0).reg(); | |
5748 ASSERT(left != out); | |
5749 | |
5750 ASSERT(op_kind() == Token::kBIT_NOT); | |
5751 | |
5752 __ nor(out, ZR, left); | |
5753 } | |
5754 | |
5755 | |
5756 DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr) | |
5757 | |
5758 | |
5759 LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone, | |
5760 bool opt) const { | |
5761 const intptr_t kNumInputs = 1; | |
5762 const intptr_t kNumTemps = 0; | |
5763 LocationSummary* summary = new (zone) | |
5764 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5765 if (from() == kUnboxedMint) { | |
5766 ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32)); | |
5767 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
5768 Location::RequiresRegister())); | |
5769 summary->set_out(0, Location::RequiresRegister()); | |
5770 } else if (to() == kUnboxedMint) { | |
5771 ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32)); | |
5772 summary->set_in(0, Location::RequiresRegister()); | |
5773 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
5774 Location::RequiresRegister())); | |
5775 } else { | |
5776 ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32)); | |
5777 ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32)); | |
5778 summary->set_in(0, Location::RequiresRegister()); | |
5779 summary->set_out(0, Location::SameAsFirstInput()); | |
5780 } | |
5781 return summary; | |
5782 } | |
5783 | |
5784 | |
5785 void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5786 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) { | |
5787 const Register out = locs()->out(0).reg(); | |
5788 // Representations are bitwise equivalent. | |
5789 ASSERT(out == locs()->in(0).reg()); | |
5790 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) { | |
5791 const Register out = locs()->out(0).reg(); | |
5792 // Representations are bitwise equivalent. | |
5793 ASSERT(out == locs()->in(0).reg()); | |
5794 if (CanDeoptimize()) { | |
5795 Label* deopt = | |
5796 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); | |
5797 __ BranchSignedLess(out, Immediate(0), deopt); | |
5798 } | |
5799 } else if (from() == kUnboxedMint) { | |
5800 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32); | |
5801 PairLocation* in_pair = locs()->in(0).AsPairLocation(); | |
5802 Register in_lo = in_pair->At(0).reg(); | |
5803 Register in_hi = in_pair->At(1).reg(); | |
5804 Register out = locs()->out(0).reg(); | |
5805 // Copy low word. | |
5806 __ mov(out, in_lo); | |
5807 if (CanDeoptimize()) { | |
5808 Label* deopt = | |
5809 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); | |
5810 ASSERT(to() == kUnboxedInt32); | |
5811 __ sra(TMP, in_lo, 31); | |
5812 __ bne(in_hi, TMP, deopt); | |
5813 } | |
5814 } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) { | |
5815 ASSERT(to() == kUnboxedMint); | |
5816 Register in = locs()->in(0).reg(); | |
5817 PairLocation* out_pair = locs()->out(0).AsPairLocation(); | |
5818 Register out_lo = out_pair->At(0).reg(); | |
5819 Register out_hi = out_pair->At(1).reg(); | |
5820 // Copy low word. | |
5821 __ mov(out_lo, in); | |
5822 if (from() == kUnboxedUint32) { | |
5823 __ xor_(out_hi, out_hi, out_hi); | |
5824 } else { | |
5825 ASSERT(from() == kUnboxedInt32); | |
5826 __ sra(out_hi, in, 31); | |
5827 } | |
5828 } else { | |
5829 UNREACHABLE(); | |
5830 } | |
5831 } | |
5832 | |
5833 | |
5834 LocationSummary* ThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
5835 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); | |
5836 } | |
5837 | |
5838 | |
5839 void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5840 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), kThrowRuntimeEntry, 1, | |
5841 locs()); | |
5842 __ break_(0); | |
5843 } | |
5844 | |
5845 | |
5846 LocationSummary* ReThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
5847 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); | |
5848 } | |
5849 | |
5850 | |
5851 void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5852 compiler->SetNeedsStackTrace(catch_try_index()); | |
5853 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), kReThrowRuntimeEntry, | |
5854 2, locs()); | |
5855 __ break_(0); | |
5856 } | |
5857 | |
5858 | |
5859 LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
5860 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); | |
5861 } | |
5862 | |
5863 | |
5864 void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5865 __ Stop(message()); | |
5866 } | |
5867 | |
5868 | |
5869 void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5870 if (!compiler->CanFallThroughTo(normal_entry())) { | |
5871 __ b(compiler->GetJumpLabel(normal_entry())); | |
5872 } | |
5873 } | |
5874 | |
5875 | |
5876 LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
5877 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); | |
5878 } | |
5879 | |
5880 | |
5881 void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5882 __ Comment("GotoInstr"); | |
5883 if (!compiler->is_optimizing()) { | |
5884 if (FLAG_reorder_basic_blocks) { | |
5885 compiler->EmitEdgeCounter(block()->preorder_number()); | |
5886 } | |
5887 // Add a deoptimization descriptor for deoptimizing instructions that | |
5888 // may be inserted before this instruction. | |
5889 compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(), | |
5890 TokenPosition::kNoSource); | |
5891 } | |
5892 if (HasParallelMove()) { | |
5893 compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); | |
5894 } | |
5895 | |
5896 // We can fall through if the successor is the next block in the list. | |
5897 // Otherwise, we need a jump. | |
5898 if (!compiler->CanFallThroughTo(successor())) { | |
5899 __ b(compiler->GetJumpLabel(successor())); | |
5900 } | |
5901 } | |
5902 | |
5903 | |
5904 LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone, | |
5905 bool opt) const { | |
5906 const intptr_t kNumInputs = 1; | |
5907 const intptr_t kNumTemps = 1; | |
5908 | |
5909 LocationSummary* summary = new (zone) | |
5910 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5911 | |
5912 summary->set_in(0, Location::RequiresRegister()); | |
5913 summary->set_temp(0, Location::RequiresRegister()); | |
5914 | |
5915 return summary; | |
5916 } | |
5917 | |
5918 | |
5919 void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5920 Register target_reg = locs()->temp_slot(0)->reg(); | |
5921 | |
5922 __ GetNextPC(target_reg, TMP); | |
5923 const intptr_t entry_offset = __ CodeSize() - 1 * Instr::kInstrSize; | |
5924 __ AddImmediate(target_reg, target_reg, -entry_offset); | |
5925 | |
5926 // Add the offset. | |
5927 Register offset_reg = locs()->in(0).reg(); | |
5928 if (offset()->definition()->representation() == kTagged) { | |
5929 __ SmiUntag(offset_reg); | |
5930 } | |
5931 __ addu(target_reg, target_reg, offset_reg); | |
5932 | |
5933 // Jump to the absolute address. | |
5934 __ jr(target_reg); | |
5935 } | |
5936 | |
5937 | |
5938 LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone, | |
5939 bool opt) const { | |
5940 const intptr_t kNumInputs = 2; | |
5941 const intptr_t kNumTemps = 0; | |
5942 if (needs_number_check()) { | |
5943 LocationSummary* locs = new (zone) | |
5944 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
5945 locs->set_in(0, Location::RegisterLocation(A0)); | |
5946 locs->set_in(1, Location::RegisterLocation(A1)); | |
5947 locs->set_out(0, Location::RegisterLocation(A0)); | |
5948 return locs; | |
5949 } | |
5950 LocationSummary* locs = new (zone) | |
5951 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
5952 locs->set_in(0, Location::RegisterOrConstant(left())); | |
5953 // Only one of the inputs can be a constant. Choose register if the first one | |
5954 // is a constant. | |
5955 locs->set_in(1, locs->in(0).IsConstant() | |
5956 ? Location::RequiresRegister() | |
5957 : Location::RegisterOrConstant(right())); | |
5958 locs->set_out(0, Location::RequiresRegister()); | |
5959 return locs; | |
5960 } | |
5961 | |
5962 | |
5963 Condition StrictCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
5964 BranchLabels labels) { | |
5965 Location left = locs()->in(0); | |
5966 Location right = locs()->in(1); | |
5967 ASSERT(!left.IsConstant() || !right.IsConstant()); | |
5968 Condition true_condition; | |
5969 if (left.IsConstant()) { | |
5970 true_condition = compiler->EmitEqualityRegConstCompare( | |
5971 right.reg(), left.constant(), needs_number_check(), token_pos()); | |
5972 } else if (right.IsConstant()) { | |
5973 true_condition = compiler->EmitEqualityRegConstCompare( | |
5974 left.reg(), right.constant(), needs_number_check(), token_pos()); | |
5975 } else { | |
5976 true_condition = compiler->EmitEqualityRegRegCompare( | |
5977 left.reg(), right.reg(), needs_number_check(), token_pos()); | |
5978 } | |
5979 if (kind() != Token::kEQ_STRICT) { | |
5980 ASSERT(kind() == Token::kNE_STRICT); | |
5981 true_condition = NegateCondition(true_condition); | |
5982 } | |
5983 return true_condition; | |
5984 } | |
5985 | |
5986 | |
5987 void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
5988 __ Comment("StrictCompareInstr"); | |
5989 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT); | |
5990 | |
5991 Label is_true, is_false; | |
5992 BranchLabels labels = {&is_true, &is_false, &is_false}; | |
5993 Condition true_condition = EmitComparisonCode(compiler, labels); | |
5994 EmitBranchOnCondition(compiler, true_condition, labels); | |
5995 | |
5996 Register result = locs()->out(0).reg(); | |
5997 Label done; | |
5998 __ Bind(&is_false); | |
5999 __ LoadObject(result, Bool::False()); | |
6000 __ b(&done); | |
6001 __ Bind(&is_true); | |
6002 __ LoadObject(result, Bool::True()); | |
6003 __ Bind(&done); | |
6004 } | |
6005 | |
6006 | |
6007 void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
6008 BranchInstr* branch) { | |
6009 __ Comment("StrictCompareInstr::EmitBranchCode"); | |
6010 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT); | |
6011 | |
6012 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
6013 Condition true_condition = EmitComparisonCode(compiler, labels); | |
6014 EmitBranchOnCondition(compiler, true_condition, labels); | |
6015 } | |
6016 | |
6017 | |
6018 LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone, | |
6019 bool opt) const { | |
6020 return LocationSummary::Make(zone, 1, Location::RequiresRegister(), | |
6021 LocationSummary::kNoCall); | |
6022 } | |
6023 | |
6024 | |
6025 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
6026 Register value = locs()->in(0).reg(); | |
6027 Register result = locs()->out(0).reg(); | |
6028 | |
6029 __ LoadObject(result, Bool::True()); | |
6030 __ LoadObject(TMP, Bool::False()); | |
6031 __ subu(CMPRES1, value, result); | |
6032 __ movz(result, TMP, CMPRES1); // If value is True, move False into result. | |
6033 } | |
6034 | |
6035 | |
6036 LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone, | |
6037 bool opt) const { | |
6038 return MakeCallSummary(zone); | |
6039 } | |
6040 | |
6041 | |
6042 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
6043 __ Comment("AllocateObjectInstr"); | |
6044 const Code& stub = Code::ZoneHandle( | |
6045 compiler->zone(), StubCode::GetAllocationStubForClass(cls())); | |
6046 const StubEntry stub_entry(stub); | |
6047 compiler->GenerateCall(token_pos(), stub_entry, RawPcDescriptors::kOther, | |
6048 locs()); | |
6049 compiler->AddStubCallTarget(stub); | |
6050 __ Drop(ArgumentCount()); // Discard arguments. | |
6051 } | |
6052 | |
6053 | |
6054 void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
6055 ASSERT(!compiler->is_optimizing()); | |
6056 __ BranchLinkPatchable(*StubCode::DebugStepCheck_entry()); | |
6057 compiler->AddCurrentDescriptor(stub_kind_, Thread::kNoDeoptId, token_pos()); | |
6058 compiler->RecordSafepoint(locs()); | |
6059 } | |
6060 | |
6061 | |
6062 LocationSummary* GrowRegExpStackInstr::MakeLocationSummary(Zone* zone, | |
6063 bool opt) const { | |
6064 const intptr_t kNumInputs = 1; | |
6065 const intptr_t kNumTemps = 0; | |
6066 LocationSummary* locs = new (zone) | |
6067 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
6068 locs->set_in(0, Location::RegisterLocation(T0)); | |
6069 locs->set_out(0, Location::RegisterLocation(T0)); | |
6070 return locs; | |
6071 } | |
6072 | |
6073 | |
6074 void GrowRegExpStackInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
6075 const Register typed_data = locs()->in(0).reg(); | |
6076 const Register result = locs()->out(0).reg(); | |
6077 __ Comment("GrowRegExpStackInstr"); | |
6078 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
6079 __ LoadObject(TMP, Object::null_object()); | |
6080 __ sw(TMP, Address(SP, 1 * kWordSize)); | |
6081 __ sw(typed_data, Address(SP, 0 * kWordSize)); | |
6082 compiler->GenerateRuntimeCall(TokenPosition::kNoSource, deopt_id(), | |
6083 kGrowRegExpStackRuntimeEntry, 1, locs()); | |
6084 __ lw(result, Address(SP, 1 * kWordSize)); | |
6085 __ addiu(SP, SP, Immediate(2 * kWordSize)); | |
6086 } | |
6087 | |
6088 | |
6089 } // namespace dart | |
6090 | |
6091 #endif // defined TARGET_ARCH_MIPS | |
OLD | NEW |