| OLD | NEW |
| --- | --- |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | |
| 2 // for details. All rights reserved. Use of this source code is governed by a | |
| 3 // BSD-style license that can be found in the LICENSE file. | |
| 4 | |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. | |
| 6 #if defined(TARGET_ARCH_MIPS) | |
| 7 | |
| 8 #include "vm/intermediate_language.h" | |
| 9 | |
| 10 #include "vm/compiler.h" | |
| 11 #include "vm/dart_entry.h" | |
| 12 #include "vm/flow_graph.h" | |
| 13 #include "vm/flow_graph_compiler.h" | |
| 14 #include "vm/flow_graph_range_analysis.h" | |
| 15 #include "vm/instructions.h" | |
| 16 #include "vm/locations.h" | |
| 17 #include "vm/object_store.h" | |
| 18 #include "vm/parser.h" | |
| 19 #include "vm/simulator.h" | |
| 20 #include "vm/stack_frame.h" | |
| 21 #include "vm/stub_code.h" | |
| 22 #include "vm/symbols.h" | |
| 23 | |
| 24 #define __ compiler->assembler()-> | |
| 25 #define Z (compiler->zone()) | |
| 26 | |
| 27 namespace dart { | |
| 28 | |
| 29 // Generic summary for call instructions that have all arguments pushed | |
| 30 // on the stack and return the result in a fixed register V0. | |
| 31 LocationSummary* Instruction::MakeCallSummary(Zone* zone) { | |
| 32 LocationSummary* result = | |
| 33 new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); | |
| 34 result->set_out(0, Location::RegisterLocation(V0)); | |
| 35 return result; | |
| 36 } | |
| 37 | |
| 38 | |
| 39 LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone, | |
| 40 bool opt) const { | |
| 41 const intptr_t kNumInputs = 1; | |
| 42 const intptr_t kNumTemps = 0; | |
| 43 LocationSummary* locs = new (zone) | |
| 44 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 45 locs->set_in(0, Location::AnyOrConstant(value())); | |
| 46 return locs; | |
| 47 } | |
| 48 | |
| 49 | |
| 50 void PushArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 51 // In SSA mode, we need an explicit push. Nothing to do in non-SSA mode | |
| 52 // where PushArgument is handled by BindInstr::EmitNativeCode. | |
| 53 __ Comment("PushArgumentInstr"); | |
| 54 if (compiler->is_optimizing()) { | |
| 55 Location value = locs()->in(0); | |
| 56 if (value.IsRegister()) { | |
| 57 __ Push(value.reg()); | |
| 58 } else if (value.IsConstant()) { | |
| 59 __ PushObject(value.constant()); | |
| 60 } else { | |
| 61 ASSERT(value.IsStackSlot()); | |
| 62 const intptr_t value_offset = value.ToStackSlotOffset(); | |
| 63 __ LoadFromOffset(TMP, FP, value_offset); | |
| 64 __ Push(TMP); | |
| 65 } | |
| 66 } | |
| 67 } | |
| 68 | |
| 69 | |
| 70 LocationSummary* ReturnInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 71 const intptr_t kNumInputs = 1; | |
| 72 const intptr_t kNumTemps = 0; | |
| 73 LocationSummary* locs = new (zone) | |
| 74 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 75 locs->set_in(0, Location::RegisterLocation(V0)); | |
| 76 return locs; | |
| 77 } | |
| 78 | |
| 79 | |
| 80 // Attempt optimized compilation at return instruction instead of at the entry. | |
| 81 // The entry needs to be patchable; no inlined objects are allowed in the area | |
| 82 // that will be overwritten by the patch instructions: a branch macro sequence. | |
| 83 void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 84 __ Comment("ReturnInstr"); | |
| 85 Register result = locs()->in(0).reg(); | |
| 86 ASSERT(result == V0); | |
| 87 | |
| 88 if (compiler->intrinsic_mode()) { | |
| 89 // Intrinsics don't have a frame. | |
| 90 __ Ret(); | |
| 91 return; | |
| 92 } | |
| 93 | |
| 94 #if defined(DEBUG) | |
| 95 Label stack_ok; | |
| 96 __ Comment("Stack Check"); | |
| 97 const intptr_t fp_sp_dist = | |
| 98 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; | |
| 99 ASSERT(fp_sp_dist <= 0); | |
| 100 __ subu(CMPRES1, SP, FP); | |
| 101 | |
| 102 __ BranchEqual(CMPRES1, Immediate(fp_sp_dist), &stack_ok); | |
| 103 __ break_(0); | |
| 104 | |
| 105 __ Bind(&stack_ok); | |
| 106 #endif | |
| 107 __ LeaveDartFrameAndReturn(); | |
| 108 } | |
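
The DEBUG block above verifies that SP sits exactly `fp_sp_dist` bytes below FP at the return point. A minimal standalone sketch of that arithmetic, with hypothetical values for `kFirstLocalSlotFromFp` and `compiler->StackSize()` (the real constants live in the VM's frame-layout headers):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical frame-layout values; the real ones come from the VM.
  const intptr_t kFirstLocalSlotFromFp = -1;  // First local slot, FP-relative.
  const intptr_t kWordSize = 4;               // 32-bit MIPS word.
  const intptr_t stack_size = 3;              // compiler->StackSize().

  // Same formula as the stack check: signed distance from FP down to SP.
  const intptr_t fp_sp_dist =
      (kFirstLocalSlotFromFp + 1 - stack_size) * kWordSize;
  assert(fp_sp_dist <= 0);  // SP is at or below FP.

  // The emitted code computes SP - FP and compares it to this constant.
  const intptr_t fp = 0x7fff0000;
  const intptr_t sp = fp + fp_sp_dist;  // Where SP must be at return.
  assert(sp - fp == fp_sp_dist);        // CMPRES1 == fp_sp_dist => stack_ok.
  return 0;
}
```
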
| 109 | |
| 110 | |
| 111 static Condition NegateCondition(Condition condition) { | |
| 112 switch (condition.rel_op()) { | |
| 113 case AL: | |
| 114 condition.set_rel_op(NV); | |
| 115 break; | |
| 116 case NV: | |
| 117 condition.set_rel_op(AL); | |
| 118 break; | |
| 119 case EQ: | |
| 120 condition.set_rel_op(NE); | |
| 121 break; | |
| 122 case NE: | |
| 123 condition.set_rel_op(EQ); | |
| 124 break; | |
| 125 case LT: | |
| 126 condition.set_rel_op(GE); | |
| 127 break; | |
| 128 case LE: | |
| 129 condition.set_rel_op(GT); | |
| 130 break; | |
| 131 case GT: | |
| 132 condition.set_rel_op(LE); | |
| 133 break; | |
| 134 case GE: | |
| 135 condition.set_rel_op(LT); | |
| 136 break; | |
| 137 case ULT: | |
| 138 condition.set_rel_op(UGE); | |
| 139 break; | |
| 140 case ULE: | |
| 141 condition.set_rel_op(UGT); | |
| 142 break; | |
| 143 case UGT: | |
| 144 condition.set_rel_op(ULE); | |
| 145 break; | |
| 146 case UGE: | |
| 147 condition.set_rel_op(ULT); | |
| 148 break; | |
| 149 default: | |
| 150 UNREACHABLE(); | |
| 151 } | |
| 152 return condition; | |
| 153 } | |
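
`NegateCondition` maps each relation to its logical complement so that a branch on the false case can stand in for a branch on the true case. A standalone sketch of the same mapping (the enum here is a stand-in for the VM's `RelationOperator`); a useful sanity property is that negation is an involution:

```cpp
#include <cassert>
#include <initializer_list>

// Stand-in for the VM's RelationOperator.
enum RelOp { AL, NV, EQ, NE, LT, LE, GT, GE, ULT, ULE, UGT, UGE };

// Each operator maps to the one that is true exactly when it is false.
RelOp Negate(RelOp op) {
  switch (op) {
    case AL:  return NV;   case NV:  return AL;
    case EQ:  return NE;   case NE:  return EQ;
    case LT:  return GE;   case GE:  return LT;
    case LE:  return GT;   case GT:  return LE;
    case ULT: return UGE;  case UGE: return ULT;
    case ULE: return UGT;  case UGT: return ULE;
  }
  return NV;
}

int main() {
  // Negating twice must restore the original operator.
  for (RelOp op : {AL, NV, EQ, NE, LT, LE, GT, GE, ULT, ULE, UGT, UGE}) {
    assert(Negate(Negate(op)) == op);
  }
  return 0;
}
```
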
| 154 | |
| 155 | |
| 156 LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone, | |
| 157 bool opt) const { | |
| 158 comparison()->InitializeLocationSummary(zone, opt); | |
| 159 return comparison()->locs(); | |
| 160 } | |
| 161 | |
| 162 | |
| 163 void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 164 const Register result = locs()->out(0).reg(); | |
| 165 | |
| 166 intptr_t true_value = if_true_; | |
| 167 intptr_t false_value = if_false_; | |
| 168 bool swapped = false; | |
| 169 if (true_value == 0) { | |
| 170 // Swap values so that false_value is zero. | |
| 171 intptr_t temp = true_value; | |
| 172 true_value = false_value; | |
| 173 false_value = temp; | |
| 174 swapped = true; | |
| 175 } | |
| 176 | |
| 177 // Initialize result with the true value. | |
| 178 __ LoadImmediate(result, Smi::RawValue(true_value)); | |
| 179 | |
| 180 // Emit comparison code. This must not overwrite the result register. | |
| 181 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using | |
| 182 // the labels or returning an invalid condition. | |
| 183 BranchLabels labels = {NULL, NULL, NULL}; // Emit branch-free code. | |
| 184 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels); | |
| 185 ASSERT(true_condition.IsValid()); | |
| 186 if (swapped) { | |
| 187 true_condition = NegateCondition(true_condition); | |
| 188 } | |
| 189 | |
| 190 // Evaluate condition and provide result in CMPRES1. | |
| 191 Register left = true_condition.left(); | |
| 192 Register right = true_condition.right(); | |
| 193 bool zero_is_false = true; // Zero in CMPRES1 indicates a false condition. | |
| 194 switch (true_condition.rel_op()) { | |
| 195 case AL: | |
| 196 return; // Result holds true_value. | |
| 197 case NV: | |
| 198 __ LoadImmediate(result, Smi::RawValue(false_value)); | |
| 199 return; | |
| 200 case EQ: | |
| 201 zero_is_false = false; | |
| 202 // fall through. | |
| 203 case NE: { | |
| 204 if (left == IMM) { | |
| 205 __ XorImmediate(CMPRES1, right, true_condition.imm()); | |
| 206 } else if (right == IMM) { | |
| 207 __ XorImmediate(CMPRES1, left, true_condition.imm()); | |
| 208 } else { | |
| 209 __ xor_(CMPRES1, left, right); | |
| 210 } | |
| 211 break; | |
| 212 } | |
| 213 case GE: | |
| 214 zero_is_false = false; | |
| 215 // fall through. | |
| 216 case LT: { | |
| 217 if (left == IMM) { | |
| 218 __ slti(CMPRES1, right, Immediate(true_condition.imm() + 1)); | |
| 219 zero_is_false = !zero_is_false; | |
| 220 } else if (right == IMM) { | |
| 221 __ slti(CMPRES1, left, Immediate(true_condition.imm())); | |
| 222 } else { | |
| 223 __ slt(CMPRES1, left, right); | |
| 224 } | |
| 225 break; | |
| 226 } | |
| 227 case LE: | |
| 228 zero_is_false = false; | |
| 229 // fall through. | |
| 230 case GT: { | |
| 231 if (left == IMM) { | |
| 232 __ slti(CMPRES1, right, Immediate(true_condition.imm())); | |
| 233 } else if (right == IMM) { | |
| 234 __ slti(CMPRES1, left, Immediate(true_condition.imm() + 1)); | |
| 235 zero_is_false = !zero_is_false; | |
| 236 } else { | |
| 237 __ slt(CMPRES1, right, left); | |
| 238 } | |
| 239 break; | |
| 240 } | |
| 241 case UGE: | |
| 242 zero_is_false = false; | |
| 243 // fall through. | |
| 244 case ULT: { | |
| 245 ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. | |
| 246 __ sltu(CMPRES1, left, right); | |
| 247 break; | |
| 248 } | |
| 249 case ULE: | |
| 250 zero_is_false = false; | |
| 251 // fall through. | |
| 252 case UGT: { | |
| 253 ASSERT((left != IMM) && (right != IMM)); // No unsigned constants used. | |
| 254 __ sltu(CMPRES1, right, left); | |
| 255 break; | |
| 256 } | |
| 257 default: | |
| 258 UNREACHABLE(); | |
| 259 } | |
| 260 | |
| 261 // CMPRES1 is the evaluated condition, zero or non-zero, as specified by the | |
| 262 // flag zero_is_false. | |
| 263 Register false_value_reg; | |
| 264 if (false_value == 0) { | |
| 265 false_value_reg = ZR; | |
| 266 } else { | |
| 267 __ LoadImmediate(CMPRES2, Smi::RawValue(false_value)); | |
| 268 false_value_reg = CMPRES2; | |
| 269 } | |
| 270 if (zero_is_false) { | |
| 271 __ movz(result, false_value_reg, CMPRES1); | |
| 272 } else { | |
| 273 __ movn(result, false_value_reg, CMPRES1); | |
| 274 } | |
| 275 } | |
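
The branch-free path above reduces every relation to a zero/non-zero value in CMPRES1 (via `slt`, `sltu`, or `xor`) and then overwrites the preloaded true value with a MIPS conditional move: `movz rd, rs, rt` copies `rs` into `rd` when `rt` is zero, `movn` when it is non-zero. A standalone emulation of that final select, assuming the condition has already been evaluated:

```cpp
#include <cassert>
#include <cstdint>

// movz rd, rs, rt: rd = (rt == 0) ? rs : rd.
int32_t movz(int32_t rd, int32_t rs, int32_t rt) { return rt == 0 ? rs : rd; }
// movn rd, rs, rt: rd = (rt != 0) ? rs : rd.
int32_t movn(int32_t rd, int32_t rs, int32_t rt) { return rt != 0 ? rs : rd; }

// Branch-free (cond ? true_value : false_value), mirroring the emitter:
// result is preloaded with true_value; cmpres holds the evaluated relation.
int32_t Select(int32_t cmpres, bool zero_is_false, int32_t true_value,
               int32_t false_value) {
  int32_t result = true_value;  // LoadImmediate(result, true_value).
  if (zero_is_false) {
    result = movz(result, false_value, cmpres);  // Zero means false.
  } else {
    result = movn(result, false_value, cmpres);  // Non-zero means false.
  }
  return result;
}

int main() {
  // slt-style condition: truth leaves 1 in cmpres, so zero_is_false = true.
  assert(Select(/*cmpres=*/1, true, 42, 7) == 42);
  assert(Select(/*cmpres=*/0, true, 42, 7) == 7);
  return 0;
}
```
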
| 276 | |
| 277 | |
| 278 LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone, | |
| 279 bool opt) const { | |
| 280 const intptr_t kNumInputs = 1; | |
| 281 const intptr_t kNumTemps = 0; | |
| 282 LocationSummary* summary = new (zone) | |
| 283 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 284 summary->set_in(0, Location::RegisterLocation(T0)); // Function. | |
| 285 summary->set_out(0, Location::RegisterLocation(V0)); | |
| 286 return summary; | |
| 287 } | |
| 288 | |
| 289 | |
| 290 void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 291 // Load arguments descriptor in S4. | |
| 292 const intptr_t argument_count = ArgumentCount(); // Includes type args. | |
| 293 const Array& arguments_descriptor = | |
| 294 Array::ZoneHandle(Z, GetArgumentsDescriptor()); | |
| 295 __ LoadObject(S4, arguments_descriptor); | |
| 296 | |
| 297 // Load closure function code in T2. | |
| 298 // S4: arguments descriptor array. | |
| 299 // S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value). | |
| 300 ASSERT(locs()->in(0).reg() == T0); | |
| 301 __ LoadImmediate(S5, 0); | |
| 302 __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); | |
| 303 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); | |
| 304 __ jalr(T2); | |
| 305 compiler->RecordSafepoint(locs()); | |
| 306 compiler->EmitCatchEntryState(); | |
| 307 // Marks either the continuation point in unoptimized code or the | |
| 308 // deoptimization point in optimized code, after the call. | |
| 309 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id()); | |
| 310 if (compiler->is_optimizing()) { | |
| 311 compiler->AddDeoptIndexAtCall(deopt_id_after); | |
| 312 } | |
| 313 // Add deoptimization continuation point after the call and before the | |
| 314 // arguments are removed. | |
| 315 // In optimized code this descriptor is needed for exception handling. | |
| 316 compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, | |
| 317 token_pos()); | |
| 318 __ Drop(argument_count); | |
| 319 } | |
| 320 | |
| 321 | |
| 322 LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone, | |
| 323 bool opt) const { | |
| 324 return LocationSummary::Make(zone, 0, Location::RequiresRegister(), | |
| 325 LocationSummary::kNoCall); | |
| 326 } | |
| 327 | |
| 328 | |
| 329 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 330 __ Comment("LoadLocalInstr"); | |
| 331 Register result = locs()->out(0).reg(); | |
| 332 __ LoadFromOffset(result, FP, local().index() * kWordSize); | |
| 333 } | |
| 334 | |
| 335 | |
| 336 LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone, | |
| 337 bool opt) const { | |
| 338 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(), | |
| 339 LocationSummary::kNoCall); | |
| 340 } | |
| 341 | |
| 342 | |
| 343 void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 344 __ Comment("StoreLocalInstr"); | |
| 345 Register value = locs()->in(0).reg(); | |
| 346 Register result = locs()->out(0).reg(); | |
| 347 ASSERT(result == value); // Assert that register assignment is correct. | |
| 348 __ StoreToOffset(value, FP, local().index() * kWordSize); | |
| 349 } | |
| 350 | |
| 351 | |
| 352 LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone, | |
| 353 bool opt) const { | |
| 354 return LocationSummary::Make(zone, 0, Location::RequiresRegister(), | |
| 355 LocationSummary::kNoCall); | |
| 356 } | |
| 357 | |
| 358 | |
| 359 void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 360 // The register allocator drops constant definitions that have no uses. | |
| 361 if (!locs()->out(0).IsInvalid()) { | |
| 362 __ Comment("ConstantInstr"); | |
| 363 Register result = locs()->out(0).reg(); | |
| 364 __ LoadObject(result, value()); | |
| 365 } | |
| 366 } | |
| 367 | |
| 368 | |
| 369 LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone, | |
| 370 bool opt) const { | |
| 371 const intptr_t kNumInputs = 0; | |
| 372 const intptr_t kNumTemps = (representation_ == kUnboxedInt32) ? 0 : 1; | |
| 373 LocationSummary* locs = new (zone) | |
| 374 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 375 if (representation_ == kUnboxedInt32) { | |
| 376 locs->set_out(0, Location::RequiresRegister()); | |
| 377 } else { | |
| 378 ASSERT(representation_ == kUnboxedDouble); | |
| 379 locs->set_out(0, Location::RequiresFpuRegister()); | |
| 380 } | |
| 381 if (kNumTemps > 0) { | |
| 382 locs->set_temp(0, Location::RequiresRegister()); | |
| 383 } | |
| 384 return locs; | |
| 385 } | |
| 386 | |
| 387 | |
| 388 void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 389 // The register allocator drops constant definitions that have no uses. | |
| 390 if (!locs()->out(0).IsInvalid()) { | |
| 391 switch (representation_) { | |
| 392 case kUnboxedDouble: { | |
| 393 ASSERT(value().IsDouble()); | |
| 394 const Register const_value = locs()->temp(0).reg(); | |
| 395 const DRegister result = locs()->out(0).fpu_reg(); | |
| 396 __ LoadObject(const_value, value()); | |
| 397 __ LoadDFromOffset(result, const_value, | |
| 398 Double::value_offset() - kHeapObjectTag); | |
| 399 break; | |
| 400 } | |
| 401 | |
| 402 case kUnboxedInt32: | |
| 403 __ LoadImmediate(locs()->out(0).reg(), Smi::Cast(value()).Value()); | |
| 404 break; | |
| 405 | |
| 406 default: | |
| 407 UNREACHABLE(); | |
| 408 } | |
| 409 } | |
| 410 } | |
| 411 | |
| 412 | |
| 413 LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone, | |
| 414 bool opt) const { | |
| 415 const intptr_t kNumInputs = 3; | |
| 416 const intptr_t kNumTemps = 0; | |
| 417 LocationSummary* summary = new (zone) | |
| 418 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 419 summary->set_in(0, Location::RegisterLocation(A0)); // Value. | |
| 420 summary->set_in(1, Location::RegisterLocation(A1)); // Instantiator type args. | |
| 421 summary->set_in(2, Location::RegisterLocation(A2)); // Function type args. | |
| 422 summary->set_out(0, Location::RegisterLocation(A0)); | |
| 423 return summary; | |
| 424 } | |
| 425 | |
| 426 | |
| 427 LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone, | |
| 428 bool opt) const { | |
| 429 const intptr_t kNumInputs = 1; | |
| 430 const intptr_t kNumTemps = 0; | |
| 431 LocationSummary* locs = new (zone) | |
| 432 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 433 locs->set_in(0, Location::RegisterLocation(A0)); | |
| 434 locs->set_out(0, Location::RegisterLocation(A0)); | |
| 435 return locs; | |
| 436 } | |
| 437 | |
| 438 | |
| 439 static void EmitAssertBoolean(Register reg, | |
| 440 TokenPosition token_pos, | |
| 441 intptr_t deopt_id, | |
| 442 LocationSummary* locs, | |
| 443 FlowGraphCompiler* compiler) { | |
| 444 // Check that the type of the value is allowed in conditional context. | |
| 445 // Call the runtime if the object is not bool::true or bool::false. | |
| 446 ASSERT(locs->always_calls()); | |
| 447 Label done; | |
| 448 | |
| 449 if (Isolate::Current()->type_checks()) { | |
| 450 __ BranchEqual(reg, Bool::True(), &done); | |
| 451 __ BranchEqual(reg, Bool::False(), &done); | |
| 452 } else { | |
| 453 ASSERT(Isolate::Current()->asserts()); | |
| 454 __ BranchNotEqual(reg, Object::null_instance(), &done); | |
| 455 } | |
| 456 | |
| 457 __ Push(reg); // Push the source object. | |
| 458 compiler->GenerateRuntimeCall(token_pos, deopt_id, | |
| 459 kNonBoolTypeErrorRuntimeEntry, 1, locs); | |
| 460 // We should never return here. | |
| 461 __ break_(0); | |
| 462 __ Bind(&done); | |
| 463 } | |
| 464 | |
| 465 | |
| 466 void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 467 Register obj = locs()->in(0).reg(); | |
| 468 Register result = locs()->out(0).reg(); | |
| 469 | |
| 470 __ Comment("AssertBooleanInstr"); | |
| 471 EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler); | |
| 472 ASSERT(obj == result); | |
| 473 } | |
| 474 | |
| 475 | |
| 476 LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone, | |
| 477 bool opt) const { | |
| 478 const intptr_t kNumInputs = 2; | |
| 479 if (operation_cid() == kMintCid) { | |
| 480 const intptr_t kNumTemps = 0; | |
| 481 LocationSummary* locs = new (zone) | |
| 482 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 483 locs->set_in(0, Location::Pair(Location::RequiresRegister(), | |
| 484 Location::RequiresRegister())); | |
| 485 locs->set_in(1, Location::Pair(Location::RequiresRegister(), | |
| 486 Location::RequiresRegister())); | |
| 487 locs->set_out(0, Location::RequiresRegister()); | |
| 488 return locs; | |
| 489 } | |
| 490 if (operation_cid() == kDoubleCid) { | |
| 491 const intptr_t kNumTemps = 0; | |
| 492 LocationSummary* locs = new (zone) | |
| 493 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 494 locs->set_in(0, Location::RequiresFpuRegister()); | |
| 495 locs->set_in(1, Location::RequiresFpuRegister()); | |
| 496 locs->set_out(0, Location::RequiresRegister()); | |
| 497 return locs; | |
| 498 } | |
| 499 if (operation_cid() == kSmiCid) { | |
| 500 const intptr_t kNumTemps = 0; | |
| 501 LocationSummary* locs = new (zone) | |
| 502 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 503 locs->set_in(0, Location::RegisterOrConstant(left())); | |
| 504 // Only one input can be a constant operand. The case of two constant | |
| 505 // operands should be handled by constant propagation. | |
| 506 locs->set_in(1, locs->in(0).IsConstant() | |
| 507 ? Location::RequiresRegister() | |
| 508 : Location::RegisterOrConstant(right())); | |
| 509 locs->set_out(0, Location::RequiresRegister()); | |
| 510 return locs; | |
| 511 } | |
| 512 UNREACHABLE(); | |
| 513 return NULL; | |
| 514 } | |
| 515 | |
| 516 | |
| 517 static void LoadValueCid(FlowGraphCompiler* compiler, | |
| 518 Register value_cid_reg, | |
| 519 Register value_reg, | |
| 520 Label* value_is_smi = NULL) { | |
| 521 __ Comment("LoadValueCid"); | |
| 522 Label done; | |
| 523 if (value_is_smi == NULL) { | |
| 524 __ LoadImmediate(value_cid_reg, kSmiCid); | |
| 525 } | |
| 526 __ andi(CMPRES1, value_reg, Immediate(kSmiTagMask)); | |
| 527 if (value_is_smi == NULL) { | |
| 528 __ beq(CMPRES1, ZR, &done); | |
| 529 } else { | |
| 530 __ beq(CMPRES1, ZR, value_is_smi); | |
| 531 } | |
| 532 __ LoadClassId(value_cid_reg, value_reg); | |
| 533 __ Bind(&done); | |
| 534 } | |
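
`LoadValueCid` leans on the Smi tagging scheme: a Smi stores its value shifted left by one, so the low (tag) bit is zero and `andi CMPRES1, value, kSmiTagMask` yields zero exactly for Smis. A standalone sketch of that encoding, assuming the usual 32-bit layout with `kSmiTagMask == 1`:

```cpp
#include <cassert>
#include <cstdint>

const intptr_t kSmiTagMask = 1;  // Low bit: 0 = Smi, 1 = heap object.

// A Smi encodes its value in the upper bits: raw = value << 1.
intptr_t SmiEncode(intptr_t value) { return value << 1; }
bool IsSmi(intptr_t raw) { return (raw & kSmiTagMask) == 0; }
intptr_t SmiDecode(intptr_t raw) { return raw >> 1; }  // Arithmetic shift.

int main() {
  assert(IsSmi(SmiEncode(123)));
  assert(SmiDecode(SmiEncode(-5)) == -5);
  // Heap pointers are kept tagged (odd), so the same test rejects them.
  intptr_t heap_object = 0x1000 | 1;
  assert(!IsSmi(heap_object));
  return 0;
}
```
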
| 535 | |
| 536 | |
| 537 static RelationOperator TokenKindToIntRelOp(Token::Kind kind) { | |
| 538 switch (kind) { | |
| 539 case Token::kEQ: | |
| 540 return EQ; | |
| 541 case Token::kNE: | |
| 542 return NE; | |
| 543 case Token::kLT: | |
| 544 return LT; | |
| 545 case Token::kGT: | |
| 546 return GT; | |
| 547 case Token::kLTE: | |
| 548 return LE; | |
| 549 case Token::kGTE: | |
| 550 return GE; | |
| 551 default: | |
| 552 UNREACHABLE(); | |
| 553 return NV; | |
| 554 } | |
| 555 } | |
| 556 | |
| 557 | |
| 558 static RelationOperator TokenKindToUintRelOp(Token::Kind kind) { | |
| 559 switch (kind) { | |
| 560 case Token::kEQ: | |
| 561 return EQ; | |
| 562 case Token::kNE: | |
| 563 return NE; | |
| 564 case Token::kLT: | |
| 565 return ULT; | |
| 566 case Token::kGT: | |
| 567 return UGT; | |
| 568 case Token::kLTE: | |
| 569 return ULE; | |
| 570 case Token::kGTE: | |
| 571 return UGE; | |
| 572 default: | |
| 573 UNREACHABLE(); | |
| 574 return NV; | |
| 575 } | |
| 576 } | |
| 577 | |
| 578 | |
| 579 // The comparison code to emit is specified by true_condition. | |
| 580 static void EmitBranchOnCondition(FlowGraphCompiler* compiler, | |
| 581 Condition true_condition, | |
| 582 BranchLabels labels) { | |
| 583 __ Comment("ControlInstruction::EmitBranchOnCondition"); | |
| 584 if (labels.fall_through == labels.false_label) { | |
| 585 // If the next block is the false successor, fall through to it. | |
| 586 __ BranchOnCondition(true_condition, labels.true_label); | |
| 587 } else { | |
| 588 // If the next block is not the false successor, branch to it. | |
| 589 Condition false_condition = NegateCondition(true_condition); | |
| 590 __ BranchOnCondition(false_condition, labels.false_label); | |
| 591 // Fall through or jump to the true successor. | |
| 592 if (labels.fall_through != labels.true_label) { | |
| 593 __ b(labels.true_label); | |
| 594 } | |
| 595 } | |
| 596 } | |
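
`EmitBranchOnCondition` emits one or two jumps depending on which successor block happens to be laid out next. A small standalone emulation of that decision, with assembler labels reduced to strings (purely illustrative):

```cpp
#include <cassert>
#include <string>
#include <vector>

// Returns the branches the emitter would produce for the given label layout.
std::vector<std::string> EmitBranches(const std::string& true_label,
                                      const std::string& false_label,
                                      const std::string& fall_through) {
  std::vector<std::string> out;
  if (fall_through == false_label) {
    out.push_back("branch-if-true -> " + true_label);  // Fall into false.
  } else {
    out.push_back("branch-if-false -> " + false_label);  // Negated condition.
    if (fall_through != true_label) {
      out.push_back("jump -> " + true_label);  // Neither successor is next.
    }
  }
  return out;
}

int main() {
  // Next block is the false successor: one conditional branch suffices.
  assert(EmitBranches("T", "F", "F").size() == 1);
  // Next block is the true successor: one negated conditional branch.
  assert(EmitBranches("T", "F", "T").size() == 1);
  // Neither successor follows: conditional branch plus unconditional jump.
  assert(EmitBranches("T", "F", "X").size() == 2);
  return 0;
}
```
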
| 597 | |
| 598 | |
| 599 static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler, | |
| 600 const LocationSummary& locs, | |
| 601 Token::Kind kind) { | |
| 602 __ Comment("EmitSmiComparisonOp"); | |
| 603 const Location left = locs.in(0); | |
| 604 const Location right = locs.in(1); | |
| 605 ASSERT(!left.IsConstant() || !right.IsConstant()); | |
| 606 ASSERT(left.IsRegister() || left.IsConstant()); | |
| 607 ASSERT(right.IsRegister() || right.IsConstant()); | |
| 608 | |
| 609 int16_t imm = 0; | |
| 610 const Register left_reg = | |
| 611 left.IsRegister() ? left.reg() : __ LoadConditionOperand( | |
| 612 CMPRES1, left.constant(), &imm); | |
| 613 const Register right_reg = | |
| 614 right.IsRegister() ? right.reg() : __ LoadConditionOperand( | |
| 615 CMPRES2, right.constant(), &imm); | |
| 616 return Condition(left_reg, right_reg, TokenKindToIntRelOp(kind), imm); | |
| 617 } | |
| 618 | |
| 619 | |
| 620 static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler, | |
| 621 const LocationSummary& locs, | |
| 622 Token::Kind kind, | |
| 623 BranchLabels labels) { | |
| 624 __ Comment("EmitUnboxedMintEqualityOp"); | |
| 625 ASSERT(Token::IsEqualityOperator(kind)); | |
| 626 PairLocation* left_pair = locs.in(0).AsPairLocation(); | |
| 627 Register left_lo = left_pair->At(0).reg(); | |
| 628 Register left_hi = left_pair->At(1).reg(); | |
| 629 PairLocation* right_pair = locs.in(1).AsPairLocation(); | |
| 630 Register right_lo = right_pair->At(0).reg(); | |
| 631 Register right_hi = right_pair->At(1).reg(); | |
| 632 | |
| 633 if (labels.false_label == NULL) { | |
| 634 // Generate branch-free code. | |
| 635 __ xor_(CMPRES1, left_lo, right_lo); | |
| 636 __ xor_(AT, left_hi, right_hi); | |
| 637 __ or_(CMPRES1, CMPRES1, AT); | |
| 638 return Condition(CMPRES1, ZR, TokenKindToUintRelOp(kind)); | |
| 639 } else { | |
| 640 if (kind == Token::kEQ) { | |
| 641 __ bne(left_hi, right_hi, labels.false_label); | |
| 642 } else { | |
| 643 ASSERT(kind == Token::kNE); | |
| 644 __ bne(left_hi, right_hi, labels.true_label); | |
| 645 } | |
| 646 return Condition(left_lo, right_lo, TokenKindToUintRelOp(kind)); | |
| 647 } | |
| 648 } | |
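
The branch-free mint equality uses the identity that two 64-bit values, split into 32-bit halves, are equal iff `(lo_a ^ lo_b) | (hi_a ^ hi_b)` is zero; the single OR result can then be compared against ZR. A standalone check of that identity:

```cpp
#include <cassert>
#include <cstdint>

// Branch-free 64-bit equality on a 32-bit machine, as in the emitter:
// xor the low words, xor the high words, then or the two results.
bool MintEquals(int64_t a, int64_t b) {
  uint32_t a_lo = (uint32_t)a, a_hi = (uint32_t)(a >> 32);
  uint32_t b_lo = (uint32_t)b, b_hi = (uint32_t)(b >> 32);
  uint32_t cmpres = (a_lo ^ b_lo) | (a_hi ^ b_hi);
  return cmpres == 0;  // Condition(CMPRES1, ZR, EQ).
}

int main() {
  assert(MintEquals(0x123456789abcdef0LL, 0x123456789abcdef0LL));
  assert(!MintEquals(0x123456789abcdef0LL, 0x123456789abcdef1LL));
  // A difference only in the high word must also be caught.
  assert(!MintEquals(0x0000000100000000LL, 0x0000000000000000LL));
  return 0;
}
```
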
| 649 | |
| 650 | |
| 651 static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler, | |
| 652 const LocationSummary& locs, | |
| 653 Token::Kind kind, | |
| 654 BranchLabels labels) { | |
| 655 __ Comment("EmitUnboxedMintComparisonOp"); | |
| 656 PairLocation* left_pair = locs.in(0).AsPairLocation(); | |
| 657 Register left_lo = left_pair->At(0).reg(); | |
| 658 Register left_hi = left_pair->At(1).reg(); | |
| 659 PairLocation* right_pair = locs.in(1).AsPairLocation(); | |
| 660 Register right_lo = right_pair->At(0).reg(); | |
| 661 Register right_hi = right_pair->At(1).reg(); | |
| 662 | |
| 663 if (labels.false_label == NULL) { | |
| 664 // Generate branch-free code (except for skipping the lower words compare). | |
| 665 // Result in CMPRES1, CMPRES2, so that CMPRES1 op CMPRES2 === left op right. | |
| 666 Label done; | |
| 667 // Compare upper halves first. | |
| 668 __ slt(CMPRES1, right_hi, left_hi); | |
| 669 __ slt(CMPRES2, left_hi, right_hi); | |
| 670 // If higher words aren't equal, skip comparing lower words. | |
| 671 __ bne(CMPRES1, CMPRES2, &done); | |
| 672 | |
| 673 __ sltu(CMPRES1, right_lo, left_lo); | |
| 674 __ sltu(CMPRES2, left_lo, right_lo); | |
| 675 __ Bind(&done); | |
| 676 return Condition(CMPRES1, CMPRES2, TokenKindToUintRelOp(kind)); | |
| 677 } else { | |
| 678 switch (kind) { | |
| 679 case Token::kLT: | |
| 680 case Token::kLTE: { | |
| 681 __ slt(AT, left_hi, right_hi); | |
| 682 __ bne(AT, ZR, labels.true_label); | |
| 683 __ delay_slot()->slt(AT, right_hi, left_hi); | |
| 684 __ bne(AT, ZR, labels.false_label); | |
| 685 break; | |
| 686 } | |
| 687 case Token::kGT: | |
| 688 case Token::kGTE: { | |
| 689 __ slt(AT, left_hi, right_hi); | |
| 690 __ bne(AT, ZR, labels.false_label); | |
| 691 __ delay_slot()->slt(AT, right_hi, left_hi); | |
| 692 __ bne(AT, ZR, labels.true_label); | |
| 693 break; | |
| 694 } | |
| 695 default: | |
| 696 UNREACHABLE(); | |
| 697 } | |
| 698 return Condition(left_lo, right_lo, TokenKindToUintRelOp(kind)); | |
| 699 } | |
| 700 } | |
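
The branch-free mint comparison compares the high words with signed `slt` in both directions; only when they tie does it fall through to an unsigned `sltu` on the low words. The resulting pair satisfies `left OP right === CMPRES1 OP CMPRES2` for the relational operators. A standalone model of that invariant:

```cpp
#include <cassert>
#include <cstdint>

// Models the branch-free path: produces (cmpres1, cmpres2) such that
// (a OP b) holds exactly when (cmpres1 OP cmpres2) holds.
void MintCompare(int64_t a, int64_t b, int* cmpres1, int* cmpres2) {
  int32_t a_hi = (int32_t)(a >> 32), b_hi = (int32_t)(b >> 32);
  uint32_t a_lo = (uint32_t)a, b_lo = (uint32_t)b;
  *cmpres1 = b_hi < a_hi;  // slt CMPRES1, right_hi, left_hi (signed).
  *cmpres2 = a_hi < b_hi;  // slt CMPRES2, left_hi, right_hi (signed).
  if (*cmpres1 == *cmpres2) {  // High words tie: decide on the low words,
    *cmpres1 = b_lo < a_lo;    // compared unsigned.
    *cmpres2 = a_lo < b_lo;
  }
}

int main() {
  int c1, c2;
  MintCompare(-1, 1, &c1, &c2);  // -1 < 1: the signed high words decide.
  assert(c1 < c2);
  MintCompare(0x100000002LL, 0x100000001LL, &c1, &c2);  // Low words decide.
  assert(c1 > c2);
  MintCompare(7, 7, &c1, &c2);  // Equal operands leave c1 == c2.
  assert(c1 == c2);
  return 0;
}
```
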
| 701 | |
| 702 | |
| 703 static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler, | |
| 704 const LocationSummary& locs, | |
| 705 Token::Kind kind, | |
| 706 BranchLabels labels) { | |
| 707 DRegister left = locs.in(0).fpu_reg(); | |
| 708 DRegister right = locs.in(1).fpu_reg(); | |
| 709 | |
| 710 __ Comment("DoubleComparisonOp(left=%d, right=%d)", left, right); | |
| 711 | |
| 712 __ cund(left, right); | |
| 713 Label* nan_label = | |
| 714 (kind == Token::kNE) ? labels.true_label : labels.false_label; | |
| 715 __ bc1t(nan_label); | |
| 716 | |
| 717 switch (kind) { | |
| 718 case Token::kEQ: | |
| 719 __ ceqd(left, right); | |
| 720 break; | |
| 721 case Token::kNE: | |
| 722 __ ceqd(left, right); | |
| 723 break; | |
| 724 case Token::kLT: | |
| 725 __ coltd(left, right); | |
| 726 break; | |
| 727 case Token::kLTE: | |
| 728 __ coled(left, right); | |
| 729 break; | |
| 730 case Token::kGT: | |
| 731 __ coltd(right, left); | |
| 732 break; | |
| 733 case Token::kGTE: | |
| 734 __ coled(right, left); | |
| 735 break; | |
| 736 default: { | |
| 737 // We should only be passing the above conditions to this function. | |
| 738 UNREACHABLE(); | |
| 739 break; | |
| 740 } | |
| 741 } | |
| 742 | |
| 743 if (labels.false_label == NULL) { | |
| 744 // Generate branch-free code and return result in condition. | |
| 745 __ LoadImmediate(CMPRES1, 1); | |
| 746 if (kind == Token::kNE) { | |
| 747 __ movf(CMPRES1, ZR); | |
| 748 } else { | |
| 749 __ movt(CMPRES1, ZR); | |
| 750 } | |
| 751 return Condition(CMPRES1, ZR, EQ); | |
| 752 } else { | |
| 753 if (labels.fall_through == labels.false_label) { | |
| 754 if (kind == Token::kNE) { | |
| 755 __ bc1f(labels.true_label); | |
| 756 } else { | |
| 757 __ bc1t(labels.true_label); | |
| 758 } | |
| 759 // Since we already branched on true, return the never true condition. | |
| 760 return Condition(CMPRES1, CMPRES2, NV); | |
| 761 } else { | |
| 762 if (kind == Token::kNE) { | |
| 763 __ bc1t(labels.false_label); | |
| 764 } else { | |
| 765 __ bc1f(labels.false_label); | |
| 766 } | |
| 767 // Since we already branched on false, return the always true condition. | |
| 768 return Condition(CMPRES1, CMPRES2, AL); | |
| 769 } | |
| 770 } | |
| 771 } | |
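
The `cund`/`bc1t` pair at the top routes unordered (NaN) operands before any ordered comparison runs: under IEEE 754 every ordered relation involving NaN is false, so only `!=` may answer true. A standalone sketch of that dispatch (the `Kind` enum is a stand-in for `Token::Kind`):

```cpp
#include <cassert>
#include <cmath>

enum Kind { kEQ, kNE, kLT, kLTE, kGT, kGTE };

// Mirrors the emitted logic: unordered operands go straight to the
// true label for kNE and the false label for every other kind.
bool DoubleCompare(double left, double right, Kind kind) {
  if (std::isnan(left) || std::isnan(right)) {  // cund + bc1t.
    return kind == kNE;
  }
  switch (kind) {
    case kEQ:  return left == right;
    case kNE:  return left != right;  // ceqd; inverted later via bc1f/movf.
    case kLT:  return left < right;
    case kLTE: return left <= right;
    case kGT:  return right < left;   // coltd with operands swapped.
    case kGTE: return right <= left;  // coled with operands swapped.
  }
  return false;
}

int main() {
  double nan = std::nan("");
  assert(DoubleCompare(nan, 1.0, kNE));  // Only != is true for NaN.
  assert(!DoubleCompare(nan, 1.0, kEQ));
  assert(!DoubleCompare(nan, nan, kLTE));
  assert(DoubleCompare(1.0, 2.0, kLT));
  return 0;
}
```
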
| 772 | |
| 773 | |
| 774 Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
| 775 BranchLabels labels) { | |
| 776 if (operation_cid() == kSmiCid) { | |
| 777 return EmitSmiComparisonOp(compiler, *locs(), kind()); | |
| 778 } else if (operation_cid() == kMintCid) { | |
| 779 return EmitUnboxedMintEqualityOp(compiler, *locs(), kind(), labels); | |
| 780 } else { | |
| 781 ASSERT(operation_cid() == kDoubleCid); | |
| 782 return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels); | |
| 783 } | |
| 784 } | |
| 785 | |
| 786 | |
| 787 void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 788 Label is_true, is_false; | |
| 789 BranchLabels labels = {&is_true, &is_false, &is_false}; | |
| 790 Condition true_condition = EmitComparisonCode(compiler, labels); | |
| 791 if (true_condition.IsValid()) { | |
| 792 EmitBranchOnCondition(compiler, true_condition, labels); | |
| 793 } | |
| 794 | |
| 795 Register result = this->locs()->out(0).reg(); | |
| 796 Label done; | |
| 797 __ Bind(&is_false); | |
| 798 __ LoadObject(result, Bool::False()); | |
| 799 __ b(&done); | |
| 800 __ Bind(&is_true); | |
| 801 __ LoadObject(result, Bool::True()); | |
| 802 __ Bind(&done); | |
| 803 } | |
| 804 | |
| 805 | |
| 806 void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
| 807 BranchInstr* branch) { | |
| 808 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
| 809 Condition true_condition = EmitComparisonCode(compiler, labels); | |
| 810 if (true_condition.IsValid()) { | |
| 811 EmitBranchOnCondition(compiler, true_condition, labels); | |
| 812 } | |
| 813 } | |
| 814 | |
| 815 | |
| 816 LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 817 const intptr_t kNumInputs = 2; | |
| 818 const intptr_t kNumTemps = 0; | |
| 819 LocationSummary* locs = new (zone) | |
| 820 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 821 locs->set_in(0, Location::RequiresRegister()); | |
| 822 // Only one input can be a constant operand. The case of two constant | |
| 823 // operands should be handled by constant propagation. | |
| 824 locs->set_in(1, Location::RegisterOrConstant(right())); | |
| 825 return locs; | |
| 826 } | |
| 827 | |
| 828 | |
| 829 Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
| 830 BranchLabels labels) { | |
| 831 Register left = locs()->in(0).reg(); | |
| 832 Location right = locs()->in(1); | |
| 833 if (right.IsConstant()) { | |
| 834 ASSERT(right.constant().IsSmi()); | |
| 835 const int32_t imm = reinterpret_cast<int32_t>(right.constant().raw()); | |
| 836 __ AndImmediate(CMPRES1, left, imm); | |
| 837 } else { | |
| 838 __ and_(CMPRES1, left, right.reg()); | |
| 839 } | |
| 840 return Condition(CMPRES1, ZR, (kind() == Token::kNE) ? NE : EQ); | |
| 841 } | |
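
The test-smi path ANDs tagged values directly: since a Smi's raw bits are its value shifted left by one, `(a << 1) & (b << 1) == (a & b) << 1`, so the tagged AND is zero exactly when the untagged AND is. That is also why the constant is used as `reinterpret_cast<int32_t>(raw)` rather than untagged first. A standalone check:

```cpp
#include <cassert>
#include <cstdint>

// Smi tagging: raw bits are the value shifted left by one.
int32_t SmiRaw(int32_t value) { return value << 1; }

int main() {
  // ANDing two tagged Smis yields the tagged AND of their values.
  int32_t a = 0b1010, b = 0b0110;
  assert((SmiRaw(a) & SmiRaw(b)) == SmiRaw(a & b));
  // So the emitter can test CMPRES1 = left_raw & right_raw against ZR.
  bool any_bits_shared = (SmiRaw(a) & SmiRaw(b)) != 0;
  assert(any_bits_shared == ((a & b) != 0));
  return 0;
}
```
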
| 842 | |
| 843 | |
| 844 LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone, | |
| 845 bool opt) const { | |
| 846 const intptr_t kNumInputs = 1; | |
| 847 const intptr_t kNumTemps = 1; | |
| 848 LocationSummary* locs = new (zone) | |
| 849 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 850 locs->set_in(0, Location::RequiresRegister()); | |
| 851 locs->set_temp(0, Location::RequiresRegister()); | |
| 852 locs->set_out(0, Location::RequiresRegister()); | |
| 853 return locs; | |
| 854 } | |
| 855 | |
| 856 | |
| 857 Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
| 858 BranchLabels labels) { | |
| 859 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT)); | |
| 860 Register val_reg = locs()->in(0).reg(); | |
| 861 Register cid_reg = locs()->temp(0).reg(); | |
| 862 | |
| 863 Label* deopt = | |
| 864 CanDeoptimize() | |
| 865 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids, | |
| 866 licm_hoisted_ ? ICData::kHoisted : 0) | |
| 867 : NULL; | |
| 868 | |
| 869 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; | |
| 870 const ZoneGrowableArray<intptr_t>& data = cid_results(); | |
| 871 ASSERT(data[0] == kSmiCid); | |
| 872 bool result = data[1] == true_result; | |
| 873 __ andi(CMPRES1, val_reg, Immediate(kSmiTagMask)); | |
| 874 __ beq(CMPRES1, ZR, result ? labels.true_label : labels.false_label); | |
| 875 | |
| 876 __ LoadClassId(cid_reg, val_reg); | |
| 877 for (intptr_t i = 2; i < data.length(); i += 2) { | |
| 878 const intptr_t test_cid = data[i]; | |
| 879 ASSERT(test_cid != kSmiCid); | |
| 880 result = data[i + 1] == true_result; | |
| 881 __ BranchEqual(cid_reg, Immediate(test_cid), | |
| 882 result ? labels.true_label : labels.false_label); | |
| 883 } | |
| 884 // No match found: deoptimize or take the default action. | |
| 885 if (deopt == NULL) { | |
| 886 // If the cid is not in the list, jump to the opposite label from the cids | |
| 887 // that are in the list. These must be all the same (see asserts in the | |
| 888 // constructor). | |
| 889 Label* target = result ? labels.false_label : labels.true_label; | |
| 890 if (target != labels.fall_through) { | |
| 891 __ b(target); | |
| 892 } | |
| 893 } else { | |
| 894 __ b(deopt); | |
| 895 } | |
| 896 // Dummy result, as this method already emitted the jump; there's no need | |
| 897 // for the caller to branch on a condition. | |
| 898 return Condition(ZR, ZR, INVALID_RELATION); | |
| 899 } | |
| 900 | |
| 901 | |
| 902 LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone, | |
| 903 bool opt) const { | |
| 904 const intptr_t kNumInputs = 2; | |
| 905 const intptr_t kNumTemps = 0; | |
| 906 if (operation_cid() == kMintCid) { | |
| 907 const intptr_t kNumTemps = 0; | |
| 908 LocationSummary* locs = new (zone) | |
| 909 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 910 locs->set_in(0, Location::Pair(Location::RequiresRegister(), | |
| 911 Location::RequiresRegister())); | |
| 912 locs->set_in(1, Location::Pair(Location::RequiresRegister(), | |
| 913 Location::RequiresRegister())); | |
| 914 locs->set_out(0, Location::RequiresRegister()); | |
| 915 return locs; | |
| 916 } | |
| 917 if (operation_cid() == kDoubleCid) { | |
| 918 LocationSummary* summary = new (zone) | |
| 919 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 920 summary->set_in(0, Location::RequiresFpuRegister()); | |
| 921 summary->set_in(1, Location::RequiresFpuRegister()); | |
| 922 summary->set_out(0, Location::RequiresRegister()); | |
| 923 return summary; | |
| 924 } | |
| 925 ASSERT(operation_cid() == kSmiCid); | |
| 926 LocationSummary* summary = new (zone) | |
| 927 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 928 summary->set_in(0, Location::RegisterOrConstant(left())); | |
| 929 // Only one input can be a constant operand. The case of two constant | |
| 930 // operands should be handled by constant propagation. | |
| 931 summary->set_in(1, summary->in(0).IsConstant() | |
| 932 ? Location::RequiresRegister() | |
| 933 : Location::RegisterOrConstant(right())); | |
| 934 summary->set_out(0, Location::RequiresRegister()); | |
| 935 return summary; | |
| 936 } | |
| 937 | |
| 938 | |
| 939 Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
| 940 BranchLabels labels) { | |
| 941 if (operation_cid() == kSmiCid) { | |
| 942 return EmitSmiComparisonOp(compiler, *locs(), kind()); | |
| 943 } else if (operation_cid() == kMintCid) { | |
| 944 return EmitUnboxedMintComparisonOp(compiler, *locs(), kind(), labels); | |
| 945 } else { | |
| 946 ASSERT(operation_cid() == kDoubleCid); | |
| 947 return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels); | |
| 948 } | |
| 949 } | |
| 950 | |
| 951 | |
| 952 LocationSummary* NativeCallInstr::MakeLocationSummary(Zone* zone, | |
| 953 bool opt) const { | |
| 954 return MakeCallSummary(zone); | |
| 955 } | |
| 956 | |
| 957 | |
| 958 void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 959 SetupNative(); | |
| 960 __ Comment("NativeCallInstr"); | |
| 961 Register result = locs()->out(0).reg(); | |
| 962 | |
| 963 // Push the result placeholder, initialized to NULL. | |
| 964 __ PushObject(Object::null_object()); | |
| 965 // Pass a pointer to the first argument in A2. | |
| 966 if (!function().HasOptionalParameters()) { | |
| 967 __ AddImmediate( | |
| 968 A2, FP, (kParamEndSlotFromFp + function().NumParameters()) * kWordSize); | |
| 969 } else { | |
| 970 __ AddImmediate(A2, FP, kFirstLocalSlotFromFp * kWordSize); | |
| 971 } | |
| 972 // Compute the effective address. When running under the simulator, | |
| 973 // this is a redirection address that forces the simulator to call | |
| 974 // into the runtime system. | |
| 975 uword entry; | |
| 976 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function()); | |
| 977 const StubEntry* stub_entry; | |
| 978 if (link_lazily()) { | |
| 979 stub_entry = StubCode::CallBootstrapNative_entry(); | |
| 980 entry = NativeEntry::LinkNativeCallEntry(); | |
| 981 } else { | |
| 982 entry = reinterpret_cast<uword>(native_c_function()); | |
| 983 if (is_bootstrap_native()) { | |
| 984 stub_entry = StubCode::CallBootstrapNative_entry(); | |
| 985 #if defined(USING_SIMULATOR) | |
| 986 entry = Simulator::RedirectExternalReference( | |
| 987 entry, Simulator::kBootstrapNativeCall, NativeEntry::kNumArguments); | |
| 988 #endif | |
| 989 } else if (is_auto_scope()) { | |
| 990 // In the case of non-bootstrap native methods, the CallNativeCFunction | |
| 991 // stub generates the redirection address when running under the simulator | |
| 992 // and hence we do not change 'entry' here. | |
| 993 stub_entry = StubCode::CallAutoScopeNative_entry(); | |
| 994 } else { | |
| 995 // In the case of non-bootstrap native methods, the CallNativeCFunction | |
| 996 // stub generates the redirection address when running under the simulator | |
| 997 // and hence we do not change 'entry' here. | |
| 998 stub_entry = StubCode::CallNoScopeNative_entry(); | |
| 999 } | |
| 1000 } | |
| 1001 __ LoadImmediate(A1, argc_tag); | |
| 1002 ExternalLabel label(entry); | |
| 1003 __ LoadNativeEntry(T5, &label, kNotPatchable); | |
| 1004 if (link_lazily()) { | |
| 1005 compiler->GeneratePatchableCall(token_pos(), *stub_entry, | |
| 1006 RawPcDescriptors::kOther, locs()); | |
| 1007 } else { | |
| 1008 compiler->GenerateCall(token_pos(), *stub_entry, RawPcDescriptors::kOther, | |
| 1009 locs()); | |
| 1010 } | |
| 1011 __ Pop(result); | |
| 1012 } | |
| 1013 | |
| 1014 | |
| 1015 LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary( | |
| 1016 Zone* zone, | |
| 1017 bool opt) const { | |
| 1018 const intptr_t kNumInputs = 1; | |
| 1019 // TODO(fschneider): Allow immediate operands for the char code. | |
| 1020 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), | |
| 1021 LocationSummary::kNoCall); | |
| 1022 } | |
| 1023 | |
| 1024 | |
| 1025 void OneByteStringFromCharCodeInstr::EmitNativeCode( | |
| 1026 FlowGraphCompiler* compiler) { | |
| 1027 ASSERT(compiler->is_optimizing()); | |
| 1028 Register char_code = locs()->in(0).reg(); | |
| 1029 Register result = locs()->out(0).reg(); | |
| 1030 | |
| 1031 __ lw(result, Address(THR, Thread::predefined_symbols_address_offset())); | |
| 1032 __ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize); | |
| 1033 __ sll(TMP, char_code, 1); // Char code is a smi. | |
| 1034 __ addu(TMP, TMP, result); | |
| 1035 __ lw(result, Address(TMP)); | |
| 1036 } | |
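
The `sll` by 1 above does double duty: the char code arrives as a Smi (already `code << 1`), so shifting once more yields `code << 2 == code * kWordSize` on 32-bit MIPS, the byte offset of the code's entry in the predefined-symbols pointer table that `result` points into. A standalone check of the arithmetic:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kWordSize = 4;  // 32-bit MIPS.
  for (intptr_t code = 0; code <= 255; code++) {
    intptr_t smi_char_code = code << 1;    // Tagged Smi char code.
    intptr_t offset = smi_char_code << 1;  // sll TMP, char_code, 1.
    assert(offset == code * kWordSize);    // Index into a word array.
  }
  return 0;
}
```
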
| 1037 | |
| 1038 | |
| 1039 LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone, | |
| 1040 bool opt) const { | |
| 1041 const intptr_t kNumInputs = 1; | |
| 1042 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), | |
| 1043 LocationSummary::kNoCall); | |
| 1044 } | |
| 1045 | |
| 1046 | |
| 1047 void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1048 __ Comment("StringToCharCodeInstr"); | |
| 1049 | |
| 1050 ASSERT(cid_ == kOneByteStringCid); | |
| 1051 Register str = locs()->in(0).reg(); | |
| 1052 Register result = locs()->out(0).reg(); | |
| 1053 ASSERT(str != result); | |
| 1054 Label done; | |
| 1055 __ lw(result, FieldAddress(str, String::length_offset())); | |
| 1056 __ BranchNotEqual(result, Immediate(Smi::RawValue(1)), &done); | |
| 1057 __ delay_slot()->addiu(result, ZR, Immediate(Smi::RawValue(-1))); | |
| 1058 __ lbu(result, FieldAddress(str, OneByteString::data_offset())); | |
| 1059 __ SmiTag(result); | |
| 1060 __ Bind(&done); | |
| 1061 } | |
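
`StringToCharCodeInstr` exploits the MIPS branch delay slot: the `addiu` placed in the slot executes whether or not the branch is taken, so `result` is preset to `Smi(-1)` and only the length == 1 fall-through path overwrites it with the tagged first byte. A standalone emulation of that control flow (`SmiRaw` is a hypothetical helper for the tagging):

```cpp
#include <cassert>
#include <cstdint>

int32_t SmiRaw(int32_t v) { return v * 2; }  // Tagging, safe for negatives.

// Emulates the emitted sequence for a one-byte string of the given length.
int32_t StringToCharCode(int32_t length, uint8_t first_byte) {
  int32_t result = SmiRaw(length);   // lw result, String.length (a Smi).
  bool taken = result != SmiRaw(1);  // BranchNotEqual(result, Smi(1), &done).
  result = SmiRaw(-1);               // Delay slot: executes on BOTH paths.
  if (taken) return result;          // Branch taken: answer is Smi(-1).
  result = first_byte;               // lbu result, data[0].
  return SmiRaw(result);             // SmiTag(result).
}

int main() {
  assert(StringToCharCode(1, 'a') == SmiRaw('a'));
  assert(StringToCharCode(2, 'a') == SmiRaw(-1));  // Length != 1.
  return 0;
}
```
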
| 1062 | |
| 1063 | |
| 1064 LocationSummary* StringInterpolateInstr::MakeLocationSummary(Zone* zone, | |
| 1065 bool opt) const { | |
| 1066 const intptr_t kNumInputs = 1; | |
| 1067 const intptr_t kNumTemps = 0; | |
| 1068 LocationSummary* summary = new (zone) | |
| 1069 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 1070 summary->set_in(0, Location::RegisterLocation(A0)); | |
| 1071 summary->set_out(0, Location::RegisterLocation(V0)); | |
| 1072 return summary; | |
| 1073 } | |
| 1074 | |
| 1075 | |
| 1076 void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1077 Register array = locs()->in(0).reg(); | |
| 1078 __ Push(array); | |
| 1079 const int kTypeArgsLen = 0; | |
| 1080 const int kNumberOfArguments = 1; | |
| 1081 const Array& kNoArgumentNames = Object::null_array(); | |
| 1082 ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kNoArgumentNames); | |
| 1083 compiler->GenerateStaticCall(deopt_id(), token_pos(), CallFunction(), | |
| 1084 args_info, locs(), ICData::Handle()); | |
| 1085 ASSERT(locs()->out(0).reg() == V0); | |
| 1086 } | |
| 1087 | |
| 1088 | |
| 1089 LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone, | |
| 1090 bool opt) const { | |
| 1091 const intptr_t kNumInputs = 1; | |
| 1092 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), | |
| 1093 LocationSummary::kNoCall); | |
| 1094 } | |
| 1095 | |
| 1096 | |
| 1097 void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1098 Register obj = locs()->in(0).reg(); | |
| 1099 Register result = locs()->out(0).reg(); | |
| 1100 if (object()->definition()->representation() == kUntagged) { | |
| 1101 __ LoadFromOffset(result, obj, offset()); | |
| 1102 } else { | |
| 1103 ASSERT(object()->definition()->representation() == kTagged); | |
| 1104 __ LoadFieldFromOffset(result, obj, offset()); | |
| 1105 } | |
| 1106 } | |
| 1107 | |
| 1108 | |
| 1109 LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone, | |
| 1110 bool opt) const { | |
| 1111 const intptr_t kNumInputs = 1; | |
| 1112 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(), | |
| 1113 LocationSummary::kNoCall); | |
| 1114 } | |
| 1115 | |
| 1116 | |
| 1117 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1118 Register object = locs()->in(0).reg(); | |
| 1119 Register result = locs()->out(0).reg(); | |
| 1120 const AbstractType& value_type = *this->object()->Type()->ToAbstractType(); | |
| 1121 if (CompileType::Smi().IsAssignableTo(value_type) || | |
| 1122 value_type.IsTypeParameter()) { | |
| 1123 __ LoadTaggedClassIdMayBeSmi(result, object); | |
| 1124 } else { | |
| 1125 __ LoadClassId(result, object); | |
| 1126 __ SmiTag(result); | |
| 1127 } | |
| 1128 } | |
| 1129 | |
| 1130 | |
| 1131 CompileType LoadIndexedInstr::ComputeType() const { | |
| 1132 switch (class_id_) { | |
| 1133 case kArrayCid: | |
| 1134 case kImmutableArrayCid: | |
| 1135 return CompileType::Dynamic(); | |
| 1136 | |
| 1137 case kTypedDataFloat32ArrayCid: | |
| 1138 case kTypedDataFloat64ArrayCid: | |
| 1139 return CompileType::FromCid(kDoubleCid); | |
| 1140 case kTypedDataFloat32x4ArrayCid: | |
| 1141 return CompileType::FromCid(kFloat32x4Cid); | |
| 1142 case kTypedDataInt32x4ArrayCid: | |
| 1143 return CompileType::FromCid(kInt32x4Cid); | |
| 1144 | |
| 1145 case kTypedDataInt8ArrayCid: | |
| 1146 case kTypedDataUint8ArrayCid: | |
| 1147 case kTypedDataUint8ClampedArrayCid: | |
| 1148 case kExternalTypedDataUint8ArrayCid: | |
| 1149 case kExternalTypedDataUint8ClampedArrayCid: | |
| 1150 case kTypedDataInt16ArrayCid: | |
| 1151 case kTypedDataUint16ArrayCid: | |
| 1152 case kOneByteStringCid: | |
| 1153 case kTwoByteStringCid: | |
| 1154 case kExternalOneByteStringCid: | |
| 1155 case kExternalTwoByteStringCid: | |
| 1156 return CompileType::FromCid(kSmiCid); | |
| 1157 | |
| 1158 case kTypedDataInt32ArrayCid: | |
| 1159 case kTypedDataUint32ArrayCid: | |
| 1160 return CompileType::Int(); | |
| 1161 | |
| 1162 default: | |
| 1163 UNIMPLEMENTED(); | |
| 1164 return CompileType::Dynamic(); | |
| 1165 } | |
| 1166 } | |
| 1167 | |
| 1168 | |
| 1169 Representation LoadIndexedInstr::representation() const { | |
| 1170 switch (class_id_) { | |
| 1171 case kArrayCid: | |
| 1172 case kImmutableArrayCid: | |
| 1173 case kTypedDataInt8ArrayCid: | |
| 1174 case kTypedDataUint8ArrayCid: | |
| 1175 case kTypedDataUint8ClampedArrayCid: | |
| 1176 case kExternalTypedDataUint8ArrayCid: | |
| 1177 case kExternalTypedDataUint8ClampedArrayCid: | |
| 1178 case kTypedDataInt16ArrayCid: | |
| 1179 case kTypedDataUint16ArrayCid: | |
| 1180 case kOneByteStringCid: | |
| 1181 case kTwoByteStringCid: | |
| 1182 case kExternalOneByteStringCid: | |
| 1183 case kExternalTwoByteStringCid: | |
| 1184 return kTagged; | |
| 1185 case kTypedDataInt32ArrayCid: | |
| 1186 return kUnboxedInt32; | |
| 1187 case kTypedDataUint32ArrayCid: | |
| 1188 return kUnboxedUint32; | |
| 1189 case kTypedDataFloat32ArrayCid: | |
| 1190 case kTypedDataFloat64ArrayCid: | |
| 1191 return kUnboxedDouble; | |
| 1192 case kTypedDataInt32x4ArrayCid: | |
| 1193 return kUnboxedInt32x4; | |
| 1194 case kTypedDataFloat32x4ArrayCid: | |
| 1195 return kUnboxedFloat32x4; | |
| 1196 default: | |
| 1197 UNIMPLEMENTED(); | |
| 1198 return kTagged; | |
| 1199 } | |
| 1200 } | |
| 1201 | |
| 1202 | |
| 1203 static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) { | |
| 1204 ConstantInstr* constant = value->definition()->AsConstant(); | |
| 1205 if ((constant == NULL) || !Assembler::IsSafeSmi(constant->value())) { | |
| 1206 return false; | |
| 1207 } | |
| 1208 const int64_t index = Smi::Cast(constant->value()).AsInt64Value(); | |
| 1209 const intptr_t scale = Instance::ElementSizeFor(cid); | |
| 1210 const int64_t offset = | |
| 1211 index * scale + | |
| 1212 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | |
| 1213 if (!Utils::IsInt(32, offset)) { | |
| 1214 return false; | |
| 1215 } | |
| 1216 return Address::CanHoldOffset(static_cast<int32_t>(offset)); | |
| 1217 } | |
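
`CanBeImmediateIndex` folds a constant index into the final byte displacement and admits it only if the addressing mode can encode it. A standalone sketch with a hypothetical element size and data offset (the real values come from `Instance::ElementSizeFor` and `Instance::DataOffsetFor`; on MIPS the encodable displacement is a signed 16-bit immediate):

```cpp
#include <cassert>
#include <cstdint>

const intptr_t kHeapObjectTag = 1;  // Tagged pointers are off by one.

// Hypothetical layout: 8-byte elements stored after a 16-byte header.
bool CanBeImmediateIndex(int64_t index, bool is_external) {
  const intptr_t scale = 8;         // Instance::ElementSizeFor(cid).
  const intptr_t data_offset = 16;  // Instance::DataOffsetFor(cid).
  const int64_t offset =
      index * scale + (is_external ? 0 : (data_offset - kHeapObjectTag));
  // Must fit in 32 bits; the real code then asks Address::CanHoldOffset,
  // which on MIPS means a signed 16-bit displacement.
  if (offset != (int32_t)offset) return false;
  return offset >= -32768 && offset <= 32767;
}

int main() {
  assert(CanBeImmediateIndex(10, /*is_external=*/false));  // Offset 95 fits.
  assert(!CanBeImmediateIndex(1 << 20, false));            // Too large.
  return 0;
}
```
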
| 1218 | |
| 1219 | |
| 1220 LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone, | |
| 1221 bool opt) const { | |
| 1222 const intptr_t kNumInputs = 2; | |
| 1223 const intptr_t kNumTemps = aligned() ? 0 : 1; | |
| 1224 LocationSummary* locs = new (zone) | |
| 1225 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 1226 locs->set_in(0, Location::RequiresRegister()); | |
| 1227 if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { | |
| 1228 locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); | |
| 1229 } else { | |
| 1230 locs->set_in(1, Location::RequiresRegister()); | |
| 1231 } | |
| 1232 if ((representation() == kUnboxedDouble) || | |
| 1233 (representation() == kUnboxedFloat32x4) || | |
| 1234 (representation() == kUnboxedInt32x4)) { | |
| 1235 locs->set_out(0, Location::RequiresFpuRegister()); | |
| 1236 } else { | |
| 1237 locs->set_out(0, Location::RequiresRegister()); | |
| 1238 } | |
| 1239 if (!aligned()) { | |
| 1240 locs->set_temp(0, Location::RequiresRegister()); | |
| 1241 } | |
| 1242 return locs; | |
| 1243 } | |
| 1244 | |
| 1245 | |
| 1246 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1247 __ Comment("LoadIndexedInstr"); | |
| 1248 // The array register points to the backing store for external arrays. | |
| 1249 const Register array = locs()->in(0).reg(); | |
| 1250 const Location index = locs()->in(1); | |
| 1251 const Register address = aligned() ? kNoRegister : locs()->temp(0).reg(); | |
| 1252 | |
| 1253 Address element_address(kNoRegister); | |
| 1254 if (aligned()) { | |
| 1255 element_address = | |
| 1256 index.IsRegister() | |
| 1257 ? __ ElementAddressForRegIndex(true, // Load. | |
| 1258 IsExternal(), class_id(), | |
| 1259 index_scale(), array, index.reg()) | |
| 1260 : __ ElementAddressForIntIndex(IsExternal(), class_id(), | |
| 1261 index_scale(), array, | |
| 1262 Smi::Cast(index.constant()).Value()); | |
| 1263 // Warning: element_address may use register TMP as base. | |
| 1264 } else { | |
| 1265 if (index.IsRegister()) { | |
| 1266 __ LoadElementAddressForRegIndex(address, | |
| 1267 true, // Load. | |
| 1268 IsExternal(), class_id(), index_scale(), | |
| 1269 array, index.reg()); | |
| 1270 } else { | |
| 1271 __ LoadElementAddressForIntIndex(address, IsExternal(), class_id(), | |
| 1272 index_scale(), array, | |
| 1273 Smi::Cast(index.constant()).Value()); | |
| 1274 } | |
| 1275 } | |
| 1276 | |
| 1277 if ((representation() == kUnboxedDouble) || | |
| 1278 (representation() == kUnboxedFloat32x4) || | |
| 1279 (representation() == kUnboxedInt32x4)) { | |
| 1280 DRegister result = locs()->out(0).fpu_reg(); | |
| 1281 switch (class_id()) { | |
| 1282 case kTypedDataFloat32ArrayCid: | |
| 1283 // Load single precision float. | |
| 1284 __ lwc1(EvenFRegisterOf(result), element_address); | |
| 1285 break; | |
| 1286 case kTypedDataFloat64ArrayCid: | |
| 1287 __ LoadDFromOffset(result, element_address.base(), | |
| 1288 element_address.offset()); | |
| 1289 break; | |
| 1290 case kTypedDataInt32x4ArrayCid: | |
| 1291 case kTypedDataFloat32x4ArrayCid: | |
| 1292 UNIMPLEMENTED(); | |
| 1293 break; | |
| 1294 } | |
| 1295 return; | |
| 1296 } | |
| 1297 | |
| 1298 if ((representation() == kUnboxedUint32) || | |
| 1299 (representation() == kUnboxedInt32)) { | |
| 1300 const Register result = locs()->out(0).reg(); | |
| 1301 switch (class_id()) { | |
| 1302 case kTypedDataInt32ArrayCid: | |
| 1303 ASSERT(representation() == kUnboxedInt32); | |
| 1304 if (aligned()) { | |
| 1305 __ lw(result, element_address); | |
| 1306 } else { | |
| 1307 __ LoadWordUnaligned(result, address, TMP); | |
| 1308 } | |
| 1309 break; | |
| 1310 case kTypedDataUint32ArrayCid: | |
| 1311 ASSERT(representation() == kUnboxedUint32); | |
| 1312 if (aligned()) { | |
| 1313 __ lw(result, element_address); | |
| 1314 } else { | |
| 1315 __ LoadWordUnaligned(result, address, TMP); | |
| 1316 } | |
| 1317 break; | |
| 1318 default: | |
| 1319 UNREACHABLE(); | |
| 1320 } | |
| 1321 return; | |
| 1322 } | |
| 1323 | |
| 1324 ASSERT(representation() == kTagged); | |
| 1325 | |
| 1326 const Register result = locs()->out(0).reg(); | |
| 1327 switch (class_id()) { | |
| 1328 case kTypedDataInt8ArrayCid: | |
| 1329 ASSERT(index_scale() == 1); | |
| 1330 __ lb(result, element_address); | |
| 1331 __ SmiTag(result); | |
| 1332 break; | |
| 1333 case kTypedDataUint8ArrayCid: | |
| 1334 case kTypedDataUint8ClampedArrayCid: | |
| 1335 case kExternalTypedDataUint8ArrayCid: | |
| 1336 case kExternalTypedDataUint8ClampedArrayCid: | |
| 1337 case kOneByteStringCid: | |
| 1338 case kExternalOneByteStringCid: | |
| 1339 ASSERT(index_scale() == 1); | |
| 1340 __ lbu(result, element_address); | |
| 1341 __ SmiTag(result); | |
| 1342 break; | |
| 1343 case kTypedDataInt16ArrayCid: | |
| 1344 if (aligned()) { | |
| 1345 __ lh(result, element_address); | |
| 1346 } else { | |
| 1347 __ LoadHalfWordUnaligned(result, address, TMP); | |
| 1348 } | |
| 1349 __ SmiTag(result); | |
| 1350 break; | |
| 1351 case kTypedDataUint16ArrayCid: | |
| 1352 case kTwoByteStringCid: | |
| 1353 case kExternalTwoByteStringCid: | |
| 1354 if (aligned()) { | |
| 1355 __ lhu(result, element_address); | |
| 1356 } else { | |
| 1357 __ LoadHalfWordUnsignedUnaligned(result, address, TMP); | |
| 1358 } | |
| 1359 __ SmiTag(result); | |
| 1360 break; | |
| 1361 default: | |
| 1362 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid)); | |
| 1363 ASSERT(aligned()); | |
| 1364 __ lw(result, element_address); | |
| 1365 break; | |
| 1366 } | |
| 1367 } | |
| 1368 | |
| 1369 | |
| 1370 LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone, | |
| 1371 bool opt) const { | |
| 1372 const intptr_t kNumInputs = 2; | |
| 1373 const intptr_t kNumTemps = 0; | |
| 1374 LocationSummary* summary = new (zone) | |
| 1375 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 1376 summary->set_in(0, Location::RequiresRegister()); | |
| 1377 summary->set_in(1, Location::RequiresRegister()); | |
| 1378 | |
| 1379 // TODO(zerny): Handle mints properly once possible. | |
| 1380 ASSERT(representation() == kTagged); | |
| 1381 summary->set_out(0, Location::RequiresRegister()); | |
| 1382 | |
| 1383 return summary; | |
| 1384 } | |
| 1385 | |
| 1386 | |
| 1387 void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1388 // The string register points to the backing store for external strings. | |
| 1389 const Register str = locs()->in(0).reg(); | |
| 1390 const Location index = locs()->in(1); | |
| 1391 | |
| 1392 Address element_address = __ ElementAddressForRegIndex( | |
| 1393 true, IsExternal(), class_id(), index_scale(), str, index.reg()); | |
| 1394 // Warning: element_address may use register TMP as base. | |
| 1395 | |
| 1396 ASSERT(representation() == kTagged); | |
| 1397 Register result = locs()->out(0).reg(); | |
| 1398 switch (class_id()) { | |
| 1399 case kOneByteStringCid: | |
| 1400 case kExternalOneByteStringCid: | |
| 1401 switch (element_count()) { | |
| 1402 case 1: | |
| 1403 __ lbu(result, element_address); | |
| 1404 break; | |
| 1405 case 2: | |
| 1406 __ lhu(result, element_address); | |
| 1407 break; | |
| 1408 case 4: // Loading multiple code units is disabled on MIPS. | |
| 1409 default: | |
| 1410 UNREACHABLE(); | |
| 1411 } | |
| 1412 __ SmiTag(result); | |
| 1413 break; | |
| 1414 case kTwoByteStringCid: | |
| 1415 case kExternalTwoByteStringCid: | |
| 1416 switch (element_count()) { | |
| 1417 case 1: | |
| 1418 __ lhu(result, element_address); | |
| 1419 break; | |
| 1420 case 2: // Loading multiple code units is disabled on MIPS. | |
| 1421 default: | |
| 1422 UNREACHABLE(); | |
| 1423 } | |
| 1424 __ SmiTag(result); | |
| 1425 break; | |
| 1426 default: | |
| 1427 UNREACHABLE(); | |
| 1428 break; | |
| 1429 } | |
| 1430 } | |
| 1431 | |
| 1432 | |
| 1433 Representation StoreIndexedInstr::RequiredInputRepresentation( | |
| 1434 intptr_t idx) const { | |
| 1435 // Array can be a Dart object or a pointer to external data. | |
| 1436 if (idx == 0) return kNoRepresentation; // Flexible input representation. | |
| 1437 if (idx == 1) return kTagged; // Index is a smi. | |
| 1438 ASSERT(idx == 2); | |
| 1439 switch (class_id_) { | |
| 1440 case kArrayCid: | |
| 1441 case kOneByteStringCid: | |
| 1442 case kTypedDataInt8ArrayCid: | |
| 1443 case kTypedDataUint8ArrayCid: | |
| 1444 case kExternalTypedDataUint8ArrayCid: | |
| 1445 case kTypedDataUint8ClampedArrayCid: | |
| 1446 case kExternalTypedDataUint8ClampedArrayCid: | |
| 1447 case kTypedDataInt16ArrayCid: | |
| 1448 case kTypedDataUint16ArrayCid: | |
| 1449 return kTagged; | |
| 1450 case kTypedDataInt32ArrayCid: | |
| 1451 return kUnboxedInt32; | |
| 1452 case kTypedDataUint32ArrayCid: | |
| 1453 return kUnboxedUint32; | |
| 1454 case kTypedDataFloat32ArrayCid: | |
| 1455 case kTypedDataFloat64ArrayCid: | |
| 1456 return kUnboxedDouble; | |
| 1457 case kTypedDataFloat32x4ArrayCid: | |
| 1458 return kUnboxedFloat32x4; | |
| 1459 case kTypedDataInt32x4ArrayCid: | |
| 1460 return kUnboxedInt32x4; | |
| 1461 default: | |
| 1462 UNIMPLEMENTED(); | |
| 1463 return kTagged; | |
| 1464 } | |
| 1465 } | |
| 1466 | |
| 1467 | |
| 1468 LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone, | |
| 1469 bool opt) const { | |
| 1470 const intptr_t kNumInputs = 3; | |
| 1471 const intptr_t kNumTemps = aligned() ? 0 : 2; | |
| 1472 LocationSummary* locs = new (zone) | |
| 1473 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 1474 locs->set_in(0, Location::RequiresRegister()); | |
| 1475 if (CanBeImmediateIndex(index(), class_id(), IsExternal())) { | |
| 1476 locs->set_in(1, Location::Constant(index()->definition()->AsConstant())); | |
| 1477 } else { | |
| 1478 locs->set_in(1, Location::WritableRegister()); | |
| 1479 } | |
| 1480 switch (class_id()) { | |
| 1481 case kArrayCid: | |
| 1482 locs->set_in(2, ShouldEmitStoreBarrier() | |
| 1483 ? Location::WritableRegister() | |
| 1484 : Location::RegisterOrConstant(value())); | |
| 1485 break; | |
| 1486 case kExternalTypedDataUint8ArrayCid: | |
| 1487 case kExternalTypedDataUint8ClampedArrayCid: | |
| 1488 case kTypedDataInt8ArrayCid: | |
| 1489 case kTypedDataUint8ArrayCid: | |
| 1490 case kTypedDataUint8ClampedArrayCid: | |
| 1491 case kOneByteStringCid: | |
| 1492 case kTypedDataInt16ArrayCid: | |
| 1493 case kTypedDataUint16ArrayCid: | |
| 1494 case kTypedDataInt32ArrayCid: | |
| 1495 case kTypedDataUint32ArrayCid: | |
| 1496 locs->set_in(2, Location::RequiresRegister()); | |
| 1497 break; | |
| 1498 case kTypedDataFloat32ArrayCid: | |
| 1499 case kTypedDataFloat64ArrayCid: // TODO(srdjan): Support Float64 constants. | |
| 1500 case kTypedDataInt32x4ArrayCid: | |
| 1501 case kTypedDataFloat32x4ArrayCid: | |
| 1502 locs->set_in(2, Location::RequiresFpuRegister()); | |
| 1503 break; | |
| 1504 default: | |
| 1505 UNREACHABLE(); | |
| 1506 return NULL; | |
| 1507 } | |
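| // Unaligned accesses need two temps: one to materialize the element address | |
| // and one as scratch for the unaligned store macro sequences below. | |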
| 1508 if (!aligned()) { | |
| 1509 locs->set_temp(0, Location::RequiresRegister()); | |
| 1510 locs->set_temp(1, Location::RequiresRegister()); | |
| 1511 } | |
| 1512 return locs; | |
| 1513 } | |
| 1514 | |
| 1515 | |
| 1516 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1517 __ Comment("StoreIndexedInstr"); | |
| 1518 // The array register points to the backing store for external arrays. | |
| 1519 const Register array = locs()->in(0).reg(); | |
| 1520 const Location index = locs()->in(1); | |
| 1521 const Register address = aligned() ? kNoRegister : locs()->temp(0).reg(); | |
| 1522 const Register scratch = aligned() ? kNoRegister : locs()->temp(1).reg(); | |
| 1523 | |
| 1524 Address element_address(kNoRegister); | |
| 1525 if (aligned()) { | |
| 1526 element_address = | |
| 1527 index.IsRegister() | |
| 1528 ? __ ElementAddressForRegIndex(false, // Store. | |
| 1529 IsExternal(), class_id(), | |
| 1530 index_scale(), array, index.reg()) | |
| 1531 : __ ElementAddressForIntIndex(IsExternal(), class_id(), | |
| 1532 index_scale(), array, | |
| 1533 Smi::Cast(index.constant()).Value()); | |
| 1534 ASSERT(element_address.base() != TMP); // Allowed for load only. | |
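| // Stores below may clobber TMP (e.g. when untagging or clamping the value), | |
| // so unlike loads the element address must not be based on TMP. | |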
| 1535 } else { | |
| 1536 if (index.IsRegister()) { | |
| 1537 __ LoadElementAddressForRegIndex(address, | |
| 1538 false, // Store. | |
| 1539 IsExternal(), class_id(), index_scale(), | |
| 1540 array, index.reg()); | |
| 1541 } else { | |
| 1542 __ LoadElementAddressForIntIndex(address, IsExternal(), class_id(), | |
| 1543 index_scale(), array, | |
| 1544 Smi::Cast(index.constant()).Value()); | |
| 1545 } | |
| 1546 } | |
| 1547 | |
| 1548 switch (class_id()) { | |
| 1549 case kArrayCid: | |
| 1550 ASSERT(aligned()); | |
| 1551 if (ShouldEmitStoreBarrier()) { | |
| 1552 Register value = locs()->in(2).reg(); | |
| 1553 __ StoreIntoObject(array, element_address, value); | |
| 1554 } else if (locs()->in(2).IsConstant()) { | |
| 1555 const Object& constant = locs()->in(2).constant(); | |
| 1556 __ StoreIntoObjectNoBarrier(array, element_address, constant); | |
| 1557 } else { | |
| 1558 Register value = locs()->in(2).reg(); | |
| 1559 __ StoreIntoObjectNoBarrier(array, element_address, value); | |
| 1560 } | |
| 1561 break; | |
| 1562 case kTypedDataInt8ArrayCid: | |
| 1563 case kTypedDataUint8ArrayCid: | |
| 1564 case kExternalTypedDataUint8ArrayCid: | |
| 1565 case kOneByteStringCid: { | |
| 1566 ASSERT(aligned()); | |
| 1567 if (locs()->in(2).IsConstant()) { | |
| 1568 const Smi& constant = Smi::Cast(locs()->in(2).constant()); | |
| 1569 __ LoadImmediate(TMP, static_cast<int8_t>(constant.Value())); | |
| 1570 __ sb(TMP, element_address); | |
| 1571 } else { | |
| 1572 Register value = locs()->in(2).reg(); | |
| 1573 __ SmiUntag(TMP, value); | |
| 1574 __ sb(TMP, element_address); | |
| 1575 } | |
| 1576 break; | |
| 1577 } | |
| 1578 case kTypedDataUint8ClampedArrayCid: | |
| 1579 case kExternalTypedDataUint8ClampedArrayCid: { | |
| 1580 ASSERT(aligned()); | |
| 1581 if (locs()->in(2).IsConstant()) { | |
| 1582 const Smi& constant = Smi::Cast(locs()->in(2).constant()); | |
| 1583 intptr_t value = constant.Value(); | |
| 1584 // Clamp negative values to 0x0 and values above 0xFF to 0xFF. | |
| 1585 if (value > 0xFF) { | |
| 1586 value = 0xFF; | |
| 1587 } else if (value < 0) { | |
| 1588 value = 0; | |
| 1589 } | |
| 1590 __ LoadImmediate(TMP, static_cast<int8_t>(value)); | |
| 1591 __ sb(TMP, element_address); | |
| 1592 } else { | |
| 1593 Register value = locs()->in(2).reg(); | |
| 1594 Label store_value; | |
| 1595 __ SmiUntag(TMP, value); | |
| 1596 __ BranchUnsignedLess(TMP, Immediate(0xFF + 1), &store_value); | |
| 1597 __ LoadImmediate(TMP, 0xFF); | |
| 1598 __ slti(CMPRES1, value, Immediate(1)); | |
| 1599 __ movn(TMP, ZR, CMPRES1); | |
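| // Branchless clamp of negatives: TMP already holds 0xFF; slti sets CMPRES1 | |
| // iff the tagged value is negative (zero took the fast path above), and | |
| // movn then overwrites TMP with zero. | |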
| 1600 __ Bind(&store_value); | |
| 1601 __ sb(TMP, element_address); | |
| 1602 } | |
| 1603 break; | |
| 1604 } | |
| 1605 case kTypedDataInt16ArrayCid: | |
| 1606 case kTypedDataUint16ArrayCid: { | |
| 1607 Register value = locs()->in(2).reg(); | |
| 1608 __ SmiUntag(TMP, value); | |
| 1609 if (aligned()) { | |
| 1610 __ sh(TMP, element_address); | |
| 1611 } else { | |
| 1612 __ StoreHalfWordUnaligned(TMP, address, scratch); | |
| 1613 } | |
| 1614 break; | |
| 1615 } | |
| 1616 case kTypedDataInt32ArrayCid: | |
| 1617 case kTypedDataUint32ArrayCid: { | |
| 1618 if (aligned()) { | |
| 1619 __ sw(locs()->in(2).reg(), element_address); | |
| 1620 } else { | |
| 1621 __ StoreWordUnaligned(locs()->in(2).reg(), address, scratch); | |
| 1622 } | |
| 1623 break; | |
| 1624 } | |
| 1625 case kTypedDataFloat32ArrayCid: { | |
| 1626 ASSERT(aligned()); | |
| 1627 FRegister value = EvenFRegisterOf(locs()->in(2).fpu_reg()); | |
| 1628 __ swc1(value, element_address); | |
| 1629 break; | |
| 1630 } | |
| 1631 case kTypedDataFloat64ArrayCid: | |
| 1632 ASSERT(aligned()); | |
| 1633 __ StoreDToOffset(locs()->in(2).fpu_reg(), element_address.base(), | |
| 1634 element_address.offset()); | |
| 1635 break; | |
| 1636 case kTypedDataInt32x4ArrayCid: | |
| 1637 case kTypedDataFloat32x4ArrayCid: | |
| 1638 UNIMPLEMENTED(); | |
| 1639 break; | |
| 1640 default: | |
| 1641 UNREACHABLE(); | |
| 1642 } | |
| 1643 } | |
| 1644 | |
| 1645 | |
| 1646 LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone, | |
| 1647 bool opt) const { | |
| 1648 const intptr_t kNumInputs = 1; | |
| 1649 | |
| 1650 const intptr_t value_cid = value()->Type()->ToCid(); | |
| 1651 const intptr_t field_cid = field().guarded_cid(); | |
| 1652 | |
| 1653 const bool emit_full_guard = !opt || (field_cid == kIllegalCid); | |
| 1654 const bool needs_value_cid_temp_reg = | |
| 1655 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid)); | |
| 1656 const bool needs_field_temp_reg = emit_full_guard; | |
| 1657 | |
| 1658 intptr_t num_temps = 0; | |
| 1659 if (needs_value_cid_temp_reg) { | |
| 1660 num_temps++; | |
| 1661 } | |
| 1662 if (needs_field_temp_reg) { | |
| 1663 num_temps++; | |
| 1664 } | |
| 1665 | |
| 1666 LocationSummary* summary = new (zone) | |
| 1667 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall); | |
| 1668 summary->set_in(0, Location::RequiresRegister()); | |
| 1669 | |
| 1670 for (intptr_t i = 0; i < num_temps; i++) { | |
| 1671 summary->set_temp(i, Location::RequiresRegister()); | |
| 1672 } | |
| 1673 | |
| 1674 return summary; | |
| 1675 } | |
| 1676 | |
| 1677 | |
| 1678 void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1679 ASSERT(sizeof(classid_t) == kInt16Size); | |
| 1680 __ Comment("GuardFieldClassInstr"); | |
| 1681 | |
| 1682 const intptr_t value_cid = value()->Type()->ToCid(); | |
| 1683 const intptr_t field_cid = field().guarded_cid(); | |
| 1684 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid; | |
| 1685 | |
| 1686 if (field_cid == kDynamicCid) { | |
| 1687 if (Compiler::IsBackgroundCompilation()) { | |
| 1688 // Field state changed while compiling. | |
| 1689 Compiler::AbortBackgroundCompilation( | |
| 1690 deopt_id(), | |
| 1691 "GuardFieldClassInstr: field state changed while compiling"); | |
| 1692 } | |
| 1693 ASSERT(!compiler->is_optimizing()); | |
| 1694 return; // Nothing to emit. | |
| 1695 } | |
| 1696 | |
| 1697 const bool emit_full_guard = | |
| 1698 !compiler->is_optimizing() || (field_cid == kIllegalCid); | |
| 1699 | |
| 1700 const bool needs_value_cid_temp_reg = | |
| 1701 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid)); | |
| 1702 | |
| 1703 const bool needs_field_temp_reg = emit_full_guard; | |
| 1704 | |
| 1705 const Register value_reg = locs()->in(0).reg(); | |
| 1706 | |
| 1707 const Register value_cid_reg = | |
| 1708 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister; | |
| 1709 | |
| 1710 const Register field_reg = needs_field_temp_reg | |
| 1711 ? locs()->temp(locs()->temp_count() - 1).reg() | |
| 1712 : kNoRegister; | |
| 1713 | |
| 1714 Label ok, fail_label; | |
| 1715 | |
| 1716 Label* deopt = | |
| 1717 compiler->is_optimizing() | |
| 1718 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) | |
| 1719 : NULL; | |
| 1720 | |
| 1721 Label* fail = (deopt != NULL) ? deopt : &fail_label; | |
| 1722 | |
| 1723 if (emit_full_guard) { | |
| 1724 __ LoadObject(field_reg, Field::ZoneHandle(field().Original())); | |
| 1725 | |
| 1726 FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset()); | |
| 1727 FieldAddress field_nullability_operand(field_reg, | |
| 1728 Field::is_nullable_offset()); | |
| 1729 | |
| 1730 if (value_cid == kDynamicCid) { | |
| 1731 LoadValueCid(compiler, value_cid_reg, value_reg); | |
| 1732 | |
| 1733 __ lhu(CMPRES1, field_cid_operand); | |
| 1734 __ beq(value_cid_reg, CMPRES1, &ok); | |
| 1735 __ lhu(TMP, field_nullability_operand); | |
| 1736 __ subu(CMPRES1, value_cid_reg, TMP); | |
| 1737 } else if (value_cid == kNullCid) { | |
| 1738 __ lhu(TMP, field_nullability_operand); | |
| 1739 __ LoadImmediate(CMPRES1, value_cid); | |
| 1740 __ subu(CMPRES1, TMP, CMPRES1); | |
| 1741 } else { | |
| 1742 __ lhu(TMP, field_cid_operand); | |
| 1743 __ LoadImmediate(CMPRES1, value_cid); | |
| 1744 __ subu(CMPRES1, TMP, CMPRES1); | |
| 1745 } | |
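| // CMPRES1 is zero here iff the comparison emitted above (against the guarded | |
| // class id or the nullability, depending on the case) succeeded. | |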
| 1746 __ beq(CMPRES1, ZR, &ok); | |
| 1747 | |
| 1748 // Check if the tracked state of the guarded field can be initialized | |
| 1749 // inline. If the field needs a length check, we fall through to the | |
| 1750 // runtime, which is responsible for computing the offset of the length | |
| 1751 // field based on the class id. | |
| 1752 // The length guard will be emitted separately, when needed, via the | |
| 1753 // GuardFieldLength instruction after GuardFieldClass. | |
| 1754 if (!field().needs_length_check()) { | |
| 1755 // An uninitialized field can be handled inline. Check if the | |
| 1756 // field is still uninitialized. | |
| 1757 __ lhu(CMPRES1, field_cid_operand); | |
| 1758 __ BranchNotEqual(CMPRES1, Immediate(kIllegalCid), fail); | |
| 1759 | |
| 1760 if (value_cid == kDynamicCid) { | |
| 1761 __ sh(value_cid_reg, field_cid_operand); | |
| 1762 __ sh(value_cid_reg, field_nullability_operand); | |
| 1763 } else { | |
| 1764 __ LoadImmediate(TMP, value_cid); | |
| 1765 __ sh(TMP, field_cid_operand); | |
| 1766 __ sh(TMP, field_nullability_operand); | |
| 1767 } | |
| 1768 | |
| 1769 if (deopt == NULL) { | |
| 1770 ASSERT(!compiler->is_optimizing()); | |
| 1771 __ b(&ok); | |
| 1772 } | |
| 1773 } | |
| 1774 | |
| 1775 if (deopt == NULL) { | |
| 1776 ASSERT(!compiler->is_optimizing()); | |
| 1777 __ Bind(fail); | |
| 1778 | |
| 1779 __ lhu(CMPRES1, FieldAddress(field_reg, Field::guarded_cid_offset())); | |
| 1780 __ BranchEqual(CMPRES1, Immediate(kDynamicCid), &ok); | |
| 1781 | |
| 1782 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
| 1783 __ sw(field_reg, Address(SP, 1 * kWordSize)); | |
| 1784 __ sw(value_reg, Address(SP, 0 * kWordSize)); | |
| 1785 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); | |
| 1786 __ Drop(2); // Drop the field and the value. | |
| 1787 } | |
| 1788 } else { | |
| 1789 ASSERT(compiler->is_optimizing()); | |
| 1790 ASSERT(deopt != NULL); | |
| 1791 | |
| 1792 // Field guard class has been initialized and is known. | |
| 1793 if (value_cid == kDynamicCid) { | |
| 1794 // Value's class id is not known. | |
| 1795 __ andi(CMPRES1, value_reg, Immediate(kSmiTagMask)); | |
| 1796 | |
| 1797 if (field_cid != kSmiCid) { | |
| 1798 __ beq(CMPRES1, ZR, fail); | |
| 1799 __ LoadClassId(value_cid_reg, value_reg); | |
| 1800 __ LoadImmediate(TMP, field_cid); | |
| 1801 __ subu(CMPRES1, value_cid_reg, TMP); | |
| 1802 } | |
| 1803 | |
| 1804 if (field().is_nullable() && (field_cid != kNullCid)) { | |
| 1805 __ beq(CMPRES1, ZR, &ok); | |
| 1806 if (field_cid != kSmiCid) { | |
| 1807 __ LoadImmediate(TMP, kNullCid); | |
| 1808 __ subu(CMPRES1, value_cid_reg, TMP); | |
| 1809 } else { | |
| 1810 __ LoadObject(TMP, Object::null_object()); | |
| 1811 __ subu(CMPRES1, value_reg, TMP); | |
| 1812 } | |
| 1813 } | |
| 1814 | |
| 1815 __ bne(CMPRES1, ZR, fail); | |
| 1816 } else { | |
| 1817 // Both the value's and the field's class ids are known. | |
| 1818 ASSERT((value_cid != field_cid) && (value_cid != nullability)); | |
| 1819 __ b(fail); | |
| 1820 } | |
| 1821 } | |
| 1822 __ Bind(&ok); | |
| 1823 } | |
| 1824 | |
| 1825 | |
| 1826 LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone, | |
| 1827 bool opt) const { | |
| 1828 const intptr_t kNumInputs = 1; | |
| 1829 | |
| 1830 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) { | |
| 1831 const intptr_t kNumTemps = 1; | |
| 1832 LocationSummary* summary = new (zone) | |
| 1833 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 1834 summary->set_in(0, Location::RequiresRegister()); | |
| 1835 // We need a temporary register for the field object. | |
| 1836 summary->set_temp(0, Location::RequiresRegister()); | |
| 1837 return summary; | |
| 1838 } | |
| 1839 LocationSummary* summary = | |
| 1840 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); | |
| 1841 summary->set_in(0, Location::RequiresRegister()); | |
| 1842 return summary; | |
| 1843 } | |
| 1844 | |
| 1845 | |
| 1846 void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1847 if (field().guarded_list_length() == Field::kNoFixedLength) { | |
| 1848 if (Compiler::IsBackgroundCompilation()) { | |
| 1849 // Field state changed while compiling. | |
| 1850 Compiler::AbortBackgroundCompilation( | |
| 1851 deopt_id(), | |
| 1852 "GuardFieldLengthInstr: field state changed while compiling"); | |
| 1853 } | |
| 1854 ASSERT(!compiler->is_optimizing()); | |
| 1855 return; // Nothing to emit. | |
| 1856 } | |
| 1857 | |
| 1858 Label* deopt = | |
| 1859 compiler->is_optimizing() | |
| 1860 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) | |
| 1861 : NULL; | |
| 1862 | |
| 1863 const Register value_reg = locs()->in(0).reg(); | |
| 1864 | |
| 1865 if (!compiler->is_optimizing() || | |
| 1866 (field().guarded_list_length() == Field::kUnknownFixedLength)) { | |
| 1867 const Register field_reg = locs()->temp(0).reg(); | |
| 1868 | |
| 1869 Label ok; | |
| 1870 | |
| 1871 __ LoadObject(field_reg, Field::ZoneHandle(field().Original())); | |
| 1872 | |
| 1873 __ lb(CMPRES1, | |
| 1874 FieldAddress(field_reg, | |
| 1875 Field::guarded_list_length_in_object_offset_offset())); | |
| 1876 __ blez(CMPRES1, &ok); | |
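| // A non-positive offset (presumably Field::kUnknownLengthOffset) means there | |
| // is no in-object length field to compare against, so skip the inline check. | |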
| 1877 | |
| 1878 __ lw(CMPRES2, | |
| 1879 FieldAddress(field_reg, Field::guarded_list_length_offset())); | |
| 1880 | |
| 1881 // Load the length from the value. GuardFieldClass already verified that | |
| 1882 // value's class matches guarded class id of the field. | |
| 1883 // CMPRES1 contains the offset already corrected by -kHeapObjectTag, | |
| 1884 // which is why we can use Address instead of FieldAddress. | |
| 1885 __ addu(TMP, value_reg, CMPRES1); | |
| 1886 __ lw(TMP, Address(TMP)); | |
| 1887 | |
| 1888 if (deopt == NULL) { | |
| 1889 __ beq(CMPRES2, TMP, &ok); | |
| 1890 | |
| 1891 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
| 1892 __ sw(field_reg, Address(SP, 1 * kWordSize)); | |
| 1893 __ sw(value_reg, Address(SP, 0 * kWordSize)); | |
| 1894 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); | |
| 1895 __ Drop(2); // Drop the field and the value. | |
| 1896 } else { | |
| 1897 __ bne(CMPRES2, TMP, deopt); | |
| 1898 } | |
| 1899 | |
| 1900 __ Bind(&ok); | |
| 1901 } else { | |
| 1902 ASSERT(compiler->is_optimizing()); | |
| 1903 ASSERT(field().guarded_list_length() >= 0); | |
| 1904 ASSERT(field().guarded_list_length_in_object_offset() != | |
| 1905 Field::kUnknownLengthOffset); | |
| 1906 | |
| 1907 __ lw(CMPRES1, | |
| 1908 FieldAddress(value_reg, | |
| 1909 field().guarded_list_length_in_object_offset())); | |
| 1910 __ LoadImmediate(TMP, Smi::RawValue(field().guarded_list_length())); | |
| 1911 __ bne(CMPRES1, TMP, deopt); | |
| 1912 } | |
| 1913 } | |
| 1914 | |
| 1915 | |
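| // Slow path that calls the allocation stub for cls_, moves the result into | |
| // result_, and preserves the live registers recorded in the instruction's | |
| // LocationSummary around the call. | |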
| 1916 class BoxAllocationSlowPath : public SlowPathCode { | |
| 1917 public: | |
| 1918 BoxAllocationSlowPath(Instruction* instruction, | |
| 1919 const Class& cls, | |
| 1920 Register result) | |
| 1921 : instruction_(instruction), cls_(cls), result_(result) {} | |
| 1922 | |
| 1923 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 1924 if (Assembler::EmittingComments()) { | |
| 1925 __ Comment("%s slow path allocation of %s", instruction_->DebugName(), | |
| 1926 String::Handle(cls_.ScrubbedName()).ToCString()); | |
| 1927 } | |
| 1928 __ Bind(entry_label()); | |
| 1929 const Code& stub = Code::ZoneHandle( | |
| 1930 compiler->zone(), StubCode::GetAllocationStubForClass(cls_)); | |
| 1931 const StubEntry stub_entry(stub); | |
| 1932 | |
| 1933 LocationSummary* locs = instruction_->locs(); | |
| 1934 locs->live_registers()->Remove(Location::RegisterLocation(result_)); | |
| 1935 | |
| 1936 compiler->SaveLiveRegisters(locs); | |
| 1937 compiler->GenerateCall(TokenPosition::kNoSource, // No token position. | |
| 1938 stub_entry, RawPcDescriptors::kOther, locs); | |
| 1939 compiler->AddStubCallTarget(stub); | |
| 1940 if (result_ != V0) { | |
| 1941 __ mov(result_, V0); | |
| 1942 } | |
| 1943 compiler->RestoreLiveRegisters(locs); | |
| 1944 __ b(exit_label()); | |
| 1945 } | |
| 1946 | |
| 1947 static void Allocate(FlowGraphCompiler* compiler, | |
| 1948 Instruction* instruction, | |
| 1949 const Class& cls, | |
| 1950 Register result, | |
| 1951 Register temp) { | |
| 1952 if (compiler->intrinsic_mode()) { | |
| 1953 __ TryAllocate(cls, compiler->intrinsic_slow_path_label(), result, temp); | |
| 1954 } else { | |
| 1955 BoxAllocationSlowPath* slow_path = | |
| 1956 new BoxAllocationSlowPath(instruction, cls, result); | |
| 1957 compiler->AddSlowPathCode(slow_path); | |
| 1958 | |
| 1959 __ TryAllocate(cls, slow_path->entry_label(), result, temp); | |
| 1960 __ Bind(slow_path->exit_label()); | |
| 1961 } | |
| 1962 } | |
| 1963 | |
| 1964 private: | |
| 1965 Instruction* instruction_; | |
| 1966 const Class& cls_; | |
| 1967 const Register result_; | |
| 1968 }; | |
| 1969 | |
| 1970 | |
| 1971 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone, | |
| 1972 bool opt) const { | |
| 1973 const intptr_t kNumInputs = 2; | |
| 1974 const intptr_t kNumTemps = | |
| 1975 (IsUnboxedStore() && opt) ? 2 : ((IsPotentialUnboxedStore()) ? 3 : 0); | |
| 1976 LocationSummary* summary = new (zone) | |
| 1977 LocationSummary(zone, kNumInputs, kNumTemps, | |
| 1978 ((IsUnboxedStore() && opt && is_initialization()) || | |
| 1979 IsPotentialUnboxedStore()) | |
| 1980 ? LocationSummary::kCallOnSlowPath | |
| 1981 : LocationSummary::kNoCall); | |
| 1982 | |
| 1983 summary->set_in(0, Location::RequiresRegister()); | |
| 1984 if (IsUnboxedStore() && opt) { | |
| 1985 summary->set_in(1, Location::RequiresFpuRegister()); | |
| 1986 summary->set_temp(0, Location::RequiresRegister()); | |
| 1987 summary->set_temp(1, Location::RequiresRegister()); | |
| 1988 } else if (IsPotentialUnboxedStore()) { | |
| 1989 summary->set_in(1, ShouldEmitStoreBarrier() ? Location::WritableRegister() | |
| 1990 : Location::RequiresRegister()); | |
| 1991 summary->set_temp(0, Location::RequiresRegister()); | |
| 1992 summary->set_temp(1, Location::RequiresRegister()); | |
| 1993 summary->set_temp(2, opt ? Location::RequiresFpuRegister() | |
| 1994 : Location::FpuRegisterLocation(D1)); | |
| 1995 } else { | |
| 1996 summary->set_in(1, ShouldEmitStoreBarrier() | |
| 1997 ? Location::WritableRegister() | |
| 1998 : Location::RegisterOrConstant(value())); | |
| 1999 } | |
| 2000 return summary; | |
| 2001 } | |
| 2002 | |
| 2003 | |
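| // Loads the box stored in the field at [instance_reg + offset] into box_reg. | |
| // If the field still contains null, allocates a fresh box of class cls and | |
| // stores it into the field with a write barrier. | |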
| 2004 static void EnsureMutableBox(FlowGraphCompiler* compiler, | |
| 2005 StoreInstanceFieldInstr* instruction, | |
| 2006 Register box_reg, | |
| 2007 const Class& cls, | |
| 2008 Register instance_reg, | |
| 2009 intptr_t offset, | |
| 2010 Register temp) { | |
| 2011 Label done; | |
| 2012 __ lw(box_reg, FieldAddress(instance_reg, offset)); | |
| 2013 __ BranchNotEqual(box_reg, Object::null_object(), &done); | |
| 2014 BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp); | |
| 2015 __ mov(temp, box_reg); | |
| 2016 __ StoreIntoObjectOffset(instance_reg, offset, temp); | |
| 2017 __ Bind(&done); | |
| 2018 } | |
| 2019 | |
| 2020 | |
| 2021 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2022 ASSERT(sizeof(classid_t) == kInt16Size); | |
| 2023 Label skip_store; | |
| 2024 | |
| 2025 Register instance_reg = locs()->in(0).reg(); | |
| 2026 | |
| 2027 if (IsUnboxedStore() && compiler->is_optimizing()) { | |
| 2028 DRegister value = locs()->in(1).fpu_reg(); | |
| 2029 Register temp = locs()->temp(0).reg(); | |
| 2030 Register temp2 = locs()->temp(1).reg(); | |
| 2031 const intptr_t cid = field().UnboxedFieldCid(); | |
| 2032 | |
| 2033 if (is_initialization()) { | |
| 2034 const Class* cls = NULL; | |
| 2035 switch (cid) { | |
| 2036 case kDoubleCid: | |
| 2037 cls = &compiler->double_class(); | |
| 2038 break; | |
| 2039 default: | |
| 2040 UNREACHABLE(); | |
| 2041 } | |
| 2042 | |
| 2043 BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2); | |
| 2044 __ mov(temp2, temp); | |
| 2045 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2); | |
| 2046 } else { | |
| 2047 __ lw(temp, FieldAddress(instance_reg, offset_in_bytes_)); | |
| 2048 } | |
| 2049 switch (cid) { | |
| 2050 case kDoubleCid: | |
| 2051 __ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag); | |
| 2052 break; | |
| 2053 default: | |
| 2054 UNREACHABLE(); | |
| 2055 } | |
| 2056 return; | |
| 2057 } | |
| 2058 | |
| 2059 if (IsPotentialUnboxedStore()) { | |
| 2060 Register value_reg = locs()->in(1).reg(); | |
| 2061 Register temp = locs()->temp(0).reg(); | |
| 2062 Register temp2 = locs()->temp(1).reg(); | |
| 2063 DRegister fpu_temp = locs()->temp(2).fpu_reg(); | |
| 2064 | |
| 2065 if (ShouldEmitStoreBarrier()) { | |
| 2066 // Value input is a writable register and should be manually preserved | |
| 2067 // across the allocation slow path. | |
| 2068 locs()->live_registers()->Add(locs()->in(1), kTagged); | |
| 2069 } | |
| 2070 | |
| 2071 Label store_pointer; | |
| 2072 Label store_double; | |
| 2073 | |
| 2074 __ LoadObject(temp, Field::ZoneHandle(Z, field().Original())); | |
| 2075 | |
| 2076 __ lhu(temp2, FieldAddress(temp, Field::is_nullable_offset())); | |
| 2077 __ BranchEqual(temp2, Immediate(kNullCid), &store_pointer); | |
| 2078 | |
| 2079 __ lbu(temp2, FieldAddress(temp, Field::kind_bits_offset())); | |
| 2080 __ andi(CMPRES1, temp2, Immediate(1 << Field::kUnboxingCandidateBit)); | |
| 2081 __ beq(CMPRES1, ZR, &store_pointer); | |
| 2082 | |
| 2083 __ lhu(temp2, FieldAddress(temp, Field::guarded_cid_offset())); | |
| 2084 __ BranchEqual(temp2, Immediate(kDoubleCid), &store_double); | |
| 2085 | |
| 2086 // Otherwise, store the value as a pointer. | |
| 2087 __ b(&store_pointer); | |
| 2088 | |
| 2089 if (!compiler->is_optimizing()) { | |
| 2090 locs()->live_registers()->Add(locs()->in(0)); | |
| 2091 locs()->live_registers()->Add(locs()->in(1)); | |
| 2092 } | |
| 2093 | |
| 2094 { | |
| 2095 __ Bind(&store_double); | |
| 2096 EnsureMutableBox(compiler, this, temp, compiler->double_class(), | |
| 2097 instance_reg, offset_in_bytes_, temp2); | |
| 2098 __ LoadDFromOffset(fpu_temp, value_reg, | |
| 2099 Double::value_offset() - kHeapObjectTag); | |
| 2100 __ StoreDToOffset(fpu_temp, temp, | |
| 2101 Double::value_offset() - kHeapObjectTag); | |
| 2102 __ b(&skip_store); | |
| 2103 } | |
| 2104 | |
| 2105 __ Bind(&store_pointer); | |
| 2106 } | |
| 2107 | |
| 2108 if (ShouldEmitStoreBarrier()) { | |
| 2109 Register value_reg = locs()->in(1).reg(); | |
| 2110 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, value_reg, | |
| 2111 CanValueBeSmi()); | |
| 2112 } else { | |
| 2113 if (locs()->in(1).IsConstant()) { | |
| 2114 __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes_, | |
| 2115 locs()->in(1).constant()); | |
| 2116 } else { | |
| 2117 Register value_reg = locs()->in(1).reg(); | |
| 2118 __ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes_, | |
| 2119 value_reg); | |
| 2120 } | |
| 2121 } | |
| 2122 __ Bind(&skip_store); | |
| 2123 } | |
| 2124 | |
| 2125 | |
| 2126 LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone, | |
| 2127 bool opt) const { | |
| 2128 const intptr_t kNumInputs = 1; | |
| 2129 const intptr_t kNumTemps = 0; | |
| 2130 LocationSummary* summary = new (zone) | |
| 2131 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 2132 summary->set_in(0, Location::RequiresRegister()); | |
| 2133 summary->set_out(0, Location::RequiresRegister()); | |
| 2134 return summary; | |
| 2135 } | |
| 2136 | |
| 2137 | |
| 2138 // When the parser is building an implicit static getter for optimization, | |
| 2139 // it can generate a function body where deoptimization ids do not line up | |
| 2140 // with the unoptimized code. | |
| 2141 // | |
| 2142 // This is safe only so long as LoadStaticFieldInstr cannot deoptimize. | |
| 2143 void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2144 __ Comment("LoadStaticFieldInstr"); | |
| 2145 Register field = locs()->in(0).reg(); | |
| 2146 Register result = locs()->out(0).reg(); | |
| 2147 __ LoadFromOffset(result, field, | |
| 2148 Field::static_value_offset() - kHeapObjectTag); | |
| 2149 } | |
| 2150 | |
| 2151 | |
| 2152 LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone, | |
| 2153 bool opt) const { | |
| 2154 LocationSummary* locs = | |
| 2155 new (zone) LocationSummary(zone, 1, 1, LocationSummary::kNoCall); | |
| 2156 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister() | |
| 2157 : Location::RequiresRegister()); | |
| 2158 locs->set_temp(0, Location::RequiresRegister()); | |
| 2159 return locs; | |
| 2160 } | |
| 2161 | |
| 2162 | |
| 2163 void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2164 __ Comment("StoreStaticFieldInstr"); | |
| 2165 Register value = locs()->in(0).reg(); | |
| 2166 Register temp = locs()->temp(0).reg(); | |
| 2167 | |
| 2168 __ LoadObject(temp, Field::ZoneHandle(Z, field().Original())); | |
| 2169 if (this->value()->NeedsStoreBuffer()) { | |
| 2170 __ StoreIntoObject(temp, FieldAddress(temp, Field::static_value_offset()), | |
| 2171 value, CanValueBeSmi()); | |
| 2172 } else { | |
| 2173 __ StoreIntoObjectNoBarrier( | |
| 2174 temp, FieldAddress(temp, Field::static_value_offset()), value); | |
| 2175 } | |
| 2176 } | |
| 2177 | |
| 2178 | |
| 2179 LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone, | |
| 2180 bool opt) const { | |
| 2181 const intptr_t kNumInputs = 3; | |
| 2182 const intptr_t kNumTemps = 0; | |
| 2183 LocationSummary* summary = new (zone) | |
| 2184 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 2185 summary->set_in(0, Location::RegisterLocation(A0)); // Instance. | |
| 2186 summary->set_in(1, Location::RegisterLocation(A1)); // Instant. type args. | |
| 2187 summary->set_in(2, Location::RegisterLocation(A2)); // Function type args. | |
| 2188 summary->set_out(0, Location::RegisterLocation(V0)); | |
| 2189 return summary; | |
| 2190 } | |
| 2191 | |
| 2192 | |
| 2193 void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2194 ASSERT(locs()->in(0).reg() == A0); // Value. | |
| 2195 ASSERT(locs()->in(1).reg() == A1); // Instantiator type arguments. | |
| 2196 ASSERT(locs()->in(2).reg() == A2); // Function type arguments. | |
| 2197 | |
| 2198 __ Comment("InstanceOfInstr"); | |
| 2199 compiler->GenerateInstanceOf(token_pos(), deopt_id(), type(), locs()); | |
| 2200 ASSERT(locs()->out(0).reg() == V0); | |
| 2201 } | |
| 2202 | |
| 2203 | |
| 2204 LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone, | |
| 2205 bool opt) const { | |
| 2206 const intptr_t kNumInputs = 2; | |
| 2207 const intptr_t kNumTemps = 0; | |
| 2208 LocationSummary* locs = new (zone) | |
| 2209 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 2210 locs->set_in(0, Location::RegisterLocation(A0)); | |
| 2211 locs->set_in(1, Location::RegisterLocation(A1)); | |
| 2212 locs->set_out(0, Location::RegisterLocation(V0)); | |
| 2213 return locs; | |
| 2214 } | |
| 2215 | |
| 2216 | |
| 2217 // Inlines array allocation for known constant values. | |
| 2218 static void InlineArrayAllocation(FlowGraphCompiler* compiler, | |
| 2219 intptr_t num_elements, | |
| 2220 Label* slow_path, | |
| 2221 Label* done) { | |
| 2222 const int kInlineArraySize = 12; // Same as kInlineInstanceSize. | |
| 2223 const Register kLengthReg = A1; | |
| 2224 const Register kElemTypeReg = A0; | |
| 2225 const intptr_t instance_size = Array::InstanceSize(num_elements); | |
| 2226 | |
| 2227 __ TryAllocateArray(kArrayCid, instance_size, slow_path, | |
| 2228 V0, // instance | |
| 2229 T1, // end address | |
| 2230 T2, T3); | |
| 2231 // V0: new object start as a tagged pointer. | |
| 2232 // T1: new object end address. | |
| 2233 | |
| 2234 // Store the type argument field. | |
| 2235 __ StoreIntoObjectNoBarrier( | |
| 2236 V0, FieldAddress(V0, Array::type_arguments_offset()), kElemTypeReg); | |
| 2237 | |
| 2238 // Set the length field. | |
| 2239 __ StoreIntoObjectNoBarrier(V0, FieldAddress(V0, Array::length_offset()), | |
| 2240 kLengthReg); | |
| 2241 | |
| 2242 // Initialize all array elements to raw_null. | |
| 2243 // V0: new object start as a tagged pointer. | |
| 2244 // T1: new object end address. | |
| 2245 // T2: iterator which initially points to the start of the variable | |
| 2246 // data area to be initialized. | |
| 2247 // T7: null. | |
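| // Small arrays are initialized with fully unrolled stores; larger ones use | |
| // a store loop bounded by the end address in T1. | |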
| 2248 if (num_elements > 0) { | |
| 2249 const intptr_t array_size = instance_size - sizeof(RawArray); | |
| 2250 __ LoadObject(T7, Object::null_object()); | |
| 2251 __ AddImmediate(T2, V0, sizeof(RawArray) - kHeapObjectTag); | |
| 2252 if (array_size < (kInlineArraySize * kWordSize)) { | |
| 2253 intptr_t current_offset = 0; | |
| 2254 while (current_offset < array_size) { | |
| 2255 __ sw(T7, Address(T2, current_offset)); | |
| 2256 current_offset += kWordSize; | |
| 2257 } | |
| 2258 } else { | |
| 2259 Label init_loop; | |
| 2260 __ Bind(&init_loop); | |
| 2261 __ sw(T7, Address(T2, 0)); | |
| 2262 __ addiu(T2, T2, Immediate(kWordSize)); | |
| 2263 __ BranchUnsignedLess(T2, T1, &init_loop); | |
| 2264 } | |
| 2265 } | |
| 2266 __ b(done); | |
| 2267 } | |
| 2268 | |
| 2269 | |
| 2270 void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2271 __ Comment("CreateArrayInstr"); | |
| 2272 const Register kLengthReg = A1; | |
| 2273 const Register kElemTypeReg = A0; | |
| 2274 const Register kResultReg = V0; | |
| 2275 ASSERT(locs()->in(0).reg() == kElemTypeReg); | |
| 2276 ASSERT(locs()->in(1).reg() == kLengthReg); | |
| 2277 | |
| 2278 Label slow_path, done; | |
| 2279 if (compiler->is_optimizing() && !FLAG_precompiled_mode && | |
| 2280 num_elements()->BindsToConstant() && | |
| 2281 num_elements()->BoundConstant().IsSmi()) { | |
| 2282 const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value(); | |
| 2283 if ((length >= 0) && (length <= Array::kMaxElements)) { | |
| 2285 InlineArrayAllocation(compiler, length, &slow_path, &done); | |
| 2286 __ Bind(&slow_path); | |
| 2287 __ PushObject(Object::null_object()); // Make room for the result. | |
| 2288 __ Push(kLengthReg); // length. | |
| 2289 __ Push(kElemTypeReg); | |
| 2290 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
| 2291 kAllocateArrayRuntimeEntry, 2, locs()); | |
| 2292 __ Drop(2); | |
| 2293 __ Pop(kResultReg); | |
| 2294 __ Bind(&done); | |
| 2295 return; | |
| 2296 } | |
| 2297 } | |
| 2298 | |
| 2299 __ Bind(&slow_path); | |
| 2300 const Code& stub = Code::ZoneHandle(compiler->zone(), | |
| 2301 StubCode::AllocateArray_entry()->code()); | |
| 2302 compiler->AddStubCallTarget(stub); | |
| 2303 compiler->GenerateCallWithDeopt(token_pos(), deopt_id(), | |
| 2304 *StubCode::AllocateArray_entry(), | |
| 2305 RawPcDescriptors::kOther, locs()); | |
| 2306 __ Bind(&done); | |
| 2307 ASSERT(locs()->out(0).reg() == kResultReg); | |
| 2308 } | |
| 2309 | |
| 2310 | |
| 2311 LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone, | |
| 2312 bool opt) const { | |
| 2313 const intptr_t kNumInputs = 1; | |
| 2314 const intptr_t kNumTemps = | |
| 2315 (IsUnboxedLoad() && opt) ? 1 : ((IsPotentialUnboxedLoad()) ? 2 : 0); | |
| 2316 LocationSummary* locs = new (zone) LocationSummary( | |
| 2317 zone, kNumInputs, kNumTemps, (opt && !IsPotentialUnboxedLoad()) | |
| 2318 ? LocationSummary::kNoCall | |
| 2319 : LocationSummary::kCallOnSlowPath); | |
| 2320 | |
| 2321 locs->set_in(0, Location::RequiresRegister()); | |
| 2322 | |
| 2323 if (IsUnboxedLoad() && opt) { | |
| 2324 locs->set_temp(0, Location::RequiresRegister()); | |
| 2325 } else if (IsPotentialUnboxedLoad()) { | |
| 2326 locs->set_temp(0, opt ? Location::RequiresFpuRegister() | |
| 2327 : Location::FpuRegisterLocation(D1)); | |
| 2328 locs->set_temp(1, Location::RequiresRegister()); | |
| 2329 } | |
| 2330 locs->set_out(0, Location::RequiresRegister()); | |
| 2331 return locs; | |
| 2332 } | |
| 2333 | |
| 2334 | |
| 2335 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2336 ASSERT(sizeof(classid_t) == kInt16Size); | |
| 2337 | |
| 2338 Register instance_reg = locs()->in(0).reg(); | |
| 2339 if (IsUnboxedLoad() && compiler->is_optimizing()) { | |
| 2340 DRegister result = locs()->out(0).fpu_reg(); | |
| 2341 Register temp = locs()->temp(0).reg(); | |
| 2342 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes()); | |
| 2343 intptr_t cid = field()->UnboxedFieldCid(); | |
| 2344 switch (cid) { | |
| 2345 case kDoubleCid: | |
| 2346 __ LoadDFromOffset(result, temp, | |
| 2347 Double::value_offset() - kHeapObjectTag); | |
| 2348 break; | |
| 2349 default: | |
| 2350 UNREACHABLE(); | |
| 2351 } | |
| 2352 return; | |
| 2353 } | |
| 2354 | |
| 2355 Label done; | |
| 2356 Register result_reg = locs()->out(0).reg(); | |
| 2357 if (IsPotentialUnboxedLoad()) { | |
| 2358 Register temp = locs()->temp(1).reg(); | |
| 2359 DRegister value = locs()->temp(0).fpu_reg(); | |
| 2360 | |
| 2361 Label load_pointer; | |
| 2362 Label load_double; | |
| 2363 | |
| 2364 __ LoadObject(result_reg, Field::ZoneHandle(field()->Original())); | |
| 2365 | |
| 2366 FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset()); | |
| 2367 FieldAddress field_nullability_operand(result_reg, | |
| 2368 Field::is_nullable_offset()); | |
| 2369 | |
| 2370 __ lhu(temp, field_nullability_operand); | |
| 2371 __ BranchEqual(temp, Immediate(kNullCid), &load_pointer); | |
| 2372 | |
| 2373 __ lhu(temp, field_cid_operand); | |
| 2374 __ BranchEqual(temp, Immediate(kDoubleCid), &load_double); | |
| 2375 | |
| 2376 // Otherwise, load the field as a boxed pointer. | |
| 2377 __ b(&load_pointer); | |
| 2378 | |
| 2379 if (!compiler->is_optimizing()) { | |
| 2380 locs()->live_registers()->Add(locs()->in(0)); | |
| 2381 } | |
| 2382 | |
| 2383 { | |
| 2384 __ Bind(&load_double); | |
| 2385 BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(), | |
| 2386 result_reg, temp); | |
| 2387 __ lw(temp, FieldAddress(instance_reg, offset_in_bytes())); | |
| 2388 __ LoadDFromOffset(value, temp, Double::value_offset() - kHeapObjectTag); | |
| 2389 __ StoreDToOffset(value, result_reg, | |
| 2390 Double::value_offset() - kHeapObjectTag); | |
| 2391 __ b(&done); | |
| 2392 } | |
| 2393 | |
| 2394 __ Bind(&load_pointer); | |
| 2395 } | |
| 2396 __ LoadFieldFromOffset(result_reg, instance_reg, offset_in_bytes()); | |
| 2397 __ Bind(&done); | |
| 2398 } | |
| 2399 | |
| 2400 | |
| 2401 LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone, | |
| 2402 bool opt) const { | |
| 2403 const intptr_t kNumInputs = 2; | |
| 2404 const intptr_t kNumTemps = 0; | |
| 2405 LocationSummary* locs = new (zone) | |
| 2406 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 2407 locs->set_in(0, Location::RegisterLocation(T0)); // Instant. type args. | |
| 2408 locs->set_in(1, Location::RegisterLocation(T1)); // Function type args. | |
| 2409 locs->set_out(0, Location::RegisterLocation(T0)); | |
| 2410 return locs; | |
| 2411 } | |
| 2412 | |
| 2413 | |
| 2414 void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2415 __ Comment("InstantiateTypeInstr"); | |
| 2416 Register instantiator_type_args_reg = locs()->in(0).reg(); | |
| 2417 Register function_type_args_reg = locs()->in(1).reg(); | |
| 2418 Register result_reg = locs()->out(0).reg(); | |
| 2419 | |
| 2420 // 'instantiator_type_args_reg' is a TypeArguments object (or null). | |
| 2421 // 'function_type_args_reg' is a TypeArguments object (or null). | |
| 2422 // A runtime call to instantiate the type is required. | |
| 2423 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | |
| 2424 __ LoadObject(TMP, Object::null_object()); | |
| 2425 __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result. | |
| 2426 __ LoadObject(TMP, type()); | |
| 2427 __ sw(TMP, Address(SP, 2 * kWordSize)); | |
| 2428 __ sw(instantiator_type_args_reg, Address(SP, 1 * kWordSize)); | |
| 2429 __ sw(function_type_args_reg, Address(SP, 0 * kWordSize)); | |
| 2430 | |
| 2431 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
| 2432 kInstantiateTypeRuntimeEntry, 3, locs()); | |
| 2433 // Pop instantiated type. | |
| 2434 __ lw(result_reg, Address(SP, 3 * kWordSize)); | |
| 2435 | |
| 2436 // Drop instantiator and uninstantiated type. | |
| 2437 __ addiu(SP, SP, Immediate(4 * kWordSize)); | |
| 2438 } | |
| 2439 | |
| 2440 | |
| 2441 LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary( | |
| 2442 Zone* zone, | |
| 2443 bool opt) const { | |
| 2444 const intptr_t kNumInputs = 2; | |
| 2445 const intptr_t kNumTemps = 0; | |
| 2446 LocationSummary* locs = new (zone) | |
| 2447 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 2448 locs->set_in(0, Location::RegisterLocation(T0)); // Instant. type args. | |
| 2449 locs->set_in(1, Location::RegisterLocation(T1)); // Function type args. | |
| 2450 locs->set_out(0, Location::RegisterLocation(T0)); | |
| 2451 return locs; | |
| 2452 } | |
| 2453 | |
| 2454 | |
| 2455 void InstantiateTypeArgumentsInstr::EmitNativeCode( | |
| 2456 FlowGraphCompiler* compiler) { | |
| 2457 __ Comment("InstantiateTypeArgumentsInstr"); | |
| 2458 Register instantiator_type_args_reg = locs()->in(0).reg(); | |
| 2459 Register function_type_args_reg = locs()->in(1).reg(); | |
| 2460 Register result_reg = locs()->out(0).reg(); | |
| 2461 ASSERT(instantiator_type_args_reg == T0); | |
| 2462 ASSERT(instantiator_type_args_reg == result_reg); | |
| 2463 | |
| 2464 // 'instantiator_type_args_reg' is a TypeArguments object (or null). | |
| 2465 // 'function_type_args_reg' is a TypeArguments object (or null). | |
| 2466 ASSERT(!type_arguments().IsUninstantiatedIdentity() && | |
| 2467 !type_arguments().CanShareInstantiatorTypeArguments( | |
| 2468 instantiator_class())); | |
| 2469 // If both the instantiator and function type arguments are null and if the | |
| 2470 // type argument vector instantiated from null becomes a vector of dynamic, | |
| 2471 // then use null as the type arguments. | |
| 2472 Label type_arguments_instantiated; | |
| 2473 const intptr_t len = type_arguments().Length(); | |
| 2474 if (type_arguments().IsRawWhenInstantiatedFromRaw(len)) { | |
| 2475 Label non_null_type_args; | |
| 2476 __ BranchNotEqual(instantiator_type_args_reg, Object::null_object(), | |
| 2477 &non_null_type_args); | |
| 2478 __ BranchEqual(function_type_args_reg, Object::null_object(), | |
| 2479 &type_arguments_instantiated); | |
| 2480 __ Bind(&non_null_type_args); | |
| 2481 } | |
| 2482 | |
| 2483 // Lookup cache before calling runtime. | |
| 2484 // TODO(regis): Consider moving this into a shared stub to reduce | |
| 2485 // generated code size. | |
| 2486 __ LoadObject(T2, type_arguments()); | |
| 2487 __ lw(T2, FieldAddress(T2, TypeArguments::instantiations_offset())); | |
| 2488 __ AddImmediate(T2, Array::data_offset() - kHeapObjectTag); | |
| 2489 // The instantiations cache is initialized with Object::zero_array() and is | |
| 2490 // therefore guaranteed to contain kNoInstantiator. No length check needed. | |
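| // Each cache entry occupies kInstantiationSizeInWords words: the cached | |
| // instantiator type arguments, function type arguments, and the resulting | |
| // instantiated type arguments, in that order. | |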
| 2491 Label loop, next, found, slow_case; | |
| 2492 __ Bind(&loop); | |
| 2493 __ lw(T3, Address(T2, 0 * kWordSize)); // Cached instantiator type args. | |
| 2494 __ bne(T3, T0, &next); | |
| 2495 __ lw(T4, Address(T2, 1 * kWordSize)); // Cached function type args. | |
| 2496 __ beq(T4, T1, &found); | |
| 2497 __ Bind(&next); | |
| 2498 __ BranchNotEqual(T3, Immediate(Smi::RawValue(StubCode::kNoInstantiator)), | |
| 2499 &loop); | |
| 2500 __ delay_slot()->addiu( | |
| 2501 T2, T2, Immediate(StubCode::kInstantiationSizeInWords * kWordSize)); | |
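| // The addiu above sits in the branch delay slot and thus executes whether | |
| // or not the branch is taken, advancing T2 to the next cache entry before | |
| // the loop condition is re-checked. | |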
| 2502 __ b(&slow_case); | |
| 2503 __ Bind(&found); | |
| 2504 __ lw(T0, Address(T2, 2 * kWordSize)); // Cached instantiated args. | |
| 2505 __ b(&type_arguments_instantiated); | |
| 2506 | |
| 2507 __ Bind(&slow_case); | |
| 2508 // Instantiate non-null type arguments. | |
| 2509 // A runtime call to instantiate the type arguments is required. | |
| 2510 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | |
| 2511 __ LoadObject(TMP, Object::null_object()); | |
| 2512 __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result. | |
| 2513 __ LoadObject(TMP, type_arguments()); | |
| 2514 __ sw(TMP, Address(SP, 2 * kWordSize)); | |
| 2515 __ sw(instantiator_type_args_reg, Address(SP, 1 * kWordSize)); | |
| 2516 __ sw(function_type_args_reg, Address(SP, 0 * kWordSize)); | |
| 2517 | |
| 2518 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
| 2519 kInstantiateTypeArgumentsRuntimeEntry, 3, | |
| 2520 locs()); | |
| 2521 // Pop instantiated type arguments. | |
| 2522 __ lw(result_reg, Address(SP, 3 * kWordSize)); | |
| 2523 // Drop 2 type argument vectors and uninstantiated type arguments. | |
| 2524 __ addiu(SP, SP, Immediate(4 * kWordSize)); | |
| 2525 __ Bind(&type_arguments_instantiated); | |
| 2526 } | |
| 2527 | |
| 2528 | |
| 2529 LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary( | |
| 2530 Zone* zone, | |
| 2531 bool opt) const { | |
| 2532 ASSERT(opt); | |
| 2533 const intptr_t kNumInputs = 0; | |
| 2534 const intptr_t kNumTemps = 3; | |
| 2535 LocationSummary* locs = new (zone) LocationSummary( | |
| 2536 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
| 2537 locs->set_temp(0, Location::RegisterLocation(T1)); | |
| 2538 locs->set_temp(1, Location::RegisterLocation(T2)); | |
| 2539 locs->set_temp(2, Location::RegisterLocation(T3)); | |
| 2540 locs->set_out(0, Location::RegisterLocation(V0)); | |
| 2541 return locs; | |
| 2542 } | |
| 2543 | |
| 2544 | |
| 2545 class AllocateContextSlowPath : public SlowPathCode { | |
| 2546 public: | |
| 2547 explicit AllocateContextSlowPath( | |
| 2548 AllocateUninitializedContextInstr* instruction) | |
| 2549 : instruction_(instruction) {} | |
| 2550 | |
| 2551 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2552 __ Comment("AllocateContextSlowPath"); | |
| 2553 __ Bind(entry_label()); | |
| 2554 | |
| 2555 LocationSummary* locs = instruction_->locs(); | |
| 2556 locs->live_registers()->Remove(locs->out(0)); | |
| 2557 | |
| 2558 compiler->SaveLiveRegisters(locs); | |
| 2559 | |
| 2560 __ LoadImmediate(T1, instruction_->num_context_variables()); | |
| 2561 const Code& stub = Code::ZoneHandle( | |
| 2562 compiler->zone(), StubCode::AllocateContext_entry()->code()); | |
| 2563 compiler->AddStubCallTarget(stub); | |
| 2564 compiler->GenerateCall(instruction_->token_pos(), | |
| 2565 *StubCode::AllocateContext_entry(), | |
| 2566 RawPcDescriptors::kOther, locs); | |
| 2567 ASSERT(instruction_->locs()->out(0).reg() == V0); | |
| 2568 compiler->RestoreLiveRegisters(instruction_->locs()); | |
| 2569 __ b(exit_label()); | |
| 2570 } | |
| 2571 | |
| 2572 private: | |
| 2573 AllocateUninitializedContextInstr* instruction_; | |
| 2574 }; | |
| 2575 | |
| 2576 | |
| 2577 void AllocateUninitializedContextInstr::EmitNativeCode( | |
| 2578 FlowGraphCompiler* compiler) { | |
| 2579 Register temp0 = locs()->temp(0).reg(); | |
| 2580 Register temp1 = locs()->temp(1).reg(); | |
| 2581 Register temp2 = locs()->temp(2).reg(); | |
| 2582 Register result = locs()->out(0).reg(); | |
| 2583 // Try to allocate the object. | |
| 2584 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this); | |
| 2585 compiler->AddSlowPathCode(slow_path); | |
| 2586 intptr_t instance_size = Context::InstanceSize(num_context_variables()); | |
| 2587 | |
| 2588 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(), | |
| 2589 result, // instance | |
| 2590 temp0, temp1, temp2); | |
| 2591 | |
| 2592 // Set up the number of context variables field. | |
| 2593 __ LoadImmediate(temp0, num_context_variables()); | |
| 2594 __ sw(temp0, FieldAddress(result, Context::num_variables_offset())); | |
| 2595 | |
| 2596 __ Bind(slow_path->exit_label()); | |
| 2597 } | |
| 2598 | |
| 2599 | |
| 2600 LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone, | |
| 2601 bool opt) const { | |
| 2602 const intptr_t kNumInputs = 0; | |
| 2603 const intptr_t kNumTemps = 1; | |
| 2604 LocationSummary* locs = new (zone) | |
| 2605 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 2606 locs->set_temp(0, Location::RegisterLocation(T1)); | |
| 2607 locs->set_out(0, Location::RegisterLocation(V0)); | |
| 2608 return locs; | |
| 2609 } | |
| 2610 | |
| 2611 | |
| 2612 void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2613 ASSERT(locs()->temp(0).reg() == T1); | |
| 2614 ASSERT(locs()->out(0).reg() == V0); | |
| 2615 | |
| 2616 __ Comment("AllocateContextInstr"); | |
| 2617 __ LoadImmediate(T1, num_context_variables()); | |
| 2618 compiler->GenerateCall(token_pos(), *StubCode::AllocateContext_entry(), | |
| 2619 RawPcDescriptors::kOther, locs()); | |
| 2620 } | |
| 2621 | |
| 2622 | |
| 2623 LocationSummary* InitStaticFieldInstr::MakeLocationSummary(Zone* zone, | |
| 2624 bool opt) const { | |
| 2625 const intptr_t kNumInputs = 1; | |
| 2626 const intptr_t kNumTemps = 1; | |
| 2627 LocationSummary* locs = new (zone) | |
| 2628 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 2629 locs->set_in(0, Location::RegisterLocation(T0)); | |
| 2630 locs->set_temp(0, Location::RegisterLocation(T1)); | |
| 2631 return locs; | |
| 2632 } | |
| 2633 | |
| 2634 | |
| 2635 void InitStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2636 Register field = locs()->in(0).reg(); | |
| 2637 Register temp = locs()->temp(0).reg(); | |
| 2638 | |
| 2639 Label call_runtime, no_call; | |
| 2640 __ Comment("InitStaticFieldInstr"); | |
| 2641 | |
| 2642 __ lw(temp, FieldAddress(field, Field::static_value_offset())); | |
| 2643 __ BranchEqual(temp, Object::sentinel(), &call_runtime); | |
| 2644 __ BranchNotEqual(temp, Object::transition_sentinel(), &no_call); | |
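| // The static value holds Object::sentinel() while uninitialized and | |
| // Object::transition_sentinel() while its initializer is running; both | |
| // cases must enter the runtime, any other value means already initialized. | |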
| 2645 | |
| 2646 __ Bind(&call_runtime); | |
| 2647 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
| 2648 __ LoadObject(TMP, Object::null_object()); | |
| 2649 __ sw(TMP, Address(SP, 1 * kWordSize)); // Make room for (unused) result. | |
| 2650 __ sw(field, Address(SP, 0 * kWordSize)); | |
| 2651 | |
| 2652 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
| 2653 kInitStaticFieldRuntimeEntry, 1, locs()); | |
| 2654 | |
| 2655 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Purge argument and result. | |
| 2656 | |
| 2657 __ Bind(&no_call); | |
| 2658 } | |
| 2659 | |
| 2660 | |
| 2661 LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone, | |
| 2662 bool opt) const { | |
| 2663 const intptr_t kNumInputs = 1; | |
| 2664 const intptr_t kNumTemps = 0; | |
| 2665 LocationSummary* locs = new (zone) | |
| 2666 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 2667 locs->set_in(0, Location::RegisterLocation(T0)); | |
| 2668 locs->set_out(0, Location::RegisterLocation(T0)); | |
| 2669 return locs; | |
| 2670 } | |
| 2671 | |
| 2672 | |
| 2673 void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2674 Register context_value = locs()->in(0).reg(); | |
| 2675 Register result = locs()->out(0).reg(); | |
| 2676 | |
| 2677 __ Comment("CloneContextInstr"); | |
| 2678 | |
| 2679 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
| 2680 __ LoadObject(TMP, Object::null_object()); // Make room for the result. | |
| 2681 __ sw(TMP, Address(SP, 1 * kWordSize)); | |
| 2682 __ sw(context_value, Address(SP, 0 * kWordSize)); | |
| 2683 | |
| 2684 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), | |
| 2685 kCloneContextRuntimeEntry, 1, locs()); | |
| 2686 __ lw(result, Address(SP, 1 * kWordSize)); // Get result (cloned context). | |
| 2687 __ addiu(SP, SP, Immediate(2 * kWordSize)); | |
| 2688 } | |
| 2689 | |
| 2690 | |
| 2691 LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone, | |
| 2692 bool opt) const { | |
| 2693 UNREACHABLE(); | |
| 2694 return NULL; | |
| 2695 } | |
| 2696 | |
| 2697 | |
| 2698 void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2699 __ Bind(compiler->GetJumpLabel(this)); | |
| 2700 compiler->AddExceptionHandler(catch_try_index(), try_index(), | |
| 2701 compiler->assembler()->CodeSize(), | |
| 2702 handler_token_pos(), is_generated(), | |
| 2703 catch_handler_types_, needs_stacktrace()); | |
| 2704 // On lazy deoptimization we patch the optimized code here to enter the | |
| 2705 // deoptimization stub. | |
| 2706 const intptr_t deopt_id = Thread::ToDeoptAfter(GetDeoptId()); | |
| 2707 if (compiler->is_optimizing()) { | |
| 2708 compiler->AddDeoptIndexAtCall(deopt_id); | |
| 2709 } else { | |
| 2710 compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id, | |
| 2711 TokenPosition::kNoSource); | |
| 2712 } | |
| 2713 if (HasParallelMove()) { | |
| 2714 compiler->parallel_move_resolver()->EmitNativeCode(parallel_move()); | |
| 2715 } | |
| 2716 // Restore SP from FP as we are coming from a throw and the code for | |
| 2717 // popping arguments has not been run. | |
| 2718 const intptr_t fp_sp_dist = | |
| 2719 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; | |
| 2720 ASSERT(fp_sp_dist <= 0); | |
| 2721 __ AddImmediate(SP, FP, fp_sp_dist); | |
| 2722 | |
| 2723 // Auxiliary variables introduced by the try catch can be captured if we are | |
| 2724 // inside a function with yield/resume points. In this case we first need | |
| 2725 // to restore the context to match the context at entry into the closure. | |
| 2726 if (should_restore_closure_context()) { | |
| 2727 const ParsedFunction& parsed_function = compiler->parsed_function(); | |
| 2728 ASSERT(parsed_function.function().IsClosureFunction()); | |
| 2729 LocalScope* scope = parsed_function.node_sequence()->scope(); | |
| 2730 | |
| 2731 LocalVariable* closure_parameter = scope->VariableAt(0); | |
| 2732 ASSERT(!closure_parameter->is_captured()); | |
| 2733 __ LoadFromOffset(CTX, FP, closure_parameter->index() * kWordSize); | |
| 2734 __ LoadFieldFromOffset(CTX, CTX, Closure::context_offset()); | |
| 2735 | |
| 2736 const intptr_t context_index = | |
| 2737 parsed_function.current_context_var()->index(); | |
| 2738 __ StoreToOffset(CTX, FP, context_index * kWordSize); | |
| 2739 } | |
| 2740 | |
| 2741 // Initialize exception and stack trace variables. | |
| 2742 if (exception_var().is_captured()) { | |
| 2743 ASSERT(stacktrace_var().is_captured()); | |
| 2744 __ StoreIntoObjectOffset(CTX, | |
| 2745 Context::variable_offset(exception_var().index()), | |
| 2746 kExceptionObjectReg); | |
| 2747 __ StoreIntoObjectOffset(CTX, | |
| 2748 Context::variable_offset(stacktrace_var().index()), | |
| 2749 kStackTraceObjectReg); | |
| 2750 } else { | |
| 2751 // Initialize the two exception variables directly in their stack slots: | |
| 2752 // the exception and the stack trace. | |
| 2753 __ StoreToOffset(kExceptionObjectReg, FP, | |
| 2754 exception_var().index() * kWordSize); | |
| 2755 __ StoreToOffset(kStackTraceObjectReg, FP, | |
| 2756 stacktrace_var().index() * kWordSize); | |
| 2757 } | |
| 2758 } | |
| 2759 | |
| 2760 | |
| 2761 LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone, | |
| 2762 bool opt) const { | |
| 2763 const intptr_t kNumInputs = 0; | |
| 2764 const intptr_t kNumTemps = 1; | |
| 2765 LocationSummary* summary = new (zone) LocationSummary( | |
| 2766 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
| 2767 summary->set_temp(0, Location::RequiresRegister()); | |
| 2768 return summary; | |
| 2769 } | |
| 2770 | |
| 2771 | |
| 2772 class CheckStackOverflowSlowPath : public SlowPathCode { | |
| 2773 public: | |
| 2774 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction) | |
| 2775 : instruction_(instruction) {} | |
| 2776 | |
| 2777 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2778 if (compiler->isolate()->use_osr() && osr_entry_label()->IsLinked()) { | |
| 2779 Register value = instruction_->locs()->temp(0).reg(); | |
| 2780 __ Comment("CheckStackOverflowSlowPathOsr"); | |
| 2781 __ Bind(osr_entry_label()); | |
| 2782 __ LoadImmediate(value, Thread::kOsrRequest); | |
| 2783 __ sw(value, Address(THR, Thread::stack_overflow_flags_offset())); | |
| 2784 } | |
| 2785 __ Comment("CheckStackOverflowSlowPath"); | |
| 2786 __ Bind(entry_label()); | |
| 2787 compiler->SaveLiveRegisters(instruction_->locs()); | |
| 2788 // pending_deoptimization_env_ is needed to generate a runtime call that | |
| 2789 // may throw an exception. | |
| 2790 ASSERT(compiler->pending_deoptimization_env_ == NULL); | |
| 2791 Environment* env = compiler->SlowPathEnvironmentFor(instruction_); | |
| 2792 compiler->pending_deoptimization_env_ = env; | |
| 2793 compiler->GenerateRuntimeCall( | |
| 2794 instruction_->token_pos(), instruction_->deopt_id(), | |
| 2795 kStackOverflowRuntimeEntry, 0, instruction_->locs()); | |
| 2796 | |
| 2797 if (compiler->isolate()->use_osr() && !compiler->is_optimizing() && | |
| 2798 instruction_->in_loop()) { | |
| 2799 // In unoptimized code, record loop stack checks as possible OSR entries. | |
| 2800 compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry, | |
| 2801 instruction_->deopt_id(), | |
| 2802 TokenPosition::kNoSource); | |
| 2803 } | |
| 2804 compiler->pending_deoptimization_env_ = NULL; | |
| 2805 compiler->RestoreLiveRegisters(instruction_->locs()); | |
| 2806 __ b(exit_label()); | |
| 2807 } | |
| 2808 | |
| 2809 Label* osr_entry_label() { | |
| 2810 ASSERT(Isolate::Current()->use_osr()); | |
| 2811 return &osr_entry_label_; | |
| 2812 } | |
| 2813 | |
| 2814 private: | |
| 2815 CheckStackOverflowInstr* instruction_; | |
| 2816 Label osr_entry_label_; | |
| 2817 }; | |
| 2818 | |
| 2819 | |
| 2820 void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2821 __ Comment("CheckStackOverflowInstr"); | |
| 2822 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); | |
| 2823 compiler->AddSlowPathCode(slow_path); | |
| 2824 | |
| 2825 __ lw(CMPRES1, Address(THR, Thread::stack_limit_offset())); | |
| 2826 __ BranchUnsignedLessEqual(SP, CMPRES1, slow_path->entry_label()); | |
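| // The stack grows downward, so SP at or below the limit means overflow; the | |
| // limit may also be raised artificially to force this check to fail and | |
| // deliver a pending interrupt. | |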
| 2827 if (compiler->CanOSRFunction() && in_loop()) { | |
| 2828 Register temp = locs()->temp(0).reg(); | |
| 2829 // In unoptimized code, check the usage counter to trigger OSR at loop | |
| 2830 // stack checks. Use progressively higher thresholds for more deeply | |
| 2831 // nested loops so that OSR hits outer loops when possible. | |
| 2832 __ LoadObject(temp, compiler->parsed_function().function()); | |
| 2833 intptr_t threshold = | |
| 2834 FLAG_optimization_counter_threshold * (loop_depth() + 1); | |
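| // For example, assuming the default FLAG_optimization_counter_threshold | |
| // of 30000, a depth-0 loop requests OSR after 30000 counter ticks and a | |
| // depth-1 loop only after 60000, biasing OSR toward outer loops. | |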
| 2835 __ lw(temp, FieldAddress(temp, Function::usage_counter_offset())); | |
| 2836 __ BranchSignedGreaterEqual(temp, Immediate(threshold), | |
| 2837 slow_path->osr_entry_label()); | |
| 2838 } | |
| 2839 if (compiler->ForceSlowPathForStackOverflow()) { | |
| 2840 __ b(slow_path->entry_label()); | |
| 2841 } | |
| 2842 __ Bind(slow_path->exit_label()); | |
| 2843 } | |
| 2844 | |
| 2845 | |
| 2846 static void EmitSmiShiftLeft(FlowGraphCompiler* compiler, | |
| 2847 BinarySmiOpInstr* shift_left) { | |
| 2848 const LocationSummary& locs = *shift_left->locs(); | |
| 2849 Register left = locs.in(0).reg(); | |
| 2850 Register result = locs.out(0).reg(); | |
| 2851 Label* deopt = shift_left->CanDeoptimize() | |
| 2852 ? compiler->AddDeoptStub(shift_left->deopt_id(), | |
| 2853 ICData::kDeoptBinarySmiOp) | |
| 2854 : NULL; | |
| 2855 | |
| 2856 __ Comment("EmitSmiShiftLeft"); | |
| 2857 | |
| 2858 if (locs.in(1).IsConstant()) { | |
| 2859 const Object& constant = locs.in(1).constant(); | |
| 2860 ASSERT(constant.IsSmi()); | |
| 2861 // Immediate shift operation takes 5 bits for the count. | |
| 2862 const intptr_t kCountLimit = 0x1F; | |
| 2863 const intptr_t value = Smi::Cast(constant).Value(); | |
| 2864 ASSERT((0 < value) && (value < kCountLimit)); | |
| 2865 if (shift_left->can_overflow()) { | |
| 2866 // Check for overflow (preserve left). | |
| 2867 __ sll(TMP, left, value); | |
| 2868 __ sra(CMPRES1, TMP, value); | |
| 2869 __ bne(CMPRES1, left, deopt); // Overflow. | |
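| // E.g. left == 0x40000000 (the tagged smi 2^29) shifted left by 1 gives | |
| // 0x80000000; shifting back arithmetically yields 0xC0000000 != left, | |
| // so the deopt branch is taken. | |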
| 2870 } | |
| 2871 // Shift for the result now that we know there is no overflow. | |
| 2872 __ sll(result, left, value); | |
| 2873 return; | |
| 2874 } | |
| 2875 | |
| 2876 // Right (locs.in(1)) is not constant. | |
| 2877 Register right = locs.in(1).reg(); | |
| 2878 Range* right_range = shift_left->right_range(); | |
| 2879 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) { | |
| 2880 // TODO(srdjan): Implement code below for is_truncating(). | |
| 2881 // If left is constant, we know the maximal allowed size for right. | |
| 2882 const Object& obj = shift_left->left()->BoundConstant(); | |
| 2883 if (obj.IsSmi()) { | |
| 2884 const intptr_t left_int = Smi::Cast(obj).Value(); | |
| 2885 if (left_int == 0) { | |
| 2886 __ bltz(right, deopt); | |
| 2887 __ mov(result, ZR); | |
| 2888 return; | |
| 2889 } | |
| 2890 const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int); | |
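| // E.g. assuming kSmiBits == 30 on 32-bit targets, left_int == 1 has | |
| // highest bit 0, so max_right == 30 and a right value known to lie in | |
| // [0, 29] needs no runtime range check. | |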
| 2891 const bool right_needs_check = | |
| 2892 !RangeUtils::IsWithin(right_range, 0, max_right - 1); | |
| 2893 if (right_needs_check) { | |
| 2894 const Immediate& max_right_imm = | |
| 2895 Immediate(reinterpret_cast<int32_t>(Smi::New(max_right))); | |
| 2896 __ BranchUnsignedGreaterEqual(right, max_right_imm, deopt); | |
| 2897 } | |
| 2898 __ SmiUntag(TMP, right); | |
| 2899 __ sllv(result, left, TMP); | |
| 2900 } | |
| 2901 return; | |
| 2902 } | |
| 2903 | |
| 2904 const bool right_needs_check = | |
| 2905 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1)); | |
| 2906 if (!shift_left->can_overflow()) { | |
| 2907 if (right_needs_check) { | |
| 2908 if (!RangeUtils::IsPositive(right_range)) { | |
| 2909 ASSERT(shift_left->CanDeoptimize()); | |
| 2910 __ bltz(right, deopt); | |
| 2911 } | |
| 2913 | |
| 2914 __ sltiu(CMPRES1, right, | |
| 2915 Immediate(reinterpret_cast<int32_t>(Smi::New(Smi::kBits)))); | |
| 2916 __ movz(result, ZR, CMPRES1); // result = right >= kBits ? 0 : result. | |
| 2917 __ sra(TMP, right, kSmiTagSize); | |
| 2918 __ sllv(TMP, left, TMP); | |
| 2919 // result = right < kBits ? left << right : result. | |
| 2920 __ movn(result, TMP, CMPRES1); | |
| 2921 } else { | |
| 2922 __ sra(TMP, right, kSmiTagSize); | |
| 2923 __ sllv(result, left, TMP); | |
| 2924 } | |
| 2925 } else { | |
| 2926 if (right_needs_check) { | |
| 2927 const Immediate& bits_imm = | |
| 2928 Immediate(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))); | |
| 2929 ASSERT(shift_left->CanDeoptimize()); | |
| 2930 __ BranchUnsignedGreaterEqual(right, bits_imm, deopt); | |
| 2931 } | |
| 2932 // Left is not a constant. | |
| 2933 Register temp = locs.temp(0).reg(); | |
| 2934 // The shift count is known to be in range here; untag it. | |
| 2935 __ SmiUntag(temp, right); | |
| 2936 // Overflow test (preserve left, right, and temp). | |
| 2937 __ sllv(CMPRES1, left, temp); | |
| 2938 __ srav(CMPRES1, CMPRES1, temp); | |
| 2939 __ bne(CMPRES1, left, deopt); // Overflow. | |
| 2940 // Shift for the result now that we know there is no overflow. | |
| 2941 __ sllv(result, left, temp); | |
| 2942 } | |
| 2943 } | |
| 2944 | |
| 2945 | |
| 2946 class CheckedSmiSlowPath : public SlowPathCode { | |
| 2947 public: | |
| 2948 CheckedSmiSlowPath(CheckedSmiOpInstr* instruction, intptr_t try_index) | |
| 2949 : instruction_(instruction), try_index_(try_index) {} | |
| 2950 | |
| 2951 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 2952 if (Assembler::EmittingComments()) { | |
| 2953 __ Comment("slow path smi operation"); | |
| 2954 } | |
| 2955 __ Bind(entry_label()); | |
| 2956 LocationSummary* locs = instruction_->locs(); | |
| 2957 Register result = locs->out(0).reg(); | |
| 2958 locs->live_registers()->Remove(Location::RegisterLocation(result)); | |
| 2959 | |
| 2960 compiler->SaveLiveRegisters(locs); | |
| 2961 if (instruction_->env() != NULL) { | |
| 2962 Environment* env = compiler->SlowPathEnvironmentFor(instruction_); | |
| 2963 compiler->pending_deoptimization_env_ = env; | |
| 2964 } | |
| 2965 __ Push(locs->in(0).reg()); | |
| 2966 __ Push(locs->in(1).reg()); | |
| 2967 const String& selector = | |
| 2968 String::Handle(instruction_->call()->ic_data()->target_name()); | |
| 2969 const Array& argument_names = | |
| 2970 Array::Handle(instruction_->call()->ic_data()->arguments_descriptor()); | |
| 2971 compiler->EmitMegamorphicInstanceCall( | |
| 2972 selector, argument_names, instruction_->call()->ArgumentCount(), | |
| 2973 instruction_->call()->deopt_id(), instruction_->call()->token_pos(), | |
| 2974 locs, try_index_, | |
| 2975 /* slow_path_argument_count = */ 2); | |
| 2976 __ mov(result, V0); | |
| 2977 compiler->RestoreLiveRegisters(locs); | |
| 2978 __ b(exit_label()); | |
| 2979 compiler->pending_deoptimization_env_ = NULL; | |
| 2980 } | |
| 2981 | |
| 2982 private: | |
| 2983 CheckedSmiOpInstr* instruction_; | |
| 2984 intptr_t try_index_; | |
| 2985 }; | |
| 2986 | |
| 2987 | |
| 2988 LocationSummary* CheckedSmiOpInstr::MakeLocationSummary(Zone* zone, | |
| 2989 bool opt) const { | |
| 2990 const intptr_t kNumInputs = 2; | |
| 2991 const intptr_t kNumTemps = 0; | |
| 2992 LocationSummary* summary = new (zone) LocationSummary( | |
| 2993 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
| 2994 summary->set_in(0, Location::RequiresRegister()); | |
| 2995 summary->set_in(1, Location::RequiresRegister()); | |
| 2996 summary->set_out(0, Location::RequiresRegister()); | |
| 2997 return summary; | |
| 2998 } | |
| 2999 | |
| 3000 | |
| 3001 void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3002 CheckedSmiSlowPath* slow_path = | |
| 3003 new CheckedSmiSlowPath(this, compiler->CurrentTryIndex()); | |
| 3004 compiler->AddSlowPathCode(slow_path); | |
| 3005 // Test operands if necessary. | |
| 3006 Register left = locs()->in(0).reg(); | |
| 3007 Register right = locs()->in(1).reg(); | |
| 3008 Register result = locs()->out(0).reg(); | |
| 3009 intptr_t left_cid = this->left()->Type()->ToCid(); | |
| 3010 intptr_t right_cid = this->right()->Type()->ToCid(); | |
| 3011 bool combined_smi_check = false; | |
| 3012 if (this->left()->definition() == this->right()->definition()) { | |
| 3013 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); | |
| 3014 } else if (left_cid == kSmiCid) { | |
| 3015 __ andi(CMPRES1, right, Immediate(kSmiTagMask)); | |
| 3016 } else if (right_cid == kSmiCid) { | |
| 3017 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); | |
| 3018 } else { | |
| 3019 combined_smi_check = true; | |
| 3020 __ or_(result, left, right); | |
| 3021 __ andi(CMPRES1, result, Immediate(kSmiTagMask)); | |
| 3022 } | |
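| // Smis have a zero tag bit, so or-ing the operands and masking with | |
| // kSmiTagMask is zero only if both are smis; in the combined case the | |
| // or result doubles as the answer for kBIT_OR below. | |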
| 3023 __ bne(CMPRES1, ZR, slow_path->entry_label()); | |
| 3024 switch (op_kind()) { | |
| 3025 case Token::kADD: | |
| 3026 __ AdduDetectOverflow(result, left, right, CMPRES1); | |
| 3027 __ bltz(CMPRES1, slow_path->entry_label()); | |
| 3028 break; | |
| 3029 case Token::kSUB: | |
| 3030 __ SubuDetectOverflow(result, left, right, CMPRES1); | |
| 3031 __ bltz(CMPRES1, slow_path->entry_label()); | |
| 3032 break; | |
| 3033 case Token::kMUL: | |
| 3034 __ sra(TMP, left, kSmiTagSize); | |
| 3035 __ mult(TMP, right); | |
| 3036 __ mflo(result); | |
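| // The 64-bit product in HI:LO fits in 32 bits exactly when HI equals | |
| // the sign extension of LO, i.e. sra(LO, 31); otherwise fall back to | |
| // the slow path. | |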
| 3037 __ mfhi(CMPRES2); | |
| 3038 __ sra(CMPRES1, result, 31); | |
| 3039 __ bne(CMPRES1, CMPRES2, slow_path->entry_label()); | |
| 3040 break; | |
| 3041 case Token::kBIT_OR: | |
| 3042 // Operation part of combined smi check. | |
| 3043 if (!combined_smi_check) { | |
| 3044 __ or_(result, left, right); | |
| 3045 } | |
| 3046 break; | |
| 3047 case Token::kBIT_AND: | |
| 3048 __ and_(result, left, right); | |
| 3049 break; | |
| 3050 case Token::kBIT_XOR: | |
| 3051 __ xor_(result, left, right); | |
| 3052 break; | |
| 3053 case Token::kSHL: | |
| 3054 ASSERT(result != left); | |
| 3055 ASSERT(result != right); | |
| 3056 __ BranchUnsignedGreater(right, Immediate(Smi::RawValue(Smi::kBits)), | |
| 3057 slow_path->entry_label()); | |
| 3058 // Check for overflow by shifting left and shifting back arithmetically. | |
| 3059 // If the result is different from the original, there was overflow. | |
| 3060 __ delay_slot()->SmiUntag(TMP, right); | |
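| // On MIPS the delay-slot instruction executes whether or not the branch | |
| // is taken, so TMP holds the untagged count on the fall-through path. | |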
| 3061 __ sllv(result, left, TMP); | |
| 3062 __ srav(CMPRES1, result, TMP); | |
| 3063 __ bne(CMPRES1, left, slow_path->entry_label()); | |
| 3064 break; | |
| 3065 case Token::kSHR: | |
| 3066 __ BranchUnsignedGreater(right, Immediate(Smi::RawValue(Smi::kBits)), | |
| 3067 slow_path->entry_label()); | |
| 3068 __ delay_slot()->SmiUntag(result, right); | |
| 3069 __ SmiUntag(TMP, left); | |
| 3070 __ srav(result, TMP, result); | |
| 3071 __ SmiTag(result); | |
| 3072 break; | |
| 3073 default: | |
| 3074 UNIMPLEMENTED(); | |
| 3075 } | |
| 3076 __ Bind(slow_path->exit_label()); | |
| 3077 } | |
| 3078 | |
| 3079 | |
| 3080 class CheckedSmiComparisonSlowPath : public SlowPathCode { | |
| 3081 public: | |
| 3082 CheckedSmiComparisonSlowPath(CheckedSmiComparisonInstr* instruction, | |
| 3083 intptr_t try_index, | |
| 3084 BranchLabels labels, | |
| 3085 bool merged) | |
| 3086 : instruction_(instruction), | |
| 3087 try_index_(try_index), | |
| 3088 labels_(labels), | |
| 3089 merged_(merged) {} | |
| 3090 | |
| 3091 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3092 if (Assembler::EmittingComments()) { | |
| 3093 __ Comment("slow path smi operation"); | |
| 3094 } | |
| 3095 __ Bind(entry_label()); | |
| 3096 LocationSummary* locs = instruction_->locs(); | |
| 3097 Register result = merged_ ? locs->temp(0).reg() : locs->out(0).reg(); | |
| 3098 locs->live_registers()->Remove(Location::RegisterLocation(result)); | |
| 3099 | |
| 3100 compiler->SaveLiveRegisters(locs); | |
| 3101 if (instruction_->env() != NULL) { | |
| 3102 Environment* env = compiler->SlowPathEnvironmentFor(instruction_); | |
| 3103 compiler->pending_deoptimization_env_ = env; | |
| 3104 } | |
| 3105 __ Push(locs->in(0).reg()); | |
| 3106 __ Push(locs->in(1).reg()); | |
| 3107 const String& selector = | |
| 3108 String::Handle(instruction_->call()->ic_data()->target_name()); | |
| 3109 const Array& argument_names = | |
| 3110 Array::Handle(instruction_->call()->ic_data()->arguments_descriptor()); | |
| 3111 compiler->EmitMegamorphicInstanceCall( | |
| 3112 selector, argument_names, instruction_->call()->ArgumentCount(), | |
| 3113 instruction_->call()->deopt_id(), instruction_->call()->token_pos(), | |
| 3114 locs, try_index_, | |
| 3115 /* slow_path_argument_count = */ 2); | |
| 3116 __ mov(result, V0); | |
| 3117 compiler->RestoreLiveRegisters(locs); | |
| 3118 compiler->pending_deoptimization_env_ = NULL; | |
| 3119 if (merged_) { | |
| 3120 __ BranchEqual(result, Bool::True(), instruction_->is_negated() | |
| 3121 ? labels_.false_label | |
| 3122 : labels_.true_label); | |
| 3123 __ b(instruction_->is_negated() ? labels_.true_label | |
| 3124 : labels_.false_label); | |
| 3125 } else { | |
| 3126 __ b(exit_label()); | |
| 3127 } | |
| 3128 } | |
| 3129 | |
| 3130 private: | |
| 3131 CheckedSmiComparisonInstr* instruction_; | |
| 3132 intptr_t try_index_; | |
| 3133 BranchLabels labels_; | |
| 3134 bool merged_; | |
| 3135 }; | |
| 3136 | |
| 3137 | |
| 3138 LocationSummary* CheckedSmiComparisonInstr::MakeLocationSummary( | |
| 3139 Zone* zone, | |
| 3140 bool opt) const { | |
| 3141 const intptr_t kNumInputs = 2; | |
| 3142 const intptr_t kNumTemps = 1; | |
| 3143 LocationSummary* summary = new (zone) LocationSummary( | |
| 3144 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
| 3145 summary->set_in(0, Location::RequiresRegister()); | |
| 3146 summary->set_in(1, Location::RequiresRegister()); | |
| 3147 summary->set_temp(0, Location::RequiresRegister()); | |
| 3148 summary->set_out(0, Location::RequiresRegister()); | |
| 3149 return summary; | |
| 3150 } | |
| 3151 | |
| 3152 | |
| 3153 Condition CheckedSmiComparisonInstr::EmitComparisonCode( | |
| 3154 FlowGraphCompiler* compiler, | |
| 3155 BranchLabels labels) { | |
| 3156 return EmitSmiComparisonOp(compiler, *locs(), kind()); | |
| 3157 } | |
| 3158 | |
| 3159 | |
| 3160 #define EMIT_SMI_CHECK \ | |
| 3161 Register left = locs()->in(0).reg(); \ | |
| 3162 Register right = locs()->in(1).reg(); \ | |
| 3163 Register temp = locs()->temp(0).reg(); \ | |
| 3164 intptr_t left_cid = this->left()->Type()->ToCid(); \ | |
| 3165 intptr_t right_cid = this->right()->Type()->ToCid(); \ | |
| 3166 if (this->left()->definition() == this->right()->definition()) { \ | |
| 3167 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); \ | |
| 3168 } else if (left_cid == kSmiCid) { \ | |
| 3169 __ andi(CMPRES1, right, Immediate(kSmiTagMask)); \ | |
| 3170 } else if (right_cid == kSmiCid) { \ | |
| 3171 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); \ | |
| 3172 } else { \ | |
| 3173 __ or_(temp, left, right); \ | |
| 3174 __ andi(CMPRES1, temp, Immediate(kSmiTagMask)); \ | |
| 3175 } \ | |
| 3176 __ bne(CMPRES1, ZR, slow_path->entry_label()); | |
| 3177 | |
| 3178 | |
| 3179 void CheckedSmiComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler, | |
| 3180 BranchInstr* branch) { | |
| 3181 BranchLabels labels = compiler->CreateBranchLabels(branch); | |
| 3182 CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath( | |
| 3183 this, compiler->CurrentTryIndex(), labels, | |
| 3184 /* merged = */ true); | |
| 3185 compiler->AddSlowPathCode(slow_path); | |
| 3186 EMIT_SMI_CHECK; | |
| 3187 Condition true_condition = EmitComparisonCode(compiler, labels); | |
| 3188 ASSERT(true_condition.IsValid()); | |
| 3189 EmitBranchOnCondition(compiler, true_condition, labels); | |
| 3190 __ Bind(slow_path->exit_label()); | |
| 3191 } | |
| 3192 | |
| 3193 | |
| 3194 void CheckedSmiComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3195 Label true_label, false_label, done; | |
| 3196 BranchLabels labels = {&true_label, &false_label, &false_label}; | |
| 3197 CheckedSmiComparisonSlowPath* slow_path = new CheckedSmiComparisonSlowPath( | |
| 3198 this, compiler->CurrentTryIndex(), labels, | |
| 3199 /* merged = */ false); | |
| 3200 compiler->AddSlowPathCode(slow_path); | |
| 3201 EMIT_SMI_CHECK; | |
| 3202 Condition true_condition = EmitComparisonCode(compiler, labels); | |
| 3203 ASSERT(true_condition.IsValid()); | |
| 3204 EmitBranchOnCondition(compiler, true_condition, labels); | |
| 3205 Register result = locs()->out(0).reg(); | |
| 3206 __ Bind(&false_label); | |
| 3207 __ LoadObject(result, Bool::False()); | |
| 3208 __ b(&done); | |
| 3209 __ Bind(&true_label); | |
| 3210 __ LoadObject(result, Bool::True()); | |
| 3211 __ Bind(&done); | |
| 3212 __ Bind(slow_path->exit_label()); | |
| 3213 } | |
| 3214 | |
| 3215 | |
| 3216 LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone, | |
| 3217 bool opt) const { | |
| 3218 const intptr_t kNumInputs = 2; | |
| 3219 const intptr_t kNumTemps = | |
| 3220 ((op_kind() == Token::kADD) || (op_kind() == Token::kMOD) || | |
| 3221 (op_kind() == Token::kTRUNCDIV) || | |
| 3222 (((op_kind() == Token::kSHL) && can_overflow()) || | |
| 3223 (op_kind() == Token::kSHR))) | |
| 3224 ? 1 | |
| 3225 : 0; | |
| 3226 LocationSummary* summary = new (zone) | |
| 3227 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 3228 if (op_kind() == Token::kTRUNCDIV) { | |
| 3229 summary->set_in(0, Location::RequiresRegister()); | |
| 3230 if (RightIsPowerOfTwoConstant()) { | |
| 3231 ConstantInstr* right_constant = right()->definition()->AsConstant(); | |
| 3232 summary->set_in(1, Location::Constant(right_constant)); | |
| 3233 } else { | |
| 3234 summary->set_in(1, Location::RequiresRegister()); | |
| 3235 } | |
| 3236 summary->set_temp(0, Location::RequiresRegister()); | |
| 3237 summary->set_out(0, Location::RequiresRegister()); | |
| 3238 return summary; | |
| 3239 } | |
| 3240 if (op_kind() == Token::kMOD) { | |
| 3241 summary->set_in(0, Location::RequiresRegister()); | |
| 3242 summary->set_in(1, Location::RequiresRegister()); | |
| 3243 summary->set_temp(0, Location::RequiresRegister()); | |
| 3244 summary->set_out(0, Location::RequiresRegister()); | |
| 3245 return summary; | |
| 3246 } | |
| 3247 summary->set_in(0, Location::RequiresRegister()); | |
| 3248 summary->set_in(1, Location::RegisterOrSmiConstant(right())); | |
| 3249 if (((op_kind() == Token::kSHL) && can_overflow()) || | |
| 3250 (op_kind() == Token::kSHR)) { | |
| 3251 summary->set_temp(0, Location::RequiresRegister()); | |
| 3252 } else if (op_kind() == Token::kADD) { | |
| 3253 // Need an extra temp for the overflow detection code. | |
| 3254 summary->set_temp(0, Location::RequiresRegister()); | |
| 3255 } | |
| 3256 // We make use of 3-operand instructions: the result register need not | |
| 3257 // be identical to the first input register, as it must be on Intel. | |
| 3258 summary->set_out(0, Location::RequiresRegister()); | |
| 3259 return summary; | |
| 3260 } | |
| 3261 | |
| 3262 | |
| 3263 void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3264 __ Comment("BinarySmiOpInstr"); | |
| 3265 if (op_kind() == Token::kSHL) { | |
| 3266 EmitSmiShiftLeft(compiler, this); | |
| 3267 return; | |
| 3268 } | |
| 3269 | |
| 3270 Register left = locs()->in(0).reg(); | |
| 3271 Register result = locs()->out(0).reg(); | |
| 3272 Label* deopt = NULL; | |
| 3273 if (CanDeoptimize()) { | |
| 3274 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); | |
| 3275 } | |
| 3276 | |
| 3277 if (locs()->in(1).IsConstant()) { | |
| 3278 const Object& constant = locs()->in(1).constant(); | |
| 3279 ASSERT(constant.IsSmi()); | |
| 3280 const int32_t imm = reinterpret_cast<int32_t>(constant.raw()); | |
| 3281 switch (op_kind()) { | |
| 3282 case Token::kADD: { | |
| 3283 if (deopt == NULL) { | |
| 3284 __ AddImmediate(result, left, imm); | |
| 3285 } else { | |
| 3286 Register temp = locs()->temp(0).reg(); | |
| 3287 __ AddImmediateDetectOverflow(result, left, imm, CMPRES1, temp); | |
| 3288 __ bltz(CMPRES1, deopt); | |
| 3289 } | |
| 3290 break; | |
| 3291 } | |
| 3292 case Token::kSUB: { | |
| 3293 __ Comment("kSUB imm"); | |
| 3294 if (deopt == NULL) { | |
| 3295 __ AddImmediate(result, left, -imm); | |
| 3296 } else { | |
| 3297 __ SubImmediateDetectOverflow(result, left, imm, CMPRES1); | |
| 3298 __ bltz(CMPRES1, deopt); | |
| 3299 } | |
| 3300 break; | |
| 3301 } | |
| 3302 case Token::kMUL: { | |
| 3303 // Keep left value tagged and untag right value. | |
| 3304 const intptr_t value = Smi::Cast(constant).Value(); | |
| 3305 __ LoadImmediate(TMP, value); | |
| 3306 __ mult(left, TMP); | |
| 3307 __ mflo(result); | |
| 3308 if (deopt != NULL) { | |
| 3309 __ mfhi(CMPRES2); | |
| 3310 __ sra(CMPRES1, result, 31); | |
| 3311 __ bne(CMPRES1, CMPRES2, deopt); | |
| 3312 } | |
| 3313 break; | |
| 3314 } | |
| 3315 case Token::kTRUNCDIV: { | |
| 3316 const intptr_t value = Smi::Cast(constant).Value(); | |
| 3317 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value))); | |
| 3318 const intptr_t shift_count = | |
| 3319 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize; | |
| 3320 ASSERT(kSmiTagSize == 1); | |
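| // Round the dividend toward zero before shifting. E.g. for left == -9 | |
| // (tagged -18) divided by 4, shift_count == 3: the sign mask adds | |
| // 2^3 - 1 == 7 giving -11, and an arithmetic shift by 3 yields -2, as | |
| // truncating division requires. | |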
| 3321 __ sra(TMP, left, 31); | |
| 3322 ASSERT(shift_count > 1); // The 1 and -1 cases are handled above. | |
| 3323 Register temp = locs()->temp(0).reg(); | |
| 3324 __ srl(TMP, TMP, 32 - shift_count); | |
| 3325 __ addu(temp, left, TMP); | |
| 3326 ASSERT(shift_count > 0); | |
| 3327 __ sra(result, temp, shift_count); | |
| 3328 if (value < 0) { | |
| 3329 __ subu(result, ZR, result); | |
| 3330 } | |
| 3331 __ SmiTag(result); | |
| 3332 break; | |
| 3333 } | |
| 3334 case Token::kBIT_AND: { | |
| 3335 // No overflow check. | |
| 3336 __ AndImmediate(result, left, imm); | |
| 3337 break; | |
| 3338 } | |
| 3339 case Token::kBIT_OR: { | |
| 3340 // No overflow check. | |
| 3341 __ OrImmediate(result, left, imm); | |
| 3342 break; | |
| 3343 } | |
| 3344 case Token::kBIT_XOR: { | |
| 3345 // No overflow check. | |
| 3346 __ XorImmediate(result, left, imm); | |
| 3347 break; | |
| 3348 } | |
| 3349 case Token::kSHR: { | |
| 3350 // The sra operation masks the count to 5 bits. | |
| 3351 const intptr_t kCountLimit = 0x1F; | |
| 3352 const intptr_t value = Smi::Cast(constant).Value(); | |
| 3353 __ Comment("kSHR"); | |
| 3354 __ sra(result, left, Utils::Minimum(value + kSmiTagSize, kCountLimit)); | |
| 3355 __ SmiTag(result); | |
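| // E.g. left == 20 (the tagged smi 10) with value == 1: sra by 2 gives | |
| // 5, and re-tagging yields the tagged smi 5 == 10 >> 1. | |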
| 3356 break; | |
| 3357 } | |
| 3358 | |
| 3359 default: | |
| 3360 UNREACHABLE(); | |
| 3361 break; | |
| 3362 } | |
| 3363 return; | |
| 3364 } | |
| 3365 | |
| 3366 Register right = locs()->in(1).reg(); | |
| 3367 switch (op_kind()) { | |
| 3368 case Token::kADD: { | |
| 3369 if (deopt == NULL) { | |
| 3370 __ addu(result, left, right); | |
| 3371 } else { | |
| 3372 Register temp = locs()->temp(0).reg(); | |
| 3373 __ AdduDetectOverflow(result, left, right, CMPRES1, temp); | |
| 3374 __ bltz(CMPRES1, deopt); | |
| 3375 } | |
| 3376 break; | |
| 3377 } | |
| 3378 case Token::kSUB: { | |
| 3379 __ Comment("kSUB"); | |
| 3380 if (deopt == NULL) { | |
| 3381 __ subu(result, left, right); | |
| 3382 } else { | |
| 3383 __ SubuDetectOverflow(result, left, right, CMPRES1); | |
| 3384 __ bltz(CMPRES1, deopt); | |
| 3385 } | |
| 3386 break; | |
| 3387 } | |
| 3388 case Token::kMUL: { | |
| 3389 __ Comment("kMUL"); | |
| 3390 __ sra(TMP, left, kSmiTagSize); | |
| 3391 __ mult(TMP, right); | |
| 3392 __ mflo(result); | |
| 3393 if (deopt != NULL) { | |
| 3394 __ mfhi(CMPRES2); | |
| 3395 __ sra(CMPRES1, result, 31); | |
| 3396 __ bne(CMPRES1, CMPRES2, deopt); | |
| 3397 } | |
| 3398 break; | |
| 3399 } | |
| 3400 case Token::kBIT_AND: { | |
| 3401 // No overflow check. | |
| 3402 __ and_(result, left, right); | |
| 3403 break; | |
| 3404 } | |
| 3405 case Token::kBIT_OR: { | |
| 3406 // No overflow check. | |
| 3407 __ or_(result, left, right); | |
| 3408 break; | |
| 3409 } | |
| 3410 case Token::kBIT_XOR: { | |
| 3411 // No overflow check. | |
| 3412 __ xor_(result, left, right); | |
| 3413 break; | |
| 3414 } | |
| 3415 case Token::kTRUNCDIV: { | |
| 3416 if (RangeUtils::CanBeZero(right_range())) { | |
| 3417 // Handle divide by zero in the runtime. | |
| 3418 __ beq(right, ZR, deopt); | |
| 3419 } | |
| 3420 Register temp = locs()->temp(0).reg(); | |
| 3421 __ SmiUntag(temp, left); | |
| 3422 __ SmiUntag(TMP, right); | |
| 3423 __ div(temp, TMP); | |
| 3424 __ mflo(result); | |
| 3425 // Check the corner case of dividing MIN_SMI by -1, in which case we | |
| 3426 // cannot tag the result. | |
| 3427 __ BranchEqual(result, Immediate(0x40000000), deopt); | |
| 3428 __ SmiTag(result); | |
| 3429 break; | |
| 3430 } | |
| 3431 case Token::kMOD: { | |
| 3432 if (RangeUtils::CanBeZero(right_range())) { | |
| 3433 // Handle divide by zero in the runtime. | |
| 3434 __ beq(right, ZR, deopt); | |
| 3435 } | |
| 3436 Register temp = locs()->temp(0).reg(); | |
| 3437 __ SmiUntag(temp, left); | |
| 3438 __ SmiUntag(TMP, right); | |
| 3439 __ div(temp, TMP); | |
| 3440 __ mfhi(result); | |
| 3441 // res = left % right; | |
| 3442 // if (res < 0) { | |
| 3443 // if (right < 0) { | |
| 3444 // res = res - right; | |
| 3445 // } else { | |
| 3446 // res = res + right; | |
| 3447 // } | |
| 3448 // } | |
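| // E.g. -7 % 3: div leaves remainder -1; right is positive, so the | |
| // adjusted result is -1 + 3 == 2, matching Dart's non-negative modulo. | |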
| 3449 Label done; | |
| 3450 __ bgez(result, &done); | |
| 3451 if (RangeUtils::Overlaps(right_range(), -1, 1)) { | |
| 3452 Label subtract; | |
| 3453 __ bltz(right, &subtract); | |
| 3454 __ addu(result, result, TMP); | |
| 3455 __ b(&done); | |
| 3456 __ Bind(&subtract); | |
| 3457 __ subu(result, result, TMP); | |
| 3458 } else if (right_range()->IsPositive()) { | |
| 3459 // Right is positive. | |
| 3460 __ addu(result, result, TMP); | |
| 3461 } else { | |
| 3462 // Right is negative. | |
| 3463 __ subu(result, result, TMP); | |
| 3464 } | |
| 3465 __ Bind(&done); | |
| 3466 __ SmiTag(result); | |
| 3467 break; | |
| 3468 } | |
| 3469 case Token::kSHR: { | |
| 3470 Register temp = locs()->temp(0).reg(); | |
| 3471 if (CanDeoptimize()) { | |
| 3472 __ bltz(right, deopt); | |
| 3473 } | |
| 3474 __ SmiUntag(temp, right); | |
| 3475 // sra operation masks the count to 5 bits. | |
| 3476 const intptr_t kCountLimit = 0x1F; | |
| 3477 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) { | |
| 3478 Label ok; | |
| 3479 __ BranchSignedLessEqual(temp, Immediate(kCountLimit), &ok); | |
| 3480 __ LoadImmediate(temp, kCountLimit); | |
| 3481 __ Bind(&ok); | |
| 3482 } | |
| 3483 | |
| 3484 __ SmiUntag(CMPRES1, left); | |
| 3485 __ srav(result, CMPRES1, temp); | |
| 3486 __ SmiTag(result); | |
| 3487 break; | |
| 3488 } | |
| 3489 case Token::kDIV: { | |
| 3490 // Dispatches to 'Double./'. | |
| 3491 // TODO(srdjan): Implement as conversion to double and double division. | |
| 3492 UNREACHABLE(); | |
| 3493 break; | |
| 3494 } | |
| 3495 case Token::kOR: | |
| 3496 case Token::kAND: { | |
| 3497 // The flow graph builder has dissected this operation to guarantee | |
| 3498 // correct behavior (short-circuit evaluation). | |
| 3499 UNREACHABLE(); | |
| 3500 break; | |
| 3501 } | |
| 3502 default: | |
| 3503 UNREACHABLE(); | |
| 3504 break; | |
| 3505 } | |
| 3506 } | |
| 3507 | |
| 3508 | |
| 3509 LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone, | |
| 3510 bool opt) const { | |
| 3511 intptr_t left_cid = left()->Type()->ToCid(); | |
| 3512 intptr_t right_cid = right()->Type()->ToCid(); | |
| 3513 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid)); | |
| 3514 const intptr_t kNumInputs = 2; | |
| 3515 const intptr_t kNumTemps = 0; | |
| 3516 LocationSummary* summary = new (zone) | |
| 3517 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 3518 summary->set_in(0, Location::RequiresRegister()); | |
| 3519 summary->set_in(1, Location::RequiresRegister()); | |
| 3520 return summary; | |
| 3521 } | |
| 3522 | |
| 3523 | |
| 3524 void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3525 Label* deopt = | |
| 3526 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp, | |
| 3527 licm_hoisted_ ? ICData::kHoisted : 0); | |
| 3528 intptr_t left_cid = left()->Type()->ToCid(); | |
| 3529 intptr_t right_cid = right()->Type()->ToCid(); | |
| 3530 Register left = locs()->in(0).reg(); | |
| 3531 Register right = locs()->in(1).reg(); | |
| 3532 if (this->left()->definition() == this->right()->definition()) { | |
| 3533 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); | |
| 3534 } else if (left_cid == kSmiCid) { | |
| 3535 __ andi(CMPRES1, right, Immediate(kSmiTagMask)); | |
| 3536 } else if (right_cid == kSmiCid) { | |
| 3537 __ andi(CMPRES1, left, Immediate(kSmiTagMask)); | |
| 3538 } else { | |
| 3539 __ or_(TMP, left, right); | |
| 3540 __ andi(CMPRES1, TMP, Immediate(kSmiTagMask)); | |
| 3541 } | |
| 3542 __ beq(CMPRES1, ZR, deopt); | |
| 3543 } | |
| 3544 | |
| 3545 | |
| 3546 LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 3547 const intptr_t kNumInputs = 1; | |
| 3548 const intptr_t kNumTemps = 1; | |
| 3549 LocationSummary* summary = new (zone) LocationSummary( | |
| 3550 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
| 3551 summary->set_in(0, Location::RequiresFpuRegister()); | |
| 3552 summary->set_temp(0, Location::RequiresRegister()); | |
| 3553 summary->set_out(0, Location::RequiresRegister()); | |
| 3554 return summary; | |
| 3555 } | |
| 3556 | |
| 3557 | |
| 3558 void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3559 ASSERT(from_representation() == kUnboxedDouble); | |
| 3560 | |
| 3561 Register out_reg = locs()->out(0).reg(); | |
| 3562 DRegister value = locs()->in(0).fpu_reg(); | |
| 3563 | |
| 3564 BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(), | |
| 3565 out_reg, locs()->temp(0).reg()); | |
| 3566 __ StoreDToOffset(value, out_reg, Double::value_offset() - kHeapObjectTag); | |
| 3567 } | |
| 3568 | |
| 3569 | |
| 3570 LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 3571 const intptr_t kNumInputs = 1; | |
| 3572 const intptr_t kNumTemps = 0; | |
| 3573 LocationSummary* summary = new (zone) | |
| 3574 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 3575 summary->set_in(0, Location::RequiresRegister()); | |
| 3576 if (representation() == kUnboxedMint) { | |
| 3577 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
| 3578 Location::RequiresRegister())); | |
| 3579 } else { | |
| 3580 summary->set_out(0, Location::RequiresFpuRegister()); | |
| 3581 } | |
| 3582 return summary; | |
| 3583 } | |
| 3584 | |
| 3585 | |
| 3586 void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) { | |
| 3587 const Register box = locs()->in(0).reg(); | |
| 3588 | |
| 3589 switch (representation()) { | |
| 3590 case kUnboxedMint: { | |
| 3591 PairLocation* result = locs()->out(0).AsPairLocation(); | |
| 3592 __ LoadFromOffset(result->At(0).reg(), box, | |
| 3593 ValueOffset() - kHeapObjectTag); | |
| 3594 __ LoadFromOffset(result->At(1).reg(), box, | |
| 3595 ValueOffset() - kHeapObjectTag + kWordSize); | |
| 3596 break; | |
| 3597 } | |
| 3598 | |
| 3599 case kUnboxedDouble: { | |
| 3600 const DRegister result = locs()->out(0).fpu_reg(); | |
| 3601 __ LoadDFromOffset(result, box, Double::value_offset() - kHeapObjectTag); | |
| 3602 break; | |
| 3603 } | |
| 3604 | |
| 3605 case kUnboxedFloat32x4: | |
| 3606 case kUnboxedFloat64x2: | |
| 3607 case kUnboxedInt32x4: { | |
| 3608 UNIMPLEMENTED(); | |
| 3609 break; | |
| 3610 } | |
| 3611 | |
| 3612 default: | |
| 3613 UNREACHABLE(); | |
| 3614 break; | |
| 3615 } | |
| 3616 } | |
| 3617 | |
| 3618 | |
| 3619 void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) { | |
| 3620 const Register box = locs()->in(0).reg(); | |
| 3621 | |
| 3622 switch (representation()) { | |
| 3623 case kUnboxedMint: { | |
| 3624 PairLocation* result = locs()->out(0).AsPairLocation(); | |
| 3625 __ SmiUntag(result->At(0).reg(), box); | |
| 3626 __ sra(result->At(1).reg(), result->At(0).reg(), 31); | |
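| // The sra sign-extends the untagged smi into the high word of the | |
| // 64-bit mint pair. | |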
| 3627 break; | |
| 3628 } | |
| 3629 | |
| 3630 case kUnboxedDouble: { | |
| 3631 const DRegister result = locs()->out(0).fpu_reg(); | |
| 3632 __ SmiUntag(TMP, box); | |
| 3633 __ mtc1(TMP, STMP1); | |
| 3634 __ cvtdw(result, STMP1); | |
| 3635 break; | |
| 3636 } | |
| 3637 | |
| 3638 default: | |
| 3639 UNREACHABLE(); | |
| 3640 break; | |
| 3641 } | |
| 3642 } | |
| 3643 | |
| 3644 | |
| 3645 void UnboxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3646 const intptr_t value_cid = value()->Type()->ToCid(); | |
| 3647 const intptr_t box_cid = BoxCid(); | |
| 3648 | |
| 3649 if (value_cid == box_cid) { | |
| 3650 EmitLoadFromBox(compiler); | |
| 3651 } else if (CanConvertSmi() && (value_cid == kSmiCid)) { | |
| 3652 EmitSmiConversion(compiler); | |
| 3653 } else { | |
| 3654 const Register box = locs()->in(0).reg(); | |
| 3655 Label* deopt = | |
| 3656 compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptCheckClass); | |
| 3657 Label is_smi; | |
| 3658 | |
| 3659 if ((value()->Type()->ToNullableCid() == box_cid) && | |
| 3660 value()->Type()->is_nullable()) { | |
| 3661 __ BranchEqual(box, Object::null_object(), deopt); | |
| 3662 } else { | |
| 3663 __ andi(CMPRES1, box, Immediate(kSmiTagMask)); | |
| 3664 __ beq(CMPRES1, ZR, CanConvertSmi() ? &is_smi : deopt); | |
| 3665 __ LoadClassId(CMPRES1, box); | |
| 3666 __ BranchNotEqual(CMPRES1, Immediate(box_cid), deopt); | |
| 3667 } | |
| 3668 | |
| 3669 EmitLoadFromBox(compiler); | |
| 3670 | |
| 3671 if (is_smi.IsLinked()) { | |
| 3672 Label done; | |
| 3673 __ b(&done); | |
| 3674 __ Bind(&is_smi); | |
| 3675 EmitSmiConversion(compiler); | |
| 3676 __ Bind(&done); | |
| 3677 } | |
| 3678 } | |
| 3679 } | |
| 3680 | |
| 3681 | |
| 3682 LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone, | |
| 3683 bool opt) const { | |
| 3684 ASSERT((from_representation() == kUnboxedInt32) || | |
| 3685 (from_representation() == kUnboxedUint32)); | |
| 3686 const intptr_t kNumInputs = 1; | |
| 3687 const intptr_t kNumTemps = 1; | |
| 3688 LocationSummary* summary = new (zone) LocationSummary( | |
| 3689 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
| 3690 summary->set_in(0, Location::RequiresRegister()); | |
| 3691 summary->set_temp(0, Location::RequiresRegister()); | |
| 3692 summary->set_out(0, Location::RequiresRegister()); | |
| 3693 return summary; | |
| 3694 } | |
| 3695 | |
| 3696 | |
| 3697 void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3698 Register value = locs()->in(0).reg(); | |
| 3699 Register out = locs()->out(0).reg(); | |
| 3700 ASSERT(value != out); | |
| 3701 | |
| 3702 __ SmiTag(out, value); | |
| 3703 if (!ValueFitsSmi()) { | |
| 3704 Register temp = locs()->temp(0).reg(); | |
| 3705 Label done; | |
| 3706 if (from_representation() == kUnboxedInt32) { | |
| 3707 __ SmiUntag(CMPRES1, out); | |
| 3708 __ BranchEqual(CMPRES1, value, &done); | |
| 3709 } else { | |
| 3710 ASSERT(from_representation() == kUnboxedUint32); | |
| 3711 __ AndImmediate(CMPRES1, value, 0xC0000000); | |
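| // Assuming a 30-bit smi payload on 32-bit targets, a uint32 fits in a | |
| // smi only if it is below 2^30, i.e. bits 31 and 30 are both clear. | |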
| 3712 __ BranchEqual(CMPRES1, ZR, &done); | |
| 3713 } | |
| 3714 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out, | |
| 3715 temp); | |
| 3716 Register hi; | |
| 3717 if (from_representation() == kUnboxedInt32) { | |
| 3718 hi = temp; | |
| 3719 __ sra(hi, value, kBitsPerWord - 1); | |
| 3720 } else { | |
| 3721 ASSERT(from_representation() == kUnboxedUint32); | |
| 3722 hi = ZR; | |
| 3723 } | |
| 3724 __ StoreToOffset(value, out, Mint::value_offset() - kHeapObjectTag); | |
| 3725 __ StoreToOffset(hi, out, | |
| 3726 Mint::value_offset() - kHeapObjectTag + kWordSize); | |
| 3727 __ Bind(&done); | |
| 3728 } | |
| 3729 } | |
| 3730 | |
| 3731 | |
| 3732 LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone, | |
| 3733 bool opt) const { | |
| 3734 const intptr_t kNumInputs = 1; | |
| 3735 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1; | |
| 3736 LocationSummary* summary = new (zone) | |
| 3737 LocationSummary(zone, kNumInputs, kNumTemps, | |
| 3738 ValueFitsSmi() ? LocationSummary::kNoCall | |
| 3739 : LocationSummary::kCallOnSlowPath); | |
| 3740 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
| 3741 Location::RequiresRegister())); | |
| 3742 if (!ValueFitsSmi()) { | |
| 3743 summary->set_temp(0, Location::RequiresRegister()); | |
| 3744 } | |
| 3745 summary->set_out(0, Location::RequiresRegister()); | |
| 3746 return summary; | |
| 3747 } | |
| 3748 | |
| 3749 | |
| 3750 void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3751 if (ValueFitsSmi()) { | |
| 3752 PairLocation* value_pair = locs()->in(0).AsPairLocation(); | |
| 3753 Register value_lo = value_pair->At(0).reg(); | |
| 3754 Register out_reg = locs()->out(0).reg(); | |
| 3755 __ SmiTag(out_reg, value_lo); | |
| 3756 return; | |
| 3757 } | |
| 3758 | |
| 3759 PairLocation* value_pair = locs()->in(0).AsPairLocation(); | |
| 3760 Register value_lo = value_pair->At(0).reg(); | |
| 3761 Register value_hi = value_pair->At(1).reg(); | |
| 3762 Register tmp = locs()->temp(0).reg(); | |
| 3763 Register out_reg = locs()->out(0).reg(); | |
| 3764 | |
| 3765 Label not_smi, done; | |
| 3766 __ SmiTag(out_reg, value_lo); | |
| 3767 __ SmiUntag(tmp, out_reg); | |
| 3768 __ bne(tmp, value_lo, ¬_smi); | |
| 3769 __ delay_slot()->sra(tmp, out_reg, 31); | |
| 3770 __ beq(tmp, value_hi, &done); | |
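| // The value fits in a smi only if tagging the low word was lossless | |
| // and the high word equals the sign extension of the low word, which | |
| // the delay-slot sra above computed into tmp. | |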
| 3771 | |
| 3772 __ Bind(¬_smi); | |
| 3773 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), | |
| 3774 out_reg, tmp); | |
| 3775 __ StoreToOffset(value_lo, out_reg, Mint::value_offset() - kHeapObjectTag); | |
| 3776 __ StoreToOffset(value_hi, out_reg, | |
| 3777 Mint::value_offset() - kHeapObjectTag + kWordSize); | |
| 3778 __ Bind(&done); | |
| 3779 } | |
| 3780 | |
| 3781 | |
| 3782 LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone, | |
| 3783 bool opt) const { | |
| 3784 ASSERT((representation() == kUnboxedInt32) || | |
| 3785 (representation() == kUnboxedUint32)); | |
| 3786 const intptr_t kNumInputs = 1; | |
| 3787 const intptr_t kNumTemps = 0; | |
| 3788 LocationSummary* summary = new (zone) | |
| 3789 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 3790 summary->set_in(0, Location::RequiresRegister()); | |
| 3791 summary->set_out(0, Location::RequiresRegister()); | |
| 3792 return summary; | |
| 3793 } | |
| 3794 | |
| 3795 | |
| 3796 static void LoadInt32FromMint(FlowGraphCompiler* compiler, | |
| 3797 Register mint, | |
| 3798 Register result, | |
| 3799 Label* deopt) { | |
| 3800 __ LoadFieldFromOffset(result, mint, Mint::value_offset()); | |
| 3801 if (deopt != NULL) { | |
| 3802 __ LoadFieldFromOffset(CMPRES1, mint, Mint::value_offset() + kWordSize); | |
| 3803 __ sra(CMPRES2, result, kBitsPerWord - 1); | |
| 3804 __ BranchNotEqual(CMPRES1, CMPRES2, deopt); | |
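| // E.g. the mint 0x100000000 has high word 1 but a low-word sign | |
| // extension of 0, so it cannot be represented and deoptimizes. | |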
| 3805 } | |
| 3806 } | |
| 3807 | |
| 3808 | |
| 3809 void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3810 const intptr_t value_cid = value()->Type()->ToCid(); | |
| 3811 const Register value = locs()->in(0).reg(); | |
| 3812 const Register out = locs()->out(0).reg(); | |
| 3813 Label* deopt = | |
| 3814 CanDeoptimize() | |
| 3815 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger) | |
| 3816 : NULL; | |
| 3817 Label* out_of_range = !is_truncating() ? deopt : NULL; | |
| 3818 ASSERT(value != out); | |
| 3819 | |
| 3820 if (value_cid == kSmiCid) { | |
| 3821 __ SmiUntag(out, value); | |
| 3822 } else if (value_cid == kMintCid) { | |
| 3823 LoadInt32FromMint(compiler, value, out, out_of_range); | |
| 3824 } else if (!CanDeoptimize()) { | |
| 3825 Label done; | |
| 3826 __ SmiUntag(out, value); | |
| 3827 __ andi(CMPRES1, value, Immediate(kSmiTagMask)); | |
| 3828 __ beq(CMPRES1, ZR, &done); | |
| 3829 LoadInt32FromMint(compiler, value, out, NULL); | |
| 3830 __ Bind(&done); | |
| 3831 } else { | |
| 3832 Label done; | |
| 3833 __ SmiUntag(out, value); | |
| 3834 __ andi(CMPRES1, value, Immediate(kSmiTagMask)); | |
| 3835 __ beq(CMPRES1, ZR, &done); | |
| 3836 __ LoadClassId(CMPRES1, value); | |
| 3837 __ BranchNotEqual(CMPRES1, Immediate(kMintCid), deopt); | |
| 3838 LoadInt32FromMint(compiler, value, out, out_of_range); | |
| 3839 __ Bind(&done); | |
| 3840 } | |
| 3841 } | |
| 3842 | |
| 3843 | |
| 3844 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone, | |
| 3845 bool opt) const { | |
| 3846 const intptr_t kNumInputs = 2; | |
| 3847 const intptr_t kNumTemps = 0; | |
| 3848 LocationSummary* summary = new (zone) | |
| 3849 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 3850 summary->set_in(0, Location::RequiresFpuRegister()); | |
| 3851 summary->set_in(1, Location::RequiresFpuRegister()); | |
| 3852 summary->set_out(0, Location::RequiresFpuRegister()); | |
| 3853 return summary; | |
| 3854 } | |
| 3855 | |
| 3856 | |
| 3857 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3858 DRegister left = locs()->in(0).fpu_reg(); | |
| 3859 DRegister right = locs()->in(1).fpu_reg(); | |
| 3860 DRegister result = locs()->out(0).fpu_reg(); | |
| 3861 switch (op_kind()) { | |
| 3862 case Token::kADD: | |
| 3863 __ addd(result, left, right); | |
| 3864 break; | |
| 3865 case Token::kSUB: | |
| 3866 __ subd(result, left, right); | |
| 3867 break; | |
| 3868 case Token::kMUL: | |
| 3869 __ muld(result, left, right); | |
| 3870 break; | |
| 3871 case Token::kDIV: | |
| 3872 __ divd(result, left, right); | |
| 3873 break; | |
| 3874 default: | |
| 3875 UNREACHABLE(); | |
| 3876 } | |
| 3877 } | |
| 3878 | |
| 3879 | |
| 3880 LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone, | |
| 3881 bool opt) const { | |
| 3882 const intptr_t kNumInputs = 1; | |
| 3883 const intptr_t kNumTemps = 0; | |
| 3884 LocationSummary* summary = new (zone) | |
| 3885 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 3886 summary->set_in(0, Location::RequiresFpuRegister()); | |
| 3887 summary->set_out(0, Location::RequiresRegister()); | |
| 3888 return summary; | |
| 3889 } | |
| 3890 | |
| 3891 | |
| 3892 Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler, | |
| 3893 BranchLabels labels) { | |
| 3894 const DRegister value = locs()->in(0).fpu_reg(); | |
| 3895 const bool is_negated = kind() != Token::kEQ; | |
| 3896 if (op_kind() == MethodRecognizer::kDouble_getIsNaN) { | |
| 3897 __ cund(value, value); | |
| 3898 if (labels.fall_through == labels.true_label) { | |
| 3899 if (is_negated) { | |
| 3900 __ bc1t(labels.false_label); | |
| 3901 } else { | |
| 3902 __ bc1f(labels.false_label); | |
| 3903 } | |
| 3904 } else if (labels.fall_through == labels.false_label) { | |
| 3905 if (is_negated) { | |
| 3906 __ bc1f(labels.true_label); | |
| 3907 } else { | |
| 3908 __ bc1t(labels.true_label); | |
| 3909 } | |
| 3910 } else { | |
| 3911 if (is_negated) { | |
| 3912 __ bc1t(labels.false_label); | |
| 3913 } else { | |
| 3914 __ bc1f(labels.false_label); | |
| 3915 } | |
| 3916 __ b(labels.true_label); | |
| 3917 } | |
| 3918 return Condition(ZR, ZR, INVALID_RELATION); // Unused. | |
| 3919 } else { | |
| 3920 ASSERT(op_kind() == MethodRecognizer::kDouble_getIsInfinite); | |
| 3921 __ mfc1(CMPRES1, EvenFRegisterOf(value)); | |
| 3922 // If the low word isn't zero, then it isn't infinity. | |
| 3923 __ bne(CMPRES1, ZR, is_negated ? labels.true_label : labels.false_label); | |
| 3924 __ mfc1(CMPRES1, OddFRegisterOf(value)); | |
| 3925 // Mask off the sign bit. | |
| 3926 __ AndImmediate(CMPRES1, CMPRES1, 0x7FFFFFFF); | |
| 3927 // Compare with +infinity. | |
| 3928 __ LoadImmediate(CMPRES2, 0x7FF00000); | |
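| // IEEE 754 +/-infinity is 0x7FF0000000000000 with only the sign bit | |
| // differing, so after clearing the sign the high word must equal | |
| // 0x7FF00000 (the low word was checked to be zero above). | |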
| 3929 return Condition(CMPRES1, CMPRES2, is_negated ? NE : EQ); | |
| 3930 } | |
| 3931 } | |
| 3932 | |
| | |
| 3933 LocationSummary* BinaryFloat32x4OpInstr::MakeLocationSummary(Zone* zone, | |
| 3934 bool opt) const { | |
| 3935 UNIMPLEMENTED(); | |
| 3936 return NULL; | |
| 3937 } | |
| 3938 | |
| 3939 | |
| 3940 void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3941 UNIMPLEMENTED(); | |
| 3942 } | |
| 3943 | |
| 3944 | |
| 3945 LocationSummary* BinaryFloat64x2OpInstr::MakeLocationSummary(Zone* zone, | |
| 3946 bool opt) const { | |
| 3947 UNIMPLEMENTED(); | |
| 3948 return NULL; | |
| 3949 } | |
| 3950 | |
| 3951 | |
| 3952 void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3953 UNIMPLEMENTED(); | |
| 3954 } | |
| 3955 | |
| 3956 | |
| 3957 LocationSummary* Simd32x4ShuffleInstr::MakeLocationSummary(Zone* zone, | |
| 3958 bool opt) const { | |
| 3959 UNIMPLEMENTED(); | |
| 3960 return NULL; | |
| 3961 } | |
| 3962 | |
| 3963 | |
| 3964 void Simd32x4ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3965 UNIMPLEMENTED(); | |
| 3966 } | |
| 3967 | |
| 3968 | |
| 3969 LocationSummary* Simd32x4ShuffleMixInstr::MakeLocationSummary(Zone* zone, | |
| 3970 bool opt) const { | |
| 3971 UNIMPLEMENTED(); | |
| 3972 return NULL; | |
| 3973 } | |
| 3974 | |
| 3975 | |
| 3976 void Simd32x4ShuffleMixInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3977 UNIMPLEMENTED(); | |
| 3978 } | |
| 3979 | |
| 3980 | |
| 3981 LocationSummary* Float32x4ConstructorInstr::MakeLocationSummary( | |
| 3982 Zone* zone, | |
| 3983 bool opt) const { | |
| 3984 UNIMPLEMENTED(); | |
| 3985 return NULL; | |
| 3986 } | |
| 3987 | |
| 3988 | |
| 3989 void Float32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 3990 UNIMPLEMENTED(); | |
| 3991 } | |
| 3992 | |
| 3993 | |
| 3994 LocationSummary* Float32x4ZeroInstr::MakeLocationSummary(Zone* zone, | |
| 3995 bool opt) const { | |
| 3996 UNIMPLEMENTED(); | |
| 3997 return NULL; | |
| 3998 } | |
| 3999 | |
| 4000 | |
| 4001 void Float32x4ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4002 UNIMPLEMENTED(); | |
| 4003 } | |
| 4004 | |
| 4005 | |
| 4006 LocationSummary* Float32x4SplatInstr::MakeLocationSummary(Zone* zone, | |
| 4007 bool opt) const { | |
| 4008 UNIMPLEMENTED(); | |
| 4009 return NULL; | |
| 4010 } | |
| 4011 | |
| 4012 | |
| 4013 void Float32x4SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4014 UNIMPLEMENTED(); | |
| 4015 } | |
| 4016 | |
| 4017 | |
| 4018 LocationSummary* Float32x4ComparisonInstr::MakeLocationSummary(Zone* zone, | |
| 4019 bool opt) const { | |
| 4020 UNIMPLEMENTED(); | |
| 4021 return NULL; | |
| 4022 } | |
| 4023 | |
| 4024 | |
| 4025 void Float32x4ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4026 UNIMPLEMENTED(); | |
| 4027 } | |
| 4028 | |
| 4029 | |
| 4030 LocationSummary* Float32x4MinMaxInstr::MakeLocationSummary(Zone* zone, | |
| 4031 bool opt) const { | |
| 4032 UNIMPLEMENTED(); | |
| 4033 return NULL; | |
| 4034 } | |
| 4035 | |
| 4036 | |
| 4037 void Float32x4MinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4038 UNIMPLEMENTED(); | |
| 4039 } | |
| 4040 | |
| 4041 | |
| 4042 LocationSummary* Float32x4SqrtInstr::MakeLocationSummary(Zone* zone, | |
| 4043 bool opt) const { | |
| 4044 UNIMPLEMENTED(); | |
| 4045 return NULL; | |
| 4046 } | |
| 4047 | |
| 4048 | |
| 4049 void Float32x4SqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4050 UNIMPLEMENTED(); | |
| 4051 } | |
| 4052 | |
| 4053 | |
| 4054 LocationSummary* Float32x4ScaleInstr::MakeLocationSummary(Zone* zone, | |
| 4055 bool opt) const { | |
| 4056 UNIMPLEMENTED(); | |
| 4057 return NULL; | |
| 4058 } | |
| 4059 | |
| 4060 | |
| 4061 void Float32x4ScaleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4062 UNIMPLEMENTED(); | |
| 4063 } | |
| 4064 | |
| 4065 | |
| 4066 LocationSummary* Float32x4ZeroArgInstr::MakeLocationSummary(Zone* zone, | |
| 4067 bool opt) const { | |
| 4068 UNIMPLEMENTED(); | |
| 4069 return NULL; | |
| 4070 } | |
| 4071 | |
| 4072 | |
| 4073 void Float32x4ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4074 UNIMPLEMENTED(); | |
| 4075 } | |
| 4076 | |
| 4077 | |
| 4078 LocationSummary* Float32x4ClampInstr::MakeLocationSummary(Zone* zone, | |
| 4079 bool opt) const { | |
| 4080 UNIMPLEMENTED(); | |
| 4081 return NULL; | |
| 4082 } | |
| 4083 | |
| 4084 | |
| 4085 void Float32x4ClampInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4086 UNIMPLEMENTED(); | |
| 4087 } | |
| 4088 | |
| 4089 | |
| 4090 LocationSummary* Float32x4WithInstr::MakeLocationSummary(Zone* zone, | |
| 4091 bool opt) const { | |
| 4092 UNIMPLEMENTED(); | |
| 4093 return NULL; | |
| 4094 } | |
| 4095 | |
| 4096 | |
| 4097 void Float32x4WithInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4098 UNIMPLEMENTED(); | |
| 4099 } | |
| 4100 | |
| 4101 | |
| 4102 LocationSummary* Float32x4ToInt32x4Instr::MakeLocationSummary(Zone* zone, | |
| 4103 bool opt) const { | |
| 4104 UNIMPLEMENTED(); | |
| 4105 return NULL; | |
| 4106 } | |
| 4107 | |
| 4108 | |
| 4109 void Float32x4ToInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4110 UNIMPLEMENTED(); | |
| 4111 } | |
| 4112 | |
| 4113 | |
| 4114 LocationSummary* Simd64x2ShuffleInstr::MakeLocationSummary(Zone* zone, | |
| 4115 bool opt) const { | |
| 4116 UNIMPLEMENTED(); | |
| 4117 return NULL; | |
| 4118 } | |
| 4119 | |
| 4120 | |
| 4121 void Simd64x2ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4122 UNIMPLEMENTED(); | |
| 4123 } | |
| 4124 | |
| 4125 | |
| 4126 LocationSummary* Float64x2ZeroInstr::MakeLocationSummary(Zone* zone, | |
| 4127 bool opt) const { | |
| 4128 UNIMPLEMENTED(); | |
| 4129 return NULL; | |
| 4130 } | |
| 4131 | |
| 4132 | |
| 4133 void Float64x2ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4134 UNIMPLEMENTED(); | |
| 4135 } | |
| 4136 | |
| 4137 | |
| 4138 LocationSummary* Float64x2SplatInstr::MakeLocationSummary(Zone* zone, | |
| 4139 bool opt) const { | |
| 4140 UNIMPLEMENTED(); | |
| 4141 return NULL; | |
| 4142 } | |
| 4143 | |
| 4144 | |
| 4145 void Float64x2SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4146 UNIMPLEMENTED(); | |
| 4147 } | |
| 4148 | |
| 4149 | |
| 4150 LocationSummary* Float64x2ConstructorInstr::MakeLocationSummary( | |
| 4151 Zone* zone, | |
| 4152 bool opt) const { | |
| 4153 UNIMPLEMENTED(); | |
| 4154 return NULL; | |
| 4155 } | |
| 4156 | |
| 4157 | |
| 4158 void Float64x2ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4159 UNIMPLEMENTED(); | |
| 4160 } | |
| 4161 | |
| 4162 | |
| 4163 LocationSummary* Float64x2ToFloat32x4Instr::MakeLocationSummary( | |
| 4164 Zone* zone, | |
| 4165 bool opt) const { | |
| 4166 UNIMPLEMENTED(); | |
| 4167 return NULL; | |
| 4168 } | |
| 4169 | |
| 4170 | |
| 4171 void Float64x2ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4172 UNIMPLEMENTED(); | |
| 4173 } | |
| 4174 | |
| 4175 | |
| 4176 LocationSummary* Float32x4ToFloat64x2Instr::MakeLocationSummary( | |
| 4177 Zone* zone, | |
| 4178 bool opt) const { | |
| 4179 UNIMPLEMENTED(); | |
| 4180 return NULL; | |
| 4181 } | |
| 4182 | |
| 4183 | |
| 4184 void Float32x4ToFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4185 UNIMPLEMENTED(); | |
| 4186 } | |
| 4187 | |
| 4188 | |
| 4189 LocationSummary* Float64x2ZeroArgInstr::MakeLocationSummary(Zone* zone, | |
| 4190 bool opt) const { | |
| 4191 UNIMPLEMENTED(); | |
| 4192 return NULL; | |
| 4193 } | |
| 4194 | |
| 4195 | |
| 4196 void Float64x2ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4197 UNIMPLEMENTED(); | |
| 4198 } | |
| 4199 | |
| 4200 | |
| 4201 LocationSummary* Float64x2OneArgInstr::MakeLocationSummary(Zone* zone, | |
| 4202 bool opt) const { | |
| 4203 UNIMPLEMENTED(); | |
| 4204 return NULL; | |
| 4205 } | |
| 4206 | |
| 4207 | |
| 4208 void Float64x2OneArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4209 UNIMPLEMENTED(); | |
| 4210 } | |
| 4211 | |
| 4212 | |
| 4213 LocationSummary* Int32x4ConstructorInstr::MakeLocationSummary(Zone* zone, | |
| 4214 bool opt) const { | |
| 4215 UNIMPLEMENTED(); | |
| 4216 return NULL; | |
| 4217 } | |
| 4218 | |
| 4219 | |
| 4220 void Int32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4221 UNIMPLEMENTED(); | |
| 4222 } | |
| 4223 | |
| 4224 | |
| 4225 LocationSummary* Int32x4BoolConstructorInstr::MakeLocationSummary( | |
| 4226 Zone* zone, | |
| 4227 bool opt) const { | |
| 4228 UNIMPLEMENTED(); | |
| 4229 return NULL; | |
| 4230 } | |
| 4231 | |
| 4232 | |
| 4233 void Int32x4BoolConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4234 UNIMPLEMENTED(); | |
| 4235 } | |
| 4236 | |
| 4237 | |
| 4238 LocationSummary* Int32x4GetFlagInstr::MakeLocationSummary(Zone* zone, | |
| 4239 bool opt) const { | |
| 4240 UNIMPLEMENTED(); | |
| 4241 return NULL; | |
| 4242 } | |
| 4243 | |
| 4244 | |
| 4245 void Int32x4GetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4246 UNIMPLEMENTED(); | |
| 4247 } | |
| 4248 | |
| 4249 | |
| 4250 LocationSummary* Simd32x4GetSignMaskInstr::MakeLocationSummary(Zone* zone, | |
| 4251 bool opt) const { | |
| 4252 UNIMPLEMENTED(); | |
| 4253 return NULL; | |
| 4254 } | |
| 4255 | |
| 4256 | |
| 4257 void Simd32x4GetSignMaskInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4258 UNIMPLEMENTED(); | |
| 4259 } | |
| 4260 | |
| 4261 | |
| 4262 LocationSummary* Int32x4SelectInstr::MakeLocationSummary(Zone* zone, | |
| 4263 bool opt) const { | |
| 4264 UNIMPLEMENTED(); | |
| 4265 return NULL; | |
| 4266 } | |
| 4267 | |
| 4268 | |
| 4269 void Int32x4SelectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4270 UNIMPLEMENTED(); | |
| 4271 } | |
| 4272 | |
| 4273 | |
| 4274 LocationSummary* Int32x4SetFlagInstr::MakeLocationSummary(Zone* zone, | |
| 4275 bool opt) const { | |
| 4276 UNIMPLEMENTED(); | |
| 4277 return NULL; | |
| 4278 } | |
| 4279 | |
| 4280 | |
| 4281 void Int32x4SetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4282 UNIMPLEMENTED(); | |
| 4283 } | |
| 4284 | |
| 4285 | |
| 4286 LocationSummary* Int32x4ToFloat32x4Instr::MakeLocationSummary(Zone* zone, | |
| 4287 bool opt) const { | |
| 4288 UNIMPLEMENTED(); | |
| 4289 return NULL; | |
| 4290 } | |
| 4291 | |
| 4292 | |
| 4293 void Int32x4ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4294 UNIMPLEMENTED(); | |
| 4295 } | |
| 4296 | |
| 4297 | |
| 4298 LocationSummary* BinaryInt32x4OpInstr::MakeLocationSummary(Zone* zone, | |
| 4299 bool opt) const { | |
| 4300 UNIMPLEMENTED(); | |
| 4301 return NULL; | |
| 4302 } | |
| 4303 | |
| 4304 | |
| 4305 void BinaryInt32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4306 UNIMPLEMENTED(); | |
| 4307 } | |
| 4308 | |
| 4309 | |
| 4310 LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone, | |
| 4311 bool opt) const { | |
| 4312 ASSERT((kind() == MathUnaryInstr::kSqrt) || | |
| 4313 (kind() == MathUnaryInstr::kDoubleSquare)); | |
| 4314 const intptr_t kNumInputs = 1; | |
| 4315 const intptr_t kNumTemps = 0; | |
| 4316 LocationSummary* summary = new (zone) | |
| 4317 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4318 summary->set_in(0, Location::RequiresFpuRegister()); | |
| 4319 summary->set_out(0, Location::RequiresFpuRegister()); | |
| 4320 return summary; | |
| 4321 } | |
| 4322 | |
| 4323 | |
| 4324 void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4325 if (kind() == MathUnaryInstr::kSqrt) { | |
| 4326 __ sqrtd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg()); | |
| 4327 } else if (kind() == MathUnaryInstr::kDoubleSquare) { | |
| 4328 DRegister val = locs()->in(0).fpu_reg(); | |
| 4329 DRegister result = locs()->out(0).fpu_reg(); | |
| 4330 __ muld(result, val, val); | |
| 4331 } else { | |
| 4332 UNREACHABLE(); | |
| 4333 } | |
| 4334 } | |
| 4335 | |
| 4336 | |
| 4337 LocationSummary* CaseInsensitiveCompareUC16Instr::MakeLocationSummary( | |
| 4338 Zone* zone, | |
| 4339 bool opt) const { | |
| 4340 const intptr_t kNumTemps = 0; | |
| 4341 LocationSummary* summary = new (zone) | |
| 4342 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); | |
| 4343 summary->set_in(0, Location::RegisterLocation(A0)); | |
| 4344 summary->set_in(1, Location::RegisterLocation(A1)); | |
| 4345 summary->set_in(2, Location::RegisterLocation(A2)); | |
| 4346 summary->set_in(3, Location::RegisterLocation(A3)); | |
| 4347 summary->set_out(0, Location::RegisterLocation(V0)); | |
| 4348 return summary; | |
| 4349 } | |
| 4350 | |
| 4351 | |
| 4352 void CaseInsensitiveCompareUC16Instr::EmitNativeCode( | |
| 4353 FlowGraphCompiler* compiler) { | |
| 4354 // Call the function. | |
| 4355 __ CallRuntime(TargetFunction(), TargetFunction().argument_count()); | |
| 4356 } | |
| 4357 | |
| 4358 | |
| 4359 LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone, | |
| 4360 bool opt) const { | |
| 4361 if (result_cid() == kDoubleCid) { | |
| 4362 const intptr_t kNumInputs = 2; | |
| 4363 const intptr_t kNumTemps = 1; | |
| 4364 LocationSummary* summary = new (zone) | |
| 4365 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4366 summary->set_in(0, Location::RequiresFpuRegister()); | |
| 4367 summary->set_in(1, Location::RequiresFpuRegister()); | |
| 4368 // Reuse the left register so that code can be made shorter. | |
| 4369 summary->set_out(0, Location::SameAsFirstInput()); | |
| 4370 summary->set_temp(0, Location::RequiresRegister()); | |
| 4371 return summary; | |
| 4372 } | |
| 4373 ASSERT(result_cid() == kSmiCid); | |
| 4374 const intptr_t kNumInputs = 2; | |
| 4375 const intptr_t kNumTemps = 0; | |
| 4376 LocationSummary* summary = new (zone) | |
| 4377 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4378 summary->set_in(0, Location::RequiresRegister()); | |
| 4379 summary->set_in(1, Location::RequiresRegister()); | |
| 4380 // Reuse the left register so that code can be made shorter. | |
| 4381 summary->set_out(0, Location::SameAsFirstInput()); | |
| 4382 return summary; | |
| 4383 } | |
| 4384 | |
| 4385 | |
| 4386 void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4387 ASSERT((op_kind() == MethodRecognizer::kMathMin) || | |
| 4388 (op_kind() == MethodRecognizer::kMathMax)); | |
| 4389 const intptr_t is_min = (op_kind() == MethodRecognizer::kMathMin); | |
| 4390 if (result_cid() == kDoubleCid) { | |
| 4391 Label done, returns_nan, are_equal; | |
| 4392 DRegister left = locs()->in(0).fpu_reg(); | |
| 4393 DRegister right = locs()->in(1).fpu_reg(); | |
| 4394 DRegister result = locs()->out(0).fpu_reg(); | |
| 4395 Register temp = locs()->temp(0).reg(); | |
| 4396 __ cund(left, right); | |
| 4397 __ bc1t(&returns_nan); | |
| 4398 __ ceqd(left, right); | |
| 4399 __ bc1t(&are_equal); | |
| 4400 if (is_min) { | |
| 4401 __ coltd(left, right); | |
| 4402 } else { | |
| 4403 __ coltd(right, left); | |
| 4404 } | |
| 4405 // TODO(zra): Add conditional moves. | |
| 4406 ASSERT(left == result); | |
| 4407 __ bc1t(&done); | |
| 4408 __ movd(result, right); | |
| 4409 __ b(&done); | |
| 4410 | |
| 4411 __ Bind(&returns_nan); | |
| 4412 __ LoadImmediate(result, NAN); | |
| 4413 __ b(&done); | |
| 4414 | |
| 4415 __ Bind(&are_equal); | |
| 4416 Label left_is_negative; | |
| 4417 // Check for negative zero: -0.0 compares equal to 0.0, but min must | |
| 4418 // return -0.0 and max must return 0.0. | |
| 4419 // Disambiguate via the sign bit of the left operand: | |
| 4420 // - min -> left is negative ? left : right. | |
| 4421 // - max -> left is negative ? right : left. | |
| 4422 // The sign bit of 'left' is bit 63, i.e. bit 31 of the odd register. | |
| 4423 __ mfc1(temp, OddFRegisterOf(left)); // Moves bits 32...63 of left to temp. | |
| 4424 if (is_min) { | |
| 4425 ASSERT(left == result); | |
| 4426 __ bltz(temp, &done); // Left is negative. | |
| 4427 } else { | |
| 4428 __ bgez(temp, &done); // Left is positive. | |
| 4429 } | |
| 4430 __ movd(result, right); | |
| 4431 __ Bind(&done); | |
| 4432 return; | |
| 4433 } | |
| 4434 | |
| 4435 Label done; | |
| 4436 ASSERT(result_cid() == kSmiCid); | |
| 4437 Register left = locs()->in(0).reg(); | |
| 4438 Register right = locs()->in(1).reg(); | |
| 4439 Register result = locs()->out(0).reg(); | |
| 4440 ASSERT(result == left); | |
| 4441 if (is_min) { | |
| 4442 __ BranchSignedLessEqual(left, right, &done); | |
| 4443 } else { | |
| 4444 __ BranchSignedGreaterEqual(left, right, &done); | |
| 4445 } | |
| 4446 __ mov(result, right); | |
| 4447 __ Bind(&done); | |
| 4448 } | |
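| | |
| // Editorial sketch (not part of the original source): the double min/max | |
| // semantics the code above implements, in portable C++. Assumes <cmath> | |
| // for std::isnan/std::signbit; the helper name is illustrative only. | |
| static double DoubleMinMaxModel(double left, double right, bool is_min) { | |
|   if (std::isnan(left) || std::isnan(right)) return NAN; | |
|   if (left == right) { | |
|     // -0.0 == 0.0, so use the sign bit: min prefers the negative zero, | |
|     // max prefers the positive zero. | |
|     return (std::signbit(left) == is_min) ? left : right; | |
|   } | |
|   return ((left < right) == is_min) ? left : right; | |
| } | |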
| 4449 | |
| 4450 | |
| 4451 LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone, | |
| 4452 bool opt) const { | |
| 4453 const intptr_t kNumInputs = 1; | |
| 4454 const intptr_t kNumTemps = 0; | |
| 4455 LocationSummary* summary = new (zone) | |
| 4456 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4457 summary->set_in(0, Location::RequiresRegister()); | |
| 4458 // We can use 3-operand instructions here, so the result register need | |
| 4459 // not be identical to the first input register, as it must be on Intel. | |
| 4460 summary->set_out(0, Location::RequiresRegister()); | |
| 4461 return summary; | |
| 4462 } | |
| 4463 | |
| 4464 | |
| 4465 void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4466 Register value = locs()->in(0).reg(); | |
| 4467 Register result = locs()->out(0).reg(); | |
| 4468 switch (op_kind()) { | |
| 4469 case Token::kNEGATE: { | |
| 4470 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp); | |
| 4471 __ SubuDetectOverflow(result, ZR, value, CMPRES1); | |
| 4472 __ bltz(CMPRES1, deopt); | |
| 4473 break; | |
| 4474 } | |
| 4475 case Token::kBIT_NOT: | |
| 4476 __ nor(result, value, ZR); | |
| 4477 __ addiu(result, result, Immediate(-1)); // Remove inverted smi-tag. | |
| 4478 break; | |
| 4479 default: | |
| 4480 UNREACHABLE(); | |
| 4481 } | |
| 4482 } | |
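| | |
| // Editorial sketch (not part of the original source): why kBIT_NOT needs | |
| // the trailing addiu. For a tagged Smi v == 2 * x, ~v == -2 * x - 1, so | |
| // subtracting one more yields -2 * (x + 1) == 2 * ~x, i.e. the tagged | |
| // value of ~x. The helper name is illustrative only. | |
| static int32_t TaggedSmiBitNot(int32_t tagged) { | |
|   return ~tagged - 1;  // Equals (~(tagged >> 1)) << 1 for Smi-tagged input. | |
| } | |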
| 4483 | |
| 4484 | |
| 4485 LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone, | |
| 4486 bool opt) const { | |
| 4487 const intptr_t kNumInputs = 1; | |
| 4488 const intptr_t kNumTemps = 0; | |
| 4489 LocationSummary* summary = new (zone) | |
| 4490 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4491 summary->set_in(0, Location::RequiresFpuRegister()); | |
| 4492 summary->set_out(0, Location::RequiresFpuRegister()); | |
| 4493 return summary; | |
| 4494 } | |
| 4495 | |
| 4496 | |
| 4497 void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4498 FpuRegister result = locs()->out(0).fpu_reg(); | |
| 4499 FpuRegister value = locs()->in(0).fpu_reg(); | |
| 4500 __ negd(result, value); | |
| 4501 } | |
| 4502 | |
| 4503 | |
| 4504 LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone, | |
| 4505 bool opt) const { | |
| 4506 const intptr_t kNumInputs = 1; | |
| 4507 const intptr_t kNumTemps = 0; | |
| 4508 LocationSummary* result = new (zone) | |
| 4509 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4510 result->set_in(0, Location::RequiresRegister()); | |
| 4511 result->set_out(0, Location::RequiresFpuRegister()); | |
| 4512 return result; | |
| 4513 } | |
| 4514 | |
| 4515 | |
| 4516 void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4517 Register value = locs()->in(0).reg(); | |
| 4518 FpuRegister result = locs()->out(0).fpu_reg(); | |
| 4519 __ mtc1(value, STMP1); | |
| 4520 __ cvtdw(result, STMP1); | |
| 4521 } | |
| 4522 | |
| 4523 | |
| 4524 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone, | |
| 4525 bool opt) const { | |
| 4526 const intptr_t kNumInputs = 1; | |
| 4527 const intptr_t kNumTemps = 0; | |
| 4528 LocationSummary* result = new (zone) | |
| 4529 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4530 result->set_in(0, Location::RequiresRegister()); | |
| 4531 result->set_out(0, Location::RequiresFpuRegister()); | |
| 4532 return result; | |
| 4533 } | |
| 4534 | |
| 4535 | |
| 4536 void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4537 Register value = locs()->in(0).reg(); | |
| 4538 FpuRegister result = locs()->out(0).fpu_reg(); | |
| 4539 __ SmiUntag(TMP, value); | |
| 4540 __ mtc1(TMP, STMP1); | |
| 4541 __ cvtdw(result, STMP1); | |
| 4542 } | |
| 4543 | |
| 4544 | |
| 4545 LocationSummary* MintToDoubleInstr::MakeLocationSummary(Zone* zone, | |
| 4546 bool opt) const { | |
| 4547 UNIMPLEMENTED(); | |
| 4548 return NULL; | |
| 4549 } | |
| 4550 | |
| 4551 | |
| 4552 void MintToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4553 UNIMPLEMENTED(); | |
| 4554 } | |
| 4555 | |
| 4556 | |
| 4557 LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone, | |
| 4558 bool opt) const { | |
| 4559 const intptr_t kNumInputs = 1; | |
| 4560 const intptr_t kNumTemps = 0; | |
| 4561 LocationSummary* result = new (zone) | |
| 4562 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall); | |
| 4563 result->set_in(0, Location::RegisterLocation(T1)); | |
| 4564 result->set_out(0, Location::RegisterLocation(V0)); | |
| 4565 return result; | |
| 4566 } | |
| 4567 | |
| 4568 | |
| 4569 void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4570 Register result = locs()->out(0).reg(); | |
| 4571 Register value_obj = locs()->in(0).reg(); | |
| 4572 ASSERT(result == V0); | |
| 4573 ASSERT(result != value_obj); | |
| 4574 __ LoadDFromOffset(DTMP, value_obj, Double::value_offset() - kHeapObjectTag); | |
| 4575 __ truncwd(STMP1, DTMP); | |
| 4576 __ mfc1(result, STMP1); | |
| 4577 | |
| 4578 // Overflow is signaled with minint. | |
| 4579 Label do_call, done; | |
| 4580 // Check for overflow and that it fits into Smi. | |
| 4581 __ LoadImmediate(TMP, 0xC0000000); | |
| 4582 __ subu(CMPRES1, result, TMP); | |
| 4583 __ bltz(CMPRES1, &do_call); | |
| 4584 __ SmiTag(result); | |
| 4585 __ b(&done); | |
| 4586 __ Bind(&do_call); | |
| 4587 __ Push(value_obj); | |
| 4588 ASSERT(instance_call()->HasICData()); | |
| 4589 const ICData& ic_data = *instance_call()->ic_data(); | |
| 4590 ASSERT(ic_data.NumberOfChecksIs(1)); | |
| 4591 const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0)); | |
| 4592 const int kTypeArgsLen = 0; | |
| 4593 const int kNumberOfArguments = 1; | |
| 4594 const Array& kNoArgumentNames = Object::null_array(); | |
| 4595 ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kNoArgumentNames); | |
| 4596 compiler->GenerateStaticCall(deopt_id(), instance_call()->token_pos(), target, | |
| 4597 args_info, locs(), ICData::Handle()); | |
| 4598 __ Bind(&done); | |
| 4599 } | |
| 4600 | |
| 4601 | |
| 4602 LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone, | |
| 4603 bool opt) const { | |
| 4604 const intptr_t kNumInputs = 1; | |
| 4605 const intptr_t kNumTemps = 0; | |
| 4606 LocationSummary* result = new (zone) | |
| 4607 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4608 result->set_in(0, Location::RequiresFpuRegister()); | |
| 4609 result->set_out(0, Location::RequiresRegister()); | |
| 4610 return result; | |
| 4611 } | |
| 4612 | |
| 4613 | |
| 4614 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4615 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi); | |
| 4616 Register result = locs()->out(0).reg(); | |
| 4617 DRegister value = locs()->in(0).fpu_reg(); | |
| 4618 __ truncwd(STMP1, value); | |
| 4619 __ mfc1(result, STMP1); | |
| 4620 | |
| 4621 // Check for overflow and that it fits into Smi. | |
| 4622 __ LoadImmediate(TMP, 0xC0000000); | |
| 4623 __ subu(CMPRES1, result, TMP); | |
| 4624 __ bltz(CMPRES1, deopt); | |
| 4625 __ SmiTag(result); | |
| 4626 } | |
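| | |
| // Editorial sketch (not part of the original source): the Smi range check | |
| // used by DoubleToInteger and DoubleToSmi above. Subtracting 0xC0000000 | |
| // (i.e. adding 0x40000000 with 32-bit wrap-around) maps exactly the values | |
| // that fit in a 31-bit Smi onto the non-negative int32 range, so a single | |
| // bltz catches both overflow directions. The helper name is illustrative. | |
| static bool FitsInSmi31(int32_t value) { | |
|   const uint32_t shifted = static_cast<uint32_t>(value) + 0x40000000u; | |
|   return static_cast<int32_t>(shifted) >= 0; | |
| } | |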
| 4627 | |
| 4628 | |
| 4629 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone, | |
| 4630 bool opt) const { | |
| 4631 UNIMPLEMENTED(); | |
| 4632 return NULL; | |
| 4633 } | |
| 4634 | |
| 4635 | |
| 4636 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4637 UNIMPLEMENTED(); | |
| 4638 } | |
| 4639 | |
| 4640 | |
| 4641 LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone, | |
| 4642 bool opt) const { | |
| 4643 const intptr_t kNumInputs = 1; | |
| 4644 const intptr_t kNumTemps = 0; | |
| 4645 LocationSummary* result = new (zone) | |
| 4646 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4647 result->set_in(0, Location::RequiresFpuRegister()); | |
| 4648 result->set_out(0, Location::SameAsFirstInput()); | |
| 4649 return result; | |
| 4650 } | |
| 4651 | |
| 4652 | |
| 4653 void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4654 DRegister value = locs()->in(0).fpu_reg(); | |
| 4655 FRegister result = EvenFRegisterOf(locs()->out(0).fpu_reg()); | |
| 4656 __ cvtsd(result, value); | |
| 4657 } | |
| 4658 | |
| 4659 | |
| 4660 LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone, | |
| 4661 bool opt) const { | |
| 4662 const intptr_t kNumInputs = 1; | |
| 4663 const intptr_t kNumTemps = 0; | |
| 4664 LocationSummary* result = new (zone) | |
| 4665 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4666 result->set_in(0, Location::RequiresFpuRegister()); | |
| 4667 result->set_out(0, Location::SameAsFirstInput()); | |
| 4668 return result; | |
| 4669 } | |
| 4670 | |
| 4671 | |
| 4672 void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4673 FRegister value = EvenFRegisterOf(locs()->in(0).fpu_reg()); | |
| 4674 DRegister result = locs()->out(0).fpu_reg(); | |
| 4675 __ cvtds(result, value); | |
| 4676 } | |
| 4677 | |
| 4678 | |
| 4679 LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone, | |
| 4680 bool opt) const { | |
| 4681 // Calling convention on MIPS uses D6 and D7 to pass the first two | |
| 4682 // double arguments. | |
| 4683 ASSERT((InputCount() == 1) || (InputCount() == 2)); | |
| 4684 const intptr_t kNumTemps = 0; | |
| 4685 LocationSummary* result = new (zone) | |
| 4686 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall); | |
| 4687 result->set_in(0, Location::FpuRegisterLocation(D6)); | |
| 4688 if (InputCount() == 2) { | |
| 4689 result->set_in(1, Location::FpuRegisterLocation(D7)); | |
| 4690 } | |
| 4691 result->set_out(0, Location::FpuRegisterLocation(D0)); | |
| 4692 return result; | |
| 4693 } | |
| 4694 | |
| 4695 | |
| 4696 // Pseudo code: | |
| 4697 // if (exponent == 0.0) return 1.0; | |
| 4698 // // Speed up simple cases. | |
| 4699 // if (exponent == 1.0) return base; | |
| 4700 // if (exponent == 2.0) return base * base; | |
| 4701 // if (exponent == 3.0) return base * base * base; | |
| 4702 // if (base == 1.0) return 1.0; | |
| 4703 // if (base.isNaN || exponent.isNaN) { | |
| 4704 // return double.NAN; | |
| 4705 // } | |
| 4706 // if (base != -Infinity && exponent == 0.5) { | |
| 4707 // if (base == 0.0) return 0.0; | |
| 4708 // return sqrt(base); | |
| 4709 // } | |
| 4710 // TODO(srdjan): Move into a stub? | |
| 4711 static void InvokeDoublePow(FlowGraphCompiler* compiler, | |
| 4712 InvokeMathCFunctionInstr* instr) { | |
| 4713 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow); | |
| 4714 const intptr_t kInputCount = 2; | |
| 4715 ASSERT(instr->InputCount() == kInputCount); | |
| 4716 LocationSummary* locs = instr->locs(); | |
| 4717 | |
| 4718 DRegister base = locs->in(0).fpu_reg(); | |
| 4719 DRegister exp = locs->in(1).fpu_reg(); | |
| 4720 DRegister result = locs->out(0).fpu_reg(); | |
| 4721 | |
| 4722 Label check_base, skip_call; | |
| 4723 __ LoadImmediate(DTMP, 0.0); | |
| 4724 __ LoadImmediate(result, 1.0); | |
| 4725 // exponent == 0.0 -> return 1.0; | |
| 4726 __ cund(exp, exp); | |
| 4727 __ bc1t(&check_base); // NaN -> check base. | |
| 4728 __ ceqd(exp, DTMP); | |
| 4729 __ bc1t(&skip_call); // exp is 0.0, result is 1.0. | |
| 4730 | |
| 4731 // exponent == 1.0 ? | |
| 4732 __ ceqd(exp, result); | |
| 4733 Label return_base; | |
| 4734 __ bc1t(&return_base); | |
| 4735 // exponent == 2.0 ? | |
| 4736 __ LoadImmediate(DTMP, 2.0); | |
| 4737 __ ceqd(exp, DTMP); | |
| 4738 Label return_base_times_2; | |
| 4739 __ bc1t(&return_base_times_2); | |
| 4740 // exponent == 3.0 ? | |
| 4741 __ LoadImmediate(DTMP, 3.0); | |
| 4742 __ ceqd(exp, DTMP); | |
| 4743 __ bc1f(&check_base); | |
| 4744 | |
| 4745 // Fall through: exponent == 3.0, so result = base * base * base. | |
| 4746 __ muld(result, base, base); | |
| 4747 __ muld(result, result, base); | |
| 4748 __ b(&skip_call); | |
| 4749 | |
| 4750 __ Bind(&return_base); | |
| 4751 __ movd(result, base); | |
| 4752 __ b(&skip_call); | |
| 4753 | |
| 4754 __ Bind(&return_base_times_2); | |
| 4755 __ muld(result, base, base); // result = base ^ 2. | |
| 4756 __ b(&skip_call); | |
| 4757 | |
| 4758 __ Bind(&check_base); | |
| 4759 // Note: 'exp' could be NaN. | |
| 4760 // base == 1.0 -> return 1.0; | |
| 4761 __ cund(base, base); | |
| 4762 Label return_nan; | |
| 4763 __ bc1t(&return_nan); | |
| 4764 __ ceqd(base, result); | |
| 4765 __ bc1t(&skip_call); // base and result are 1.0. | |
| 4766 | |
| 4767 __ cund(exp, exp); | |
| 4768 Label try_sqrt; | |
| 4769 __ bc1f(&try_sqrt); // Neither 'exp' nor 'base' are NaN. | |
| 4770 | |
| 4771 __ Bind(&return_nan); | |
| 4772 __ LoadImmediate(result, NAN); | |
| 4773 __ b(&skip_call); | |
| 4774 | |
| 4775 __ Bind(&try_sqrt); | |
| 4776 // Before calling pow, check if we could use sqrt instead of pow. | |
| 4777 __ LoadImmediate(result, kNegInfinity); | |
| 4778 // base == -Infinity -> call pow; | |
| 4779 __ ceqd(base, result); | |
| 4780 Label do_pow; | |
| 4781 __ bc1t(&do_pow); | |
| 4782 | |
| 4783 // exponent == 0.5 ? | |
| 4784 __ LoadImmediate(result, 0.5); | |
| 4785 __ ceqd(exp, result); | |
| 4786 __ bc1f(&do_pow); | |
| 4787 | |
| 4788 // base == 0 -> return 0; | |
| 4789 __ LoadImmediate(DTMP, 0.0); | |
| 4790 __ ceqd(base, DTMP); | |
| 4791 Label return_zero; | |
| 4792 __ bc1t(&return_zero); | |
| 4793 | |
| 4794 __ sqrtd(result, base); | |
| 4795 __ b(&skip_call); | |
| 4796 | |
| 4797 __ Bind(&return_zero); | |
| 4798 __ movd(result, DTMP); | |
| 4799 __ b(&skip_call); | |
| 4800 | |
| 4801 __ Bind(&do_pow); | |
| 4802 | |
| 4803 // Double values are passed in and returned from MIPS FPU registers. | |
| 4804 __ CallRuntime(instr->TargetFunction(), kInputCount); | |
| 4805 __ Bind(&skip_call); | |
| 4806 } | |
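| | |
| // Editorial sketch (not part of the original source): the fast paths of | |
| // InvokeDoublePow above in portable C++, following the same branch order. | |
| // Assumes <cmath>; the helper name is illustrative only. | |
| static double DoublePowModel(double base, double exp) { | |
|   if (!std::isnan(exp)) { | |
|     if (exp == 0.0) return 1.0; | |
|     if (exp == 1.0) return base; | |
|     if (exp == 2.0) return base * base; | |
|     if (exp == 3.0) return base * base * base; | |
|   } | |
|   if (std::isnan(base)) return NAN; | |
|   if (base == 1.0) return 1.0; | |
|   if (std::isnan(exp)) return NAN; | |
|   if (base != -INFINITY && exp == 0.5) { | |
|     return (base == 0.0) ? 0.0 : std::sqrt(base); | |
|   } | |
|   return std::pow(base, exp);  // The runtime call in the generated code. | |
| } | |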
| 4807 | |
| 4808 | |
| 4809 void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4810 // For pow-function return NaN if exponent is NaN. | |
| 4811 if (recognized_kind() == MethodRecognizer::kMathDoublePow) { | |
| 4812 InvokeDoublePow(compiler, this); | |
| 4813 return; | |
| 4814 } | |
| 4815 // Double values are passed in and returned from MIPS FPU registers. | |
| 4816 __ CallRuntime(TargetFunction(), InputCount()); | |
| 4817 } | |
| 4818 | |
| 4819 | |
| 4820 LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone, | |
| 4821 bool opt) const { | |
| 4822 // Only use this instruction in optimized code. | |
| 4823 ASSERT(opt); | |
| 4824 const intptr_t kNumInputs = 1; | |
| 4825 LocationSummary* summary = | |
| 4826 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall); | |
| 4827 if (representation() == kUnboxedDouble) { | |
| 4828 if (index() == 0) { | |
| 4829 summary->set_in( | |
| 4830 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any())); | |
| 4831 } else { | |
| 4832 ASSERT(index() == 1); | |
| 4833 summary->set_in( | |
| 4834 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister())); | |
| 4835 } | |
| 4836 summary->set_out(0, Location::RequiresFpuRegister()); | |
| 4837 } else { | |
| 4838 ASSERT(representation() == kTagged); | |
| 4839 if (index() == 0) { | |
| 4840 summary->set_in( | |
| 4841 0, Location::Pair(Location::RequiresRegister(), Location::Any())); | |
| 4842 } else { | |
| 4843 ASSERT(index() == 1); | |
| 4844 summary->set_in( | |
| 4845 0, Location::Pair(Location::Any(), Location::RequiresRegister())); | |
| 4846 } | |
| 4847 summary->set_out(0, Location::RequiresRegister()); | |
| 4848 } | |
| 4849 return summary; | |
| 4850 } | |
| 4851 | |
| 4852 | |
| 4853 void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4854 ASSERT(locs()->in(0).IsPairLocation()); | |
| 4855 PairLocation* pair = locs()->in(0).AsPairLocation(); | |
| 4856 Location in_loc = pair->At(index()); | |
| 4857 if (representation() == kUnboxedDouble) { | |
| 4858 DRegister out = locs()->out(0).fpu_reg(); | |
| 4859 DRegister in = in_loc.fpu_reg(); | |
| 4860 __ movd(out, in); | |
| 4861 } else { | |
| 4862 ASSERT(representation() == kTagged); | |
| 4863 Register out = locs()->out(0).reg(); | |
| 4864 Register in = in_loc.reg(); | |
| 4865 __ mov(out, in); | |
| 4866 } | |
| 4867 } | |
| 4868 | |
| 4869 | |
| 4870 LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone, | |
| 4871 bool opt) const { | |
| 4872 const intptr_t kNumInputs = 2; | |
| 4873 const intptr_t kNumTemps = 1; | |
| 4874 LocationSummary* summary = new (zone) | |
| 4875 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4876 summary->set_in(0, Location::RequiresRegister()); | |
| 4877 summary->set_in(1, Location::RequiresRegister()); | |
| 4878 summary->set_temp(0, Location::RequiresRegister()); | |
| 4879 // Output is a pair of registers. | |
| 4880 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
| 4881 Location::RequiresRegister())); | |
| 4882 return summary; | |
| 4883 } | |
| 4884 | |
| 4885 | |
| 4886 void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4887 ASSERT(CanDeoptimize()); | |
| 4888 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); | |
| 4889 Register left = locs()->in(0).reg(); | |
| 4890 Register right = locs()->in(1).reg(); | |
| 4891 Register temp = locs()->temp(0).reg(); | |
| 4892 ASSERT(locs()->out(0).IsPairLocation()); | |
| 4893 PairLocation* pair = locs()->out(0).AsPairLocation(); | |
| 4894 Register result_div = pair->At(0).reg(); | |
| 4895 Register result_mod = pair->At(1).reg(); | |
| 4896 if (RangeUtils::CanBeZero(divisor_range())) { | |
| 4897 // Handle divide by zero in runtime. | |
| 4898 __ beq(right, ZR, deopt); | |
| 4899 } | |
| 4900 __ SmiUntag(temp, left); | |
| 4901 __ SmiUntag(TMP, right); | |
| 4902 __ div(temp, TMP); | |
| 4903 __ mflo(result_div); | |
| 4904 __ mfhi(result_mod); | |
| 4905 // Check the corner case of dividing MIN_SMI by -1, in which case the | |
| 4906 // quotient is 0x40000000 and cannot be tagged as a Smi. | |
| 4907 __ BranchEqual(result_div, Immediate(0x40000000), deopt); | |
| 4908 // res = left % right; | |
| 4909 // if (res < 0) { | |
| 4910 // if (right < 0) { | |
| 4911 // res = res - right; | |
| 4912 // } else { | |
| 4913 // res = res + right; | |
| 4914 // } | |
| 4915 // } | |
| 4916 Label done; | |
| 4917 __ bgez(result_mod, &done); | |
| 4918 if (RangeUtils::Overlaps(divisor_range(), -1, 1)) { | |
| 4919 Label subtract; | |
| 4920 __ bltz(right, &subtract); | |
| 4921 __ addu(result_mod, result_mod, TMP); | |
| 4922 __ b(&done); | |
| 4923 __ Bind(&subtract); | |
| 4924 __ subu(result_mod, result_mod, TMP); | |
| 4925 } else if (divisor_range()->IsPositive()) { | |
| 4926 // Right is positive. | |
| 4927 __ addu(result_mod, result_mod, TMP); | |
| 4928 } else { | |
| 4929 // Right is negative. | |
| 4930 __ subu(result_mod, result_mod, TMP); | |
| 4931 } | |
| 4932 __ Bind(&done); | |
| 4933 | |
| 4934 __ SmiTag(result_div); | |
| 4935 __ SmiTag(result_mod); | |
| 4936 } | |
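| | |
| // Editorial sketch (not part of the original source): the remainder fixup | |
| // above. MIPS div leaves a remainder with the sign of the dividend, while | |
| // Dart's % must be non-negative. The helper name is illustrative only. | |
| static int32_t EuclideanRemainder(int32_t left, int32_t right) { | |
|   int32_t res = left % right;  // Truncated remainder (sign of 'left'). | |
|   if (res < 0) { | |
|     res = (right < 0) ? res - right : res + right; | |
|   } | |
|   return res; | |
| } | |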
| 4937 | |
| 4938 | |
| 4939 LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary( | |
| 4940 Zone* zone, | |
| 4941 bool opt) const { | |
| 4942 return MakeCallSummary(zone); | |
| 4943 } | |
| 4944 | |
| 4945 | |
| 4946 LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 4947 comparison()->InitializeLocationSummary(zone, opt); | |
| 4948 // Branches don't produce a result. | |
| 4949 comparison()->locs()->set_out(0, Location::NoLocation()); | |
| 4950 return comparison()->locs(); | |
| 4951 } | |
| 4952 | |
| 4953 | |
| 4954 void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 4955 __ Comment("BranchInstr"); | |
| 4956 comparison()->EmitBranchCode(compiler, this); | |
| 4957 } | |
| 4958 | |
| 4959 | |
| 4960 LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone, | |
| 4961 bool opt) const { | |
| 4962 const intptr_t kNumInputs = 1; | |
| 4963 const bool need_mask_temp = IsBitTest(); | |
| 4964 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0; | |
| 4965 LocationSummary* summary = new (zone) | |
| 4966 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 4967 summary->set_in(0, Location::RequiresRegister()); | |
| 4968 if (!IsNullCheck()) { | |
| 4969 summary->set_temp(0, Location::RequiresRegister()); | |
| 4970 if (need_mask_temp) { | |
| 4971 summary->set_temp(1, Location::RequiresRegister()); | |
| 4972 } | |
| 4973 } | |
| 4974 return summary; | |
| 4975 } | |
| 4976 | |
| 4977 | |
| 4978 void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler, Label* deopt) { | |
| 4979 if (IsDeoptIfNull()) { | |
| 4980 __ BranchEqual(locs()->in(0).reg(), Object::null_object(), deopt); | |
| 4981 } else { | |
| 4982 ASSERT(IsDeoptIfNotNull()); | |
| 4983 __ BranchNotEqual(locs()->in(0).reg(), Object::null_object(), deopt); | |
| 4984 } | |
| 4985 } | |
| 4986 | |
| 4987 | |
| 4988 void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler, | |
| 4989 intptr_t min, | |
| 4990 intptr_t max, | |
| 4991 intptr_t mask, | |
| 4992 Label* deopt) { | |
| 4993 Register biased_cid = locs()->temp(0).reg(); | |
| 4994 __ LoadImmediate(TMP, min); | |
| 4995 __ subu(biased_cid, biased_cid, TMP); | |
| 4996 __ LoadImmediate(TMP, max - min); | |
| 4997 __ BranchUnsignedGreater(biased_cid, TMP, deopt); | |
| 4998 | |
| 4999 Register bit_reg = locs()->temp(1).reg(); | |
| 5000 __ LoadImmediate(bit_reg, 1); | |
| 5001 __ sllv(bit_reg, bit_reg, biased_cid); | |
| 5002 __ AndImmediate(bit_reg, bit_reg, mask); | |
| 5003 __ beq(bit_reg, ZR, deopt); | |
| 5004 } | |
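| | |
| // Editorial sketch (not part of the original source): the cid bit test | |
| // emitted above. After subtracting 'min', any cid in [min, max] selects a | |
| // bit in 'mask'; everything else fails the unsigned bound check. The | |
| // helper name is illustrative only. | |
| static bool CidBitTestModel(intptr_t cid, intptr_t min, intptr_t max, | |
|                             intptr_t mask) { | |
|   const uintptr_t biased = static_cast<uintptr_t>(cid - min); | |
|   if (biased > static_cast<uintptr_t>(max - min)) return false; | |
|   return ((static_cast<intptr_t>(1) << biased) & mask) != 0; | |
| } | |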
| 5005 | |
| 5006 | |
| 5007 int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler, | |
| 5008 int bias, | |
| 5009 intptr_t cid_start, | |
| 5010 intptr_t cid_end, | |
| 5011 bool is_last, | |
| 5012 Label* is_ok, | |
| 5013 Label* deopt, | |
| 5014 bool use_near_jump) { | |
| 5015 Register biased_cid = locs()->temp(0).reg(); | |
| 5016 if (cid_start == cid_end) { | |
| 5017 __ LoadImmediate(TMP, cid_start - bias); | |
| 5018 if (is_last) { | |
| 5019 __ bne(biased_cid, TMP, deopt); | |
| 5020 } else { | |
| 5021 __ beq(biased_cid, TMP, is_ok); | |
| 5022 } | |
| 5023 } else { | |
| 5024 // For class ID ranges, use a subtract followed by an unsigned | |
| 5025 // comparison to check both ends of the range with one comparison. | |
| 5026 __ AddImmediate(biased_cid, biased_cid, bias - cid_start); | |
| 5027 bias = cid_start; | |
| 5028 // TODO(erikcorry): We should use sltiu instead of the temporary TMP if | |
| 5029 // the range is small enough. | |
| 5030 __ LoadImmediate(TMP, cid_end - cid_start); | |
| 5031 // Reverse the comparison so we get 1 if biased_cid > TMP, i.e. the cid | |
| 5032 // is out of range. | |
| 5033 __ sltu(TMP, TMP, biased_cid); | |
| 5034 if (is_last) { | |
| 5035 __ bne(TMP, ZR, deopt); | |
| 5036 } else { | |
| 5037 __ beq(TMP, ZR, is_ok); | |
| 5038 } | |
| 5039 } | |
| 5040 return bias; | |
| 5041 } | |
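| | |
| // Editorial sketch (not part of the original source): the single-branch | |
| // range check above. Values below cid_start wrap to large unsigned numbers | |
| // after the subtraction, so one unsigned comparison tests both bounds. The | |
| // helper name is illustrative only. | |
| static bool InCidRange(intptr_t cid, intptr_t cid_start, intptr_t cid_end) { | |
|   return static_cast<uintptr_t>(cid - cid_start) <= | |
|          static_cast<uintptr_t>(cid_end - cid_start); | |
| } | |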
| 5042 | |
| 5043 | |
| 5044 LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone, | |
| 5045 bool opt) const { | |
| 5046 const intptr_t kNumInputs = 1; | |
| 5047 const intptr_t kNumTemps = 0; | |
| 5048 LocationSummary* summary = new (zone) | |
| 5049 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5050 summary->set_in(0, Location::RequiresRegister()); | |
| 5051 return summary; | |
| 5052 } | |
| 5053 | |
| 5054 | |
| 5055 void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5056 __ Comment("CheckSmiInstr"); | |
| 5057 Register value = locs()->in(0).reg(); | |
| 5058 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi, | |
| 5059 licm_hoisted_ ? ICData::kHoisted : 0); | |
| 5060 __ BranchIfNotSmi(value, deopt); | |
| 5061 } | |
| 5062 | |
| 5063 | |
| 5064 LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone, | |
| 5065 bool opt) const { | |
| 5066 const intptr_t kNumInputs = 1; | |
| 5067 const intptr_t kNumTemps = 0; | |
| 5068 LocationSummary* summary = new (zone) | |
| 5069 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5070 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister() | |
| 5071 : Location::WritableRegister()); | |
| 5072 | |
| 5073 return summary; | |
| 5074 } | |
| 5075 | |
| 5076 | |
| 5077 void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5078 Register value = locs()->in(0).reg(); | |
| 5079 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass); | |
| 5080 if (cids_.IsSingleCid()) { | |
| 5081 __ BranchNotEqual(value, Immediate(Smi::RawValue(cids_.cid_start)), deopt); | |
| 5082 } else { | |
| 5083 __ AddImmediate(value, value, -Smi::RawValue(cids_.cid_start)); | |
| 5084 // TODO(erikcorry): We should use sltiu instead of the temporary TMP if | |
| 5085 // the range is small enough. | |
| 5086 __ LoadImmediate(TMP, cids_.Extent()); | |
| 5087 // Reverse the comparison so we get 1 if the biased value > TMP, i.e. | |
| 5088 // the cid is out of range. | |
| 5089 __ sltu(TMP, TMP, value); | |
| 5090 __ bne(TMP, ZR, deopt); | |
| 5091 } | |
| 5092 } | |
| 5093 | |
| 5094 | |
| 5095 LocationSummary* GenericCheckBoundInstr::MakeLocationSummary(Zone* zone, | |
| 5096 bool opt) const { | |
| 5097 const intptr_t kNumInputs = 2; | |
| 5098 const intptr_t kNumTemps = 0; | |
| 5099 LocationSummary* locs = new (zone) LocationSummary( | |
| 5100 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath); | |
| 5101 locs->set_in(kLengthPos, Location::RequiresRegister()); | |
| 5102 locs->set_in(kIndexPos, Location::RequiresRegister()); | |
| 5103 return locs; | |
| 5104 } | |
| 5105 | |
| 5106 | |
| 5107 class RangeErrorSlowPath : public SlowPathCode { | |
| 5108 public: | |
| 5109 RangeErrorSlowPath(GenericCheckBoundInstr* instruction, intptr_t try_index) | |
| 5110 : instruction_(instruction), try_index_(try_index) {} | |
| 5111 | |
| 5112 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5113 if (Assembler::EmittingComments()) { | |
| 5114 __ Comment("slow path check bound operation"); | |
| 5115 } | |
| 5116 __ Bind(entry_label()); | |
| 5117 LocationSummary* locs = instruction_->locs(); | |
| 5118 compiler->SaveLiveRegisters(locs); | |
| 5119 __ Push(locs->in(0).reg()); | |
| 5120 __ Push(locs->in(1).reg()); | |
| 5121 __ CallRuntime(kRangeErrorRuntimeEntry, 2); | |
| 5122 compiler->AddDescriptor( | |
| 5123 RawPcDescriptors::kOther, compiler->assembler()->CodeSize(), | |
| 5124 instruction_->deopt_id(), instruction_->token_pos(), try_index_); | |
| 5125 Environment* env = compiler->SlowPathEnvironmentFor(instruction_); | |
| 5126 compiler->EmitCatchEntryState(env, try_index_); | |
| 5127 __ break_(0); | |
| 5128 } | |
| 5129 | |
| 5130 private: | |
| 5131 GenericCheckBoundInstr* instruction_; | |
| 5132 intptr_t try_index_; | |
| 5133 }; | |
| 5134 | |
| 5135 | |
| 5136 void GenericCheckBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5137 RangeErrorSlowPath* slow_path = | |
| 5138 new RangeErrorSlowPath(this, compiler->CurrentTryIndex()); | |
| 5139 compiler->AddSlowPathCode(slow_path); | |
| 5140 | |
| 5141 Location length_loc = locs()->in(kLengthPos); | |
| 5142 Location index_loc = locs()->in(kIndexPos); | |
| 5143 Register length = length_loc.reg(); | |
| 5144 Register index = index_loc.reg(); | |
| 5145 const intptr_t index_cid = this->index()->Type()->ToCid(); | |
| 5146 if (index_cid != kSmiCid) { | |
| 5147 __ BranchIfNotSmi(index, slow_path->entry_label()); | |
| 5148 } | |
| 5149 __ BranchUnsignedGreaterEqual(index, length, slow_path->entry_label()); | |
| 5150 } | |
| 5151 | |
| 5152 | |
| 5153 LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone, | |
| 5154 bool opt) const { | |
| 5155 const intptr_t kNumInputs = 2; | |
| 5156 const intptr_t kNumTemps = 0; | |
| 5157 LocationSummary* locs = new (zone) | |
| 5158 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5159 locs->set_in(kLengthPos, Location::RegisterOrSmiConstant(length())); | |
| 5160 locs->set_in(kIndexPos, Location::RegisterOrSmiConstant(index())); | |
| 5161 return locs; | |
| 5162 } | |
| 5163 | |
| 5164 | |
| 5165 void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5166 uint32_t flags = generalized_ ? ICData::kGeneralized : 0; | |
| 5167 flags |= licm_hoisted_ ? ICData::kHoisted : 0; | |
| 5168 Label* deopt = | |
| 5169 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags); | |
| 5170 | |
| 5171 Location length_loc = locs()->in(kLengthPos); | |
| 5172 Location index_loc = locs()->in(kIndexPos); | |
| 5173 | |
| 5174 if (length_loc.IsConstant() && index_loc.IsConstant()) { | |
| 5175 ASSERT((Smi::Cast(length_loc.constant()).Value() <= | |
| 5176 Smi::Cast(index_loc.constant()).Value()) || | |
| 5177 (Smi::Cast(index_loc.constant()).Value() < 0)); | |
| 5178 // Unconditionally deoptimize for constant bounds checks because they | |
| 5179 // occur only when the index is out of bounds. | |
| 5180 __ b(deopt); | |
| 5181 return; | |
| 5182 } | |
| 5183 | |
| 5184 const intptr_t index_cid = index()->Type()->ToCid(); | |
| 5185 if (index_loc.IsConstant()) { | |
| 5186 Register length = length_loc.reg(); | |
| 5187 const Smi& index = Smi::Cast(index_loc.constant()); | |
| 5188 __ BranchUnsignedLessEqual( | |
| 5189 length, Immediate(reinterpret_cast<int32_t>(index.raw())), deopt); | |
| 5190 } else if (length_loc.IsConstant()) { | |
| 5191 const Smi& length = Smi::Cast(length_loc.constant()); | |
| 5192 Register index = index_loc.reg(); | |
| 5193 if (index_cid != kSmiCid) { | |
| 5194 __ BranchIfNotSmi(index, deopt); | |
| 5195 } | |
| 5196 if (length.Value() == Smi::kMaxValue) { | |
| 5197 __ BranchSignedLess(index, Immediate(0), deopt); | |
| 5198 } else { | |
| 5199 __ BranchUnsignedGreaterEqual( | |
| 5200 index, Immediate(reinterpret_cast<int32_t>(length.raw())), deopt); | |
| 5201 } | |
| 5202 } else { | |
| 5203 Register length = length_loc.reg(); | |
| 5204 Register index = index_loc.reg(); | |
| 5205 if (index_cid != kSmiCid) { | |
| 5206 __ BranchIfNotSmi(index, deopt); | |
| 5207 } | |
| 5208 __ BranchUnsignedGreaterEqual(index, length, deopt); | |
| 5209 } | |
| 5210 } | |
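| | |
| // Editorial sketch (not part of the original source): why one unsigned | |
| // comparison suffices above. Smi tagging (a left shift by one) preserves | |
| // order, and a negative index becomes a large unsigned value, so index < 0 | |
| // and index >= length fail the same test. The helper name is illustrative. | |
| static bool SmiIndexInBounds(int32_t tagged_index, int32_t tagged_length) { | |
|   return static_cast<uint32_t>(tagged_index) < | |
|          static_cast<uint32_t>(tagged_length); | |
| } | |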
| 5211 | |
| | |
| 5212 LocationSummary* BinaryMintOpInstr::MakeLocationSummary(Zone* zone, | |
| 5213 bool opt) const { | |
| 5214 const intptr_t kNumInputs = 2; | |
| 5215 const intptr_t kNumTemps = 0; | |
| 5216 LocationSummary* summary = new (zone) | |
| 5217 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5218 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
| 5219 Location::RequiresRegister())); | |
| 5220 summary->set_in(1, Location::Pair(Location::RequiresRegister(), | |
| 5221 Location::RequiresRegister())); | |
| 5222 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
| 5223 Location::RequiresRegister())); | |
| 5224 return summary; | |
| 5225 } | |
| 5226 | |
| 5227 | |
| 5228 void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5229 PairLocation* left_pair = locs()->in(0).AsPairLocation(); | |
| 5230 Register left_lo = left_pair->At(0).reg(); | |
| 5231 Register left_hi = left_pair->At(1).reg(); | |
| 5232 PairLocation* right_pair = locs()->in(1).AsPairLocation(); | |
| 5233 Register right_lo = right_pair->At(0).reg(); | |
| 5234 Register right_hi = right_pair->At(1).reg(); | |
| 5235 PairLocation* out_pair = locs()->out(0).AsPairLocation(); | |
| 5236 Register out_lo = out_pair->At(0).reg(); | |
| 5237 Register out_hi = out_pair->At(1).reg(); | |
| 5238 | |
| 5239 Label* deopt = NULL; | |
| 5240 if (CanDeoptimize()) { | |
| 5241 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); | |
| 5242 } | |
| 5243 switch (op_kind()) { | |
| 5244 case Token::kBIT_AND: { | |
| 5245 __ and_(out_lo, left_lo, right_lo); | |
| 5246 __ and_(out_hi, left_hi, right_hi); | |
| 5247 break; | |
| 5248 } | |
| 5249 case Token::kBIT_OR: { | |
| 5250 __ or_(out_lo, left_lo, right_lo); | |
| 5251 __ or_(out_hi, left_hi, right_hi); | |
| 5252 break; | |
| 5253 } | |
| 5254 case Token::kBIT_XOR: { | |
| 5255 __ xor_(out_lo, left_lo, right_lo); | |
| 5256 __ xor_(out_hi, left_hi, right_hi); | |
| 5257 break; | |
| 5258 } | |
| 5259 case Token::kADD: | |
| 5260 case Token::kSUB: { | |
| 5261 if (op_kind() == Token::kADD) { | |
| 5262 __ addu(out_lo, left_lo, right_lo); | |
| 5263 __ sltu(TMP, out_lo, left_lo); // TMP = carry of left_lo + right_lo. | |
| 5264 __ addu(out_hi, left_hi, right_hi); | |
| 5265 __ addu(out_hi, out_hi, TMP); | |
| 5266 if (can_overflow()) { | |
| 5267 __ xor_(CMPRES1, out_hi, left_hi); | |
| 5268 __ xor_(TMP, out_hi, right_hi); | |
| 5269 __ and_(CMPRES1, TMP, CMPRES1); | |
| 5270 __ bltz(CMPRES1, deopt); | |
| 5271 } | |
| 5272 } else { | |
| 5273 __ subu(out_lo, left_lo, right_lo); | |
| 5274 __ sltu(TMP, left_lo, out_lo); // TMP = borrow of left_lo - right_lo. | |
| 5275 __ subu(out_hi, left_hi, right_hi); | |
| 5276 __ subu(out_hi, out_hi, TMP); | |
| 5277 if (can_overflow()) { | |
| 5278 __ xor_(CMPRES1, out_hi, left_hi); | |
| 5279 __ xor_(TMP, left_hi, right_hi); | |
| 5280 __ and_(CMPRES1, TMP, CMPRES1); | |
| 5281 __ bltz(CMPRES1, deopt); | |
| 5282 } | |
| 5283 } | |
| 5284 break; | |
| 5285 } | |
| 5286 case Token::kMUL: { | |
| 5287 // The product of two signed 32-bit integers fits in a signed 64-bit | |
| 5288 // result without causing overflow. | |
| 5289 // We deopt on larger inputs. | |
| 5290 // TODO(regis): Range analysis may eliminate the deopt check. | |
| 5291 __ sra(CMPRES1, left_lo, 31); | |
| 5292 __ bne(CMPRES1, left_hi, deopt); | |
| 5293 __ delay_slot()->sra(CMPRES2, right_lo, 31); | |
| 5294 __ bne(CMPRES2, right_hi, deopt); | |
| 5295 __ delay_slot()->mult(left_lo, right_lo); | |
| 5296 __ mflo(out_lo); | |
| 5297 __ mfhi(out_hi); | |
| 5298 break; | |
| 5299 } | |
| 5300 default: | |
| 5301 UNREACHABLE(); | |
| 5302 } | |
| 5303 } | |
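| | |
| // Editorial sketch (not part of the original source): the carry and | |
| // overflow detection in the kADD case above, using 32-bit operations only. | |
| // The sltu after the low-word add recovers the carry, and signed overflow | |
| // occurred iff the result sign differs from both operand signs. The helper | |
| // name is illustrative only. | |
| static bool Add64Overflows(int32_t l_hi, uint32_t l_lo, int32_t r_hi, | |
|                            uint32_t r_lo, int32_t* out_hi, uint32_t* out_lo) { | |
|   *out_lo = l_lo + r_lo; | |
|   const uint32_t carry = (*out_lo < l_lo) ? 1 : 0;  // The sltu trick. | |
|   *out_hi = static_cast<int32_t>(static_cast<uint32_t>(l_hi) + | |
|                                  static_cast<uint32_t>(r_hi) + carry); | |
|   return ((*out_hi ^ l_hi) & (*out_hi ^ r_hi)) < 0;  // true -> deoptimize. | |
| } | |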
| 5304 | |
| 5305 | |
| 5306 LocationSummary* ShiftMintOpInstr::MakeLocationSummary(Zone* zone, | |
| 5307 bool opt) const { | |
| 5308 const intptr_t kNumInputs = 2; | |
| 5309 const intptr_t kNumTemps = 0; | |
| 5310 LocationSummary* summary = new (zone) | |
| 5311 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5312 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
| 5313 Location::RequiresRegister())); | |
| 5314 summary->set_in(1, Location::WritableRegisterOrSmiConstant(right())); | |
| 5315 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
| 5316 Location::RequiresRegister())); | |
| 5317 return summary; | |
| 5318 } | |
| 5319 | |
| 5320 | |
| 5321 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5322 PairLocation* left_pair = locs()->in(0).AsPairLocation(); | |
| 5323 Register left_lo = left_pair->At(0).reg(); | |
| 5324 Register left_hi = left_pair->At(1).reg(); | |
| 5325 PairLocation* out_pair = locs()->out(0).AsPairLocation(); | |
| 5326 Register out_lo = out_pair->At(0).reg(); | |
| 5327 Register out_hi = out_pair->At(1).reg(); | |
| 5328 | |
| 5329 Label* deopt = NULL; | |
| 5330 if (CanDeoptimize()) { | |
| 5331 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); | |
| 5332 } | |
| 5333 if (locs()->in(1).IsConstant()) { | |
| 5334 // Code for a constant shift amount. | |
| 5335 ASSERT(locs()->in(1).constant().IsSmi()); | |
| 5336 const int32_t shift = | |
| 5337 reinterpret_cast<int32_t>(locs()->in(1).constant().raw()) >> 1; | |
| 5338 switch (op_kind()) { | |
| 5339 case Token::kSHR: { | |
| 5340 if (shift < 32) { | |
| 5341 __ sll(out_lo, left_hi, 32 - shift); | |
| 5342 __ srl(TMP, left_lo, shift); | |
| 5343 __ or_(out_lo, out_lo, TMP); | |
| 5344 __ sra(out_hi, left_hi, shift); | |
| 5345 } else { | |
| 5346 if (shift == 32) { | |
| 5347 __ mov(out_lo, left_hi); | |
| 5348 } else if (shift < 64) { | |
| 5349 __ sra(out_lo, left_hi, shift - 32); | |
| 5350 } else { | |
| 5351 __ sra(out_lo, left_hi, 31); | |
| 5352 } | |
| 5353 __ sra(out_hi, left_hi, 31); | |
| 5354 } | |
| 5355 break; | |
| 5356 } | |
| 5357 case Token::kSHL: { | |
| 5358 ASSERT(shift < 64); | |
| 5359 if (shift < 32) { | |
| 5360 __ srl(out_hi, left_lo, 32 - shift); | |
| 5361 __ sll(TMP, left_hi, shift); | |
| 5362 __ or_(out_hi, out_hi, TMP); | |
| 5363 __ sll(out_lo, left_lo, shift); | |
| 5364 } else { | |
| 5365 __ sll(out_hi, left_lo, shift - 32); | |
| 5366 __ mov(out_lo, ZR); | |
| 5367 } | |
| 5368 // Check for overflow. | |
| 5369 if (can_overflow()) { | |
| 5370 // Compare high word from input with shifted high word from output. | |
| 5371 // Overflow if they aren't equal. | |
| 5372 // If shift > 32, also compare low word from input with high word from | |
| 5373 // output shifted back shift - 32. | |
| 5374 if (shift > 32) { | |
| 5375 __ sra(TMP, out_hi, shift - 32); | |
| 5376 __ bne(left_lo, TMP, deopt); | |
| 5377 __ delay_slot()->sra(TMP, out_hi, 31); | |
| 5378 } else if (shift == 32) { | |
| 5379 __ sra(TMP, out_hi, 31); | |
| 5380 } else { | |
| 5381 __ sra(TMP, out_hi, shift); | |
| 5382 } | |
| 5383 __ bne(left_hi, TMP, deopt); | |
| 5384 } | |
| 5385 break; | |
| 5386 } | |
| 5387 default: | |
| 5388 UNREACHABLE(); | |
| 5389 } | |
| 5390 } else { | |
| 5391 // Code for a variable shift amount. | |
| 5392 Register shift = locs()->in(1).reg(); | |
| 5393 | |
| 5394 // Code below assumes shift amount is not 0 (cannot shift by 32 - 0). | |
| 5395 Label non_zero_shift, done; | |
| 5396 __ bne(shift, ZR, &non_zero_shift); | |
| 5397 __ delay_slot()->mov(out_lo, left_lo); | |
| 5398 __ b(&done); | |
| 5399 __ delay_slot()->mov(out_hi, left_hi); | |
| 5400 __ Bind(&non_zero_shift); | |
| 5401 | |
| 5402 // Deopt if shift is larger than 63 or less than 0. | |
| 5403 if (has_shift_count_check()) { | |
| 5404 __ sltiu(CMPRES1, shift, Immediate(2 * (kMintShiftCountLimit + 1))); | |
| 5405 __ beq(CMPRES1, ZR, deopt); | |
| 5406 // Untag shift count. | |
| 5407 __ delay_slot()->SmiUntag(shift); | |
| 5408 } else { | |
| 5409 // Untag shift count. | |
| 5410 __ SmiUntag(shift); | |
| 5411 } | |
| 5412 | |
| 5413 switch (op_kind()) { | |
| 5414 case Token::kSHR: { | |
| 5415 Label large_shift; | |
| 5416 __ sltiu(CMPRES1, shift, Immediate(32)); | |
| 5417 __ beq(CMPRES1, ZR, &large_shift); | |
| 5418 | |
| 5419 // 0 < shift < 32. | |
| 5420 __ delay_slot()->ori(TMP, ZR, Immediate(32)); | |
| 5421 __ subu(TMP, TMP, shift); // TMP = 32 - shift; 0 < TMP <= 31. | |
| 5422 __ sllv(out_lo, left_hi, TMP); | |
| 5423 __ srlv(TMP, left_lo, shift); | |
| 5424 __ or_(out_lo, out_lo, TMP); | |
| 5425 __ b(&done); | |
| 5426 __ delay_slot()->srav(out_hi, left_hi, shift); | |
| 5427 | |
| 5428 // shift >= 32. | |
| 5429 __ Bind(&large_shift); | |
| 5430 __ sra(out_hi, left_hi, 31); | |
| 5431 __ srav(out_lo, left_hi, shift); // Only 5 low bits of shift used. | |
| 5432 | |
| 5433 break; | |
| 5434 } | |
| 5435 case Token::kSHL: { | |
| 5436 Label large_shift; | |
| 5437 __ sltiu(CMPRES1, shift, Immediate(32)); | |
| 5438 __ beq(CMPRES1, ZR, &large_shift); | |
| 5439 | |
| 5440 // 0 < shift < 32. | |
| 5441 __ delay_slot()->ori(TMP, ZR, Immediate(32)); | |
| 5442 __ subu(TMP, TMP, shift); // TMP = 32 - shift; 0 < TMP <= 31. | |
| 5443 __ srlv(out_hi, left_lo, TMP); | |
| 5444 __ sllv(TMP, left_hi, shift); | |
| 5445 __ or_(out_hi, out_hi, TMP); | |
| 5446 // Check for overflow. | |
| 5447 if (can_overflow()) { | |
| 5448 // Compare high word from input with shifted high word from output. | |
| 5449 __ srav(TMP, out_hi, shift); | |
| 5450 __ beq(TMP, left_hi, &done); | |
| 5451 __ delay_slot()->sllv(out_lo, left_lo, shift); | |
| 5452 __ b(deopt); | |
| 5453 } else { | |
| 5454 __ b(&done); | |
| 5455 __ delay_slot()->sllv(out_lo, left_lo, shift); | |
| 5456 } | |
| 5457 | |
| 5458 // shift >= 32. | |
| 5459 __ Bind(&large_shift); | |
| 5460 __ sllv(out_hi, left_lo, shift); // Only 5 low bits of shift used. | |
| 5461 // Check for overflow. | |
| 5462 if (can_overflow()) { | |
| 5463 // Compare the low word of the input with the high word of the output | |
| 5464 // shifted back, and the high word of the input with the sign of the | |
| 5465 // output. Overflow if either pair differs. | |
| 5466 __ srav(TMP, out_hi, shift); | |
| 5467 __ bne(TMP, left_lo, deopt); | |
| 5468 __ delay_slot()->sra(TMP, out_hi, 31); | |
| 5469 __ bne(TMP, left_hi, deopt); | |
| 5470 __ delay_slot()->mov(out_lo, ZR); | |
| 5471 } else { | |
| 5472 __ mov(out_lo, ZR); | |
| 5473 } | |
| 5474 break; | |
| 5475 } | |
| 5476 default: | |
| 5477 UNREACHABLE(); | |
| 5478 } | |
| 5479 __ Bind(&done); | |
| 5480 } | |
| 5481 } | |
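| | |
| // Editorial sketch (not part of the original source): the small-constant | |
| // kSHR decomposition above (0 < shift < 32): the vacated bits of the low | |
| // word come from the high word, and the high word shifts arithmetically. | |
| // The helper name is illustrative; assumes arithmetic >> on int32_t. | |
| static void Sar64Small(int32_t hi, uint32_t lo, int shift, int32_t* out_hi, | |
|                        uint32_t* out_lo) { | |
|   *out_lo = (static_cast<uint32_t>(hi) << (32 - shift)) | (lo >> shift); | |
|   *out_hi = hi >> shift; | |
| } | |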
| 5482 | |
| 5483 | |
| 5484 LocationSummary* UnaryMintOpInstr::MakeLocationSummary(Zone* zone, | |
| 5485 bool opt) const { | |
| 5486 const intptr_t kNumInputs = 1; | |
| 5487 const intptr_t kNumTemps = 0; | |
| 5488 LocationSummary* summary = new (zone) | |
| 5489 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5490 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
| 5491 Location::RequiresRegister())); | |
| 5492 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
| 5493 Location::RequiresRegister())); | |
| 5494 return summary; | |
| 5495 } | |
| 5496 | |
| 5497 | |
| 5498 void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5499 ASSERT(op_kind() == Token::kBIT_NOT); | |
| 5500 PairLocation* left_pair = locs()->in(0).AsPairLocation(); | |
| 5501 Register left_lo = left_pair->At(0).reg(); | |
| 5502 Register left_hi = left_pair->At(1).reg(); | |
| 5503 | |
| 5504 PairLocation* out_pair = locs()->out(0).AsPairLocation(); | |
| 5505 Register out_lo = out_pair->At(0).reg(); | |
| 5506 Register out_hi = out_pair->At(1).reg(); | |
| 5507 | |
| 5508 __ nor(out_lo, ZR, left_lo); | |
| 5509 __ nor(out_hi, ZR, left_hi); | |
| 5510 } | |
| 5511 | |
| 5512 | |
| 5513 CompileType BinaryUint32OpInstr::ComputeType() const { | |
| 5514 return CompileType::Int(); | |
| 5515 } | |
| 5516 | |
| 5517 | |
| 5518 CompileType ShiftUint32OpInstr::ComputeType() const { | |
| 5519 return CompileType::Int(); | |
| 5520 } | |
| 5521 | |
| 5522 | |
| 5523 CompileType UnaryUint32OpInstr::ComputeType() const { | |
| 5524 return CompileType::Int(); | |
| 5525 } | |
| 5526 | |
| 5527 | |
| 5528 LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone, | |
| 5529 bool opt) const { | |
| 5530 const intptr_t kNumInputs = 2; | |
| 5531 const intptr_t kNumTemps = 0; | |
| 5532 LocationSummary* summary = new (zone) | |
| 5533 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5534 summary->set_in(0, Location::RequiresRegister()); | |
| 5535 summary->set_in(1, Location::RequiresRegister()); | |
| 5536 summary->set_out(0, Location::RequiresRegister()); | |
| 5537 return summary; | |
| 5538 } | |
| 5539 | |
| 5540 | |
| 5541 void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5542 Register left = locs()->in(0).reg(); | |
| 5543 Register right = locs()->in(1).reg(); | |
| 5544 Register out = locs()->out(0).reg(); | |
| 5545 ASSERT(out != left); | |
| 5546 switch (op_kind()) { | |
| 5547 case Token::kBIT_AND: | |
| 5548 __ and_(out, left, right); | |
| 5549 break; | |
| 5550 case Token::kBIT_OR: | |
| 5551 __ or_(out, left, right); | |
| 5552 break; | |
| 5553 case Token::kBIT_XOR: | |
| 5554 __ xor_(out, left, right); | |
| 5555 break; | |
| 5556 case Token::kADD: | |
| 5557 __ addu(out, left, right); | |
| 5558 break; | |
| 5559 case Token::kSUB: | |
| 5560 __ subu(out, left, right); | |
| 5561 break; | |
| 5562 case Token::kMUL: | |
| 5563 __ multu(left, right); | |
| 5564 __ mflo(out); | |
| 5565 break; | |
| 5566 default: | |
| 5567 UNREACHABLE(); | |
| 5568 } | |
| 5569 } | |
| 5570 | |
| 5571 | |
| 5572 LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone, | |
| 5573 bool opt) const { | |
| 5574 const intptr_t kNumInputs = 2; | |
| 5575 const intptr_t kNumTemps = 1; | |
| 5576 LocationSummary* summary = new (zone) | |
| 5577 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5578 summary->set_in(0, Location::RequiresRegister()); | |
| 5579 summary->set_in(1, Location::RegisterOrSmiConstant(right())); | |
| 5580 summary->set_temp(0, Location::RequiresRegister()); | |
| 5581 summary->set_out(0, Location::RequiresRegister()); | |
| 5582 return summary; | |
| 5583 } | |
| 5584 | |
| 5585 | |
| 5586 void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5587 const intptr_t kShifterLimit = 31; | |
| 5588 | |
| 5589 Register left = locs()->in(0).reg(); | |
| 5590 Register out = locs()->out(0).reg(); | |
| 5591 Register temp = locs()->temp(0).reg(); | |
| 5592 | |
| 5593 ASSERT(left != out); | |
| 5594 | |
| 5595 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); | |
| 5596 | |
| 5597 if (locs()->in(1).IsConstant()) { | |
| 5598 // Shifter is constant. | |
| 5599 | |
| 5600 const Object& constant = locs()->in(1).constant(); | |
| 5601 ASSERT(constant.IsSmi()); | |
| 5602 const intptr_t shift_value = Smi::Cast(constant).Value(); | |
| 5603 | |
| 5604 // Do the shift: (shift_value > 0) && (shift_value <= kShifterLimit). | |
| 5605 switch (op_kind()) { | |
| 5606 case Token::kSHR: | |
| 5607 __ srl(out, left, shift_value); | |
| 5608 break; | |
| 5609 case Token::kSHL: | |
| 5610 __ sll(out, left, shift_value); | |
| 5611 break; | |
| 5612 default: | |
| 5613 UNREACHABLE(); | |
| 5614 } | |
| 5615 return; | |
| 5616 } | |
| 5617 | |
| 5618 // Non-constant shift value. | |
| 5619 Register shifter = locs()->in(1).reg(); | |
| 5620 | |
| 5621 __ SmiUntag(temp, shifter); | |
| 5622 // If shift value is < 0, deoptimize. | |
| 5623 __ bltz(temp, deopt); | |
| 5624 __ delay_slot()->mov(out, left); | |
| 5625 __ sltiu(CMPRES1, temp, Immediate(kShifterLimit + 1)); | |
| 5626 __ movz(out, ZR, CMPRES1); // out = shift > kShifterLimit ? 0 : left. | |
| 5627 // Do the shift % 32. | |
| 5628 switch (op_kind()) { | |
| 5629 case Token::kSHR: | |
| 5630 __ srlv(out, out, temp); | |
| 5631 break; | |
| 5632 case Token::kSHL: | |
| 5633 __ sllv(out, out, temp); | |
| 5634 break; | |
| 5635 default: | |
| 5636 UNREACHABLE(); | |
| 5637 } | |
| 5638 } | |
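| | |
| // Editorial sketch (not part of the original source): the variable-shift | |
| // path above. srlv/sllv use only the low 5 bits of the shift amount, so | |
| // shifts larger than 31 must be squashed to zero explicitly, which is what | |
| // the movz does. A negative shift deoptimizes and is not modeled here. The | |
| // helper name is illustrative only. | |
| static uint32_t ShiftUint32Model(uint32_t value, intptr_t shift, bool shl) { | |
|   if (shift > 31) return 0;  // movz: out = ZR when CMPRES1 == 0. | |
|   return shl ? (value << shift) : (value >> shift); | |
| } | |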
| 5639 | |
| 5640 | |
| 5641 LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone, | |
| 5642 bool opt) const { | |
| 5643 const intptr_t kNumInputs = 1; | |
| 5644 const intptr_t kNumTemps = 0; | |
| 5645 LocationSummary* summary = new (zone) | |
| 5646 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5647 summary->set_in(0, Location::RequiresRegister()); | |
| 5648 summary->set_out(0, Location::RequiresRegister()); | |
| 5649 return summary; | |
| 5650 } | |
| 5651 | |
| 5652 | |
| 5653 void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5654 Register left = locs()->in(0).reg(); | |
| 5655 Register out = locs()->out(0).reg(); | |
| 5656 ASSERT(left != out); | |
| 5657 | |
| 5658 ASSERT(op_kind() == Token::kBIT_NOT); | |
| 5659 | |
| 5660 __ nor(out, ZR, left); | |
| 5661 } | |
| 5662 | |
| 5663 | |
| 5664 DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr) | |
| 5665 | |
| 5666 | |
| 5667 LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone, | |
| 5668 bool opt) const { | |
| 5669 const intptr_t kNumInputs = 1; | |
| 5670 const intptr_t kNumTemps = 0; | |
| 5671 LocationSummary* summary = new (zone) | |
| 5672 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall); | |
| 5673 if (from() == kUnboxedMint) { | |
| 5674 ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32)); | |
| 5675 summary->set_in(0, Location::Pair(Location::RequiresRegister(), | |
| 5676 Location::RequiresRegister())); | |
| 5677 summary->set_out(0, Location::RequiresRegister()); | |
| 5678 } else if (to() == kUnboxedMint) { | |
| 5679 ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32)); | |
| 5680 summary->set_in(0, Location::RequiresRegister()); | |
| 5681 summary->set_out(0, Location::Pair(Location::RequiresRegister(), | |
| 5682 Location::RequiresRegister())); | |
| 5683 } else { | |
| 5684 ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32)); | |
| 5685 ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32)); | |
| 5686 summary->set_in(0, Location::RequiresRegister()); | |
| 5687 summary->set_out(0, Location::SameAsFirstInput()); | |
| 5688 } | |
| 5689 return summary; | |
| 5690 } | |
| 5691 | |
| 5692 | |
| 5693 void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5694 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) { | |
| 5695 const Register out = locs()->out(0).reg(); | |
| 5696 // Representations are bitwise equivalent. | |
| 5697 ASSERT(out == locs()->in(0).reg()); | |
| 5698 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) { | |
| 5699 const Register out = locs()->out(0).reg(); | |
| 5700 // Representations are bitwise equivalent. | |
| 5701 ASSERT(out == locs()->in(0).reg()); | |
| 5702 if (CanDeoptimize()) { | |
| 5703 Label* deopt = | |
| 5704 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); | |
| 5705 __ BranchSignedLess(out, Immediate(0), deopt); | |
| 5706 } | |
| 5707 } else if (from() == kUnboxedMint) { | |
| 5708 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32); | |
| 5709 PairLocation* in_pair = locs()->in(0).AsPairLocation(); | |
| 5710 Register in_lo = in_pair->At(0).reg(); | |
| 5711 Register in_hi = in_pair->At(1).reg(); | |
| 5712 Register out = locs()->out(0).reg(); | |
| 5713 // Copy low word. | |
| 5714 __ mov(out, in_lo); | |
| 5715 if (CanDeoptimize()) { | |
| 5716 Label* deopt = | |
| 5717 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger); | |
| 5718 ASSERT(to() == kUnboxedInt32); | |
| 5719 __ sra(TMP, in_lo, 31); | |
| 5720 __ bne(in_hi, TMP, deopt); | |
| 5721 } | |
| 5722 } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) { | |
| 5723 ASSERT(to() == kUnboxedMint); | |
| 5724 Register in = locs()->in(0).reg(); | |
| 5725 PairLocation* out_pair = locs()->out(0).AsPairLocation(); | |
| 5726 Register out_lo = out_pair->At(0).reg(); | |
| 5727 Register out_hi = out_pair->At(1).reg(); | |
| 5728 // Copy low word. | |
| 5729 __ mov(out_lo, in); | |
| 5730 if (from() == kUnboxedUint32) { | |
| 5731 __ xor_(out_hi, out_hi, out_hi); | |
| 5732 } else { | |
| 5733 ASSERT(from() == kUnboxedInt32); | |
| 5734 __ sra(out_hi, in, 31); | |
| 5735 } | |
| 5736 } else { | |
| 5737 UNREACHABLE(); | |
| 5738 } | |
| 5739 } | |
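| | |
| // Editorial sketch (not part of the original source): the widening half of | |
| // the conversions above. int32 widens by replicating the sign bit (sra by | |
| // 31); uint32 widens with a zero high word. The helper name is | |
| // illustrative only. | |
| static void WidenToInt64(uint32_t in, bool is_signed, uint32_t* out_lo, | |
|                          int32_t* out_hi) { | |
|   *out_lo = in; | |
|   *out_hi = is_signed ? (static_cast<int32_t>(in) >> 31) : 0; | |
| } | |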
| 5740 | |
| 5741 | |
| 5742 LocationSummary* ThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 5743 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); | |
| 5744 } | |
| 5745 | |
| 5746 | |
| 5747 void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5748 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), kThrowRuntimeEntry, 1, | |
| 5749 locs()); | |
| 5750 __ break_(0); | |
| 5751 } | |
| 5752 | |
| 5753 | |
| 5754 LocationSummary* ReThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 5755 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall); | |
| 5756 } | |
| 5757 | |
| 5758 | |
| 5759 void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5760 compiler->SetNeedsStackTrace(catch_try_index()); | |
| 5761 compiler->GenerateRuntimeCall(token_pos(), deopt_id(), kReThrowRuntimeEntry, | |
| 5762 2, locs()); | |
| 5763 __ break_(0); | |
| 5764 } | |
| 5765 | |
| 5766 | |
| 5767 LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 5768 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); | |
| 5769 } | |
| 5770 | |
| 5771 | |
| 5772 void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5773 __ Stop(message()); | |
| 5774 } | |
| 5775 | |
| 5776 | |
| 5777 void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5778 if (!compiler->CanFallThroughTo(normal_entry())) { | |
| 5779 __ b(compiler->GetJumpLabel(normal_entry())); | |
| 5780 } | |
| 5781 } | |
| 5782 | |
| 5783 | |
| 5784 LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const { | |
| 5785 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall); | |
| 5786 } | |
| 5787 | |
| 5788 | |
| 5789 void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) { | |
| 5790 __ Comment("GotoInstr"); | |
| 5791 if (!compiler->is_optimizing()) { | |
    if (FLAG_reorder_basic_blocks) {
      compiler->EmitEdgeCounter(block()->preorder_number());
    }
    // Add a deoptimization descriptor for deoptimizing instructions that
    // may be inserted before this instruction.
    compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(),
                                   TokenPosition::kNoSource);
  }
  if (HasParallelMove()) {
    compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
  }

  // We can fall through if the successor is the next block in the list.
  // Otherwise, we need a jump.
  if (!compiler->CanFallThroughTo(successor())) {
    __ b(compiler->GetJumpLabel(successor()));
  }
}


LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;

  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);

  summary->set_in(0, Location::RequiresRegister());
  summary->set_temp(0, Location::RequiresRegister());

  return summary;
}


void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  Register target_reg = locs()->temp_slot(0)->reg();

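  // Materialize the current PC, then subtract its offset within this code
  // object (entry_offset) to recover the code entry address; the incoming
  // offset is added to that base to form the absolute jump target.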
  __ GetNextPC(target_reg, TMP);
  const intptr_t entry_offset = __ CodeSize() - 1 * Instr::kInstrSize;
  __ AddImmediate(target_reg, target_reg, -entry_offset);

  // Add the offset.
  Register offset_reg = locs()->in(0).reg();
  if (offset()->definition()->representation() == kTagged) {
    __ SmiUntag(offset_reg);
  }
  __ addu(target_reg, target_reg, offset_reg);

  // Jump to the absolute address.
  __ jr(target_reg);
}


LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
                                                         bool opt) const {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  if (needs_number_check()) {
    LocationSummary* locs = new (zone)
        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
    locs->set_in(0, Location::RegisterLocation(A0));
    locs->set_in(1, Location::RegisterLocation(A1));
    locs->set_out(0, Location::RegisterLocation(A0));
    return locs;
  }
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  locs->set_in(0, Location::RegisterOrConstant(left()));
  // Only one of the inputs can be a constant. Choose register if the first one
  // is a constant.
  locs->set_in(1, locs->in(0).IsConstant()
                      ? Location::RequiresRegister()
                      : Location::RegisterOrConstant(right()));
  locs->set_out(0, Location::RequiresRegister());
  return locs;
}


Condition StrictCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
                                                 BranchLabels labels) {
  Location left = locs()->in(0);
  Location right = locs()->in(1);
  ASSERT(!left.IsConstant() || !right.IsConstant());
  Condition true_condition;
  if (left.IsConstant()) {
    true_condition = compiler->EmitEqualityRegConstCompare(
        right.reg(), left.constant(), needs_number_check(), token_pos(),
        deopt_id_);
  } else if (right.IsConstant()) {
    true_condition = compiler->EmitEqualityRegConstCompare(
        left.reg(), right.constant(), needs_number_check(), token_pos(),
        deopt_id_);
  } else {
    true_condition = compiler->EmitEqualityRegRegCompare(
        left.reg(), right.reg(), needs_number_check(), token_pos(), deopt_id_);
  }
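  // The helpers above return the condition under which the operands are
  // equal; for '!==' that condition is negated.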
  if (kind() != Token::kEQ_STRICT) {
    ASSERT(kind() == Token::kNE_STRICT);
    true_condition = NegateCondition(true_condition);
  }
  return true_condition;
}


LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
                                                         bool opt) const {
  return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
                               LocationSummary::kNoCall);
}


void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  Register value = locs()->in(0).reg();
  Register result = locs()->out(0).reg();

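  // Branch-free negation: start with True, then conditionally overwrite the
  // result with False when the input compares equal to True.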
  __ LoadObject(result, Bool::True());
  __ LoadObject(TMP, Bool::False());
  __ subu(CMPRES1, value, result);
  __ movz(result, TMP, CMPRES1);  // If value is True, move False into result.
}


LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
                                                          bool opt) const {
  return MakeCallSummary(zone);
}


void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  __ Comment("AllocateObjectInstr");
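  // Allocation goes through a class-specific stub; any arguments pushed for
  // this instruction are dropped after the call.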
  const Code& stub = Code::ZoneHandle(
      compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
  const StubEntry stub_entry(stub);
  compiler->GenerateCall(token_pos(), stub_entry, RawPcDescriptors::kOther,
                         locs());
  compiler->AddStubCallTarget(stub);
  __ Drop(ArgumentCount());  // Discard arguments.
}


void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
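  // Emitted in unoptimized code only: calls the DebugStepCheck stub, which
  // checks for a pending debugger single-step request.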
  ASSERT(!compiler->is_optimizing());
  __ BranchLinkPatchable(*StubCode::DebugStepCheck_entry());
  compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, token_pos());
  compiler->RecordSafepoint(locs());
}


LocationSummary* GrowRegExpStackInstr::MakeLocationSummary(Zone* zone,
                                                           bool opt) const {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
  locs->set_in(0, Location::RegisterLocation(T0));
  locs->set_out(0, Location::RegisterLocation(T0));
  return locs;
}


void GrowRegExpStackInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Register typed_data = locs()->in(0).reg();
  const Register result = locs()->out(0).reg();
  __ Comment("GrowRegExpStackInstr");
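  // Runtime-call convention: reserve two stack slots, the upper one holding
  // null as the result placeholder and the lower one holding the argument.
  // The result is read back from the placeholder slot after the call.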
  __ addiu(SP, SP, Immediate(-2 * kWordSize));
  __ LoadObject(TMP, Object::null_object());
  __ sw(TMP, Address(SP, 1 * kWordSize));
  __ sw(typed_data, Address(SP, 0 * kWordSize));
  compiler->GenerateRuntimeCall(TokenPosition::kNoSource, deopt_id(),
                                kGrowRegExpStackRuntimeEntry, 1, locs());
  __ lw(result, Address(SP, 1 * kWordSize));
  __ addiu(SP, SP, Immediate(2 * kWordSize));
}


}  // namespace dart

#endif  // defined TARGET_ARCH_MIPS