Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(159)

Side by Side Diff: runtime/vm/intermediate_language_arm.cc

Issue 504143003: Support Int32 representation for selected binary operations. (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 6 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "vm/cpu.h" 10 #include "vm/cpu.h"
(...skipping 287 matching lines...) Expand 10 before | Expand all | Expand 10 after
298 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 298 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
299 locs->set_out(0, Location::RequiresFpuRegister()); 299 locs->set_out(0, Location::RequiresFpuRegister());
300 locs->set_temp(0, Location::RequiresRegister()); 300 locs->set_temp(0, Location::RequiresRegister());
301 return locs; 301 return locs;
302 } 302 }
303 303
304 304
305 void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 305 void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
306 // The register allocator drops constant definitions that have no uses. 306 // The register allocator drops constant definitions that have no uses.
307 if (!locs()->out(0).IsInvalid()) { 307 if (!locs()->out(0).IsInvalid()) {
308 if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0) && 308 switch (representation_) {
309 TargetCPUFeatures::neon_supported()) { 309 case kUnboxedDouble:
310 const QRegister dst = locs()->out(0).fpu_reg(); 310 if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0) &&
311 __ veorq(dst, dst, dst); 311 TargetCPUFeatures::neon_supported()) {
312 } else { 312 const QRegister dst = locs()->out(0).fpu_reg();
313 const DRegister dst = EvenDRegisterOf(locs()->out(0).fpu_reg()); 313 __ veorq(dst, dst, dst);
314 const Register temp = locs()->temp(0).reg(); 314 } else {
315 __ LoadDImmediate(dst, Double::Cast(value()).value(), temp); 315 const DRegister dst = EvenDRegisterOf(locs()->out(0).fpu_reg());
316 const Register temp = locs()->temp(0).reg();
317 __ LoadDImmediate(dst, Double::Cast(value()).value(), temp);
318 }
319 break;
320 case kUnboxedInt32:
321 __ LoadImmediate(locs()->out(0).reg(), Smi::Cast(value()).Value());
322 break;
323 default:
324 UNREACHABLE();
325 break;
316 } 326 }
317 } 327 }
318 } 328 }
319 329
320 330
321 LocationSummary* AssertAssignableInstr::MakeLocationSummary(Isolate* isolate, 331 LocationSummary* AssertAssignableInstr::MakeLocationSummary(Isolate* isolate,
322 bool opt) const { 332 bool opt) const {
323 const intptr_t kNumInputs = 3; 333 const intptr_t kNumInputs = 3;
324 const intptr_t kNumTemps = 0; 334 const intptr_t kNumTemps = 0;
325 LocationSummary* summary = new(isolate) LocationSummary( 335 LocationSummary* summary = new(isolate) LocationSummary(
(...skipping 853 matching lines...) Expand 10 before | Expand all | Expand 10 after
1179 const intptr_t kNumInputs = 2; 1189 const intptr_t kNumInputs = 2;
1180 const intptr_t kNumTemps = 0; 1190 const intptr_t kNumTemps = 0;
1181 LocationSummary* locs = new(isolate) LocationSummary( 1191 LocationSummary* locs = new(isolate) LocationSummary(
1182 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 1192 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1183 locs->set_in(0, Location::RequiresRegister()); 1193 locs->set_in(0, Location::RequiresRegister());
1184 bool needs_base = false; 1194 bool needs_base = false;
1185 if (CanBeImmediateIndex(index(), class_id(), IsExternal(), 1195 if (CanBeImmediateIndex(index(), class_id(), IsExternal(),
1186 true, // Load. 1196 true, // Load.
1187 &needs_base)) { 1197 &needs_base)) {
1188 // CanBeImmediateIndex must return false for unsafe smis. 1198 // CanBeImmediateIndex must return false for unsafe smis.
1189 locs->set_in(1, Location::Constant(index()->BoundConstant())); 1199 locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
1190 } else { 1200 } else {
1191 locs->set_in(1, Location::RequiresRegister()); 1201 locs->set_in(1, Location::RequiresRegister());
1192 } 1202 }
1193 if ((representation() == kUnboxedDouble) || 1203 if ((representation() == kUnboxedDouble) ||
1194 (representation() == kUnboxedFloat32x4) || 1204 (representation() == kUnboxedFloat32x4) ||
1195 (representation() == kUnboxedInt32x4) || 1205 (representation() == kUnboxedInt32x4) ||
1196 (representation() == kUnboxedFloat64x2)) { 1206 (representation() == kUnboxedFloat64x2)) {
1197 if (class_id() == kTypedDataFloat32ArrayCid) { 1207 if (class_id() == kTypedDataFloat32ArrayCid) {
1198 // Need register <= Q7 for float operations. 1208 // Need register <= Q7 for float operations.
1199 // TODO(fschneider): Add a register policy to specify a subset of 1209 // TODO(fschneider): Add a register policy to specify a subset of
(...skipping 180 matching lines...) Expand 10 before | Expand all | Expand 10 after
1380 1390
1381 bool needs_base = false; 1391 bool needs_base = false;
1382 if (CanBeImmediateIndex(index(), class_id(), IsExternal(), 1392 if (CanBeImmediateIndex(index(), class_id(), IsExternal(),
1383 false, // Store. 1393 false, // Store.
1384 &needs_base)) { 1394 &needs_base)) {
1385 const intptr_t kNumTemps = needs_base ? 1 : 0; 1395 const intptr_t kNumTemps = needs_base ? 1 : 0;
1386 locs = new(isolate) LocationSummary( 1396 locs = new(isolate) LocationSummary(
1387 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 1397 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1388 1398
1389 // CanBeImmediateIndex must return false for unsafe smis. 1399 // CanBeImmediateIndex must return false for unsafe smis.
1390 locs->set_in(1, Location::Constant(index()->BoundConstant())); 1400 locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
1391 if (needs_base) { 1401 if (needs_base) {
1392 locs->set_temp(0, Location::RequiresRegister()); 1402 locs->set_temp(0, Location::RequiresRegister());
1393 } 1403 }
1394 } else { 1404 } else {
1395 const intptr_t kNumTemps = 0; 1405 const intptr_t kNumTemps = 0;
1396 locs = new(isolate) LocationSummary( 1406 locs = new(isolate) LocationSummary(
1397 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 1407 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1398 1408
1399 locs->set_in(1, Location::WritableRegister()); 1409 locs->set_in(1, Location::WritableRegister());
1400 } 1410 }
(...skipping 1552 matching lines...) Expand 10 before | Expand all | Expand 10 after
2953 } else if ((op_kind() == Token::kMUL) && 2963 } else if ((op_kind() == Token::kMUL) &&
2954 (TargetCPUFeatures::arm_version() != ARMv7)) { 2964 (TargetCPUFeatures::arm_version() != ARMv7)) {
2955 num_temps = 1; 2965 num_temps = 1;
2956 } 2966 }
2957 LocationSummary* summary = new(isolate) LocationSummary( 2967 LocationSummary* summary = new(isolate) LocationSummary(
2958 isolate, kNumInputs, num_temps, LocationSummary::kNoCall); 2968 isolate, kNumInputs, num_temps, LocationSummary::kNoCall);
2959 if (op_kind() == Token::kTRUNCDIV) { 2969 if (op_kind() == Token::kTRUNCDIV) {
2960 summary->set_in(0, Location::RequiresRegister()); 2970 summary->set_in(0, Location::RequiresRegister());
2961 if (RightIsPowerOfTwoConstant()) { 2971 if (RightIsPowerOfTwoConstant()) {
2962 ConstantInstr* right_constant = right()->definition()->AsConstant(); 2972 ConstantInstr* right_constant = right()->definition()->AsConstant();
2963 summary->set_in(1, Location::Constant(right_constant->value())); 2973 summary->set_in(1, Location::Constant(right_constant));
2964 summary->set_temp(0, Location::RequiresRegister()); 2974 summary->set_temp(0, Location::RequiresRegister());
2965 } else { 2975 } else {
2966 summary->set_in(1, Location::RequiresRegister()); 2976 summary->set_in(1, Location::RequiresRegister());
2967 summary->set_temp(0, Location::RequiresRegister()); 2977 summary->set_temp(0, Location::RequiresRegister());
2968 summary->set_temp(1, Location::RequiresFpuRegister()); 2978 summary->set_temp(1, Location::RequiresFpuRegister());
2969 } 2979 }
2970 summary->set_out(0, Location::RequiresRegister()); 2980 summary->set_out(0, Location::RequiresRegister());
2971 return summary; 2981 return summary;
2972 } 2982 }
2973 if (op_kind() == Token::kMOD) { 2983 if (op_kind() == Token::kMOD) {
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
3059 __ cmp(IP, Operand(result, ASR, 31)); 3069 __ cmp(IP, Operand(result, ASR, 31));
3060 __ b(deopt, NE); 3070 __ b(deopt, NE);
3061 } else if (TargetCPUFeatures::can_divide()) { 3071 } else if (TargetCPUFeatures::can_divide()) {
3062 const QRegister qtmp = locs()->temp(0).fpu_reg(); 3072 const QRegister qtmp = locs()->temp(0).fpu_reg();
3063 const DRegister dtmp0 = EvenDRegisterOf(qtmp); 3073 const DRegister dtmp0 = EvenDRegisterOf(qtmp);
3064 const DRegister dtmp1 = OddDRegisterOf(qtmp); 3074 const DRegister dtmp1 = OddDRegisterOf(qtmp);
3065 __ LoadImmediate(IP, value); 3075 __ LoadImmediate(IP, value);
3066 __ CheckMultSignedOverflow(left, IP, result, dtmp0, dtmp1, deopt); 3076 __ CheckMultSignedOverflow(left, IP, result, dtmp0, dtmp1, deopt);
3067 __ mul(result, left, IP); 3077 __ mul(result, left, IP);
3068 } else { 3078 } else {
3079 // TODO(vegorov): never emit this instruction if hardware does not
3080 // support it! This will lead to deopt cycle penalizing the code.
3069 __ b(deopt); 3081 __ b(deopt);
3070 } 3082 }
3071 } 3083 }
3072 } 3084 }
3073 break; 3085 break;
3074 } 3086 }
3075 case Token::kTRUNCDIV: { 3087 case Token::kTRUNCDIV: {
3076 const intptr_t value = Smi::Cast(constant).Value(); 3088 const intptr_t value = Smi::Cast(constant).Value();
3077 if (value == 1) { 3089 if (value == 1) {
3078 __ MoveRegister(result, left); 3090 __ MoveRegister(result, left);
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
3199 // IP: result bits 32..63. 3211 // IP: result bits 32..63.
3200 __ cmp(IP, Operand(result, ASR, 31)); 3212 __ cmp(IP, Operand(result, ASR, 31));
3201 __ b(deopt, NE); 3213 __ b(deopt, NE);
3202 } else if (TargetCPUFeatures::can_divide()) { 3214 } else if (TargetCPUFeatures::can_divide()) {
3203 const QRegister qtmp = locs()->temp(0).fpu_reg(); 3215 const QRegister qtmp = locs()->temp(0).fpu_reg();
3204 const DRegister dtmp0 = EvenDRegisterOf(qtmp); 3216 const DRegister dtmp0 = EvenDRegisterOf(qtmp);
3205 const DRegister dtmp1 = OddDRegisterOf(qtmp); 3217 const DRegister dtmp1 = OddDRegisterOf(qtmp);
3206 __ CheckMultSignedOverflow(IP, right, result, dtmp0, dtmp1, deopt); 3218 __ CheckMultSignedOverflow(IP, right, result, dtmp0, dtmp1, deopt);
3207 __ mul(result, IP, right); 3219 __ mul(result, IP, right);
3208 } else { 3220 } else {
3221 // TODO(vegorov): never emit this instruction if hardware does not
3222 // support it! This will lead to deopt cycle penalizing the code.
3209 __ b(deopt); 3223 __ b(deopt);
3210 } 3224 }
3211 } 3225 }
3212 break; 3226 break;
3213 } 3227 }
3214 case Token::kBIT_AND: { 3228 case Token::kBIT_AND: {
3215 // No overflow check. 3229 // No overflow check.
3216 __ and_(result, left, Operand(right)); 3230 __ and_(result, left, Operand(right));
3217 break; 3231 break;
3218 } 3232 }
(...skipping 13 matching lines...) Expand all
3232 __ cmp(right, Operand(0)); 3246 __ cmp(right, Operand(0));
3233 __ b(deopt, EQ); 3247 __ b(deopt, EQ);
3234 } 3248 }
3235 const Register temp = locs()->temp(0).reg(); 3249 const Register temp = locs()->temp(0).reg();
3236 if (TargetCPUFeatures::can_divide()) { 3250 if (TargetCPUFeatures::can_divide()) {
3237 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 3251 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3238 __ SmiUntag(temp, left); 3252 __ SmiUntag(temp, left);
3239 __ SmiUntag(IP, right); 3253 __ SmiUntag(IP, right);
3240 __ IntegerDivide(result, temp, IP, dtemp, DTMP); 3254 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3241 } else { 3255 } else {
3256 // TODO(vegorov): never emit this instruction if hardware does not
3257 // support it! This will lead to deopt cycle penalizing the code.
3242 __ b(deopt); 3258 __ b(deopt);
3243 } 3259 }
3244 3260
3245 // Check the corner case of dividing the 'MIN_SMI' with -1, in which 3261 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
3246 // case we cannot tag the result. 3262 // case we cannot tag the result.
3247 __ CompareImmediate(result, 0x40000000); 3263 __ CompareImmediate(result, 0x40000000);
3248 __ b(deopt, EQ); 3264 __ b(deopt, EQ);
3249 __ SmiTag(result); 3265 __ SmiTag(result);
3250 break; 3266 break;
3251 } 3267 }
3252 case Token::kMOD: { 3268 case Token::kMOD: {
3253 if ((right_range == NULL) || right_range->Overlaps(0, 0)) { 3269 if ((right_range == NULL) || right_range->Overlaps(0, 0)) {
3254 // Handle divide by zero in runtime. 3270 // Handle divide by zero in runtime.
3255 __ cmp(right, Operand(0)); 3271 __ cmp(right, Operand(0));
3256 __ b(deopt, EQ); 3272 __ b(deopt, EQ);
3257 } 3273 }
3258 const Register temp = locs()->temp(0).reg(); 3274 const Register temp = locs()->temp(0).reg();
3259 if (TargetCPUFeatures::can_divide()) { 3275 if (TargetCPUFeatures::can_divide()) {
3260 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 3276 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3261 __ SmiUntag(temp, left); 3277 __ SmiUntag(temp, left);
3262 __ SmiUntag(IP, right); 3278 __ SmiUntag(IP, right);
3263 __ IntegerDivide(result, temp, IP, dtemp, DTMP); 3279 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3264 } else { 3280 } else {
3281 // TODO(vegorov): never emit this instruction if hardware does not
3282 // support it! This will lead to deopt cycle penalizing the code.
3265 __ b(deopt); 3283 __ b(deopt);
3266 } 3284 }
3267 __ SmiUntag(IP, right); 3285 __ SmiUntag(IP, right);
3268 __ mls(result, IP, result, temp); // result <- left - right * result 3286 __ mls(result, IP, result, temp); // result <- left - right * result
3269 __ SmiTag(result); 3287 __ SmiTag(result);
3270 // res = left % right; 3288 // res = left % right;
3271 // if (res < 0) { 3289 // if (res < 0) {
3272 // if (right < 0) { 3290 // if (right < 0) {
3273 // res = res - right; 3291 // res = res - right;
3274 // } else { 3292 // } else {
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
3317 UNREACHABLE(); 3335 UNREACHABLE();
3318 break; 3336 break;
3319 } 3337 }
3320 default: 3338 default:
3321 UNREACHABLE(); 3339 UNREACHABLE();
3322 break; 3340 break;
3323 } 3341 }
3324 } 3342 }
3325 3343
3326 3344
3345 static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
3346 BinaryInt32OpInstr* shift_left) {
3347 const bool is_truncating = shift_left->IsTruncating();
3348 const LocationSummary& locs = *shift_left->locs();
3349 const Register left = locs.in(0).reg();
3350 const Register result = locs.out(0).reg();
3351 Label* deopt = shift_left->CanDeoptimize() ?
3352 compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp)
3353 : NULL;
3354 ASSERT(locs.in(1).IsConstant());
3355 const Object& constant = locs.in(1).constant();
3356 ASSERT(constant.IsSmi());
3357 // Immediate shift operation takes 5 bits for the count.
3358 const intptr_t kCountLimit = 0x1F;
3359 const intptr_t value = Smi::Cast(constant).Value();
3360 if (value == 0) {
3361 __ MoveRegister(result, left);
3362 } else if ((value < 0) || (value >= kCountLimit)) {
3363 // This condition may not be known earlier in some cases because
3364 // of constant propagation, inlining, etc.
3365 if ((value >= kCountLimit) && is_truncating) {
3366 __ mov(result, Operand(0));
3367 } else {
3368 // Result is Mint or exception.
3369 __ b(deopt);
3370 }
3371 } else {
3372 if (!is_truncating) {
3373 // Check for overflow (preserve left).
3374 __ Lsl(IP, left, value);
3375 __ cmp(left, Operand(IP, ASR, value));
3376 __ b(deopt, NE); // Overflow.
3377 }
3378 // Shift for result now we know there is no overflow.
3379 __ Lsl(result, left, value);
3380 }
3381 }
3382
3383
3384 LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Isolate* isolate,
3385 bool opt) const {
3386 const intptr_t kNumInputs = 2;
3387 // Calculate number of temporaries.
3388 intptr_t num_temps = 0;
3389 if (((op_kind() == Token::kSHL) && !IsTruncating()) ||
3390 (op_kind() == Token::kSHR)) {
3391 num_temps = 1;
3392 } else if ((op_kind() == Token::kMUL) &&
3393 (TargetCPUFeatures::arm_version() != ARMv7)) {
3394 num_temps = 1;
3395 }
3396 LocationSummary* summary = new(isolate) LocationSummary(
3397 isolate, kNumInputs, num_temps, LocationSummary::kNoCall);
3398 summary->set_in(0, Location::RequiresRegister());
3399 summary->set_in(1, Location::RegisterOrSmiConstant(right()));
3400 if (((op_kind() == Token::kSHL) && !IsTruncating()) ||
3401 (op_kind() == Token::kSHR)) {
3402 summary->set_temp(0, Location::RequiresRegister());
3403 }
3404 if (op_kind() == Token::kMUL) {
3405 if (TargetCPUFeatures::arm_version() != ARMv7) {
3406 summary->set_temp(0, Location::RequiresFpuRegister());
3407 }
3408 }
3409 // We make use of 3-operand instructions by not requiring result register
3410 // to be identical to first input register as on Intel.
3411 summary->set_out(0, Location::RequiresRegister());
3412 return summary;
3413 }
3414
3415
3416 void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3417 if (op_kind() == Token::kSHL) {
3418 EmitInt32ShiftLeft(compiler, this);
3419 return;
3420 }
3421
3422 const Register left = locs()->in(0).reg();
3423 const Register result = locs()->out(0).reg();
3424 Label* deopt = NULL;
3425 if (CanDeoptimize()) {
3426 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3427 }
3428
3429 if (locs()->in(1).IsConstant()) {
3430 const Object& constant = locs()->in(1).constant();
3431 ASSERT(constant.IsSmi());
3432 const int32_t value = Smi::Cast(constant).Value();
3433 switch (op_kind()) {
3434 case Token::kADD: {
3435 if (deopt == NULL) {
3436 __ AddImmediate(result, left, value);
3437 } else {
3438 __ AddImmediateSetFlags(result, left, value);
3439 __ b(deopt, VS);
3440 }
3441 break;
3442 }
3443 case Token::kSUB: {
3444 if (deopt == NULL) {
3445 __ AddImmediate(result, left, -value);
3446 } else {
3447 // Negating value and using AddImmediateSetFlags would not detect the
3448 // overflow when value == kMinInt32.
3449 __ SubImmediateSetFlags(result, left, value);
3450 __ b(deopt, VS);
3451 }
3452 break;
3453 }
3454 case Token::kMUL: {
3455 if (deopt == NULL) {
3456 if (value == 2) {
3457 __ mov(result, Operand(left, LSL, 1));
3458 } else {
3459 __ LoadImmediate(IP, value);
3460 __ mul(result, left, IP);
3461 }
3462 } else {
3463 if (value == 2) {
3464 __ CompareImmediate(left, 0xC0000000);
3465 __ b(deopt, MI);
3466 __ mov(result, Operand(left, LSL, 1));
3467 } else {
3468 if (TargetCPUFeatures::arm_version() == ARMv7) {
3469 __ LoadImmediate(IP, value);
3470 __ smull(result, IP, left, IP);
3471 // IP: result bits 32..63.
3472 __ cmp(IP, Operand(result, ASR, 31));
3473 __ b(deopt, NE);
3474 } else if (TargetCPUFeatures::can_divide()) {
3475 const QRegister qtmp = locs()->temp(0).fpu_reg();
3476 const DRegister dtmp0 = EvenDRegisterOf(qtmp);
3477 const DRegister dtmp1 = OddDRegisterOf(qtmp);
3478 __ LoadImmediate(IP, value);
3479 __ CheckMultSignedOverflow(left, IP, result, dtmp0, dtmp1, deopt);
3480 __ mul(result, left, IP);
3481 } else {
3482 // TODO(vegorov): never emit this instruction if hardware does not
3483 // support it! This will lead to deopt cycle penalizing the code.
3484 __ b(deopt);
3485 }
3486 }
3487 }
3488 break;
3489 }
3490 case Token::kBIT_AND: {
3491 // No overflow check.
3492 Operand o;
3493 if (Operand::CanHold(value, &o)) {
3494 __ and_(result, left, o);
3495 } else if (Operand::CanHold(~value, &o)) {
3496 __ bic(result, left, o);
3497 } else {
3498 __ LoadImmediate(IP, value);
3499 __ and_(result, left, Operand(IP));
3500 }
3501 break;
3502 }
3503 case Token::kBIT_OR: {
3504 // No overflow check.
3505 Operand o;
3506 if (Operand::CanHold(value, &o)) {
3507 __ orr(result, left, o);
3508 } else {
3509 __ LoadImmediate(IP, value);
3510 __ orr(result, left, Operand(IP));
3511 }
3512 break;
3513 }
3514 case Token::kBIT_XOR: {
3515 // No overflow check.
3516 Operand o;
3517 if (Operand::CanHold(value, &o)) {
3518 __ eor(result, left, o);
3519 } else {
3520 __ LoadImmediate(IP, value);
3521 __ eor(result, left, Operand(IP));
3522 }
3523 break;
3524 }
3525 case Token::kSHR: {
3526 // sarl operation masks the count to 5 bits.
3527 const intptr_t kCountLimit = 0x1F;
3528
3529 if (value == 0) {
3530 // TODO(vegorov): should be handled outside.
3531 __ MoveRegister(result, left);
3532 break;
3533 } else if (value < 0) {
3534 // TODO(vegorov): should be handled outside.
3535 __ b(deopt);
3536 break;
3537 }
3538
3539 if (value >= kCountLimit) {
3540 __ Asr(result, left, kCountLimit);
3541 } else {
3542 __ Asr(result, left, value);
3543 }
3544 break;
3545 }
3546
3547 default:
3548 UNREACHABLE();
3549 break;
3550 }
3551 return;
3552 }
3553
3554 const Register right = locs()->in(1).reg();
3555 switch (op_kind()) {
3556 case Token::kADD: {
3557 if (deopt == NULL) {
3558 __ add(result, left, Operand(right));
3559 } else {
3560 __ adds(result, left, Operand(right));
3561 __ b(deopt, VS);
3562 }
3563 break;
3564 }
3565 case Token::kSUB: {
3566 if (deopt == NULL) {
3567 __ sub(result, left, Operand(right));
3568 } else {
3569 __ subs(result, left, Operand(right));
3570 __ b(deopt, VS);
3571 }
3572 break;
3573 }
3574 case Token::kMUL: {
3575 if (deopt == NULL) {
3576 __ mul(result, left, right);
3577 } else {
3578 if (TargetCPUFeatures::arm_version() == ARMv7) {
3579 __ smull(result, IP, left, right);
3580 // IP: result bits 32..63.
3581 __ cmp(IP, Operand(result, ASR, 31));
3582 __ b(deopt, NE);
3583 } else if (TargetCPUFeatures::can_divide()) {
3584 const QRegister qtmp = locs()->temp(0).fpu_reg();
3585 const DRegister dtmp0 = EvenDRegisterOf(qtmp);
3586 const DRegister dtmp1 = OddDRegisterOf(qtmp);
3587 __ CheckMultSignedOverflow(left, right, result, dtmp0, dtmp1, deopt);
3588 __ mul(result, left, right);
3589 } else {
3590 // TODO(vegorov): never emit this instruction if hardware does not
3591 // support it! This will lead to deopt cycle penalizing the code.
3592 __ b(deopt);
3593 }
3594 }
3595 break;
3596 }
3597 case Token::kBIT_AND: {
3598 // No overflow check.
3599 __ and_(result, left, Operand(right));
3600 break;
3601 }
3602 case Token::kBIT_OR: {
3603 // No overflow check.
3604 __ orr(result, left, Operand(right));
3605 break;
3606 }
3607 case Token::kBIT_XOR: {
3608 // No overflow check.
3609 __ eor(result, left, Operand(right));
3610 break;
3611 }
3612 default:
3613 UNREACHABLE();
3614 break;
3615 }
3616 }
3617
3618
3327 LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Isolate* isolate, 3619 LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Isolate* isolate,
3328 bool opt) const { 3620 bool opt) const {
3329 intptr_t left_cid = left()->Type()->ToCid(); 3621 intptr_t left_cid = left()->Type()->ToCid();
3330 intptr_t right_cid = right()->Type()->ToCid(); 3622 intptr_t right_cid = right()->Type()->ToCid();
3331 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid)); 3623 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
3332 const intptr_t kNumInputs = 2; 3624 const intptr_t kNumInputs = 2;
3333 const intptr_t kNumTemps = 0; 3625 const intptr_t kNumTemps = 0;
3334 LocationSummary* summary = new(isolate) LocationSummary( 3626 LocationSummary* summary = new(isolate) LocationSummary(
3335 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 3627 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3336 summary->set_in(0, Location::RequiresRegister()); 3628 summary->set_in(0, Location::RequiresRegister());
(...skipping 1695 matching lines...) Expand 10 before | Expand all | Expand 10 after
5032 } 5324 }
5033 5325
5034 5326
5035 void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5327 void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5036 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg()); 5328 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5037 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg()); 5329 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5038 __ vnegd(result, value); 5330 __ vnegd(result, value);
5039 } 5331 }
5040 5332
5041 5333
5334 LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Isolate* isolate,
5335 bool opt) const {
5336 const intptr_t kNumInputs = 1;
5337 const intptr_t kNumTemps = 0;
5338 LocationSummary* result = new(isolate) LocationSummary(
5339 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5340 result->set_in(0, Location::RequiresRegister());
5341 result->set_out(0, Location::RequiresFpuRegister());
5342 return result;
5343 }
5344
5345
5346 void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5347 const Register value = locs()->in(0).reg();
5348 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5349 __ vmovsr(STMP, value);
5350 __ vcvtdi(result, STMP);
5351 }
5352
5353
5042 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Isolate* isolate, 5354 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Isolate* isolate,
5043 bool opt) const { 5355 bool opt) const {
5044 const intptr_t kNumInputs = 1; 5356 const intptr_t kNumInputs = 1;
5045 const intptr_t kNumTemps = 0; 5357 const intptr_t kNumTemps = 0;
5046 LocationSummary* result = new(isolate) LocationSummary( 5358 LocationSummary* result = new(isolate) LocationSummary(
5047 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 5359 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5048 result->set_in(0, Location::RequiresRegister()); 5360 result->set_in(0, Location::RequiresRegister());
5049 result->set_out(0, Location::RequiresFpuRegister()); 5361 result->set_out(0, Location::RequiresFpuRegister());
5050 return result; 5362 return result;
5051 } 5363 }
(...skipping 423 matching lines...) Expand 10 before | Expand all | Expand 10 after
5475 __ cmp(right, Operand(0)); 5787 __ cmp(right, Operand(0));
5476 __ b(deopt, EQ); 5788 __ b(deopt, EQ);
5477 } 5789 }
5478 const Register temp = locs()->temp(0).reg(); 5790 const Register temp = locs()->temp(0).reg();
5479 if (TargetCPUFeatures::can_divide()) { 5791 if (TargetCPUFeatures::can_divide()) {
5480 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 5792 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
5481 __ SmiUntag(temp, left); 5793 __ SmiUntag(temp, left);
5482 __ SmiUntag(IP, right); 5794 __ SmiUntag(IP, right);
5483 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP); 5795 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP);
5484 } else { 5796 } else {
5797 // TODO(vegorov): never emit this instruction if hardware does not
5798 // support it! This will lead to deopt cycle penalizing the code.
5485 __ b(deopt); 5799 __ b(deopt);
5486 } 5800 }
5487 5801
5488 // Check the corner case of dividing the 'MIN_SMI' with -1, in which 5802 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
5489 // case we cannot tag the result. 5803 // case we cannot tag the result.
5490 __ CompareImmediate(result_div, 0x40000000); 5804 __ CompareImmediate(result_div, 0x40000000);
5491 __ b(deopt, EQ); 5805 __ b(deopt, EQ);
5492 __ SmiUntag(IP, right); 5806 __ SmiUntag(IP, right);
5493 // result_mod <- left - right * result_div. 5807 // result_mod <- left - right * result_div.
5494 __ mls(result_mod, IP, result_div, temp); 5808 __ mls(result_mod, IP, result_div, temp);
(...skipping 499 matching lines...) Expand 10 before | Expand all | Expand 10 after
5994 // The product of two signed 32-bit integers fits in a signed 64-bit 6308 // The product of two signed 32-bit integers fits in a signed 64-bit
5995 // result without causing overflow. 6309 // result without causing overflow.
5996 // We deopt on larger inputs. 6310 // We deopt on larger inputs.
5997 // TODO(regis): Range analysis may eliminate the deopt check. 6311 // TODO(regis): Range analysis may eliminate the deopt check.
5998 if (TargetCPUFeatures::arm_version() == ARMv7) { 6312 if (TargetCPUFeatures::arm_version() == ARMv7) {
5999 __ cmp(left_hi, Operand(left_lo, ASR, 31)); 6313 __ cmp(left_hi, Operand(left_lo, ASR, 31));
6000 __ cmp(right_hi, Operand(right_lo, ASR, 31), EQ); 6314 __ cmp(right_hi, Operand(right_lo, ASR, 31), EQ);
6001 __ b(deopt, NE); 6315 __ b(deopt, NE);
6002 __ smull(out_lo, out_hi, left_lo, right_lo); 6316 __ smull(out_lo, out_hi, left_lo, right_lo);
6003 } else { 6317 } else {
6318 // TODO(vegorov): never emit this instruction if hardware does not
6319 // support it! This will lead to deopt cycle penalizing the code.
6004 __ b(deopt); 6320 __ b(deopt);
6005 } 6321 }
6006 break; 6322 break;
6007 } 6323 }
6008 default: 6324 default:
6009 UNREACHABLE(); 6325 UNREACHABLE();
6010 } 6326 }
6011 if (FLAG_throw_on_javascript_int_overflow) { 6327 if (FLAG_throw_on_javascript_int_overflow) {
6012 EmitJavascriptIntOverflowCheck(compiler, deopt, out_lo, out_hi); 6328 EmitJavascriptIntOverflowCheck(compiler, deopt, out_lo, out_hi);
6013 } 6329 }
(...skipping 197 matching lines...) Expand 10 before | Expand all | Expand 10 after
6211 CompileType ShiftUint32OpInstr::ComputeType() const { 6527 CompileType ShiftUint32OpInstr::ComputeType() const {
6212 return CompileType::Int(); 6528 return CompileType::Int();
6213 } 6529 }
6214 6530
6215 6531
6216 CompileType UnaryUint32OpInstr::ComputeType() const { 6532 CompileType UnaryUint32OpInstr::ComputeType() const {
6217 return CompileType::Int(); 6533 return CompileType::Int();
6218 } 6534 }
6219 6535
6220 6536
6221 CompileType BoxUint32Instr::ComputeType() const {
6222 return CompileType::Int();
6223 }
6224
6225
6226 CompileType UnboxUint32Instr::ComputeType() const {
6227 return CompileType::Int();
6228 }
6229
6230
6231 LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Isolate* isolate, 6537 LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Isolate* isolate,
6232 bool opt) const { 6538 bool opt) const {
6233 const intptr_t kNumInputs = 2; 6539 const intptr_t kNumInputs = 2;
6234 const intptr_t kNumTemps = 0; 6540 const intptr_t kNumTemps = 0;
6235 LocationSummary* summary = new(isolate) LocationSummary( 6541 LocationSummary* summary = new(isolate) LocationSummary(
6236 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 6542 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6237 summary->set_in(0, Location::RequiresRegister()); 6543 summary->set_in(0, Location::RequiresRegister());
6238 summary->set_in(1, Location::RequiresRegister()); 6544 summary->set_in(1, Location::RequiresRegister());
6239 summary->set_out(0, Location::RequiresRegister()); 6545 summary->set_out(0, Location::RequiresRegister());
6240 return summary; 6546 return summary;
(...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after
6473 __ b(&done, EQ); 6779 __ b(&done, EQ);
6474 // Mint case. 6780 // Mint case.
6475 __ CompareClassId(value, kMintCid, temp); 6781 __ CompareClassId(value, kMintCid, temp);
6476 __ b(deopt, NE); 6782 __ b(deopt, NE);
6477 __ LoadFromOffset(kWord, out, value, Mint::value_offset() - kHeapObjectTag); 6783 __ LoadFromOffset(kWord, out, value, Mint::value_offset() - kHeapObjectTag);
6478 __ Bind(&done); 6784 __ Bind(&done);
6479 } 6785 }
6480 } 6786 }
6481 6787
6482 6788
// Register allocation for boxing an unboxed int32 into a tagged integer.
// One input register, one distinct output register. When the value is known
// to always fit in a Smi no allocation can happen (kNoCall, no temps);
// otherwise a Mint may have to be allocated, so one temp register is
// reserved and the summary is kCallOnSlowPath.
6789 LocationSummary* BoxInt32Instr::MakeLocationSummary(Isolate* isolate,
6790 bool opt) const {
6791 const intptr_t kNumInputs = 1;
6792 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
6793 LocationSummary* summary = new(isolate) LocationSummary(
6794 isolate,
6795 kNumInputs,
6796 kNumTemps,
6797 ValueFitsSmi() ? LocationSummary::kNoCall
6798 : LocationSummary::kCallOnSlowPath);
6799 summary->set_in(0, Location::RequiresRegister());
// Temp is only needed by the slow path that allocates a Mint box.
6800 if (!ValueFitsSmi()) {
6801 summary->set_temp(0, Location::RequiresRegister());
6802 }
6803 summary->set_out(0, Location::RequiresRegister());
6804 return summary;
6805 }
6806
6807
// Boxes an unboxed int32: Smi-tags it in the fast case, and allocates a
// Mint (via the BoxAllocationSlowPath) when the value does not fit in a Smi.
6808 void BoxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
6809 Register value = locs()->in(0).reg();
6810 Register out = locs()->out(0).reg();
// value must stay live across the tagging attempt, so out is distinct.
6811 ASSERT(value != out);
6812
// Smi-tag: out = value << 1 (kSmiTagSize presumably 1 on ARM — matches
// the ASR #1 check below).
6813 __ Lsl(out, value, 1);
6814 if (!ValueFitsSmi()) {
6815 Register temp = locs()->temp(0).reg();
6816 Label done;
// Untag and compare with the original: EQ means no significant bit was
// shifted out, i.e. the value fits in a Smi and 'out' is already correct.
6817 __ cmp(value, Operand(out, ASR, 1));
6818 __ b(&done, EQ);
// Slow path: allocate a Mint box into 'out' (may call the runtime).
6819 BoxAllocationSlowPath::Allocate(
6820 compiler,
6821 this,
6822 compiler->mint_class(),
6823 out,
6824 temp);
// Sign-extend the 32-bit value into temp for the Mint's high word.
6825 __ Asr(temp, value, kBitsPerWord - 1);
// Store low word of the 64-bit Mint value...
6826 __ StoreToOffset(kWord,
6827 value,
6828 out,
6829 Mint::value_offset() - kHeapObjectTag);
// ...and the sign-extension as the high word.
6830 __ StoreToOffset(kWord,
6831 temp,
6832 out,
6833 Mint::value_offset() - kHeapObjectTag + kWordSize);
6834 __ Bind(&done);
6835 }
6836 }
6837
6838
6839
// Register allocation for unboxing a tagged integer into an int32.
// A temp register is only needed when the input's class id is not statically
// known to be Smi or Mint (the temp is used for the class-id check and for
// the Mint high-word range check in EmitNativeCode).
6840 LocationSummary* UnboxInt32Instr::MakeLocationSummary(Isolate* isolate,
6841 bool opt) const {
6842 const intptr_t value_cid = value()->Type()->ToCid();
6843 const intptr_t kNumInputs = 1;
6844 const intptr_t kNumTemps =
6845 ((value_cid == kMintCid) || (value_cid == kSmiCid)) ? 0 : 1;
6846 LocationSummary* summary = new(isolate) LocationSummary(
6847 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6848 summary->set_in(0, Location::RequiresRegister());
6849 if (kNumTemps > 0) {
6850 summary->set_temp(0, Location::RequiresRegister());
6851 }
6852 summary->set_out(0, Location::RequiresRegister());
6853 return summary;
6854 }
6855
6856
// Loads the low 32 bits of a Mint's 64-bit value into 'result'. If 'deopt'
// is non-NULL, also loads the high word into 'temp' and deoptimizes unless
// it equals the sign-extension of the low word — i.e. unless the Mint value
// actually fits in a signed 32-bit integer.
6857 static void LoadInt32FromMint(FlowGraphCompiler* compiler,
6858 Register mint,
6859 Register result,
6860 Register temp,
6861 Label* deopt) {
// Low word of the untagged Mint payload.
6862 __ LoadFromOffset(kWord,
6863 result,
6864 mint,
6865 Mint::value_offset() - kHeapObjectTag);
6866 if (deopt != NULL) {
// High word, one machine word past the low word.
6867 __ LoadFromOffset(kWord,
6868 temp,
6869 mint,
6870 Mint::value_offset() - kHeapObjectTag + kWordSize);
// Range check: high word must be the sign bits of the low word.
6871 __ cmp(temp, Operand(result, ASR, kBitsPerWord - 1));
6872 __ b(deopt, NE);
6873 }
6874 }
6875
6876
// Unboxes a tagged integer to an int32 register. Three cases, chosen by the
// statically known class id of the input:
//   - known Mint: load (with range-check deopt if this instr can deopt);
//   - known Smi: just untag;
//   - unknown: dynamic dispatch on the tag bit / class id, deopting for any
//     class other than Smi or Mint, or for Mints outside int32 range.
6877 void UnboxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
6878 const intptr_t value_cid = value()->Type()->ToCid();
6879 const Register value = locs()->in(0).reg();
6880 const Register out = locs()->out(0).reg();
6881 ASSERT(value != out);
6882
6883 if (value_cid == kMintCid) {
// No temp was allocated when the instruction cannot deoptimize.
6884 Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
6885 Label* deopt = CanDeoptimize() ?
6886 compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
6887 LoadInt32FromMint(compiler,
6888 value,
6889 out,
6890 temp,
6891 deopt);
6892 } else if (value_cid == kSmiCid) {
6893 __ SmiUntag(out, value);
6894 } else {
6895 Register temp = locs()->temp(0).reg();
6896 Label* deopt = compiler->AddDeoptStub(deopt_id_,
6897 ICData::kDeoptUnboxInteger);
6898 Label done;
// Sets EQ iff the tag bit is clear, i.e. the value is a Smi.
6899 __ tst(value, Operand(kSmiTagMask));
6900 // Smi case.
// Conditional (EQ) move + untag, then skip the Mint path.
6901 __ mov(out, Operand(value), EQ);
6902 __ SmiUntag(out, EQ);
6903 __ b(&done, EQ);
6904 // Mint case.
// Anything that is neither Smi nor Mint deoptimizes.
6905 __ CompareClassId(value, kMintCid, temp);
6906 __ b(deopt, NE);
6907 LoadInt32FromMint(compiler,
6908 value,
6909 out,
6910 temp,
6911 deopt);
6912 __ Bind(&done);
6913 }
6914 }
6915
6916
// Register allocation for converting between unboxed integer
// representations. A 64-bit mint occupies a register pair; 32-bit
// int32/uint32 values occupy a single register. The 32<->32 case is a
// bitwise no-op, so the output is pinned to the input register.
6483 LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Isolate* isolate, 6917 LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Isolate* isolate,
6484 bool opt) const { 6918 bool opt) const {
6485 const intptr_t kNumInputs = 1; 6919 const intptr_t kNumInputs = 1;
6486 const intptr_t kNumTemps = 0; 6920 const intptr_t kNumTemps = 0;
6487 LocationSummary* summary = new(isolate) LocationSummary( 6921 LocationSummary* summary = new(isolate) LocationSummary(
6488 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 6922 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
// Mint -> 32-bit: pair in, single register out.
6489 if (from() == kUnboxedMint) { 6923 if (from() == kUnboxedMint) {
6924 ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
6490 summary->set_in(0, Location::Pair(Location::RequiresRegister(), 6925 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6491 Location::RequiresRegister())); 6926 Location::RequiresRegister()));
6492 summary->set_out(0, Location::RequiresRegister()); 6927 summary->set_out(0, Location::RequiresRegister());
// 32-bit -> mint: single register in, pair out.
6493 } else { 6928 } else if (to() == kUnboxedMint) {
6494 ASSERT(from() == kUnboxedUint32); 6929 ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
6495 summary->set_in(0, Location::RequiresRegister()); 6930 summary->set_in(0, Location::RequiresRegister());
6496 summary->set_out(0, Location::Pair(Location::RequiresRegister(), 6931 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6497 Location::RequiresRegister())); 6932 Location::RequiresRegister()));
// int32 <-> uint32: same bits, so reuse the input register as output.
6933 } else {
6934 ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
6935 ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
6936 summary->set_in(0, Location::RequiresRegister());
6937 summary->set_out(0, Location::SameAsFirstInput());
6498 } 6938 }
6499 return summary; 6939 return summary;
6500 } 6940 }
6501 6941
6502 6942
// Emits the representation conversion. int32<->uint32 is a bitwise no-op
// (with an optional deopt when a uint32 does not fit in int32); mint->32
// takes the low word (with an optional range-check deopt); 32->mint widens
// by zero- or sign-extending into the high word.
6503 void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 6943 void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6504 if (from() == kUnboxedMint) { 6944 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6945 const Register out = locs()->out(0).reg();
6946 // Representations are bitwise equivalent.
6947 ASSERT(out == locs()->in(0).reg());
6948 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6949 const Register out = locs()->out(0).reg();
6950 // Representations are bitwise equivalent.
6951 ASSERT(out == locs()->in(0).reg());
6952 if (CanDeoptimize()) {
6953 Label* deopt =
6954 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
// tst sets N from the sign bit; MI means the uint32 has its top bit
// set and therefore does not fit in a signed int32.
6955 __ tst(out, Operand(out));
6956 __ b(deopt, MI);
6957 }
6958 } else if (from() == kUnboxedMint) {
6959 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6505 PairLocation* in_pair = locs()->in(0).AsPairLocation(); 6960 PairLocation* in_pair = locs()->in(0).AsPairLocation();
6506 Register in_lo = in_pair->At(0).reg(); 6961 Register in_lo = in_pair->At(0).reg();
6962 Register in_hi = in_pair->At(1).reg();
6507 Register out = locs()->out(0).reg(); 6963 Register out = locs()->out(0).reg();
6508 // Copy low word. 6964 // Copy low word.
6509 __ mov(out, Operand(in_lo)); 6965 __ mov(out, Operand(in_lo));
6510 } else { 6966 if (CanDeoptimize()) {
6511 ASSERT(from() == kUnboxedUint32); 6967 Label* deopt =
6968 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6969 ASSERT(to() == kUnboxedInt32);
// Deopt unless the high word is the sign-extension of the low word,
// i.e. unless the mint value fits in int32.
6970 __ cmp(in_hi, Operand(in_lo, ASR, kBitsPerWord - 1));
6971 __ b(deopt, NE);
6972 }
6973 } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
6974 ASSERT(to() == kUnboxedMint);
6512 Register in = locs()->in(0).reg(); 6975 Register in = locs()->in(0).reg();
6513 PairLocation* out_pair = locs()->out(0).AsPairLocation(); 6976 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6514 Register out_lo = out_pair->At(0).reg(); 6977 Register out_lo = out_pair->At(0).reg();
6515 Register out_hi = out_pair->At(1).reg(); 6978 Register out_hi = out_pair->At(1).reg();
6516 // Copy low word. 6979 // Copy low word.
6517 __ mov(out_lo, Operand(in)); 6980 __ mov(out_lo, Operand(in));
// uint32 widens with a zero high word; int32 sign-extends instead.
6518 // Zero upper word. 6981 if (from() == kUnboxedUint32) {
6519 __ eor(out_hi, out_hi, Operand(out_hi)); 6982 __ eor(out_hi, out_hi, Operand(out_hi));
6983 } else {
6984 ASSERT(from() == kUnboxedInt32);
6985 __ mov(out_hi, Operand(in, ASR, kBitsPerWord - 1));
6986 }
6987 } else {
6988 UNREACHABLE();
6520 } 6989 }
6521 } 6990 }
6522 6991
6523 6992
// Throw takes no fixed inputs or temps and always calls the runtime
// (kCall), so all registers are treated as clobbered.
6524 LocationSummary* ThrowInstr::MakeLocationSummary(Isolate* isolate, 6993 LocationSummary* ThrowInstr::MakeLocationSummary(Isolate* isolate,
6525 bool opt) const { 6994 bool opt) const {
6526 return new(isolate) LocationSummary(isolate, 0, 0, LocationSummary::kCall); 6995 return new(isolate) LocationSummary(isolate, 0, 0, LocationSummary::kCall);
6527 } 6996 }
6528 6997
6529 6998
(...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after
6747 compiler->GenerateCall(token_pos(), &label, stub_kind_, locs()); 7216 compiler->GenerateCall(token_pos(), &label, stub_kind_, locs());
6748 #if defined(DEBUG) 7217 #if defined(DEBUG)
6749 __ LoadImmediate(R4, kInvalidObjectPointer); 7218 __ LoadImmediate(R4, kInvalidObjectPointer);
6750 __ LoadImmediate(R5, kInvalidObjectPointer); 7219 __ LoadImmediate(R5, kInvalidObjectPointer);
6751 #endif 7220 #endif
6752 } 7221 }
6753 7222
6754 } // namespace dart 7223 } // namespace dart
6755 7224
6756 #endif // defined TARGET_ARCH_ARM 7225 #endif // defined TARGET_ARCH_ARM
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698