Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1075)

Side by Side Diff: src/arm/stub-cache-arm.cc

Issue 6315004: Truncate rather than round to nearest when performing float-to-integer... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/ic-arm.cc ('k') | src/builtins.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 3184 matching lines...) Expand 10 before | Expand all | Expand 10 after
3195 __ bind(&generic_stub_call); 3195 __ bind(&generic_stub_call);
3196 Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric); 3196 Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
3197 Handle<Code> generic_construct_stub(code); 3197 Handle<Code> generic_construct_stub(code);
3198 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); 3198 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
3199 3199
3200 // Return the generated code. 3200 // Return the generated code.
3201 return GetCode(); 3201 return GetCode();
3202 } 3202 }
3203 3203
3204 3204
3205 static bool IsElementTypeSigned(ExternalArrayType array_type) {
3206 switch (array_type) {
3207 case kExternalByteArray:
3208 case kExternalShortArray:
3209 case kExternalIntArray:
3210 return true;
3211
3212 case kExternalUnsignedByteArray:
3213 case kExternalUnsignedShortArray:
3214 case kExternalUnsignedIntArray:
3215 return false;
3216
3217 default:
3218 UNREACHABLE();
3219 return false;
3220 }
3221 }
3222
3223
// Builds a keyed-load IC stub for an external (typed) array with the given
// element type.  The fast path returns the element tagged as a smi when it
// fits, otherwise boxes it in a freshly allocated HeapNumber.  Any non-fast
// case (smi receiver, non-smi key, access-checked receiver, wrong elements
// kind, out-of-range index, failed allocation) jumps to the slow path which
// tail-calls the generic KeyedGetProperty runtime function.
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
    ExternalArrayType array_type, Code::Flags flags) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  // NOTE(review): failed_allocation is declared but never bound or jumped
  // to anywhere below — confirm it can be removed.
  Label slow, failed_allocation;

  Register key = r0;
  Register receiver = r1;

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &slow);

  // Check that the object is a JS object. Load map into r2.
  __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &slow);

  // Check that the receiver does not require access checks. We need
  // to check this explicitly since this generic stub does not perform
  // map checks.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);

  // Check that the elements array is the appropriate type of
  // ExternalArray.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
  __ cmp(r2, ip);
  __ b(ne, &slow);

  // Check that the index is in range (compares length against the
  // untagged key).
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(ip, Operand(key, ASR, kSmiTagSize));
  // Unsigned comparison catches both negative and too-large values.
  __ b(lo, &slow);

  // r3: elements array
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
  // r3: base pointer of external storage

  // We are not untagging smi key and instead work with it
  // as if it was premultiplied by 2 (i.e. the tagged key is index * 2).
  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));

  Register value = r2;
  switch (array_type) {
    case kExternalByteArray:
      // LSR 1 untags the smi key to obtain the byte index.
      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
      break;
    case kExternalUnsignedByteArray:
      __ ldrb(value, MemOperand(r3, key, LSR, 1));
      break;
    case kExternalShortArray:
      // The tagged key (index * 2) is already the byte offset for
      // 16-bit elements.
      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
      break;
    case kExternalUnsignedShortArray:
      __ ldrh(value, MemOperand(r3, key, LSL, 0));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
      // LSL 1 turns the tagged key (index * 2) into index * 4.
      __ ldr(value, MemOperand(r3, key, LSL, 1));
      break;
    case kExternalFloatArray:
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        __ add(r2, r3, Operand(key, LSL, 1));
        __ vldr(s0, r2, 0);
      } else {
        // Without VFP3 the raw binary32 bits are kept in a core register.
        __ ldr(value, MemOperand(r3, key, LSL, 1));
      }
      break;
    default:
      UNREACHABLE();
      break;
  }

  // For integer array types:
  // r2: value
  // For floating-point array type
  // s0: value (if VFP3 is supported)
  // r2: value (if VFP3 is not supported)

  if (array_type == kExternalIntArray) {
    // For the Int and UnsignedInt array types, we need to see whether
    // the value can be represented in a Smi. If not, we need to convert
    // it to a HeapNumber.
    Label box_int;
    // Values in [-2^30, 2^30) fit in a smi; subtracting 0xC0000000 sets
    // the N flag ('mi') exactly when the top two bits of value differ.
    __ cmp(value, Operand(0xC0000000));
    __ b(mi, &box_int);
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();

    __ bind(&box_int);
    // Allocate a HeapNumber for the result and perform int-to-double
    // conversion. Don't touch r0 or r1 as they are needed if allocation
    // fails.
    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
    // Now we can use r0 for the result as key is not needed any more.
    __ mov(r0, r5);

    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      __ vmov(s0, value);
      __ vcvt_f64_s32(d0, s0);
      // vstr needs an untagged base address (offset must be a multiple
      // of 4, so the tag cannot be folded into the offset).
      __ sub(r3, r0, Operand(kHeapObjectTag));
      __ vstr(d0, r3, HeapNumber::kValueOffset);
      __ Ret();
    } else {
      // Fall back to the shared int32->HeapNumber conversion stub.
      WriteInt32ToHeapNumberStub stub(value, r0, r3);
      __ TailCallStub(&stub);
    }
  } else if (array_type == kExternalUnsignedIntArray) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle either of the top two bits being set in the value.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      Label box_int, done;
      __ tst(value, Operand(0xC0000000));
      __ b(ne, &box_int);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      __ bind(&box_int);
      __ vmov(s0, value);
      // Allocate a HeapNumber for the result and perform int-to-double
      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
      // registers - also when jumping due to exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);

      // Unsigned conversion: the stored word is interpreted as uint32.
      __ vcvt_f64_u32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Check whether unsigned integer fits into smi.
      Label box_int_0, box_int_1, done;
      __ tst(value, Operand(0x80000000));
      __ b(ne, &box_int_0);
      __ tst(value, Operand(0x40000000));
      __ b(ne, &box_int_1);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      Register hiword = value;  // r2.
      Register loword = r3;

      __ bind(&box_int_0);
      // Integer does not have leading zeros.
      GenerateUInt2Double(masm, hiword, loword, r4, 0);
      __ b(&done);

      __ bind(&box_int_1);
      // Integer has one leading zero.
      GenerateUInt2Double(masm, hiword, loword, r4, 1);


      __ bind(&done);
      // Integer was converted to double in registers hiword:loword.
      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
      // clobbers all registers - also when jumping due to exhausted young
      // space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r4, r5, r7, r6, &slow);

      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));

      __ mov(r0, r4);
      __ Ret();
    }
  } else if (array_type == kExternalFloatArray) {
    // For the floating-point array type, we need to always allocate a
    // HeapNumber.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
      // Widen the binary32 element (still in s0) to double and store it.
      __ vcvt_f64_f32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
      // VFP is not available, do manual single to double conversion.

      // r2: floating point value (binary32)
      // r3: heap number for result

      // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
      // the slow case from here.
      __ and_(r0, value, Operand(kBinary32MantissaMask));

      // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
      // the slow case from here.
      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));

      Label exponent_rebiased;
      // Zero / subnormal exponent: keep it as zero in the double as well
      // (note: subnormal binary32 values are thus flushed to +/-0 here).
      __ teq(r1, Operand(0x00));
      __ b(eq, &exponent_rebiased);

      // All-ones binary32 exponent (Inf/NaN) maps to the all-ones
      // binary64 exponent 0x7ff.
      __ teq(r1, Operand(0xff));
      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
      __ b(eq, &exponent_rebiased);

      // Rebias exponent.
      __ add(r1,
             r1,
             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));

      __ bind(&exponent_rebiased);
      __ and_(r2, value, Operand(kBinary32SignMask));
      value = no_reg;
      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));

      // Shift mantissa.
      static const int kMantissaShiftForHiWord =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaShiftForLoWord =
          kBitsPerInt - kMantissaShiftForHiWord;

      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));

      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));

      __ mov(r0, r3);
      __ Ret();
    }

  } else {
    // Byte/short element types always fit in a smi.
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();
  }

  // Slow case, key and receiver still in r0 and r1.
  __ bind(&slow);
  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);

  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------

  __ Push(r1, r0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);

  return GetCode(flags);
}
3502
3503
// Builds a keyed-store IC stub for an external (typed) array with the given
// element type.  Fast-paths smi and HeapNumber values, truncating doubles
// toward zero for integer element types (RoundToZero) and converting NaN
// and +/-Infinity to zero; all other values and failure cases tail-call the
// generic SetProperty runtime function.
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
    ExternalArrayType array_type, Code::Flags flags) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, check_heap_number;

  // Register usage.
  Register value = r0;
  Register key = r1;
  Register receiver = r2;
  // r3 mostly holds the elements array or the destination external array.

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);

  // Check that the object is a JS object. Load map into r3.
  // NOTE(review): this uses 'le' (also rejects FIRST_JS_OBJECT_TYPE itself)
  // while the load stub above uses 'lt' — confirm the asymmetry is intended.
  __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
  __ b(le, &slow);

  // Check that the receiver does not require access checks. We need
  // to do this because this generic stub does not perform map checks.
  __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &slow);

  // Check that the elements array is the appropriate type of ExternalArray.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
  __ cmp(r4, ip);
  __ b(ne, &slow);

  // Check that the index is in range.
  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the index.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(r4, ip);
  // Unsigned comparison catches both negative and too-large values.
  __ b(hs, &slow);

  // Handle both smis and HeapNumbers in the fast path. Go to the
  // runtime for all other kinds of values.
  // r3: external array.
  // r4: key (integer).
  __ BranchOnNotSmi(value, &check_heap_number);
  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r4: key (integer).
  // r5: value (integer).
  switch (array_type) {
    case kExternalByteArray:
    case kExternalUnsignedByteArray:
      __ strb(r5, MemOperand(r3, r4, LSL, 0));
      break;
    case kExternalShortArray:
    case kExternalUnsignedShortArray:
      __ strh(r5, MemOperand(r3, r4, LSL, 1));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
      __ str(r5, MemOperand(r3, r4, LSL, 2));
      break;
    case kExternalFloatArray:
      // Perform int-to-float conversion and store to memory.
      StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
      break;
    default:
      UNREACHABLE();
      break;
  }

  // Entry registers are intact, r0 holds the value which is the return value.
  __ Ret();


  // r3: external array.
  // r4: index (integer).
  __ bind(&check_heap_number);
  // CompareObjectType loads value's map into r5 and its instance type
  // into r6.
  __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
  __ b(ne, &slow);

  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r4: key (integer).

  // The WebGL specification leaves the behavior of storing NaN and
  // +/-Infinity into integer arrays basically undefined. For more
  // reproducible behavior, convert these to zero.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);


    if (array_type == kExternalFloatArray) {
      // vldr requires offset to be a multiple of 4 so we can not
      // include -kHeapObjectTag into it.
      __ sub(r5, r0, Operand(kHeapObjectTag));
      __ vldr(d0, r5, HeapNumber::kValueOffset);
      __ add(r5, r3, Operand(r4, LSL, 2));
      // Narrow the double to binary32 before storing.
      __ vcvt_f32_f64(s0, d0);
      __ vstr(s0, r5, 0);
    } else {
      // Need to perform float-to-int conversion.
      // Test for NaN or infinity (both give zero).
      // NOTE(review): at this point r5 holds value's *map* (loaded by
      // CompareObjectType above), so this reads from the map rather than
      // from the HeapNumber; the non-VFP path below loads the exponent
      // from 'value' — confirm this should be FieldMemOperand(value, ...).
      __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));

      // Hoisted load. vldr requires offset to be a multiple of 4 so we can not
      // include -kHeapObjectTag into it.
      __ sub(r5, r0, Operand(kHeapObjectTag));
      __ vldr(d0, r5, HeapNumber::kValueOffset);

      __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
      // NaNs and Infinities have all-one exponents so they sign extend to -1.
      __ cmp(r6, Operand(-1));
      __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);

      // Not infinity or NaN simply convert to int (truncating toward zero,
      // per RoundToZero); the conditional forms skip this when the value
      // was NaN/Infinity and r5 already holds zero.
      if (IsElementTypeSigned(array_type)) {
        __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
      } else {
        __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
      }
      __ vmov(r5, s0, ne);

      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
          __ strb(r5, MemOperand(r3, r4, LSL, 0));
          break;
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          __ strh(r5, MemOperand(r3, r4, LSL, 1));
          break;
        case kExternalIntArray:
        case kExternalUnsignedIntArray:
          __ str(r5, MemOperand(r3, r4, LSL, 2));
          break;
        default:
          UNREACHABLE();
          break;
      }
    }

    // Entry registers are intact, r0 holds the value which is the return value.
    __ Ret();
  } else {
    // VFP3 is not available, do manual conversions.
    // r5/r6: high (sign+exponent+top mantissa) and low words of the double.
    __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
    __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));

    if (array_type == kExternalFloatArray) {
      // Manual binary64 -> binary32 narrowing.
      Label done, nan_or_infinity_or_zero;
      static const int kMantissaInHiWordShift =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaInLoWordShift =
          kBitsPerInt - kMantissaInHiWordShift;

      // Test for all special exponent values: zeros, subnormal numbers, NaNs
      // and infinities. All these should be converted to 0.
      __ mov(r7, Operand(HeapNumber::kExponentMask));
      __ and_(r9, r5, Operand(r7), SetCC);
      __ b(eq, &nan_or_infinity_or_zero);

      __ teq(r9, Operand(r7));
      __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
      __ b(eq, &nan_or_infinity_or_zero);

      // Rebias exponent.
      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
      __ add(r9,
             r9,
             Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));

      // Overflow past the binary32 exponent range: clamp to +/-Infinity.
      __ cmp(r9, Operand(kBinary32MaxExponent));
      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
      __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
      __ b(gt, &done);

      // Underflow below the binary32 exponent range: flush to signed zero.
      __ cmp(r9, Operand(kBinary32MinExponent));
      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
      __ b(lt, &done);

      // Assemble sign, rebased exponent and repositioned mantissa bits.
      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
      __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
      __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));

      __ bind(&done);
      __ str(r5, MemOperand(r3, r4, LSL, 2));
      // Entry registers are intact, r0 holds the value which is the return
      // value.
      __ Ret();

      __ bind(&nan_or_infinity_or_zero);
      // r9 holds 0 (zero/subnormal) or the binary32 exponent mask (NaN/Inf);
      // keep the sign and the repositioned mantissa bits.
      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r9, r9, r7);
      __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
      __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
      __ b(&done);
    } else {
      // Manual double -> integer truncation.
      bool is_signed_type = IsElementTypeSigned(array_type);
      int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
      int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;

      Label done, sign;

      // Test for all special exponent values: zeros, subnormal numbers, NaNs
      // and infinities. All these should be converted to 0.
      __ mov(r7, Operand(HeapNumber::kExponentMask));
      __ and_(r9, r5, Operand(r7), SetCC);
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
      __ b(eq, &done);

      __ teq(r9, Operand(r7));
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
      __ b(eq, &done);

      // Unbias exponent.
      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
      __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
      // If exponent is negative then result is 0 (|value| < 1 truncates
      // to zero).
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
      __ b(mi, &done);

      // If exponent is too big then result is minimal value.
      __ cmp(r9, Operand(meaningfull_bits - 1));
      __ mov(r5, Operand(min_value), LeaveCC, ge);
      __ b(ge, &done);

      // Split off the sign and restore the implicit leading mantissa bit.
      __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));

      // If the shift amount is non-negative the result comes entirely
      // from the high mantissa word.
      __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
      __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
      __ b(pl, &sign);

      // Otherwise combine bits from both mantissa words.
      __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
      __ mov(r5, Operand(r5, LSL, r9));
      __ rsb(r9, r9, Operand(meaningfull_bits));
      __ orr(r5, r5, Operand(r6, LSR, r9));

      __ bind(&sign);
      // Negate the magnitude when the sign bit was set.
      __ teq(r7, Operand(0, RelocInfo::NONE));
      __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);

      __ bind(&done);
      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
          __ strb(r5, MemOperand(r3, r4, LSL, 0));
          break;
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          __ strh(r5, MemOperand(r3, r4, LSL, 1));
          break;
        case kExternalIntArray:
        case kExternalUnsignedIntArray:
          __ str(r5, MemOperand(r3, r4, LSL, 2));
          break;
        default:
          UNREACHABLE();
          break;
      }
    }
  }

  // Slow case: call runtime.
  __ bind(&slow);

  // Entry registers are intact.
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);

  return GetCode(flags);
}
3800
3801
3205 #undef __ 3802 #undef __
3206 3803
3207 } } // namespace v8::internal 3804 } } // namespace v8::internal
3208 3805
3209 #endif // V8_TARGET_ARCH_ARM 3806 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/ic-arm.cc ('k') | src/builtins.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698