Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(289)

Side by Side Diff: src/arm/stub-cache-arm.cc

Issue 6295013: Revert r6376 and r6373 which changes external array support. The ARM... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/ic-arm.cc ('k') | src/builtins.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 884 matching lines...) Expand 10 before | Expand all | Expand 10 after
895 miss); 895 miss);
896 if (result->IsFailure()) return result; 896 if (result->IsFailure()) return result;
897 } 897 }
898 ASSERT(current->IsJSObject()); 898 ASSERT(current->IsJSObject());
899 current = JSObject::cast(current->GetPrototype()); 899 current = JSObject::cast(current->GetPrototype());
900 } 900 }
901 return NULL; 901 return NULL;
902 } 902 }
903 903
904 904
// Convert the int32 in register ival to an IEEE 754 single-precision
// (binary32) value and store it at memory location (dst + 4 * wordoffset).
// If VFP3 is available it is used for the conversion; otherwise the
// binary32 bit pattern is assembled by hand in fval (sign | exponent |
// mantissa) before being stored.
// Clobbers: ival, fval, scratch1, scratch2 (soft-float path).
static void StoreIntAsFloat(MacroAssembler* masm,
                            Register dst,
                            Register wordoffset,
                            Register ival,
                            Register fval,
                            Register scratch1,
                            Register scratch2) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, ival);
    __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
    __ vcvt_f32_s32(s0, s0);
    __ vstr(s0, scratch1, 0);
  } else {
    Label not_special, done;
    // Move sign bit from source to destination. This works because the sign
    // bit in the exponent word of the double has the same position and polarity
    // as the 2's complement sign bit in a Smi.
    ASSERT(kBinary32SignMask == 0x80000000u);

    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
    // Negate value if it is negative (ne: the sign bit was set above).
    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);

    // We have -1, 0 or 1, which we treat specially. Register ival contains
    // absolute value: it is either equal to 1 (special case of -1 and 1),
    // greater than 1 (not a special case) or less than 1 (special case of 0).
    __ cmp(ival, Operand(1));
    __ b(gt, &not_special);

    // For 1 or -1 we need to or in the 0 exponent (biased). For 0 the
    // exponent field stays all-zero, which is the binary32 encoding of 0.0.
    static const uint32_t exponent_word_for_1 =
        kBinary32ExponentBias << kBinary32ExponentShift;

    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
    __ b(&done);

    __ bind(&not_special);
    // Count leading zeros.
    // Gets the wrong answer for 0, but we already checked for that case above.
    Register zeros = scratch2;
    __ CountLeadingZeros(zeros, ival, scratch1);

    // Compute exponent and or it into the exponent register:
    // exponent = (31 - zeros) + bias.
    __ rsb(scratch1,
           zeros,
           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));

    __ orr(fval,
           fval,
           Operand(scratch1, LSL, kBinary32ExponentShift));

    // Shift up the source chopping the top (implicit leading-one) bit off.
    __ add(zeros, zeros, Operand(1));
    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
    __ mov(ival, Operand(ival, LSL, zeros));
    // Or in the top kBinary32MantissaBits bits of the normalized value as
    // the mantissa field.
    __ orr(fval,
           fval,
           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));

    __ bind(&done);
    __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
  }
}
973
974
// Convert an unsigned integer with the specified number of leading zeroes in
// its binary representation to an IEEE 754 double.
// The integer to convert is passed in register hiword.
// The resulting double is returned in the register pair hiword:loword
// (exponent word : mantissa word).
// This function does not work correctly for 0 (callers must handle 0
// separately, since the implicit leading one would be wrong).
static void GenerateUInt2Double(MacroAssembler* masm,
                                Register hiword,
                                Register loword,
                                Register scratch,
                                int leading_zeroes) {
  // Position of the highest set bit; with leading_zeroes fixed at compile
  // time the shift amounts and exponent below are all compile-time constants.
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;

  const int mantissa_shift_for_hi_word =
      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;

  const int mantissa_shift_for_lo_word =
      kBitsPerInt - mantissa_shift_for_hi_word;

  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
  if (mantissa_shift_for_hi_word > 0) {
    // Mantissa straddles both words: low bits go to loword, high bits are
    // merged with the exponent in hiword.
    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
  } else {
    // Mantissa fits entirely in the top word.
    __ mov(loword, Operand(0, RelocInfo::NONE));
    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
  }

  // If least significant bit of biased exponent was not 1 it was corrupted
  // by most significant bit of mantissa (the implicit leading one ors into
  // the exponent field), so we should fix that.
  if (!(biased_exponent & 1)) {
    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
  }
}
1009
1010 905
1011 #undef __ 906 #undef __
1012 #define __ ACCESS_MASM(masm()) 907 #define __ ACCESS_MASM(masm())
1013 908
1014 909
1015 Register StubCompiler::CheckPrototypes(JSObject* object, 910 Register StubCompiler::CheckPrototypes(JSObject* object,
1016 Register object_reg, 911 Register object_reg,
1017 JSObject* holder, 912 JSObject* holder,
1018 Register holder_reg, 913 Register holder_reg,
1019 Register scratch1, 914 Register scratch1,
(...skipping 2302 matching lines...) Expand 10 before | Expand all | Expand 10 after
3322 __ bind(&generic_stub_call); 3217 __ bind(&generic_stub_call);
3323 Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric); 3218 Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
3324 Handle<Code> generic_construct_stub(code); 3219 Handle<Code> generic_construct_stub(code);
3325 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); 3220 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
3326 3221
3327 // Return the generated code. 3222 // Return the generated code.
3328 return GetCode(); 3223 return GetCode();
3329 } 3224 }
3330 3225
3331 3226
3332 static bool IsElementTypeSigned(ExternalArrayType array_type) {
3333 switch (array_type) {
3334 case kExternalByteArray:
3335 case kExternalShortArray:
3336 case kExternalIntArray:
3337 return true;
3338
3339 case kExternalUnsignedByteArray:
3340 case kExternalUnsignedShortArray:
3341 case kExternalUnsignedIntArray:
3342 return false;
3343
3344 default:
3345 UNREACHABLE();
3346 return false;
3347 }
3348 }
3349
3350
// Generates a keyed-load stub for the given external array type: performs
// receiver/key checks, loads the element, and boxes the result as a smi or
// a freshly allocated HeapNumber as required. Falls through to the runtime
// (kKeyedGetProperty) on any check failure or allocation failure.
MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
    ExternalArrayType array_type, Code::Flags flags) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow, failed_allocation;

  Register key = r0;
  Register receiver = r1;

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &slow);

  // Check that the object is a JS object. Load map into r2.
  __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &slow);

  // Check that the receiver does not require access checks. We need
  // to check this explicitly since this generic stub does not perform
  // map checks.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);

  // Check that the elements array is the appropriate type of
  // ExternalArray.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
  __ cmp(r2, ip);
  __ b(ne, &slow);

  // Check that the index is in range.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(ip, Operand(key, ASR, kSmiTagSize));
  // Unsigned comparison catches both negative and too-large values.
  __ b(lo, &slow);

  // r3: elements array
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
  // r3: base pointer of external storage

  // We are not untagging smi key and instead work with it
  // as if it was premultiplied by 2 (LSR 1 for byte elements, LSL 0 for
  // shorts, LSL 1 for words gives the correct byte offsets).
  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));

  Register value = r2;
  switch (array_type) {
    case kExternalByteArray:
      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
      break;
    case kExternalUnsignedByteArray:
      __ ldrb(value, MemOperand(r3, key, LSR, 1));
      break;
    case kExternalShortArray:
      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
      break;
    case kExternalUnsignedShortArray:
      __ ldrh(value, MemOperand(r3, key, LSL, 0));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
      __ ldr(value, MemOperand(r3, key, LSL, 1));
      break;
    case kExternalFloatArray:
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        __ add(r2, r3, Operand(key, LSL, 1));
        __ vldr(s0, r2, 0);
      } else {
        // Without VFP the binary32 bit pattern is loaded into a core
        // register and converted to a double manually below.
        __ ldr(value, MemOperand(r3, key, LSL, 1));
      }
      break;
    default:
      UNREACHABLE();
      break;
  }

  // For integer array types:
  //   r2: value
  // For floating-point array type:
  //   s0: value (if VFP3 is supported)
  //   r2: value (if VFP3 is not supported)

  if (array_type == kExternalIntArray) {
    // For the Int and UnsignedInt array types, we need to see whether
    // the value can be represented in a Smi. If not, we need to convert
    // it to a HeapNumber.
    Label box_int;
    __ cmp(value, Operand(0xC0000000));
    __ b(mi, &box_int);
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();

    __ bind(&box_int);
    // Allocate a HeapNumber for the result and perform int-to-double
    // conversion. Don't touch r0 or r1 as they are needed if allocation
    // fails.
    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
    // Now we can use r0 for the result as key is not needed any more.
    __ mov(r0, r5);

    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      __ vmov(s0, value);
      __ vcvt_f64_s32(d0, s0);
      // vstr needs an untagged address (offset must be a multiple of 4).
      __ sub(r3, r0, Operand(kHeapObjectTag));
      __ vstr(d0, r3, HeapNumber::kValueOffset);
      __ Ret();
    } else {
      // Soft-float: delegate the int32-to-double conversion to a stub.
      WriteInt32ToHeapNumberStub stub(value, r0, r3);
      __ TailCallStub(&stub);
    }
  } else if (array_type == kExternalUnsignedIntArray) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle either of the top two bits being set in the value.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      Label box_int, done;
      __ tst(value, Operand(0xC0000000));
      __ b(ne, &box_int);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      __ bind(&box_int);
      __ vmov(s0, value);
      // Allocate a HeapNumber for the result and perform int-to-double
      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
      // registers - also when jumping due to exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);

      __ vcvt_f64_u32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Check whether unsigned integer fits into smi.
      Label box_int_0, box_int_1, done;
      __ tst(value, Operand(0x80000000));
      __ b(ne, &box_int_0);
      __ tst(value, Operand(0x40000000));
      __ b(ne, &box_int_1);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      Register hiword = value;  // r2.
      Register loword = r3;

      __ bind(&box_int_0);
      // Integer does not have leading zeros.
      GenerateUInt2Double(masm(), hiword, loword, r4, 0);
      __ b(&done);

      __ bind(&box_int_1);
      // Integer has one leading zero.
      GenerateUInt2Double(masm(), hiword, loword, r4, 1);


      __ bind(&done);
      // Integer was converted to double in registers hiword:loword.
      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
      // clobbers all registers - also when jumping due to exhausted young
      // space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r4, r5, r7, r6, &slow);

      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));

      __ mov(r0, r4);
      __ Ret();
    }
  } else if (array_type == kExternalFloatArray) {
    // For the floating-point array type, we need to always allocate a
    // HeapNumber.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
      // s0 still holds the loaded binary32 value from the switch above.
      __ vcvt_f64_f32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
      // VFP is not available, do manual single to double conversion.

      // r2: floating point value (binary32)
      // r3: heap number for result

      // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
      // the slow case from here.
      __ and_(r0, value, Operand(kBinary32MantissaMask));

      // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
      // the slow case from here.
      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));

      Label exponent_rebiased;
      // Exponent 0: zero or subnormal - keep the zero exponent.
      __ teq(r1, Operand(0x00));
      __ b(eq, &exponent_rebiased);

      // Exponent 0xff: NaN or infinity - map to the double all-ones
      // exponent 0x7ff.
      __ teq(r1, Operand(0xff));
      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
      __ b(eq, &exponent_rebiased);

      // Rebias exponent from binary32 bias to binary64 bias.
      __ add(r1,
             r1,
             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));

      __ bind(&exponent_rebiased);
      __ and_(r2, value, Operand(kBinary32SignMask));
      value = no_reg;
      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));

      // Shift mantissa into double layout: the high part goes into the
      // exponent word, the rest into the mantissa word.
      static const int kMantissaShiftForHiWord =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaShiftForLoWord =
          kBitsPerInt - kMantissaShiftForHiWord;

      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));

      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));

      __ mov(r0, r3);
      __ Ret();
    }

  } else {
    // Byte and short element types always fit in a smi.
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();
  }

  // Slow case, key and receiver still in r0 and r1.
  __ bind(&slow);
  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);

  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------

  __ Push(r1, r0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);

  return GetCode(flags);
}
3629
3630
// Generates a keyed-store stub for the given external array type: performs
// receiver/key checks, then stores smi values directly and converts
// HeapNumber values to the element type (via VFP3 when available, otherwise
// by manual bit manipulation). Falls through to the runtime (kSetProperty)
// for any other value kind or failed check.
MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
    ExternalArrayType array_type, Code::Flags flags) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, check_heap_number;

  // Register usage.
  Register value = r0;
  Register key = r1;
  Register receiver = r2;
  // r3 mostly holds the elements array or the destination external array.

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);

  // Check that the object is a JS object. Load map into r3.
  __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
  __ b(le, &slow);

  // Check that the receiver does not require access checks. We need
  // to do this because this generic stub does not perform map checks.
  __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &slow);

  // Check that the elements array is the appropriate type of ExternalArray.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
  __ cmp(r4, ip);
  __ b(ne, &slow);

  // Check that the index is in range.
  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the index.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(r4, ip);
  // Unsigned comparison catches both negative and too-large values.
  __ b(hs, &slow);

  // Handle both smis and HeapNumbers in the fast path. Go to the
  // runtime for all other kinds of values.
  // r3: external array.
  // r4: key (integer).
  __ BranchOnNotSmi(value, &check_heap_number);
  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r4: key (integer).
  // r5: value (integer).
  switch (array_type) {
    case kExternalByteArray:
    case kExternalUnsignedByteArray:
      __ strb(r5, MemOperand(r3, r4, LSL, 0));
      break;
    case kExternalShortArray:
    case kExternalUnsignedShortArray:
      __ strh(r5, MemOperand(r3, r4, LSL, 1));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
      __ str(r5, MemOperand(r3, r4, LSL, 2));
      break;
    case kExternalFloatArray:
      // Perform int-to-float conversion and store to memory.
      StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
      break;
    default:
      UNREACHABLE();
      break;
  }

  // Entry registers are intact, r0 holds the value which is the return value.
  __ Ret();


  // r3: external array.
  // r4: index (integer).
  __ bind(&check_heap_number);
  __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
  __ b(ne, &slow);

  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r4: key (integer).

  // The WebGL specification leaves the behavior of storing NaN and
  // +/-Infinity into integer arrays basically undefined. For more
  // reproducible behavior, convert these to zero.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);


    if (array_type == kExternalFloatArray) {
      // vldr requires offset to be a multiple of 4 so we can not
      // include -kHeapObjectTag into it.
      __ sub(r5, r0, Operand(kHeapObjectTag));
      __ vldr(d0, r5, HeapNumber::kValueOffset);
      __ add(r5, r3, Operand(r4, LSL, 2));
      __ vcvt_f32_f64(s0, d0);
      __ vstr(s0, r5, 0);
    } else {
      // Need to perform float-to-int conversion.
      // Test for NaN or infinity (both give zero).
      // NOTE(review): at this point r5 holds value's *map* (loaded by
      // CompareObjectType above), so this reads the exponent word relative
      // to the map pointer - verify this should not be `value` instead.
      __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));

      // Hoisted load. vldr requires offset to be a multiple of 4 so we can not
      // include -kHeapObjectTag into it.
      __ sub(r5, r0, Operand(kHeapObjectTag));
      __ vldr(d0, r5, HeapNumber::kValueOffset);

      __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
      // NaNs and Infinities have all-one exponents so they sign extend to -1.
      __ cmp(r6, Operand(-1));
      __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);

      // Not infinity or NaN simply convert to int.
      if (IsElementTypeSigned(array_type)) {
        __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
      } else {
        __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
      }
      __ vmov(r5, s0, ne);

      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
          __ strb(r5, MemOperand(r3, r4, LSL, 0));
          break;
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          __ strh(r5, MemOperand(r3, r4, LSL, 1));
          break;
        case kExternalIntArray:
        case kExternalUnsignedIntArray:
          __ str(r5, MemOperand(r3, r4, LSL, 2));
          break;
        default:
          UNREACHABLE();
          break;
      }
    }

    // Entry registers are intact, r0 holds the value which is the return value.
    __ Ret();
  } else {
    // VFP3 is not available, do manual conversions.
    // r5: exponent word of the HeapNumber, r6: mantissa word.
    __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
    __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));

    if (array_type == kExternalFloatArray) {
      Label done, nan_or_infinity_or_zero;
      static const int kMantissaInHiWordShift =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaInLoWordShift =
          kBitsPerInt - kMantissaInHiWordShift;

      // Test for all special exponent values: zeros, subnormal numbers, NaNs
      // and infinities. All these should be converted to 0.
      __ mov(r7, Operand(HeapNumber::kExponentMask));
      __ and_(r9, r5, Operand(r7), SetCC);
      __ b(eq, &nan_or_infinity_or_zero);

      __ teq(r9, Operand(r7));
      __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
      __ b(eq, &nan_or_infinity_or_zero);

      // Rebias exponent from binary64 bias to binary32 bias.
      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
      __ add(r9,
             r9,
             Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));

      // Overflows binary32 range: store signed infinity.
      __ cmp(r9, Operand(kBinary32MaxExponent));
      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
      __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
      __ b(gt, &done);

      // Underflows binary32 range: store signed zero.
      __ cmp(r9, Operand(kBinary32MinExponent));
      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
      __ b(lt, &done);

      // Normal case: assemble sign | mantissa | exponent into r5.
      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
      __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
      __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));

      __ bind(&done);
      __ str(r5, MemOperand(r3, r4, LSL, 2));
      // Entry registers are intact, r0 holds the value which is the return
      // value.
      __ Ret();

      __ bind(&nan_or_infinity_or_zero);
      // Preserve sign and mantissa bits; r9 already holds the target
      // exponent field (0 or all-ones).
      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r9, r9, r7);
      __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
      __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
      __ b(&done);
    } else {
      bool is_signed_type = IsElementTypeSigned(array_type);
      int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
      int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;

      Label done, sign;

      // Test for all special exponent values: zeros, subnormal numbers, NaNs
      // and infinities. All these should be converted to 0.
      __ mov(r7, Operand(HeapNumber::kExponentMask));
      __ and_(r9, r5, Operand(r7), SetCC);
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
      __ b(eq, &done);

      __ teq(r9, Operand(r7));
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
      __ b(eq, &done);

      // Unbias exponent.
      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
      __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
      // If exponent is negative then result is 0 (magnitude below 1).
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
      __ b(mi, &done);

      // If exponent is too big then result is minimal value.
      __ cmp(r9, Operand(meaningfull_bits - 1));
      __ mov(r5, Operand(min_value), LeaveCC, ge);
      __ b(ge, &done);

      // Isolate sign (r7) and reconstitute the mantissa with its implicit
      // leading one in r5.
      __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));

      // Shift mantissa to its integer position; pl means the whole result
      // fits in the top word.
      __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
      __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
      __ b(pl, &sign);

      // Otherwise combine bits from both mantissa words.
      __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
      __ mov(r5, Operand(r5, LSL, r9));
      __ rsb(r9, r9, Operand(meaningfull_bits));
      __ orr(r5, r5, Operand(r6, LSR, r9));

      __ bind(&sign);
      // Negate the result if the source was negative.
      __ teq(r7, Operand(0, RelocInfo::NONE));
      __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);

      __ bind(&done);
      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
          __ strb(r5, MemOperand(r3, r4, LSL, 0));
          break;
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          __ strh(r5, MemOperand(r3, r4, LSL, 1));
          break;
        case kExternalIntArray:
        case kExternalUnsignedIntArray:
          __ str(r5, MemOperand(r3, r4, LSL, 2));
          break;
        default:
          UNREACHABLE();
          break;
      }
    }
  }

  // Slow case: call runtime.
  __ bind(&slow);

  // Entry registers are intact.
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);

  return GetCode(flags);
}
3927
3928
3929 #undef __ 3227 #undef __
3930 3228
3931 } } // namespace v8::internal 3229 } } // namespace v8::internal
3932 3230
3933 #endif // V8_TARGET_ARCH_ARM 3231 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/ic-arm.cc ('k') | src/builtins.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698