OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 7894 matching lines...) | |
7905 __ and_(r2, r2, Operand(0x1f)); | 7905 __ and_(r2, r2, Operand(0x1f)); |
7906 __ mov(r2, Operand(r3, ASR, r2)); | 7906 __ mov(r2, Operand(r3, ASR, r2)); |
7907 break; | 7907 break; |
7908 case Token::SHR: | 7908 case Token::SHR: |
7909 // Use only the 5 least significant bits of the shift count. | 7909 // Use only the 5 least significant bits of the shift count. |
7910 __ and_(r2, r2, Operand(0x1f)); | 7910 __ and_(r2, r2, Operand(0x1f)); |
7911 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 7911 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
7912 // SHR is special because it is required to produce a positive answer. | 7912 // SHR is special because it is required to produce a positive answer. |
7913 // The code below for writing into heap numbers isn't capable of writing | 7913 // The code below for writing into heap numbers isn't capable of writing |
7914 // the register as an unsigned int so we go to slow case if we hit this | 7914 // the register as an unsigned int so we go to slow case if we hit this |
7915 // case. | 7915 // case. |
Erik Corry
2010/07/01 14:56:47
If we have VFP3 we don't have to bail out here. W
7916 __ b(mi, &slow); | 7916 __ b(mi, &slow); |
7917 break; | 7917 break; |
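Erik Corry's comment above is the motivation for the VFP3 changes later in this patch: the mi bail-out exists only because WriteInt32ToHeapNumberStub writes the register as a *signed* int32, while a SHR result with the top bit set must be read as unsigned. A minimal sketch of how a VFP3 path could avoid the bail-out, under two stated assumptions: that the assembler exposes vcvt_f64_u32 (only the signed variant vcvt_f64_s32 appears in this patch) and that r5 already holds a freshly allocated heap number, as at got_a_heap_number below. This is an illustration, not the patch's code.

    // Sketch only: write the unsigned result in r2 into the heap number in
    // r5, so Token::SHR never needs the slow case for results >= 2^31.
    CpuFeatures::Scope scope(VFP3);   // Caller checked IsSupported(VFP3).
    __ vmov(s0, r2);                  // Raw result bits into a VFP register.
    __ vcvt_f64_u32(d0, s0);          // Convert as *unsigned* int32 -> double.
    __ sub(r3, r5, Operand(kHeapObjectTag));
    __ vstr(d0, r3, HeapNumber::kValueOffset);
    __ mov(r0, Operand(r5));          // The heap number is the result.
    __ Ret();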
7918 case Token::SHL: | 7918 case Token::SHL: |
7919 // Use only the 5 least significant bits of the shift count. | 7919 // Use only the 5 least significant bits of the shift count. |
7920 __ and_(r2, r2, Operand(0x1f)); | 7920 __ and_(r2, r2, Operand(0x1f)); |
7921 __ mov(r2, Operand(r3, LSL, r2)); | 7921 __ mov(r2, Operand(r3, LSL, r2)); |
7922 break; | 7922 break; |
7923 default: UNREACHABLE(); | 7923 default: UNREACHABLE(); |
7924 } | 7924 } |
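The repeated and_(r2, r2, Operand(0x1f)) in each case is not an ARM quirk but the ECMAScript rule: for <<, >> and >>> the shift count is the right operand taken modulo 32. A standalone C++ illustration of the same semantics (the function names are ours, for illustration only):

    #include <stdint.h>
    // ECMAScript shift semantics: the count is masked to its low 5 bits,
    // exactly what and_(r2, r2, Operand(0x1f)) implements above.
    int32_t js_shl(int32_t x, int32_t n) { return x << (n & 0x1f); }
    // Signed >> is an arithmetic shift on the platforms V8 targets.
    int32_t js_sar(int32_t x, int32_t n) { return x >> (n & 0x1f); }
    uint32_t js_shr(uint32_t x, int32_t n) { return x >> (n & 0x1f); }
    // e.g. js_shl(1, 33) == 2, matching JS: (1 << 33) === 2.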
7925 // check that the *signed* result fits in a smi | 7925 // check that the *signed* result fits in a smi |
(...skipping 24 matching lines...) | |
7950 default: break; | 7950 default: break; |
7951 } | 7951 } |
7952 __ bind(&got_a_heap_number); | 7952 __ bind(&got_a_heap_number); |
7953 // r2: Answer as signed int32. | 7953 // r2: Answer as signed int32. |
7954 // r5: Heap number to write answer into. | 7954 // r5: Heap number to write answer into. |
7955 | 7955 |
7956 // Nothing can go wrong now, so move the heap number to r0, which is the | 7956 // Nothing can go wrong now, so move the heap number to r0, which is the |
7957 // result. | 7957 // result. |
7958 __ mov(r0, Operand(r5)); | 7958 __ mov(r0, Operand(r5)); |
7959 | 7959 |
7960 // Tail call that writes the int32 in r2 to the heap number in r0, using | 7960 if (CpuFeatures::IsSupported(VFP3)) { |
7961 // r3 as scratch. r0 is preserved and returned. | 7961 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. |
7962 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 7962 CpuFeatures::Scope scope(VFP3); |
7963 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 7963 __ vmov(s0, r2); |
7964 __ vcvt_f64_s32(d0, s0); | |
7965 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
7966 __ vstr(d0, r3, HeapNumber::kValueOffset); | |
7967 __ Ret(); | |
7968 } else { | |
7969 // Tail call that writes the int32 in r2 to the heap number in r0, using | |
7970 // r3 as scratch. r0 is preserved and returned. | |
7971 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | |
7972 __ TailCallStub(&stub); | |
7973 } | |
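Two details in the new hunk are worth noting. First, vstr takes a raw address, while V8 heap pointers are tagged: kHeapObjectTag (1) lives in the low bit, so the code subtracts the tag from r0 before applying HeapNumber::kValueOffset to reach the IEEE-754 payload. Second, the non-VFP3 arm now uses __ TailCallStub(&stub) rather than the old explicit __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); the effect is the same, the stub is entered without pushing a return address, so its own return goes straight back to our caller with r0 as the result.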
7964 | 7974 |
7965 if (mode_ != NO_OVERWRITE) { | 7975 if (mode_ != NO_OVERWRITE) { |
7966 __ bind(&have_to_allocate); | 7976 __ bind(&have_to_allocate); |
7967 // Get a new heap number in r5. r4 and r7 are scratch. | 7977 // Get a new heap number in r5. r4 and r7 are scratch. |
7968 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 7978 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
7969 __ jmp(&got_a_heap_number); | 7979 __ jmp(&got_a_heap_number); |
7970 } | 7980 } |
7971 | 7981 |
7972 // If all else failed then we go to the runtime system. | 7982 // If all else failed then we go to the runtime system. |
7973 __ bind(&slow); | 7983 __ bind(&slow); |
(...skipping 828 matching lines...) | |
8802 | 8812 |
8803 __ bind(&try_float); | 8813 __ bind(&try_float); |
8804 if (!overwrite_) { | 8814 if (!overwrite_) { |
8805 // Allocate a fresh heap number, but don't overwrite r0 until | 8815 // Allocate a fresh heap number, but don't overwrite r0 until |
8806 // we're sure we can do it without going through the slow case | 8816 // we're sure we can do it without going through the slow case |
8807 // that needs the value in r0. | 8817 // that needs the value in r0. |
8808 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | 8818 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
8809 __ mov(r0, Operand(r2)); | 8819 __ mov(r0, Operand(r2)); |
8810 } | 8820 } |
8811 | 8821 |
8812 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | 8822 if (CpuFeatures::IsSupported(VFP3)) { |
8813 // have to set up a frame. | 8823 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. |
8814 WriteInt32ToHeapNumberStub stub(r1, r0, r2); | 8824 CpuFeatures::Scope scope(VFP3); |
8815 __ push(lr); | 8825 __ vmov(s0, r1); |
8816 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | 8826 __ vcvt_f64_s32(d0, s0); |
8817 __ pop(lr); | 8827 __ sub(r2, r0, Operand(kHeapObjectTag)); |
8828 __ vstr(d0, r2, HeapNumber::kValueOffset); | |
8829 } else { | |
8830 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | |
8831 // have to set up a frame. | |
8832 WriteInt32ToHeapNumberStub stub(r1, r0, r2); | |
8833 __ push(lr); | |
8834 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | |
8835 __ pop(lr); | |
8836 } | |
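Unlike the binary-op site above, this one cannot tail-call: execution must fall through to the done label and StubReturn(1) below, so the non-VFP3 arm brackets the stub call with push(lr)/pop(lr) to keep the return address alive across __ Call. For the same reason the VFP3 arm here ends at the vstr and simply falls through, where the first hunk ended with an explicit __ Ret().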
8818 } else { | 8837 } else { |
8819 UNIMPLEMENTED(); | 8838 UNIMPLEMENTED(); |
8820 } | 8839 } |
8821 | 8840 |
8822 __ bind(&done); | 8841 __ bind(&done); |
8823 __ StubReturn(1); | 8842 __ StubReturn(1); |
8824 | 8843 |
8825 // Handle the slow case by jumping to the JavaScript builtin. | 8844 // Handle the slow case by jumping to the JavaScript builtin. |
8826 __ bind(&slow); | 8845 __ bind(&slow); |
8827 __ push(r0); | 8846 __ push(r0); |
(...skipping 2214 matching lines...) | |
11042 __ bind(&string_add_runtime); | 11061 __ bind(&string_add_runtime); |
11043 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 11062 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
11044 } | 11063 } |
11045 | 11064 |
11046 | 11065 |
11047 #undef __ | 11066 #undef __ |
11048 | 11067 |
11049 } } // namespace v8::internal | 11068 } } // namespace v8::internal |
11050 | 11069 |
11051 #endif // V8_TARGET_ARCH_ARM | 11070 #endif // V8_TARGET_ARCH_ARM |