OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. | 2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
(...skipping 603 matching lines...)
614         // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if | 614         // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if |
615         // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation | 615         // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation |
616         // size is always less than 4Gb). As such zero extending will have been correct (and extending the value | 616         // size is always less than 4Gb). As such zero extending will have been correct (and extending the value |
617         // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign | 617         // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign |
618         // extending since it makes it easier to re-tag the value in the slow case. | 618         // extending since it makes it easier to re-tag the value in the slow case. |
619 zeroExtend32ToPtr(regT1, regT1); | 619 zeroExtend32ToPtr(regT1, regT1); |
620 #else | 620 #else |
621 emitFastArithImmToInt(regT1); | 621 emitFastArithImmToInt(regT1); |
622 #endif | 622 #endif |
623 emitJumpSlowCaseIfNotJSCell(regT0); | 623 emitJumpSlowCaseIfNotJSCell(regT0); |
624         addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_interpreter->m_jsArrayVptr))); | 624         addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); |
625 | 625 |
626         // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff | 626         // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff |
627 loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); | 627 loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); |
628         addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)))); | 628         addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)))); |
629 | 629 |
630 // Get the value from the vector | 630 // Get the value from the vector |
631         loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0); | 631         loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0); |
632 emitPutVirtualRegister(currentInstruction[1].u.operand); | 632 emitPutVirtualRegister(currentInstruction[1].u.operand); |
633 NEXT_OPCODE(op_get_by_val); | 633 NEXT_OPCODE(op_get_by_val); |
634 } | 634 } |
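
Aside, for readers following this fast path: the emitted machine code is equivalent to the C++ sketch below. The struct layouts here are simplified, hypothetical stand-ins (the real JSArray and ArrayStorage have more fields), and the vptr test is shown as a plain pointer comparison; only the control flow mirrors the hunk above.

```cpp
#include <cstdint>

// Simplified, hypothetical stand-ins for JSArray/ArrayStorage layouts.
struct ArrayStorage {
    uint32_t m_length;
    uint32_t m_vectorLength;
    void* m_vector[1]; // grows beyond 1 slot in the real allocation
};

struct JSArray {
    void* vptr;                // compared against jsArrayVPtr on the fast path
    uint32_t m_fastAccessCutoff;
    ArrayStorage* m_storage;
};

// What the op_get_by_val fast path computes; returns nullptr where the
// emitted code would instead branch to a slow case.
void* getByValFastPath(JSArray* base, int64_t taggedIndex, void* jsArrayVPtr)
{
    // The index was already checked to be an immediate integer; per the
    // comment above, the 32-bit value is zero-extended before being used
    // in the address calculation.
    uint64_t index = static_cast<uint32_t>(taggedIndex);
    if (base->vptr != jsArrayVPtr)
        return nullptr; // not an array: slow case
    ArrayStorage* storage = base->m_storage;
    if (index >= base->m_fastAccessCutoff)
        return nullptr; // beyond the always-populated prefix: slow case
    return storage->m_vector[index]; // slot known to hold a value; no hole check
}
```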
(...skipping 12 matching lines...)
647 case op_put_by_val: { | 647 case op_put_by_val: { |
648         emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1); | 648         emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1); |
649 emitJumpSlowCaseIfNotImmediateInteger(regT1); | 649 emitJumpSlowCaseIfNotImmediateInteger(regT1); |
650 #if USE(ALTERNATE_JSIMMEDIATE) | 650 #if USE(ALTERNATE_JSIMMEDIATE) |
651 // See comment in op_get_by_val. | 651 // See comment in op_get_by_val. |
652 zeroExtend32ToPtr(regT1, regT1); | 652 zeroExtend32ToPtr(regT1, regT1); |
653 #else | 653 #else |
654 emitFastArithImmToInt(regT1); | 654 emitFastArithImmToInt(regT1); |
655 #endif | 655 #endif |
656 emitJumpSlowCaseIfNotJSCell(regT0); | 656 emitJumpSlowCaseIfNotJSCell(regT0); |
657 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_interpreter
->m_jsArrayVptr))); | 657 addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData-
>jsArrayVPtr))); |
658 | 658 |
659         // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff | 659         // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff |
660 loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); | 660 loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); |
661         Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))); | 661         Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))); |
662         // No; oh well, check if the access is within the vector - if so, we may still be okay. | 662         // No; oh well, check if the access is within the vector - if so, we may still be okay. |
663         addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)))); | 663         addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)))); |
664 | 664 |
665         // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location. | 665         // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location. |
666         // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff. | 666         // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff. |
667         addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])))); | 667         addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])))); |
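
The put path adds one wrinkle over the get path: a write past the fast cutoff can still hit the allocated vector, but only if the slot was written before, since a null slot would require the length/cutoff bookkeeping that the FIXME above defers to the slow case. A sketch under the same assumptions, reusing the simplified JSArray/ArrayStorage stand-ins from the previous example:

```cpp
// What the op_put_by_val fast path does; returns false where the emitted
// code would take a slow case. Reuses the hypothetical structs above.
bool putByValFastPath(JSArray* base, int64_t taggedIndex, void* value,
                      void* jsArrayVPtr)
{
    uint64_t index = static_cast<uint32_t>(taggedIndex); // zero-extended index
    if (base->vptr != jsArrayVPtr)
        return false; // not an array
    ArrayStorage* storage = base->m_storage;
    if (index < base->m_fastAccessCutoff) {
        storage->m_vector[index] = value; // fast region: just store
        return true;
    }
    if (index >= storage->m_vectorLength)
        return false; // outside the allocated vector
    if (!storage->m_vector[index])
        return false; // first write to this slot: bookkeeping needed
    storage->m_vector[index] = value; // overwrite of an existing entry
    return true;
}
```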
(...skipping 996 matching lines...)
1664 emitCTICall(JITStubs::cti_register_file_check); | 1664 emitCTICall(JITStubs::cti_register_file_check); |
1665 #ifndef NDEBUG | 1665 #ifndef NDEBUG |
1666         // reset this, in order to guard its use with asserts | 1666         // reset this, in order to guard its use with asserts |
1667 m_bytecodeIndex = (unsigned)-1; | 1667 m_bytecodeIndex = (unsigned)-1; |
1668 #endif | 1668 #endif |
1669 jump(afterRegisterFileCheck); | 1669 jump(afterRegisterFileCheck); |
1670 } | 1670 } |
1671 | 1671 |
1672 ASSERT(m_jmpTable.isEmpty()); | 1672 ASSERT(m_jmpTable.isEmpty()); |
1673 | 1673 |
1674     RefPtr<ExecutablePool> allocator = m_globalData->poolForSize(m_assembler.size()); | 1674     RefPtr<ExecutablePool> allocator = m_globalData->executableAllocator.poolForSize(m_assembler.size()); |
1675 void* code = m_assembler.executableCopy(allocator.get()); | 1675 void* code = m_assembler.executableCopy(allocator.get()); |
1676 JITCodeRef codeRef(code, allocator); | 1676 JITCodeRef codeRef(code, allocator); |
1677 #ifndef NDEBUG | 1677 #ifndef NDEBUG |
1678 codeRef.codeSize = m_assembler.size(); | 1678 codeRef.codeSize = m_assembler.size(); |
1679 #endif | 1679 #endif |
1680 | 1680 |
1681 PatchBuffer patchBuffer(code); | 1681 PatchBuffer patchBuffer(code); |
1682 | 1682 |
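
The allocate-copy-link sequence above follows a common JIT finalization pattern: size an executable pool from the assembler buffer, copy the generated code into it, then keep a reference to the pool alongside the code pointer so the memory outlives compilation. A minimal sketch of that ownership pattern; the names echo JITCodeRef/ExecutablePool from the diff, but the bodies are illustrative only (real pools allocate with mmap/VirtualAlloc and executable permissions):

```cpp
#include <cstddef>
#include <cstring>
#include <memory>

// Illustrative stand-in: owns a block of (notionally executable) memory.
struct ExecutablePool {
    explicit ExecutablePool(size_t size) : m_base(new char[size]) {}
    void* alloc() { return m_base.get(); }
    std::unique_ptr<char[]> m_base; // real pools use mmap/VirtualAlloc
};

// Code pointer plus the pool that keeps it alive, as JITCodeRef does.
struct CodeRef {
    void* code = nullptr;
    std::shared_ptr<ExecutablePool> pool; // refcount pins the memory
};

CodeRef finalize(const char* assemblerBuffer, size_t size)
{
    CodeRef ref;
    ref.pool = std::make_shared<ExecutablePool>(size); // poolForSize(size)
    ref.code = ref.pool->alloc();
    std::memcpy(ref.code, assemblerBuffer, size);      // executableCopy
    return ref; // whoever holds the CodeRef keeps the pool (and code) alive
}
```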
1683     // Translate vPC offsets into addresses in JIT generated code, for switch tables. | 1683     // Translate vPC offsets into addresses in JIT generated code, for switch tables. |
1684 for (unsigned i = 0; i < m_switches.size(); ++i) { | 1684 for (unsigned i = 0; i < m_switches.size(); ++i) { |
(...skipping 56 matching lines...)
1741         info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation); | 1741         info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation); |
1742         info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); | 1742         info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); |
1743         info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); | 1743         info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); |
1744         info.coldPathOther = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].coldPathOther); | 1744         info.coldPathOther = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].coldPathOther); |
1745 } | 1745 } |
1746 #endif | 1746 #endif |
1747 | 1747 |
1748 m_codeBlock->setJITCode(codeRef); | 1748 m_codeBlock->setJITCode(codeRef); |
1749 } | 1749 } |
1750 | 1750 |
1751 void JIT::privateCompileCTIMachineTrampolines() | 1751 void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall) |
1752 { | 1752 { |
1753 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) | 1753 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
1754 // (1) The first function provides fast property access for array length | 1754 // (1) The first function provides fast property access for array length |
1755 Label arrayLengthBegin = align(); | 1755 Label arrayLengthBegin = align(); |
1756 | 1756 |
1757 // Check eax is an array | 1757 // Check eax is an array |
1758 Jump array_failureCases1 = emitJumpIfNotJSCell(regT0); | 1758 Jump array_failureCases1 = emitJumpIfNotJSCell(regT0); |
1759     Jump array_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_interpreter->m_jsArrayVptr)); | 1759     Jump array_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)); |
1760 | 1760 |
1761 // Checks out okay! - get the length from the storage | 1761 // Checks out okay! - get the length from the storage |
1762 loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT0); | 1762 loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT0); |
1763 load32(Address(regT0, FIELD_OFFSET(ArrayStorage, m_length)), regT0); | 1763 load32(Address(regT0, FIELD_OFFSET(ArrayStorage, m_length)), regT0); |
1764 | 1764 |
1765     Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt)); | 1765     Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt)); |
1766 | 1766 |
1767     // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need to sign extend here. | 1767     // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need to sign extend here. |
1768 emitFastArithIntToImmNoCheck(regT0, regT0); | 1768 emitFastArithIntToImmNoCheck(regT0, regT0); |
1769 | 1769 |
1770 ret(); | 1770 ret(); |
1771 | 1771 |
1772 // (2) The second function provides fast property access for string length | 1772 // (2) The second function provides fast property access for string length |
1773 Label stringLengthBegin = align(); | 1773 Label stringLengthBegin = align(); |
1774 | 1774 |
1775 // Check eax is a string | 1775 // Check eax is a string |
1776 Jump string_failureCases1 = emitJumpIfNotJSCell(regT0); | 1776 Jump string_failureCases1 = emitJumpIfNotJSCell(regT0); |
1777     Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_interpreter->m_jsStringVptr)); | 1777     Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)); |
1778 | 1778 |
1779     // Checks out okay! - get the length from the UString. | 1779     // Checks out okay! - get the length from the UString. |
1780     loadPtr(Address(regT0, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), regT0); | 1780     loadPtr(Address(regT0, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), regT0); |
1781 load32(Address(regT0, FIELD_OFFSET(UString::Rep, len)), regT0); | 1781 load32(Address(regT0, FIELD_OFFSET(UString::Rep, len)), regT0); |
1782 | 1782 |
1783     Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt)); | 1783     Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt)); |
1784 | 1784 |
1785     // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need to sign extend here. | 1785     // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need to sign extend here. |
1786 emitFastArithIntToImmNoCheck(regT0, regT0); | 1786 emitFastArithIntToImmNoCheck(regT0, regT0); |
1787 | 1787 |
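
Both length trampolines, (1) and (2), share one shape: type-check the cell via its vtable pointer, load the length field through the storage indirection, bail to a C++ stub if the length cannot be represented as an immediate integer, then retag and return. A sketch of that shared shape; the layouts and the immediate limit are hypothetical stand-ins, and the retagging step (emitFastArithIntToImmNoCheck) is simplified to a plain cast:

```cpp
#include <cstdint>

// Hypothetical limit/layout; the real ones live in JSImmediate and JSArray/JSString.
constexpr uint32_t kMaxImmediateInt = 0x7fffffff;

struct LengthHolder {
    void* vptr;      // checked against jsArrayVPtr / jsStringVPtr
    uint32_t length; // stands in for ArrayStorage::m_length / UString::Rep::len
};

// Returns true and writes the (simplified) retagged length, or false where
// the emitted code would tail-call the cti_op_get_by_id_*_fail stub.
bool lengthFastPath(LengthHolder* cell, void* expectedVPtr, int64_t* result)
{
    if (cell->vptr != expectedVPtr)
        return false;                     // wrong type: fail stub
    uint32_t len = cell->length;
    if (len > kMaxImmediateInt)
        return false;                     // won't fit in an immediate
    *result = static_cast<int64_t>(len);  // retag step simplified here
    return true;
}
```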
(...skipping 117 matching lines...)
1905 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) | 1905 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
1906 Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1); | 1906 Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1); |
1907 Call array_failureCases2Call = makeTailRecursiveCall(array_failureCases2); | 1907 Call array_failureCases2Call = makeTailRecursiveCall(array_failureCases2); |
1908 Call array_failureCases3Call = makeTailRecursiveCall(array_failureCases3); | 1908 Call array_failureCases3Call = makeTailRecursiveCall(array_failureCases3); |
1909 Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1); | 1909 Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1); |
1910 Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2); | 1910 Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2); |
1911 Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3); | 1911 Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3); |
1912 #endif | 1912 #endif |
1913 | 1913 |
1914     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object. | 1914     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object. |
1915     m_interpreter->m_executablePool = m_globalData->poolForSize(m_assembler.size()); | 1915     *executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size()); |
1916     void* code = m_assembler.executableCopy(m_interpreter->m_executablePool.get()); | 1916     void* code = m_assembler.executableCopy((*executablePool).get()); |
| 1917 |
1917 PatchBuffer patchBuffer(code); | 1918 PatchBuffer patchBuffer(code); |
1918 | |
1919 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) | 1919 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) |
1920     patchBuffer.link(array_failureCases1Call, JITStubs::cti_op_get_by_id_array_fail); | 1920     patchBuffer.link(array_failureCases1Call, JITStubs::cti_op_get_by_id_array_fail); |
1921     patchBuffer.link(array_failureCases2Call, JITStubs::cti_op_get_by_id_array_fail); | 1921     patchBuffer.link(array_failureCases2Call, JITStubs::cti_op_get_by_id_array_fail); |
1922     patchBuffer.link(array_failureCases3Call, JITStubs::cti_op_get_by_id_array_fail); | 1922     patchBuffer.link(array_failureCases3Call, JITStubs::cti_op_get_by_id_array_fail); |
1923     patchBuffer.link(string_failureCases1Call, JITStubs::cti_op_get_by_id_string_fail); | 1923     patchBuffer.link(string_failureCases1Call, JITStubs::cti_op_get_by_id_string_fail); |
1924     patchBuffer.link(string_failureCases2Call, JITStubs::cti_op_get_by_id_string_fail); | 1924     patchBuffer.link(string_failureCases2Call, JITStubs::cti_op_get_by_id_string_fail); |
1925     patchBuffer.link(string_failureCases3Call, JITStubs::cti_op_get_by_id_string_fail); | 1925     patchBuffer.link(string_failureCases3Call, JITStubs::cti_op_get_by_id_string_fail); |
1926 | 1926 |
1927     m_interpreter->m_ctiArrayLengthTrampoline = patchBuffer.trampolineAt(arrayLengthBegin); | 1927     *ctiArrayLengthTrampoline = patchBuffer.trampolineAt(arrayLengthBegin); |
1928     m_interpreter->m_ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin); | 1928     *ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin); |
| 1929 #else |
| 1930 UNUSED_PARAM(ctiArrayLengthTrampoline); |
| 1931 UNUSED_PARAM(ctiStringLengthTrampoline); |
1929 #endif | 1932 #endif |
1930 patchBuffer.link(callArityCheck1, JITStubs::cti_op_call_arityCheck); | 1933 patchBuffer.link(callArityCheck1, JITStubs::cti_op_call_arityCheck); |
1931 patchBuffer.link(callArityCheck2, JITStubs::cti_op_call_arityCheck); | 1934 patchBuffer.link(callArityCheck2, JITStubs::cti_op_call_arityCheck); |
1932 patchBuffer.link(callArityCheck3, JITStubs::cti_op_call_arityCheck); | 1935 patchBuffer.link(callArityCheck3, JITStubs::cti_op_call_arityCheck); |
1933 patchBuffer.link(callJSFunction1, JITStubs::cti_op_call_JSFunction); | 1936 patchBuffer.link(callJSFunction1, JITStubs::cti_op_call_JSFunction); |
1934 patchBuffer.link(callJSFunction2, JITStubs::cti_op_call_JSFunction); | 1937 patchBuffer.link(callJSFunction2, JITStubs::cti_op_call_JSFunction); |
1935 patchBuffer.link(callJSFunction3, JITStubs::cti_op_call_JSFunction); | 1938 patchBuffer.link(callJSFunction3, JITStubs::cti_op_call_JSFunction); |
1936 patchBuffer.link(callDontLazyLinkCall, JITStubs::cti_vm_dontLazyLinkCall); | 1939 patchBuffer.link(callDontLazyLinkCall, JITStubs::cti_vm_dontLazyLinkCall); |
1937 patchBuffer.link(callLazyLinkCall, JITStubs::cti_vm_lazyLinkCall); | 1940 patchBuffer.link(callLazyLinkCall, JITStubs::cti_vm_lazyLinkCall); |
1938 | 1941 |
1939     m_interpreter->m_ctiVirtualCallPreLink = patchBuffer.trampolineAt(virtualCallPreLinkBegin); | 1942     *ctiVirtualCallPreLink = patchBuffer.trampolineAt(virtualCallPreLinkBegin); |
1940     m_interpreter->m_ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin); | 1943     *ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin); |
1941     m_interpreter->m_ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin); | 1944     *ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin); |
1942 } | 1945 } |
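
The signature change turns the trampoline build into a pure producer: instead of the JIT writing into Interpreter members, the caller supplies out-parameters and decides where the pointers live (elsewhere in this patch, presumably on JSGlobalData). A hypothetical call site for the new signature; the holder type and its field names are illustrative, not taken from the patch, and this fragment assumes the JavaScriptCore internals (JIT, RefPtr, ExecutablePool) are in scope:

```cpp
// Hypothetical holder mirroring the six out-parameters; illustrative only.
struct TrampolineSet {
    RefPtr<ExecutablePool> executablePool;
    void* ctiArrayLengthTrampoline;
    void* ctiStringLengthTrampoline;
    void* ctiVirtualCallPreLink;
    void* ctiVirtualCallLink;
    void* ctiVirtualCall;
};

void buildTrampolines(JIT& jit, TrampolineSet& t)
{
    // Parameter order matches the new declaration at line 1751.
    jit.privateCompileCTIMachineTrampolines(
        &t.executablePool,
        &t.ctiArrayLengthTrampoline, &t.ctiStringLengthTrampoline,
        &t.ctiVirtualCallPreLink, &t.ctiVirtualCallLink, &t.ctiVirtualCall);
}
```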
1943 | 1946 |
1944 void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst) | 1947 void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst) |
1945 { | 1948 { |
1946 loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), dst); | 1949 loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), dst); |
1947     loadPtr(Address(dst, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), dst); | 1950     loadPtr(Address(dst, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), dst); |
1948 loadPtr(Address(dst, index * sizeof(Register)), dst); | 1951 loadPtr(Address(dst, index * sizeof(Register)), dst); |
1949 } | 1952 } |
1950 | 1953 |
1951 void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index) | 1954 void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index) |
1952 { | 1955 { |
1953     loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), variableObject); | 1956     loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), variableObject); |
1954     loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), variableObject); | 1957     loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), variableObject); |
1955 storePtr(src, Address(variableObject, index * sizeof(Register))); | 1958 storePtr(src, Address(variableObject, index * sizeof(Register))); |
1956 } | 1959 } |
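
Both helpers chase the same two-level indirection: the JSVariableObject holds a data pointer d, which in turn holds the registers base, and the register then sits at byte offset index * sizeof(Register). A C++ sketch with simplified, hypothetical field types:

```cpp
// Simplified, hypothetical stand-ins for the real JSVariableObject layout.
using Register = void*;

struct JSVariableObjectData {
    Register* registers; // base of this object's register slice
};

struct JSVariableObject {
    JSVariableObjectData* d;
};

// Mirrors emitGetVariableObjectRegister: object->d->registers[index].
Register getVariableObjectRegister(JSVariableObject* object, int index)
{
    return object->d->registers[index];
}

// Mirrors emitPutVariableObjectRegister: store through the same chain.
void putVariableObjectRegister(Register src, JSVariableObject* object, int index)
{
    object->d->registers[index] = src;
}
```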
1957 | 1960 |
1958 } // namespace JSC | 1961 } // namespace JSC |
1959 | 1962 |
1960 #endif // ENABLE(JIT) | 1963 #endif // ENABLE(JIT) |
1961 | 1964 |
1962 | 1965 |