| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 12 matching lines...) Expand all Loading... |
| 23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | 23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
| 24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | 24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
| 25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | 25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
| 26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | 26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
| 27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | 27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| 28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 30 | 30 |
| 31 // The original source code covered by the above license has been | 31 // The original source code covered by the above license has been |
| 32 // modified significantly by Google Inc. | 32 // modified significantly by Google Inc. |
| 33 // Copyright 2010 the V8 project authors. All rights reserved. | 33 // Copyright 2011 the V8 project authors. All rights reserved. |
| 34 | 34 |
| 35 // A light-weight IA32 Assembler. | 35 // A light-weight IA32 Assembler. |
| 36 | 36 |
| 37 #ifndef V8_IA32_ASSEMBLER_IA32_H_ | 37 #ifndef V8_IA32_ASSEMBLER_IA32_H_ |
| 38 #define V8_IA32_ASSEMBLER_IA32_H_ | 38 #define V8_IA32_ASSEMBLER_IA32_H_ |
| 39 | 39 |
| 40 #include "serialize.h" | 40 #include "serialize.h" |
| 41 | 41 |
| 42 namespace v8 { | 42 namespace v8 { |
| 43 namespace internal { | 43 namespace internal { |
| (...skipping 13 matching lines...) Expand all Loading... |
| 57 // | 57 // |
| 58 // 3) By not using an enum, we are possibly preventing the compiler from | 58 // 3) By not using an enum, we are possibly preventing the compiler from |
| 59 // doing certain constant folds, which may significantly reduce the | 59 // doing certain constant folds, which may significantly reduce the |
| 60 // code generated for some assembly instructions (because they boil down | 60 // code generated for some assembly instructions (because they boil down |
| 61 // to a few constants). If this is a problem, we could change the code | 61 // to a few constants). If this is a problem, we could change the code |
| 62 // such that we use an enum in optimized mode, and the struct in debug | 62 // such that we use an enum in optimized mode, and the struct in debug |
| 63 // mode. This way we get the compile-time error checking in debug mode | 63 // mode. This way we get the compile-time error checking in debug mode |
| 64 // and best performance in optimized code. | 64 // and best performance in optimized code. |
| 65 // | 65 // |
| 66 struct Register { | 66 struct Register { |
| 67 static const int kNumAllocatableRegisters = 5; | 67 static const int kNumAllocatableRegisters = 6; |
| 68 static const int kNumRegisters = 8; | 68 static const int kNumRegisters = 8; |
| 69 | 69 |
| 70 static int ToAllocationIndex(Register reg) { | 70 static inline const char* AllocationIndexToString(int index); |
| 71 ASSERT(reg.code() < 4 || reg.code() == 7); | |
| 72 return (reg.code() == 7) ? 4 : reg.code(); | |
| 73 } | |
| 74 | 71 |
| 75 static Register FromAllocationIndex(int index) { | 72 static inline int ToAllocationIndex(Register reg); |
| 76 ASSERT(index >= 0 && index < kNumAllocatableRegisters); | |
| 77 return (index == 4) ? from_code(7) : from_code(index); | |
| 78 } | |
| 79 | 73 |
| 80 static const char* AllocationIndexToString(int index) { | 74 static inline Register FromAllocationIndex(int index); |
| 81 ASSERT(index >= 0 && index < kNumAllocatableRegisters); | |
| 82 const char* const names[] = { | |
| 83 "eax", | |
| 84 "ecx", | |
| 85 "edx", | |
| 86 "ebx", | |
| 87 "edi" | |
| 88 }; | |
| 89 return names[index]; | |
| 90 } | |
| 91 | 75 |
| 92 static Register from_code(int code) { | 76 static Register from_code(int code) { |
| 93 ASSERT(code >= 0); | 77 ASSERT(code >= 0); |
| 94 ASSERT(code < kNumRegisters); | 78 ASSERT(code < kNumRegisters); |
| 95 Register r = { code }; | 79 Register r = { code }; |
| 96 return r; | 80 return r; |
| 97 } | 81 } |
| 98 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } | 82 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } |
| 99 bool is(Register reg) const { return code_ == reg.code_; } | 83 bool is(Register reg) const { return code_ == reg.code_; } |
| 100 // eax, ebx, ecx and edx are byte registers, the rest are not. | 84 // eax, ebx, ecx and edx are byte registers, the rest are not. |
| 101 bool is_byte_register() const { return code_ <= 3; } | 85 bool is_byte_register() const { return code_ <= 3; } |
| 102 int code() const { | 86 int code() const { |
| 103 ASSERT(is_valid()); | 87 ASSERT(is_valid()); |
| 104 return code_; | 88 return code_; |
| 105 } | 89 } |
| 106 int bit() const { | 90 int bit() const { |
| 107 ASSERT(is_valid()); | 91 ASSERT(is_valid()); |
| 108 return 1 << code_; | 92 return 1 << code_; |
| 109 } | 93 } |
| 110 | 94 |
| 111 // Unfortunately we can't make this private in a struct. | 95 // Unfortunately we can't make this private in a struct. |
| 112 int code_; | 96 int code_; |
| 113 }; | 97 }; |
| 114 | 98 |
| 99 |
| 115 const Register eax = { 0 }; | 100 const Register eax = { 0 }; |
| 116 const Register ecx = { 1 }; | 101 const Register ecx = { 1 }; |
| 117 const Register edx = { 2 }; | 102 const Register edx = { 2 }; |
| 118 const Register ebx = { 3 }; | 103 const Register ebx = { 3 }; |
| 119 const Register esp = { 4 }; | 104 const Register esp = { 4 }; |
| 120 const Register ebp = { 5 }; | 105 const Register ebp = { 5 }; |
| 121 const Register esi = { 6 }; | 106 const Register esi = { 6 }; |
| 122 const Register edi = { 7 }; | 107 const Register edi = { 7 }; |
| 123 const Register no_reg = { -1 }; | 108 const Register no_reg = { -1 }; |
| 124 | 109 |
| 125 | 110 |
| 111 inline const char* Register::AllocationIndexToString(int index) { |
| 112 ASSERT(index >= 0 && index < kNumAllocatableRegisters); |
| 113 // This is the mapping of allocation indices to registers. |
| 114 const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" }; |
| 115 return kNames[index]; |
| 116 } |
| 117 |
| 118 |
| 119 inline int Register::ToAllocationIndex(Register reg) { |
| 120 ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp)); |
| 121 return (reg.code() >= 6) ? reg.code() - 2 : reg.code(); |
| 122 } |
| 123 |
| 124 |
| 125 inline Register Register::FromAllocationIndex(int index) { |
| 126 ASSERT(index >= 0 && index < kNumAllocatableRegisters); |
| 127 return (index >= 4) ? from_code(index + 2) : from_code(index); |
| 128 } |
| 129 |
| 130 |
| 126 struct XMMRegister { | 131 struct XMMRegister { |
| 127 static const int kNumAllocatableRegisters = 7; | 132 static const int kNumAllocatableRegisters = 7; |
| 128 static const int kNumRegisters = 8; | 133 static const int kNumRegisters = 8; |
| 129 | 134 |
| 130 static int ToAllocationIndex(XMMRegister reg) { | 135 static int ToAllocationIndex(XMMRegister reg) { |
| 131 ASSERT(reg.code() != 0); | 136 ASSERT(reg.code() != 0); |
| 132 return reg.code() - 1; | 137 return reg.code() - 1; |
| 133 } | 138 } |
| 134 | 139 |
| 135 static XMMRegister FromAllocationIndex(int index) { | 140 static XMMRegister FromAllocationIndex(int index) { |
| (...skipping 778 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 914 // Use either movsd or movlpd. | 919 // Use either movsd or movlpd. |
| 915 void movdbl(XMMRegister dst, const Operand& src); | 920 void movdbl(XMMRegister dst, const Operand& src); |
| 916 void movdbl(const Operand& dst, XMMRegister src); | 921 void movdbl(const Operand& dst, XMMRegister src); |
| 917 | 922 |
| 918 void movd(XMMRegister dst, const Operand& src); | 923 void movd(XMMRegister dst, const Operand& src); |
| 919 void movd(const Operand& src, XMMRegister dst); | 924 void movd(const Operand& src, XMMRegister dst); |
| 920 void movsd(XMMRegister dst, XMMRegister src); | 925 void movsd(XMMRegister dst, XMMRegister src); |
| 921 | 926 |
| 922 void pand(XMMRegister dst, XMMRegister src); | 927 void pand(XMMRegister dst, XMMRegister src); |
| 923 void pxor(XMMRegister dst, XMMRegister src); | 928 void pxor(XMMRegister dst, XMMRegister src); |
| 929 void por(XMMRegister dst, XMMRegister src); |
| 924 void ptest(XMMRegister dst, XMMRegister src); | 930 void ptest(XMMRegister dst, XMMRegister src); |
| 925 | 931 |
| 926 void psllq(XMMRegister reg, int8_t shift); | 932 void psllq(XMMRegister reg, int8_t shift); |
| 933 void psllq(XMMRegister dst, XMMRegister src); |
| 934 void psrlq(XMMRegister reg, int8_t shift); |
| 935 void psrlq(XMMRegister dst, XMMRegister src); |
| 927 void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle); | 936 void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle); |
| 928 void pextrd(const Operand& dst, XMMRegister src, int8_t offset); | 937 void pextrd(const Operand& dst, XMMRegister src, int8_t offset); |
| 938 void pinsrd(XMMRegister dst, const Operand& src, int8_t offset); |
| 929 | 939 |
| 930 // Parallel XMM operations. | 940 // Parallel XMM operations. |
| 931 void movntdqa(XMMRegister src, const Operand& dst); | 941 void movntdqa(XMMRegister src, const Operand& dst); |
| 932 void movntdq(const Operand& dst, XMMRegister src); | 942 void movntdq(const Operand& dst, XMMRegister src); |
| 933 // Prefetch src position into cache level. | 943 // Prefetch src position into cache level. |
| 934 // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a | 944 // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a |
| 935 // non-temporal | 945 // non-temporal |
| 936 void prefetch(const Operand& src, int level); | 946 void prefetch(const Operand& src, int level); |
| 937 // TODO(lrn): Need SFENCE for movnt? | 947 // TODO(lrn): Need SFENCE for movnt? |
| 938 | 948 |
| 939 // Debugging | 949 // Debugging |
| 940 void Print(); | 950 void Print(); |
| 941 | 951 |
| 942 // Check the code size generated from label to here. | 952 // Check the code size generated from label to here. |
| 943 int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); } | 953 int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); } |
| 944 | 954 |
| 945 // Mark address of the ExitJSFrame code. | 955 // Mark address of the ExitJSFrame code. |
| 946 void RecordJSReturn(); | 956 void RecordJSReturn(); |
| 947 | 957 |
| 948 // Mark address of a debug break slot. | 958 // Mark address of a debug break slot. |
| 949 void RecordDebugBreakSlot(); | 959 void RecordDebugBreakSlot(); |
| 950 | 960 |
| 951 // Record a comment relocation entry that can be used by a disassembler. | 961 // Record a comment relocation entry that can be used by a disassembler. |
| 952 // Use --code-comments to enable. | 962 // Use --code-comments to enable, or provide "force = true" flag to always |
| 953 void RecordComment(const char* msg); | 963 // write a comment. |
| 964 void RecordComment(const char* msg, bool force = false); |
| 954 | 965 |
| 955 // Writes a single byte or word of data in the code stream. Used for | 966 // Writes a single byte or word of data in the code stream. Used for |
| 956 // inline tables, e.g., jump-tables. | 967 // inline tables, e.g., jump-tables. |
| 957 void db(uint8_t data); | 968 void db(uint8_t data); |
| 958 void dd(uint32_t data); | 969 void dd(uint32_t data); |
| 959 | 970 |
| 960 int pc_offset() const { return pc_ - buffer_; } | 971 int pc_offset() const { return pc_ - buffer_; } |
| 961 | 972 |
| 962 // Check if there is less than kGap bytes available in the buffer. | 973 // Check if there is less than kGap bytes available in the buffer. |
| 963 // If this is the case, we need to grow the buffer before emitting | 974 // If this is the case, we need to grow the buffer before emitting |
| (...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1078 private: | 1089 private: |
| 1079 Assembler* assembler_; | 1090 Assembler* assembler_; |
| 1080 #ifdef DEBUG | 1091 #ifdef DEBUG |
| 1081 int space_before_; | 1092 int space_before_; |
| 1082 #endif | 1093 #endif |
| 1083 }; | 1094 }; |
| 1084 | 1095 |
| 1085 } } // namespace v8::internal | 1096 } } // namespace v8::internal |
| 1086 | 1097 |
| 1087 #endif // V8_IA32_ASSEMBLER_IA32_H_ | 1098 #endif // V8_IA32_ASSEMBLER_IA32_H_ |
| OLD | NEW |