// Copyright 2012 the V8 project authors. All rights reserved.
    rmcilroy_google 2013/09/17 17:28:16: 2013
    rkrithiv 2013/09/27 21:40:49: Done.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "arm/assembler-arm-inl.h"
#include "serialize.h"
namespace v8 {
namespace internal {
    rmcilroy_google 2013/09/17 17:28:16: I'm not sure whether this should be a separate fil…
    rkrithiv 2013/09/27 21:40:49: I agree that having separate AssemblerARM and Asse…
    rmcilroy 2013/10/01 10:26:04: My thoughts were along the same lines as yours. S…

// A.6.2.1 -> mode1
Instr16 Assembler::thumb16_mode1(ThumbMode1Opcode16 op) {
  return (op*B9);
}
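
// Note on the encoding style below: as elsewhere in assembler-arm, the Bn
// constants are single-bit masks (Bn == 1 << n), so multiplying a field by
// Bn places it at bit position n. In thumb16_mode1() above, for example,
// op*B9 puts the A.6.2.1 opcode into bits [13:9] of the halfword, matching
// the "Shift (immediate), add, subtract, move, and compare" table of the
// ARM ARM.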


// A.6.2.2 -> mode2
Instr16 Assembler::thumb16_mode2(ThumbMode2Opcode16 op) {
  return (B14 | op*B6);
}


// A.6.2.3 -> mode3
Instr16 Assembler::thumb16_mode3(ThumbMode3Opcode16 op) {
  return (B14 | B10 | op*B6);
}


// A.6.2.4 -> mode4, opA 0101 -> _1
Instr16 Assembler::thumb16_mode4_1(ThumbMode4_1Opcode16 opB) {
  return (B14 | B12 | opB*B9);
}


// A.6.2.4 -> mode4, opA 0110 -> _2
Instr16 Assembler::thumb16_mode4_2(ThumbMode4_2Opcode16 opB) {
  return (B14 | B13 | opB*B9);
}


// A.6.2.4 -> mode4, opA 0111 -> _3
Instr16 Assembler::thumb16_mode4_3(ThumbMode4_3Opcode16 opB) {
  return (B14 | B13 | B12 | opB*B9);
}


// A.6.2.4 -> mode4, opA 1000 -> _4
Instr16 Assembler::thumb16_mode4_4(ThumbMode4_4Opcode16 opB) {
  return (B15 | opB*B9);
}


// A.6.2.4 -> mode4, opA 1001 -> _5
Instr16 Assembler::thumb16_mode4_5(ThumbMode4_5Opcode16 opB) {
  return (B15 | B12 | opB*B9);
}
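
// The mode helpers above contribute only the fixed opcode bits in the upper
// part of the halfword; the register/immediate helpers below fill in the low
// bits, and an emitter would OR the two contributions together.
// Illustrative example (not part of this patch): "lsls r1, r2, #3" (T1)
// encodes as 00000 00011 010 001 == 0x00D1, i.e.
//   thumb16_mode1(<LSL opcode, 0 in the A.6.2.1 table>) |
//       thumb16_2lowreg_imm5_encoding(r1, r2, 3)
// == 0x0000 | (3*B6 | 2*B3 | 1) == 0x00D1.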


// two low reg r0-r7
Instr16 Assembler::thumb16_2lowreg_encoding(Register rd,
                                            const Operand& x) {
  return (x.rm_.code()*B3 | rd.code());
}


// two low reg r0-r7
Instr16 Assembler::thumb16_2lowreg_encoding(Register rd,
                                            Register rs) {
  return (rs.code()*B3 | rd.code());
}
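
// In the two-low-register forms the source register occupies bits [5:3] and
// the destination bits [2:0], so only r0-r7 (3-bit register codes) fit here.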


// imm3 with two low reg
Instr16 Assembler::thumb16_2lowreg_imm3_encoding(Register rd,
                                                 Register rn,
                                                 const Operand& x) {
  ASSERT(!x.rm_.is_valid());  // is Immediate.
  ASSERT(is_uint3(x.imm32_));
  uint16_t imm3 = x.imm32_ & 7;
  return (imm3*B6 | rn.code()*B3 | rd.code());
}
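
// The 3-bit immediate lands in bits [8:6]; this layout is used by the
// three-bit-immediate forms such as ADD/SUB (immediate) T1.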


// imm5 with two low reg
Instr16 Assembler::thumb16_2lowreg_imm5_encoding(Register rd,
                                                 Register rn,
                                                 uint32_t offset) {
  ASSERT(is_uint5(offset));
  uint16_t imm5 = offset & 0x1f;
  return (imm5*B6 | rn.code()*B3 | rd.code());
}


Instr16 Assembler::thumb16_2lowreg_imm5_encoding(Register rd,
                                                 Register rn,
                                                 const Operand& x) {
  ASSERT(!x.rs_.is_valid());
  ASSERT(is_uint5(x.shift_imm_));
  uint16_t imm5 = x.shift_imm_ & 0x1f;
  return (imm5*B6 | rn.code()*B3 | rd.code());
}
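
// The 5-bit immediate lands in bits [10:6], as used by the shift-immediate
// forms and by the offset field of the immediate load/store encodings.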


// three low reg
Instr16 Assembler::thumb16_3lowreg_encoding(Register rd,
                                            const MemOperand& x) {
  ASSERT(x.rm_.is_valid());  // is Register.
  return (x.rm_.code()*B6 | x.rn_.code()*B3 | rd.code());
}


Instr16 Assembler::thumb16_3lowreg_encoding(Register rd,
                                            Register rn,
                                            const Operand& x) {
  ASSERT(x.rm_.is_valid());  // is Register.
  return (x.rm_.code()*B6 | rn.code()*B3 | rd.code());
}
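
// Three-low-register layout: Rm in bits [8:6], Rn in bits [5:3], and the
// destination (Rd/Rt) in bits [2:0], as in ADD/SUB (register) T1 and the
// register-offset load/store forms.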


// one any reg
Instr16 Assembler::thumb16_anyreg_encoding(const Operand& x) {
  return (x.rm_.code()*B3);
}


// one any reg
Instr16 Assembler::thumb16_anyreg_encoding(const Register rm) {
  return (rm.code()*B3);
}
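
// A single full 4-bit register code in bits [6:3]; this is the Rm position
// used by BX/BLX and the other high-register forms, so any of r0-r15 fits.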


// two any reg
Instr16 Assembler::thumb16_2anyreg_encoding(Register rd, const Operand& x) {
  uint16_t d = rd.code() >> 3;
  return (d*B7 | x.rm_.code()*B3 | (rd.code() & 7));
}
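
// High-register two-operand layout: the top bit of Rd goes in bit 7, the
// full 4-bit Rm in bits [6:3], and the low three bits of Rd in bits [2:0],
// as in ADD (register) T2 and MOV (register) T1.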


// low reg with imm8
Instr16 Assembler::thumb16_lowreg_imm8_encoding(Register rd, uint32_t offset) {
  ASSERT(is_uint8(offset));
  uint16_t imm8 = offset & 0xff;
  return (rd.code()*B8 | imm8);
}


Instr16 Assembler::thumb16_lowreg_imm8_encoding(Register rd, const Operand& x) {
  ASSERT(!x.rm_.is_valid());  // Immediate.
  ASSERT(is_uint8(x.imm32_));
  uint16_t imm8 = x.imm32_ & 0xff;
  return (rd.code()*B8 | imm8);
}
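
// Low register plus 8-bit immediate: the register in bits [10:8] and imm8 in
// bits [7:0], as in MOV/CMP/ADD/SUB (immediate) and the SP- and PC-relative
// load/store and address forms.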


bool Assembler::is_low_reg(Register reg) {
  return is_uint3(reg.code());
}


bool Assembler::are_low_reg(Register reg1, Register reg2) {
  return is_uint3(reg1.code()) && is_uint3(reg2.code());
}


Instr16 Assembler::thumb16_instr_at(Address addr) {
  return Memory::int16_at(addr);
}


void Assembler::thumb16_instr_at_put(int pos, Instr16 instr) {
  *reinterpret_cast<Instr16*>(buffer_ + pos) = instr;
}
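
// These halfword accessors parallel the 32-bit instr_at()/instr_at_put()
// used for ARM instructions: Thumb instructions in the buffer are read and
// patched 16 bits at a time.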

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM