Index: src/arm/assembler-thumb32.cc
diff --git a/src/arm/assembler-thumb32.cc b/src/arm/assembler-thumb32.cc
new file mode 100644
index 0000000000000000000000000000000000000000..06e748a39d399b5f0061324928cbb417c3a4732d
--- /dev/null
+++ b/src/arm/assembler-thumb32.cc
@@ -0,0 +1,386 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_ARM)
+
+#include "arm/assembler-arm-inl.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
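+// A note on the bit constants used below (assuming the BH*/B* definitions
+// from assembler-arm.h): BHn is bit n of the first (high) halfword, i.e.
+// bit n + 16 of the combined 32-bit Instr, while Bn is bit n of the second
+// (low) halfword.  So in thumb32_mode1, BH15 | BH14 | BH13 | BH12 produces
+// the 11110 prefix of the modified-immediate group, and op*BH5 places the
+// opcode at bits 21 and up.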
+Instr Assembler::thumb32_mode1(ThumbOpcode32Mode1 op, SBit s) {
+  return (BH15 | BH14 | BH13 | BH12 | op*BH5 | s);
+}
+
+
+Instr Assembler::thumb32_mode3(ThumbOpcode32Mode3 op) {
+  return (BH15 | BH14 | BH13 | BH12 | BH9 | op*BH4);
+}
+
+
+Instr Assembler::thumb32_mode4(ThumbOpcode32Mode4 op) {
+  return (BH15 | BH14 | BH13 | BH12 | B15 | op*B12);
+}
+
+
+Instr Assembler::thumb32_mode5() {
+  return (BH15 | BH14 | BH13 | BH11);
+}
+
+
+Instr Assembler::thumb32_mode6(ThumbOpcode32Mode6 op1,
+                               ThumbOpcode32Mode6 op2) {
+  return (BH15 | BH14 | BH13 | BH11 | op1*BH7 | BH6 | op2*BH4);
+}
+
+
+Instr Assembler::thumb32_mode7(ThumbOpcode32Mode7 op) {
+  return (BH15 | BH14 | BH13 | BH12 | BH11 | op*BH7 | BH6 | BH4);
+}
+
+
+Instr Assembler::thumb32_mode8(ThumbOpcode32Mode8 op) {
+  return (BH15 | BH14 | BH13 | BH12 | BH11 | op*BH7 | BH5 | BH4);
+}
+
+
+Instr Assembler::thumb32_mode9(ThumbOpcode32Mode9 op) {
+  return (BH15 | BH14 | BH13 | BH12 | BH11 | op*BH7 | BH4);
+}
+
+
+Instr Assembler::thumb32_mode10(ThumbOpcode32Mode10 op) {
+  return (BH15 | BH14 | BH13 | BH12 | BH11 | op*BH5);
+}
+
+
+Instr Assembler::thumb32_mode11(ThumbOpcode32Mode11 op, SBit s) {
+  return (BH15 | BH14 | BH13 | BH11 | BH9 | op*BH5 | s);
+}
+
+
+Instr Assembler::thumb32_mode12(ThumbOpcode32Mode12 op1) {
+  return (BH15 | BH14 | BH13 | BH12 | BH11 | BH9 | op1*BH4);
+}
+
+
+Instr Assembler::thumb32_mode16(ThumbOpcode32Mode16 op2) {
+  return (BH15 | BH14 | BH13 | BH12 | BH11 | BH9 | BH8 | op2*B4);
+}
+
+
+Instr Assembler::thumb32_mode17(ThumbOpcode32Mode17 op1) {
+  return (BH15 | BH14 | BH13 | BH12 | BH11 | BH9 | BH8 | BH7 | op1*BH4);
+}
+
+
+// Used by 32-bit mode 1 instructions.
+bool Assembler::thumb_expand_imm(uint32_t imm32,
+                                 uint32_t* i,
+                                 uint32_t* imm3,
+                                 uint32_t* imm8) {
+  // 00000000 00000000 00000000 abcdefgh
+  if ((imm32 & 0xFFFFFF00) == 0) {
+    *imm8 = imm32;  // abcdefgh, not 1bcdefgh.
+    *i = 0;  // i:imm3:a == '0000x'
+    *imm3 = 0;
+    return true;
+  }
+  // 00000000 abcdefgh 00000000 abcdefgh
+  if (((0xff00ff00 & imm32) == 0) &&
+      (((0xff0000 & imm32) >> 16) == (0xff & imm32))) {
+    *i = 0;
+    *imm3 = 1;  // i:imm3:a == '0001x'
+    *imm8 = imm32 & 0xff;
+    return true;
+  }
+  // abcdefgh 00000000 abcdefgh 00000000
+  if (((0x00ff00ff & imm32) == 0) &&
+      (((0xff000000 & imm32) >> 16) == (0xff00 & imm32))) {
+    *i = 0;
+    *imm3 = 2;  // i:imm3:a == '0010x'
+    *imm8 = (imm32 & 0xff00) >> 8;
+    return true;
+  }
+  // abcdefgh abcdefgh abcdefgh abcdefgh
+  if ((((0xffff0000 & imm32) >> 16) == (0xffff & imm32)) &&
+      (((0xff00 & imm32) >> 8) == (0xff & imm32))) {
+    *i = 0;
+    *imm3 = 3;  // i:imm3:a == '0011x'
+    *imm8 = imm32 & 0xff;
+    return true;
+  }
+
+  // <0's> (a=1)bcdefgh <0's>
+  // Check for set bits that are too far apart first, to fail faster (the
+  // most common case): bits on in both the top 13 and bottom 12 bits cannot
+  // fit in one 8-bit window.
+  if ((imm32 & 0xFFF80000) && (imm32 & 0xFFF))
+    return false;  // Short circuit - ON bits too far apart to fit.
+
+  int lowest_byte_on = 0;
+  for (lowest_byte_on = 0; lowest_byte_on < 4; lowest_byte_on++)
+    if (imm32 & (0xff << lowest_byte_on*8))
+      break;  // lowest_byte_on is the lowest byte with any bit on.
+
+  // Because case '0000x' is handled above, the value is not 0,
+  // so lowest_byte_on must be less than 4.
+  int bitnum = 0;  // Find the lowest bit ON.
+  for (bitnum = lowest_byte_on*8; bitnum < (lowest_byte_on+1)*8; bitnum++)
+    if (imm32 & (1 << bitnum))
+      break;  // This is the bottom bit on.
+
+  // bitnum must be < 32.
+  if ((bitnum < (lowest_byte_on+1)*8) &&
+      ((imm32 & ~(0xff << bitnum)) == 0)) {  // Then it fits this pattern.
+    // Now find the top bit ON, which becomes 'a' in the 1bcdefgh pattern.
+    int top_bit_on = (bitnum + 7 < 32) ? bitnum + 7 : 31;
+    while ((imm32 & (1 << top_bit_on)) == 0)
+      top_bit_on--;
+
+    // i:imm3:a goes from 01001 to 11111, so 39 - i:imm3:a goes from 30 to 8:
+    // 39 - i:imm3:a == top_bit_on.
+    int i_imm3_a = 39 - top_bit_on;
+    *i = (i_imm3_a >> 4) & 0x1;
+    *imm3 = (i_imm3_a >> 1) & 0x7;
+
+    *imm8 = imm32 >> (top_bit_on - 7);
+    if ((i_imm3_a & 0x1) == 0)
+      *imm8 = *imm8 & 0x7f;  // Bit 7 of imm8 holds 'a'.
+
+    return true;
+  }
+
+  *i = *imm3 = *imm8 = 0;
+  return false;
+}
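+
+
+// Illustrative walk-through of the rotated case (an example, not part of
+// the encoder): for imm32 == 0x000003FC, bitnum == 2 and top_bit_on == 9,
+// so i_imm3_a == 39 - 9 == 30 (0b11110): i == 1, imm3 == 0b111, and
+// imm8 == 0x7F (bit 7 cleared since 'a' == 0).  Decoding per the ARM ARM's
+// ThumbExpandImm, ROR('1':bcdefgh == 0xFF, 30) == 0x3FC, as required.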
+
+
+Instr Assembler::thumb32_sign_extend_imm24(int imm) {
+  uint32_t imm11 = imm & 0x7ff;
+  uint32_t imm10 = (imm >> 11) & 0x3ff;
+  uint32_t i2 = (imm >> 21) & 1;
+  uint32_t i1 = (imm >> 22) & 1;
+  uint32_t s = (imm >> 23) & 1;
+  uint32_t j1 = (~(i1 ^ s)) & 1;
+  uint32_t j2 = (~(i2 ^ s)) & 1;
+  return (s*BH10 | imm10*BH0 | j1*B13 | j2*B11 | imm11);
+}
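+
+
+// The j1/j2 terms above invert the branch-offset decoding in the ARM ARM
+// (BL/B.W encodings), where I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S);
+// that relation is its own inverse, hence j1 = NOT(i1 EOR s) here.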
+
+
+Instr Assembler::thumb32_2reg_zero_extend_imm12(Register rd,
+                                                const MemOperand& x) {
+  ASSERT(!x.rm_.is_valid());  // Is immediate.
+  uint32_t offset = x.offset_;
+  uint32_t sign = U;
+  if (x.rn_.code() == 15) {
+    if (x.offset_ < 0) {
+      sign = 0;
+      offset = -x.offset_;
+    }
+  }
+  ASSERT(is_uint12(offset));
+  return (sign | x.rn_.code()*BH0 | rd.code()*B12 | offset);
+}
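+
+
+// Reading of the sign handling above: only the pc-relative (literal) form
+// can carry a negative offset here, folded into the U bit; for the imm12
+// register forms the offset is assumed non-negative (negative offsets go
+// through the imm8 encoding below).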
+
+
+Instr Assembler::thumb32_2reg_zero_extend_imm8(Register rd,
+                                               const MemOperand& x) {
+  ASSERT(!x.rm_.is_valid());  // Is immediate.
+  int am = x.am_;
+  int offset_8 = x.offset_;
+  if (offset_8 < 0) {
+    offset_8 = -offset_8;
+    am ^= U;
+  }
+  ASSERT(is_uint8(offset_8));
+  int thumbP = (am & P) > 0 ? B10 : 0;
+  int thumbU = (am & U) > 0 ? B9 : 0;
+  int thumbW = (am & W) > 0 ? B8 : 0;
+  if (thumbP == 0) {
+    thumbW = B8;
+  }
+  return (x.rn_.code()*BH0 | rd.code()*B12 | B11 | thumbP | thumbU |
+          thumbW | offset_8);
+}
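+
+
+// The am_ flags are the ARM-style P/U/W bits; they are remapped to bits
+// 10/9/8 of the low halfword for this imm8 form.  P == 0 (post-index)
+// forces W, matching the T4-style encodings where post-indexing is only
+// defined with W set.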
+
+
+// Mode 6.
+Instr Assembler::thumb32_3reg_zero_extend_imm8(Register rt,
+                                               Register rt2,
+                                               const MemOperand& x) {
+  int am = x.am_;
+  int offset_8 = x.offset_;
+  if (offset_8 < 0) {
+    offset_8 = -offset_8;
+    am ^= U;
+  }
+  // Should we just use 'am' instead of thumb[P|U|W]?
+  int thumbP = (am & P) > 0 ? BH8 : 0;
+  int thumbU = (am & U) > 0 ? BH7 : 0;
+  int thumbW = (am & W) > 0 ? BH5 : 0;
+  // P U W Rn Rt Rt2 imm8
+  return (thumbP | thumbU | thumbW | x.rn_.code()*BH0 |
+          rt.code()*B12 | rt2.code()*B8 | offset_8);
+}
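+
+
+// Unlike the imm8 form above, the dual load/store encodings keep P/U/W in
+// the first halfword (bits 24, 23 and 21 of the combined instruction),
+// hence BH8/BH7/BH5 here.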
+
+
+Instr Assembler::thumb32_2reg_zero_extend_imm_split(Register rn,
+                                                    Register rd,
+                                                    const Operand& x) {
+  ASSERT(!x.rm_.is_valid());  // Is immediate.
+  ASSERT(is_uint12(x.imm32_));
+  uint32_t i = (x.imm32_ >> 11) & 1;
+  uint32_t imm3 = (x.imm32_ >> 8) & 7;
+  uint32_t imm8 = x.imm32_ & 0xff;
+  return (i*BH10 | rn.code()*BH0 | imm3*B12 | rd.code()*B8 | imm8);
+}
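+
+
+// Illustrative example: x.imm32_ == 0xABC splits into i == 1 (bit 11),
+// imm3 == 0b010 (bits 10:8) and imm8 == 0xBC, which a decoder reassembles
+// as i:imm3:imm8.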
+
+
+// MOV imm T3 and MOVT T1 set imm4, where others in mode 3 set Rn.
+Instr Assembler::thumb32_1reg_zero_extend_imm_split_4i38(Register rd,
+                                                         uint32_t imm) {
+  ASSERT(is_uint16(imm));
+  uint32_t imm4 = (imm >> 12) & 0xf;
+  uint32_t i = (imm >> 11) & 1;
+  uint32_t imm3 = (imm >> 8) & 7;
+  uint32_t imm8 = imm & 0xff;
+  return (i*BH10 | imm4*BH0 | imm3*B12 | rd.code()*B8 | imm8);
+}
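+
+
+// Illustrative example: imm == 0xABCD yields imm4 == 0xA, i == 1,
+// imm3 == 0b011 and imm8 == 0xCD, i.e. the imm4:i:imm3:imm8 split used by
+// MOVW/MOVT.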
+
+
+// Common in mode 1; some instructions use one register and set the other
+// to pc.  Ex: MOV, MVN have no Rn; TST, TEQ, CMN, CMP have no Rd.
+// Otherwise: i S Rn imm3 Rd imm8.
+Instr Assembler::thumb32_2reg_thumb_expand_imm(Register rd,
+                                               Register rn,
+                                               uint32_t i,
+                                               uint32_t imm3,
+                                               uint32_t imm8) {
+  return (i*BH10 | rn.code()*BH0 | imm3*B12 | rd.code()*B8 | imm8);
+}
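+
+
+// Callers pass pc for the unused register slot: MOV (immediate), for
+// example, encodes Rn as 0b1111, and TST/TEQ/CMN/CMP encode Rd as 0b1111,
+// which is exactly pc.code().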
+
+
+// Common in mode 11; some instructions use two registers and set the other
+// to pc.  Ex: MOV, MVN have no Rn; TST, TEQ, CMN, CMP have no Rd.
+Instr Assembler::thumb32_3reg_shift_imm8(Register rn,
+                                         Register rd,
+                                         const Operand& x) {
+  ASSERT(x.rm_.is_valid() && !x.rs_.is_valid());  // Is register, not shift.
+  ASSERT(is_uint5(x.shift_imm_));
+  uint8_t imm3 = x.shift_imm_ >> 2;
+  uint8_t imm2 = x.shift_imm_ & 3;
+  return (rn.code()*BH0 | imm3*B12 | rd.code()*B8 | imm2*B6 |
+          (x.shift_op_ >> 1) | x.rm_.code());
+}
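+
+
+// The shift amount splits as imm3:imm2 (bits 14:12 and 7:6) and the shift
+// type lands in bits 5:4; x.shift_op_ >> 1 assumes the ShiftOp constants
+// are encoded at bits 6:5, as in the ARM-mode assembler.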
+
+
+Instr Assembler::thumb32_3reg_shift(Register rd,
+                                    const Operand& x) {
+  ASSERT(x.rm_.is_valid());  // Is register.
+  ASSERT(x.rs_.is_valid());  // Is register shift.
+  return (x.rm_.code()*BH0 | B15 | B14 | B13 | B12 |
+          rd.code()*B8 | x.rs_.code());
+}
+
+
+// Also used for usat.
+Instr Assembler::thumb32_bit_field(Register rn,
+                                   Register rd,
+                                   int split_imm,
+                                   int lower_imm) {
+  int imm3 = split_imm >> 2;
+  int imm2 = split_imm & 3;
+  return (rn.code()*BH0 | imm3*B12 | rd.code()*B8 | imm2*B6 | lower_imm);
+}
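+
+
+// Illustrative example: a bit-field insert at lsb == 10 passes
+// split_imm == 10, giving imm3 == 0b010 and imm2 == 0b10 (the imm3:imm2
+// split of the 5-bit position), while lower_imm carries the width-derived
+// field (msb for BFI/BFC, widthm1 for SBFX/UBFX).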
+
+
+Instr Assembler::thumb32_3reg_lsl(Register rd,
+                                  const MemOperand& x) {
+  ASSERT(x.rn_.is_valid() && x.rm_.is_valid());  // Both registers valid.
+  uint8_t imm2 = 0;
+  if (x.shift_op_ == LSL && is_uint2(x.shift_imm_)) {
+    imm2 = x.shift_imm_ & 3;
+    return (x.rn_.code()*BH0 | rd.code()*B12 | imm2*B4 | x.rm_.code());
+  }
+  switch (x.shift_op_) {
+    case LSL:  // TODO(rkrithiv): call method to encode lsl instruction
+    case LSR:  // TODO(rkrithiv): call method to encode lsr instruction
+    case ASR:  // TODO(rkrithiv): call method to encode asr instruction
+    default: return (x.rn_.code()*BH0 | rd.code()*B12 | x.rm_.code());
+  }
+}
+
+
+Instr Assembler::thumb32_4reg(Register dst, Register src1, Register src2,
+                              Register srcA) {
+  return (src1.code()*BH0 | srcA.code()*B12 | dst.code()*B8 | src2.code());
+}
+
+
+uint16_t Assembler::thumb32_movw_immediate(Instr instr) {
+  uint16_t i = (instr >> 26) & 1;
+  uint16_t imm4 = (instr >> 16) & 15;
+  uint16_t imm3 = (instr >> 12) & 7;
+  uint16_t imm8 = instr & 0xff;
+  return ((imm4 << 12) | (i << 11) | (imm3 << 8) | imm8);
+}
+
+
+Instr Assembler::thumb32_set_movw_immediate(uint32_t imm) {
+  ASSERT(is_uint16(imm));
+  uint32_t imm4 = (imm >> 12) & 0xf;
+  uint32_t i = (imm >> 11) & 1;
+  uint32_t imm3 = (imm >> 8) & 7;
+  uint32_t imm8 = imm & 0xff;
+  return (i*BH10 | imm4*BH0 | imm3*B12 | imm8);
+}
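+
+
+// Assuming BHn == 1 << (n + 16), the two movw helpers are inverses modulo
+// the Rd field (which thumb32_set_movw_immediate leaves clear):
+// thumb32_movw_immediate(thumb32_set_movw_immediate(imm)) == imm for any
+// uint16 value.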
+
+
+Instr Assembler::thumb32_instr_at(Address addr) {
+  return (Memory::int16_at(addr) << 16) |
+         (Memory::int16_at(addr + 2) & 0xffff);
+}
+
+
+void Assembler::thumb32_instr_at_put(int pos, Instr instr) {
+  *reinterpret_cast<Instr16*>(buffer_ + pos) = instr >> 16;
+  *reinterpret_cast<Instr16*>(buffer_ + pos + kInstr16Size) = instr & 0xFFFF;
+}
+
+
+void Assembler::thumb32_instr_at_put(byte* pc, Instr instr) {
+  *reinterpret_cast<Instr16*>(pc) = instr >> 16;
+  *reinterpret_cast<Instr16*>(pc + kInstr16Size) = instr & 0xFFFF;
+}
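+
+
+// Both variants store the instruction first-halfword-first, matching
+// thumb32_instr_at above: the high 16 bits of an Instr hold the halfword
+// that comes first in memory.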
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
+