Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(342)

Unified Diff: src/IceInstARM32.def

Issue 1382063002: Subzero. Adds I64 register pairs for ARM32. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: merge Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « no previous file | src/IceRegistersARM32.h » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/IceInstARM32.def
diff --git a/src/IceInstARM32.def b/src/IceInstARM32.def
index 3fb6898b272776388bf37a893e26b3a7f0ca62c9..b8a9778f578b5072fd354bc92366b5c0787d703a 100644
--- a/src/IceInstARM32.def
+++ b/src/IceInstARM32.def
@@ -16,9 +16,9 @@
// NOTE: PC and SP are not considered isInt, to avoid register allocating.
//
-// For the NaCl sandbox we also need to r9 for TLS, so just reserve always.
-// TODO(jvoung): Allow r9 to be isInt when sandboxing is turned off (native
-// mode).
+// For the NaCl sandbox we also need r9 (and the r8-r9 pair) for TLS, so
+// just reserve always.
+// TODO(jpp): Allow r9 to be isInt when sandboxing is turned off (native mode).
//
// IP is not considered isInt to reserve it as a scratch register. A scratch
// register is useful for expanding instructions post-register allocation.
@@ -46,25 +46,64 @@
#define REGARM32_GPR_TABLE \
/* val, encode, name, scratch, preserved, stackptr, frameptr, \
- isInt, isFP32, isFP64, isVec128, aliases_init */ \
- X(Reg_r0, 0, "r0", 1, 0, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r0)) \
- X(Reg_r1, 1, "r1", 1, 0, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r1)) \
- X(Reg_r2, 2, "r2", 1, 0, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r2)) \
- X(Reg_r3, 3, "r3", 1, 0, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r3)) \
- X(Reg_r4, 4, "r4", 0, 1, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r4)) \
- X(Reg_r5, 5, "r5", 0, 1, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r5)) \
- X(Reg_r6, 6, "r6", 0, 1, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r6)) \
- X(Reg_r7, 7, "r7", 0, 1, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r7)) \
- X(Reg_r8, 8, "r8", 0, 1, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r8)) \
- X(Reg_r9, 9, "r9", 0, 1, 0, 0, 0, 0, 0, 0, ALIASES1(Reg_r9)) \
- X(Reg_r10, 10, "r10", 0, 1, 0, 0, 1, 0, 0, 0, ALIASES1(Reg_r10)) \
- X(Reg_fp, 11, "fp", 0, 1, 0, 1, 1, 0, 0, 0, ALIASES1(Reg_fp)) \
- X(Reg_ip, 12, "ip", 1, 0, 0, 0, 0, 0, 0, 0, ALIASES1(Reg_ip)) \
- X(Reg_sp, 13, "sp", 0, 0, 1, 0, 0, 0, 0, 0, ALIASES1(Reg_sp)) \
- X(Reg_lr, 14, "lr", 0, 0, 0, 0, 0, 0, 0, 0, ALIASES1(Reg_lr)) \
- X(Reg_pc, 15, "pc", 0, 0, 0, 0, 0, 0, 0, 0, ALIASES1(Reg_pc))
+ isInt, isI64Pair, isFP32, isFP64, isVec128, aliases_init */ \
+ X(Reg_r0, 0, "r0", 1, 0, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r0, Reg_r0r1)) \
+ X(Reg_r1, 1, "r1", 1, 0, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r1, Reg_r0r1)) \
+ X(Reg_r2, 2, "r2", 1, 0, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r2, Reg_r2r3)) \
+ X(Reg_r3, 3, "r3", 1, 0, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r3, Reg_r2r3)) \
+ X(Reg_r4, 4, "r4", 0, 1, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r4, Reg_r4r5)) \
+ X(Reg_r5, 5, "r5", 0, 1, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r5, Reg_r4r5)) \
+ X(Reg_r6, 6, "r6", 0, 1, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r6, Reg_r6r7)) \
+ X(Reg_r7, 7, "r7", 0, 1, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r7, Reg_r6r7)) \
+ X(Reg_r8, 8, "r8", 0, 1, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r8, Reg_r8r9)) \
+ X(Reg_r9, 9, "r9", 0, 1, 0, 0, 0, 0, 0, 0, 0, \
+ ALIASES2(Reg_r9, Reg_r8r9)) \
+ X(Reg_r10, 10, "r10", 0, 1, 0, 0, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_r10, Reg_r10fp)) \
+ X(Reg_fp, 11, "fp", 0, 1, 0, 1, 1, 0, 0, 0, 0, \
+ ALIASES2(Reg_fp, Reg_r10fp)) \
+ X(Reg_ip, 12, "ip", 1, 0, 0, 0, 0, 0, 0, 0, 0, \
+ ALIASES1(Reg_ip)) \
+ X(Reg_sp, 13, "sp", 0, 0, 1, 0, 0, 0, 0, 0, 0, \
+ ALIASES1(Reg_sp)) \
+ X(Reg_lr, 14, "lr", 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ ALIASES1(Reg_lr)) \
+ X(Reg_pc, 15, "pc", 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ ALIASES1(Reg_pc))
//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
-// isInt, isFP32, isFP64, isVec128, aliases_init)
+// isInt, isI64Pair, isFP32, isFP64, isVec128, aliases_init)
+
+// The following defines a table with the available pairs of consecutive i32
+// GPRs starting at an even GPR other than r12 and r14 (the ip/sp and lr/pc
+// pairs are not usable as i64 pairs). Those are used to hold i64 variables
+// for atomic memory operations. If one of the registers in the pair is
+// preserved, then we mark the whole pair as preserved to help the register
+// allocator.
+#define REGARM32_I64PAIR_TABLE \
+ /* val, encode, name, scratch, preserved, stackptr, frameptr, \
+ isInt, isI64Pair, isFP32, isFP64, isVec128, aliases_init */ \
+ X(Reg_r0r1, 0, "r0, r1", 1, 0, 0, 0, 0, 1, 0, 0, 0, \
+ ALIASES3(Reg_r0, Reg_r1, Reg_r0r1)) \
+ X(Reg_r2r3, 2, "r2, r3", 1, 0, 0, 0, 0, 1, 0, 0, 0, \
+ ALIASES3(Reg_r2, Reg_r3, Reg_r2r3)) \
+ X(Reg_r4r5, 4, "r4, r5", 0, 1, 0, 0, 0, 1, 0, 0, 0, \
+ ALIASES3(Reg_r4, Reg_r5, Reg_r4r5)) \
+ X(Reg_r6r7, 6, "r6, r7", 0, 1, 0, 0, 0, 1, 0, 0, 0, \
+ ALIASES3(Reg_r6, Reg_r7, Reg_r6r7)) \
+ X(Reg_r8r9, 8, "r8, r9", 0, 1, 0, 0, 0, 0, 0, 0, 0, \
+ ALIASES3(Reg_r8, Reg_r9, Reg_r8r9)) \
+ X(Reg_r10fp, 10, "r10, fp", 0, 1, 0, 0, 0, 0, 0, 0, 0, \
+ ALIASES3(Reg_r10, Reg_fp, Reg_r10fp))
+//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
+// isInt, isI64Pair, isFP32, isFP64, isVec128, aliases_init)
// S registers 0-15 are scratch, but 16-31 are preserved.
// Regenerate this with the following python script:
@@ -74,7 +113,7 @@
// is_scratch = 1 if i < 16 else 0
// is_preserved = 1 if i >= 16 else 0
// print (' X(Reg_s{regnum:<2}, {regnum:<2}, "s{regnum}", ' +
-// '{scratch}, {preserved}, 0, 0, 0, 1, 0, 0, ' +
+// '{scratch}, {preserved}, 0, 0, 0, 0, 1, 0, 0, ' +
// 'ALIASES(Reg_s{regnum_s:<2}, Reg_d{regnum:<2}, ' +
// 'Reg_q{regnum_q:<2})) \\').format(
// regnum=i, regnum_d=i>>1,
@@ -84,73 +123,73 @@
//
#define REGARM32_FP32_TABLE \
/* val, encode, name, scratch, preserved, stackptr, frameptr, \
- isInt, isFP32, isFP64, isVec128, aliases_init */ \
- X(Reg_s0 , 0 , "s0" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ isInt, isI64Pair, isFP32, isFP64, isVec128, aliases_init */ \
+ X(Reg_s0 , 0 , "s0" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s0 , Reg_d0 , Reg_q0)) \
- X(Reg_s1 , 1 , "s1" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s1 , 1 , "s1" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s1 , Reg_d0 , Reg_q0)) \
- X(Reg_s2 , 2 , "s2" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s2 , 2 , "s2" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s2 , Reg_d1 , Reg_q0)) \
- X(Reg_s3 , 3 , "s3" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s3 , 3 , "s3" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s3 , Reg_d1 , Reg_q0)) \
- X(Reg_s4 , 4 , "s4" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s4 , 4 , "s4" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s4 , Reg_d2 , Reg_q1)) \
- X(Reg_s5 , 5 , "s5" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s5 , 5 , "s5" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s5 , Reg_d2 , Reg_q1)) \
- X(Reg_s6 , 6 , "s6" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s6 , 6 , "s6" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s6 , Reg_d3 , Reg_q1)) \
- X(Reg_s7 , 7 , "s7" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s7 , 7 , "s7" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s7 , Reg_d3 , Reg_q1)) \
- X(Reg_s8 , 8 , "s8" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s8 , 8 , "s8" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s8 , Reg_d4 , Reg_q2)) \
- X(Reg_s9 , 9 , "s9" , 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s9 , 9 , "s9" , 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s9 , Reg_d4 , Reg_q2)) \
- X(Reg_s10, 10, "s10", 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s10, 10, "s10", 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s10, Reg_d5 , Reg_q2)) \
- X(Reg_s11, 11, "s11", 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s11, 11, "s11", 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s11, Reg_d5 , Reg_q2)) \
- X(Reg_s12, 12, "s12", 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s12, 12, "s12", 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s12, Reg_d6 , Reg_q3)) \
- X(Reg_s13, 13, "s13", 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s13, 13, "s13", 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s13, Reg_d6 , Reg_q3)) \
- X(Reg_s14, 14, "s14", 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s14, 14, "s14", 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s14, Reg_d7 , Reg_q3)) \
- X(Reg_s15, 15, "s15", 1, 0, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s15, 15, "s15", 1, 0, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s15, Reg_d7 , Reg_q3)) \
- X(Reg_s16, 16, "s16", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s16, 16, "s16", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s16, Reg_d8 , Reg_q4)) \
- X(Reg_s17, 17, "s17", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s17, 17, "s17", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s17, Reg_d8 , Reg_q4)) \
- X(Reg_s18, 18, "s18", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s18, 18, "s18", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s18, Reg_d9 , Reg_q4)) \
- X(Reg_s19, 19, "s19", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s19, 19, "s19", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s19, Reg_d9 , Reg_q4)) \
- X(Reg_s20, 20, "s20", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s20, 20, "s20", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s20, Reg_d10, Reg_q5)) \
- X(Reg_s21, 21, "s21", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s21, 21, "s21", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s21, Reg_d10, Reg_q5)) \
- X(Reg_s22, 22, "s22", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s22, 22, "s22", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s22, Reg_d11, Reg_q5)) \
- X(Reg_s23, 23, "s23", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s23, 23, "s23", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s23, Reg_d11, Reg_q5)) \
- X(Reg_s24, 24, "s24", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s24, 24, "s24", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s24, Reg_d12, Reg_q6)) \
- X(Reg_s25, 25, "s25", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s25, 25, "s25", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s25, Reg_d12, Reg_q6)) \
- X(Reg_s26, 26, "s26", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s26, 26, "s26", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s26, Reg_d13, Reg_q6)) \
- X(Reg_s27, 27, "s27", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s27, 27, "s27", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s27, Reg_d13, Reg_q6)) \
- X(Reg_s28, 28, "s28", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s28, 28, "s28", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s28, Reg_d14, Reg_q7)) \
- X(Reg_s29, 29, "s29", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s29, 29, "s29", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s29, Reg_d14, Reg_q7)) \
- X(Reg_s30, 30, "s30", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s30, 30, "s30", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s30, Reg_d15, Reg_q7)) \
- X(Reg_s31, 31, "s31", 0, 1, 0, 0, 0, 1, 0, 0, \
+ X(Reg_s31, 31, "s31", 0, 1, 0, 0, 0, 0, 1, 0, 0, \
ALIASES3(Reg_s31, Reg_d15, Reg_q7))
//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
-// isInt, isFP32,isFP64, isVec128, aliases_init)
+// isInt, isI64Pair, isFP32, isFP64, isVec128, aliases_init)
// D registers 0-7 are scratch, 8-15 are preserved, and 16-31 are also scratch
// (if supported by the D32 feature vs D16). D registers are defined in reverse
@@ -165,7 +204,7 @@
// is_scratch = 1 if (i < 8 or i >= 16) else 0
// is_preserved = 1 if (8 <= i and i < 16) else 0
// print (' X(Reg_d{regnum:<2}, {regnum:<2}, "d{regnum}", ' +
-// '{scratch}, {preserved}, 0, 0, 0, 0, 1, 0, ' +
+// '{scratch}, {preserved}, 0, 0, 0, 0, 0, 1, 0, ' +
// 'ALIASES(Reg_d{regnum:<2}, Reg_q{regnum_q:<2}) \\').format(
// regnum=i, regnum_q=i>>1, scratch=is_scratch,
// preserved=is_preserved)
@@ -173,7 +212,7 @@
// is_scratch = 1 if (i < 8 or i >= 16) else 0
// is_preserved = 1 if (8 <= i and i < 16) else 0
// print (' X(Reg_d{regnum:<2}, {regnum:<2}, "d{regnum}", ' +
-// '{scratch}, {preserved}, 0, 0, 0, 0, 1, 0, ' +
+// '{scratch}, {preserved}, 0, 0, 0, 0, 0, 1, 0, ' +
// 'ALIASES(Reg_s{regnum_s0:<2}, Reg_s{regnum_s1:<2}, ' +
// 'Reg_d{regnum:<2}, Reg_q{regnum_q:<2})) \\').format(
// regnum_s0 = (i<<1), regnum_s1 = (i<<1) + 1, regnum=i,
@@ -183,73 +222,73 @@
//
#define REGARM32_FP64_TABLE \
/* val, encode, name, scratch, preserved, stackptr, frameptr, \
- isInt, isFP32, isFP64, isVec128, aliases_init */ \
- X(Reg_d31, 31, "d31", 1, 0, 0, 0, 0, 0, 1, 0, \
+ isInt, isI64Pair, isFP32, isFP64, isVec128, aliases_init */ \
+ X(Reg_d31, 31, "d31", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d31, Reg_q15)) \
- X(Reg_d30, 30, "d30", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d30, 30, "d30", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d30, Reg_q15)) \
- X(Reg_d29, 29, "d29", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d29, 29, "d29", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d29, Reg_q14)) \
- X(Reg_d28, 28, "d28", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d28, 28, "d28", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d28, Reg_q14)) \
- X(Reg_d27, 27, "d27", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d27, 27, "d27", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d27, Reg_q13)) \
- X(Reg_d26, 26, "d26", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d26, 26, "d26", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d26, Reg_q13)) \
- X(Reg_d25, 25, "d25", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d25, 25, "d25", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d25, Reg_q12)) \
- X(Reg_d24, 24, "d24", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d24, 24, "d24", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d24, Reg_q12)) \
- X(Reg_d23, 23, "d23", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d23, 23, "d23", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d23, Reg_q11)) \
- X(Reg_d22, 22, "d22", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d22, 22, "d22", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d22, Reg_q11)) \
- X(Reg_d21, 21, "d21", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d21, 21, "d21", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d21, Reg_q10)) \
- X(Reg_d20, 20, "d20", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d20, 20, "d20", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d20, Reg_q10)) \
- X(Reg_d19, 19, "d19", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d19, 19, "d19", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d19, Reg_q9)) \
- X(Reg_d18, 18, "d18", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d18, 18, "d18", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d18, Reg_q9)) \
- X(Reg_d17, 17, "d17", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d17, 17, "d17", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d17, Reg_q8)) \
- X(Reg_d16, 16, "d16", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d16, 16, "d16", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES2(Reg_d16, Reg_q8)) \
- X(Reg_d15, 15, "d15", 0, 1, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d15, 15, "d15", 0, 1, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s30, Reg_s31, Reg_d15, Reg_q7)) \
- X(Reg_d14, 14, "d14", 0, 1, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d14, 14, "d14", 0, 1, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s28, Reg_s29, Reg_d14, Reg_q7)) \
- X(Reg_d13, 13, "d13", 0, 1, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d13, 13, "d13", 0, 1, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s26, Reg_s27, Reg_d13, Reg_q6)) \
- X(Reg_d12, 12, "d12", 0, 1, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d12, 12, "d12", 0, 1, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s24, Reg_s25, Reg_d12, Reg_q6)) \
- X(Reg_d11, 11, "d11", 0, 1, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d11, 11, "d11", 0, 1, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s22, Reg_s23, Reg_d11, Reg_q5)) \
- X(Reg_d10, 10, "d10", 0, 1, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d10, 10, "d10", 0, 1, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s20, Reg_s21, Reg_d10, Reg_q5)) \
- X(Reg_d9 , 9 , "d9", 0, 1, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d9 , 9 , "d9", 0, 1, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s18, Reg_s19, Reg_d9 , Reg_q4)) \
- X(Reg_d8 , 8 , "d8", 0, 1, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d8 , 8 , "d8", 0, 1, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s16, Reg_s17, Reg_d8 , Reg_q4)) \
- X(Reg_d7 , 7 , "d7", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d7 , 7 , "d7", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s14, Reg_s15, Reg_d7 , Reg_q3)) \
- X(Reg_d6 , 6 , "d6", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d6 , 6 , "d6", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s12, Reg_s13, Reg_d6 , Reg_q3)) \
- X(Reg_d5 , 5 , "d5", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d5 , 5 , "d5", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s10, Reg_s11, Reg_d5 , Reg_q2)) \
- X(Reg_d4 , 4 , "d4", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d4 , 4 , "d4", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s8 , Reg_s9 , Reg_d4 , Reg_q2)) \
- X(Reg_d3 , 3 , "d3", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d3 , 3 , "d3", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s6 , Reg_s7 , Reg_d3 , Reg_q1)) \
- X(Reg_d2 , 2 , "d2", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d2 , 2 , "d2", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s4 , Reg_s5 , Reg_d2 , Reg_q1)) \
- X(Reg_d1 , 1 , "d1", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d1 , 1 , "d1", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s2 , Reg_s3 , Reg_d1 , Reg_q0)) \
- X(Reg_d0 , 0 , "d0", 1, 0, 0, 0, 0, 0, 1, 0, \
+ X(Reg_d0 , 0 , "d0", 1, 0, 0, 0, 0, 0, 0, 1, 0, \
ALIASES4(Reg_s0 , Reg_s1 , Reg_d0 , Reg_q0))
//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
-// isInt, isFP32, isFP64, isVec128, aliases_init)
+// isInt, isI64Pair, isFP32, isFP64, isVec128, aliases_init)
// Q registers 0-3 are scratch, 4-7 are preserved, and 8-15 are also scratch
// (if supported by the D32 feature). Q registers are defined in reverse order
@@ -261,7 +300,7 @@
// is_scratch = 1 if (i < 4 or i >= 8) else 0
// is_preserved = 1 if (4 <= i and i < 8) else 0
// print (' X(Reg_q{regnum:<2}, {regnum:<2}, "q{regnum}", ' +
-// '{scratch}, {preserved}, 0, 0, 0, 0, 0, 1, ALIASES(' +
+// '{scratch}, {preserved}, 0, 0, 0, 0, 0, 0, 1, ALIASES(' +
// 'Reg_d{regnum_d0:<2}, Reg_d{regnum_d1:<2}, ' +
// 'Reg_q{regnum:<2})) \\').format(
// regnum_d0=(i<<1), regnum_d1=(i<<1)+1, regnum=i,
@@ -270,7 +309,7 @@
// is_scratch = 1 if (i < 4 or i >= 8) else 0
// is_preserved = 1 if (4 <= i and i < 8) else 0
// print (' X(Reg_q{regnum:<2}, {regnum:<2}, "q{regnum}", ' +
-// '{scratch}, {preserved}, 0, 0, 0, 0, 0, 1, ALIASES(' +
+// '{scratch}, {preserved}, 0, 0, 0, 0, 0, 0, 1, ALIASES(' +
// 'Reg_s{regnum_s0:<2}, Reg_s{regnum_s1:<2}, ' +
// 'Reg_s{regnum_s2:<2}, Reg_s{regnum_s3:<2}, ' +
// 'Reg_d{regnum_d0:<2}, Reg_d{regnum_d1:<2}, ' +
@@ -283,41 +322,41 @@
//
#define REGARM32_VEC128_TABLE \
/* val, encode, name, scratch, preserved, stackptr, frameptr, \
- isInt, isFP32, isFP64, isVec128, alias_init */ \
- X(Reg_q15, 15, "q15", 1, 0, 0, 0, 0, 0, 0, 1, \
+ isInt, isI64Pair, isFP32, isFP64, isVec128, alias_init */ \
+ X(Reg_q15, 15, "q15", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES3(Reg_d30, Reg_d31, Reg_q15)) \
- X(Reg_q14, 14, "q14", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q14, 14, "q14", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES3(Reg_d28, Reg_d29, Reg_q14)) \
- X(Reg_q13, 13, "q13", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q13, 13, "q13", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES3(Reg_d26, Reg_d27, Reg_q13)) \
- X(Reg_q12, 12, "q12", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q12, 12, "q12", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES3(Reg_d24, Reg_d25, Reg_q12)) \
- X(Reg_q11, 11, "q11", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q11, 11, "q11", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES3(Reg_d22, Reg_d23, Reg_q11)) \
- X(Reg_q10, 10, "q10", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q10, 10, "q10", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES3(Reg_d20, Reg_d21, Reg_q10)) \
- X(Reg_q9 , 9 , "q9", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q9 , 9 , "q9", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES3(Reg_d18, Reg_d19, Reg_q9)) \
- X(Reg_q8 , 8 , "q8", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q8 , 8 , "q8", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES3(Reg_d16, Reg_d17, Reg_q8)) \
- X(Reg_q7 , 7 , "q7", 0, 1, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q7 , 7 , "q7", 0, 1, 0, 0, 0, 0, 0, 0, 1, \
ALIASES7(Reg_s28, Reg_s29, Reg_s30, Reg_s31, Reg_d14, Reg_d15, Reg_q7)) \
- X(Reg_q6 , 6 , "q6", 0, 1, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q6 , 6 , "q6", 0, 1, 0, 0, 0, 0, 0, 0, 1, \
ALIASES7(Reg_s24, Reg_s25, Reg_s26, Reg_s27, Reg_d12, Reg_d13, Reg_q6)) \
- X(Reg_q5 , 5 , "q5", 0, 1, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q5 , 5 , "q5", 0, 1, 0, 0, 0, 0, 0, 0, 1, \
ALIASES7(Reg_s20, Reg_s21, Reg_s22, Reg_s23, Reg_d10, Reg_d11, Reg_q5)) \
- X(Reg_q4 , 4 , "q4", 0, 1, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q4 , 4 , "q4", 0, 1, 0, 0, 0, 0, 0, 0, 1, \
ALIASES7(Reg_s16, Reg_s17, Reg_s18, Reg_s19, Reg_d8 , Reg_d9 , Reg_q4)) \
- X(Reg_q3 , 3 , "q3", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q3 , 3 , "q3", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES7(Reg_s12, Reg_s13, Reg_s14, Reg_s15, Reg_d6 , Reg_d7 , Reg_q3)) \
- X(Reg_q2 , 2 , "q2", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q2 , 2 , "q2", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES7(Reg_s8 , Reg_s9 , Reg_s10, Reg_s11, Reg_d4 , Reg_d5 , Reg_q2)) \
- X(Reg_q1 , 1 , "q1", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q1 , 1 , "q1", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES7(Reg_s4 , Reg_s5 , Reg_s6 , Reg_s7 , Reg_d2 , Reg_d3 , Reg_q1)) \
- X(Reg_q0 , 0 , "q0", 1, 0, 0, 0, 0, 0, 0, 1, \
+ X(Reg_q0 , 0 , "q0", 1, 0, 0, 0, 0, 0, 0, 0, 1, \
ALIASES7(Reg_s0 , Reg_s1 , Reg_s2 , Reg_s3 , Reg_d0 , Reg_d1 , Reg_q0))
//#define X(val, encode, name, scratch, preserved, stackptr, frameptr,
-// isInt, isFP32, isFP64, isVec128, alias_init)
+// isInt, isI64Pair, isFP32, isFP64, isVec128, alias_init)
#undef ALIASES
// We also provide a combined table, so that there is a namespace where all of
@@ -328,6 +367,7 @@
/* val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
isI64Pair, isFP32, isFP64, isVec128, alias_init */ \
REGARM32_GPR_TABLE \
+ REGARM32_I64PAIR_TABLE \
REGARM32_FP32_TABLE \
REGARM32_FP64_TABLE \
REGARM32_VEC128_TABLE
@@ -338,6 +378,8 @@
/* val, init */ \
X(Reg_GPR_First, = Reg_r0) \
X(Reg_GPR_Last, = Reg_pc) \
+ X(Reg_I64PAIR_First, = Reg_r0r1) \
+ X(Reg_I64PAIR_Last, = Reg_r10fp) \
X(Reg_SREG_First, = Reg_s0) \
X(Reg_SREG_Last, = Reg_s31) \
X(Reg_DREG_First, = Reg_d0) \
@@ -351,21 +393,21 @@
// extending load/stores).
#define ICETYPEARM32_TABLE \
/* tag, element type, int_width, vec_width, addr bits sext, zext, \
- reg-reg addr allowed */ \
- X(IceType_void, IceType_void, "" , "" , 0 , 0 , 0) \
- X(IceType_i1, IceType_void, "b", "" , 8 , 12, 1) \
- X(IceType_i8, IceType_void, "b", "" , 8 , 12, 1) \
- X(IceType_i16, IceType_void, "h", "" , 8 , 8 , 1) \
- X(IceType_i32, IceType_void, "" , "" , 12, 12, 1) \
- X(IceType_i64, IceType_void, "d", "" , 8 , 8 , 1) \
- X(IceType_f32, IceType_void, "" , ".f32", 8, 8 , 0) \
- X(IceType_f64, IceType_void, "" , ".f64", 8, 8 , 0) \
- X(IceType_v4i1, IceType_i32 , "" , ".i32", 0 , 0 , 1) \
- X(IceType_v8i1, IceType_i16 , "" , ".i16", 0 , 0 , 1) \
- X(IceType_v16i1, IceType_i8 , "" , ".i8" , 0 , 0 , 1) \
- X(IceType_v16i8, IceType_i8 , "" , ".i8" , 0 , 0 , 1) \
- X(IceType_v8i16, IceType_i16 , "" , ".i16", 0 , 0 , 1) \
- X(IceType_v4i32, IceType_i32 , "" , ".i32", 0 , 0 , 1) \
+ reg-reg addr allowed */ \
+ X(IceType_void, IceType_void, "" , "" , 0 , 0 , 0) \
+ X(IceType_i1, IceType_void, "b", "" , 8 , 12, 1) \
+ X(IceType_i8, IceType_void, "b", "" , 8 , 12, 1) \
+ X(IceType_i16, IceType_void, "h", "" , 8 , 8 , 1) \
+ X(IceType_i32, IceType_void, "" , "" , 12, 12, 1) \
+ X(IceType_i64, IceType_void, "d", "" , 0 , 0 , 0) \
+ X(IceType_f32, IceType_void, "" , ".f32", 8, 8 , 0) \
+ X(IceType_f64, IceType_void, "" , ".f64", 8, 8 , 0) \
+ X(IceType_v4i1, IceType_i32 , "" , ".i32", 0 , 0 , 1) \
+ X(IceType_v8i1, IceType_i16 , "" , ".i16", 0 , 0 , 1) \
+ X(IceType_v16i1, IceType_i8 , "" , ".i8" , 0 , 0 , 1) \
+ X(IceType_v16i8, IceType_i8 , "" , ".i8" , 0 , 0 , 1) \
+ X(IceType_v8i16, IceType_i16 , "" , ".i16", 0 , 0 , 1) \
+ X(IceType_v4i32, IceType_i32 , "" , ".i32", 0 , 0 , 1) \
X(IceType_v4f32, IceType_f32 , "" , ".f32", 0 , 0 , 1)
//#define X(tag, elementty, int_width, vec_width, sbits, ubits, rraddr)
@@ -400,6 +442,6 @@
X(LE, 13, GT, "le") /* signed less than or equal */ \
X(AL, 14, kNone, "") /* always (unconditional) */ \
X(kNone, 15, kNone, "??") /* special condition / none */
-//#define(tag, encode, opp, emit)
+//#define X(tag, encode, opp, emit)
#endif // SUBZERO_SRC_ICEINSTARM32_DEF
« no previous file with comments | « no previous file | src/IceRegistersARM32.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698