OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_PPC_CONSTANTS_PPC_H_ | 5 #ifndef V8_PPC_CONSTANTS_PPC_H_ |
6 #define V8_PPC_CONSTANTS_PPC_H_ | 6 #define V8_PPC_CONSTANTS_PPC_H_ |
7 | 7 |
8 #include <stdint.h> | 8 #include <stdint.h> |
9 | 9 |
10 #include "src/base/logging.h" | 10 #include "src/base/logging.h" |
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
84 } | 84 } |
85 } | 85 } |
86 | 86 |
87 // ----------------------------------------------------------------------------- | 87 // ----------------------------------------------------------------------------- |
88 // Instructions encoding. | 88 // Instructions encoding. |
89 | 89 |
90 // Instr is merely used by the Assembler to distinguish 32bit integers | 90 // Instr is merely used by the Assembler to distinguish 32bit integers |
91 // representing instructions from usual 32 bit values. | 91 // representing instructions from usual 32 bit values. |
92 // Instruction objects are pointers to 32bit values, and provide methods to | 92 // Instruction objects are pointers to 32bit values, and provide methods to |
93 // access the various ISA fields. | 93 // access the various ISA fields. |
94 typedef int32_t Instr; | 94 typedef uint32_t Instr; |
95 | 95 |
96 // Opcodes as defined in section 4.2 table 34 (32bit PowerPC) | 96 #define PPC_XX3_OPCODE_LIST(V) \ |
97 enum Opcode { | 97 /* VSX Scalar Add Double-Precision */ \ |
98 TWI = 3 << 26, // Trap Word Immediate | 98 V(xsadddp, XSADDDP, 0xF0000100) \ |
99 MULLI = 7 << 26, // Multiply Low Immediate | 99 /* VSX Scalar Add Single-Precision */ \ |
100 SUBFIC = 8 << 26, // Subtract from Immediate Carrying | 100 V(xsaddsp, XSADDSP, 0xF0000000) \ |
101 CMPLI = 10 << 26, // Compare Logical Immediate | 101 /* VSX Scalar Compare Ordered Double-Precision */ \ |
102 CMPI = 11 << 26, // Compare Immediate | 102 V(xscmpodp, XSCMPODP, 0xF0000158) \ |
103 ADDIC = 12 << 26, // Add Immediate Carrying | 103 /* VSX Scalar Compare Unordered Double-Precision */ \ |
104 ADDICx = 13 << 26, // Add Immediate Carrying and Record | 104 V(xscmpudp, XSCMPUDP, 0xF0000118) \ |
105 ADDI = 14 << 26, // Add Immediate | 105 /* VSX Scalar Copy Sign Double-Precision */ \ |
106 ADDIS = 15 << 26, // Add Immediate Shifted | 106 V(xscpsgndp, XSCPSGNDP, 0xF0000580) \ |
107 BCX = 16 << 26, // Branch Conditional | 107 /* VSX Scalar Divide Double-Precision */ \ |
108 SC = 17 << 26, // System Call | 108 V(xsdivdp, XSDIVDP, 0xF00001C0) \ |
109 BX = 18 << 26, // Branch | 109 /* VSX Scalar Divide Single-Precision */ \ |
110 EXT1 = 19 << 26, // Extended code set 1 | 110 V(xsdivsp, XSDIVSP, 0xF00000C0) \ |
111 RLWIMIX = 20 << 26, // Rotate Left Word Immediate then Mask Insert | 111 /* VSX Scalar Multiply-Add Type-A Double-Precision */ \ |
112 RLWINMX = 21 << 26, // Rotate Left Word Immediate then AND with Mask | 112 V(xsmaddadp, XSMADDADP, 0xF0000108) \ |
113 RLWNMX = 23 << 26, // Rotate Left Word then AND with Mask | 113 /* VSX Scalar Multiply-Add Type-A Single-Precision */ \ |
114 ORI = 24 << 26, // OR Immediate | 114 V(xsmaddasp, XSMADDASP, 0xF0000008) \ |
115 ORIS = 25 << 26, // OR Immediate Shifted | 115 /* VSX Scalar Multiply-Add Type-M Double-Precision */ \ |
116 XORI = 26 << 26, // XOR Immediate | 116 V(xsmaddmdp, XSMADDMDP, 0xF0000148) \ |
117 XORIS = 27 << 26, // XOR Immediate Shifted | 117 /* VSX Scalar Multiply-Add Type-M Single-Precision */ \ |
118 ANDIx = 28 << 26, // AND Immediate | 118 V(xsmaddmsp, XSMADDMSP, 0xF0000048) \ |
119 ANDISx = 29 << 26, // AND Immediate Shifted | 119 /* VSX Scalar Maximum Double-Precision */ \ |
120 EXT5 = 30 << 26, // Extended code set 5 - 64bit only | 120 V(xsmaxdp, XSMAXDP, 0xF0000500) \ |
121 EXT2 = 31 << 26, // Extended code set 2 | 121 /* VSX Scalar Minimum Double-Precision */ \ |
122 LWZ = 32 << 26, // Load Word and Zero | 122 V(xsmindp, XSMINDP, 0xF0000540) \ |
123 LWZU = 33 << 26, // Load Word with Zero Update | 123 /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \ |
124 LBZ = 34 << 26, // Load Byte and Zero | 124 V(xsmsubadp, XSMSUBADP, 0xF0000188) \ |
125 LBZU = 35 << 26, // Load Byte and Zero with Update | 125 /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \ |
126 STW = 36 << 26, // Store | 126 V(xsmsubasp, XSMSUBASP, 0xF0000088) \ |
127 STWU = 37 << 26, // Store Word with Update | 127 /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \ |
128 STB = 38 << 26, // Store Byte | 128 V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \ |
129 STBU = 39 << 26, // Store Byte with Update | 129 /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \ |
130 LHZ = 40 << 26, // Load Half and Zero | 130 V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \ |
131 LHZU = 41 << 26, // Load Half and Zero with Update | 131 /* VSX Scalar Multiply Double-Precision */ \ |
132 LHA = 42 << 26, // Load Half Algebraic | 132 V(xsmuldp, XSMULDP, 0xF0000180) \ |
133 LHAU = 43 << 26, // Load Half Algebraic with Update | 133 /* VSX Scalar Multiply Single-Precision */ \ |
134 STH = 44 << 26, // Store Half | 134 V(xsmulsp, XSMULSP, 0xF0000080) \ |
135 STHU = 45 << 26, // Store Half with Update | 135 /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \ |
136 LMW = 46 << 26, // Load Multiple Word | 136 V(xsnmaddadp, XSNMADDADP, 0xF0000508) \ |
137 STMW = 47 << 26, // Store Multiple Word | 137 /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \ |
138 LFS = 48 << 26, // Load Floating-Point Single | 138 V(xsnmaddasp, XSNMADDASP, 0xF0000408) \ |
139 LFSU = 49 << 26, // Load Floating-Point Single with Update | 139 /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \ |
140 LFD = 50 << 26, // Load Floating-Point Double | 140 V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \ |
141 LFDU = 51 << 26, // Load Floating-Point Double with Update | 141 /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \ |
142 STFS = 52 << 26, // Store Floating-Point Single | 142 V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \ |
143 STFSU = 53 << 26, // Store Floating-Point Single with Update | 143 /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \ |
144 STFD = 54 << 26, // Store Floating-Point Double | 144 V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \ |
145 STFDU = 55 << 26, // Store Floating-Point Double with Update | 145 /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \ |
146 LD = 58 << 26, // Load Double Word | 146 V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \ |
147 EXT3 = 59 << 26, // Extended code set 3 | 147 /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \ |
148 EXT6 = 60 << 26, // Extended code set 6 | 148 V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \ |
149 STD = 62 << 26, // Store Double Word (optionally with Update) | 149 /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \ |
150 EXT4 = 63 << 26 // Extended code set 4 | 150 V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \ |
| 151 /* VSX Scalar Reciprocal Estimate Double-Precision */ \ |
| 152 V(xsredp, XSREDP, 0xF0000168) \ |
| 153 /* VSX Scalar Reciprocal Estimate Single-Precision */ \ |
| 154 V(xsresp, XSRESP, 0xF0000068) \ |
| 155 /* VSX Scalar Subtract Double-Precision */ \ |
| 156 V(xssubdp, XSSUBDP, 0xF0000140) \ |
| 157 /* VSX Scalar Subtract Single-Precision */ \ |
| 158 V(xssubsp, XSSUBSP, 0xF0000040) \ |
| 159 /* VSX Scalar Test for software Divide Double-Precision */ \ |
| 160 V(xstdivdp, XSTDIVDP, 0xF00001E8) \ |
| 161 /* VSX Vector Add Double-Precision */ \ |
| 162 V(xvadddp, XVADDDP, 0xF0000300) \ |
| 163 /* VSX Vector Add Single-Precision */ \ |
| 164 V(xvaddsp, XVADDSP, 0xF0000200) \ |
| 165 /* VSX Vector Compare Equal To Double-Precision */ \ |
| 166 V(xvcmpeqdp, XVCMPEQDP, 0xF0000318) \ |
| 167 /* VSX Vector Compare Equal To Double-Precision & record CR6 */ \ |
| 168 V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718) \ |
| 169 /* VSX Vector Compare Equal To Single-Precision */ \ |
| 170 V(xvcmpeqsp, XVCMPEQSP, 0xF0000218) \ |
| 171 /* VSX Vector Compare Equal To Single-Precision & record CR6 */ \ |
| 172 V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618) \ |
| 173 /* VSX Vector Compare Greater Than or Equal To Double-Precision */ \ |
| 174 V(xvcmpgedp, XVCMPGEDP, 0xF0000398) \ |
| 175 /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \ |
| 176 /* CR6 */ \ |
| 177 V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798) \ |
| 178 /* VSX Vector Compare Greater Than or Equal To Single-Precision */ \ |
| 179 V(xvcmpgesp, XVCMPGESP, 0xF0000298) \ |
| 180 /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \ |
| 181 /* CR6 */ \ |
| 182 V(xvcmpgespx, XVCMPGESPx, 0xF0000698) \ |
| 183 /* VSX Vector Compare Greater Than Double-Precision */ \ |
| 184 V(xvcmpgtdp, XVCMPGTDP, 0xF0000358) \ |
| 185 /* VSX Vector Compare Greater Than Double-Precision & record CR6 */ \ |
| 186 V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758) \ |
| 187 /* VSX Vector Compare Greater Than Single-Precision */ \ |
| 188 V(xvcmpgtsp, XVCMPGTSP, 0xF0000258) \ |
| 189 /* VSX Vector Compare Greater Than Single-Precision & record CR6 */ \ |
| 190 V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658) \ |
| 191 /* VSX Vector Copy Sign Double-Precision */ \ |
| 192 V(xvcpsgndp, XVCPSGNDP, 0xF0000780) \ |
| 193 /* VSX Vector Copy Sign Single-Precision */ \ |
| 194 V(xvcpsgnsp, XVCPSGNSP, 0xF0000680) \ |
| 195 /* VSX Vector Divide Double-Precision */ \ |
| 196 V(xvdivdp, XVDIVDP, 0xF00003C0) \ |
| 197 /* VSX Vector Divide Single-Precision */ \ |
| 198 V(xvdivsp, XVDIVSP, 0xF00002C0) \ |
| 199 /* VSX Vector Multiply-Add Type-A Double-Precision */ \ |
| 200 V(xvmaddadp, XVMADDADP, 0xF0000308) \ |
| 201 /* VSX Vector Multiply-Add Type-A Single-Precision */ \ |
| 202 V(xvmaddasp, XVMADDASP, 0xF0000208) \ |
| 203 /* VSX Vector Multiply-Add Type-M Double-Precision */ \ |
| 204 V(xvmaddmdp, XVMADDMDP, 0xF0000348) \ |
| 205 /* VSX Vector Multiply-Add Type-M Single-Precision */ \ |
| 206 V(xvmaddmsp, XVMADDMSP, 0xF0000248) \ |
| 207 /* VSX Vector Maximum Double-Precision */ \ |
| 208 V(xvmaxdp, XVMAXDP, 0xF0000700) \ |
| 209 /* VSX Vector Maximum Single-Precision */ \ |
| 210 V(xvmaxsp, XVMAXSP, 0xF0000600) \ |
| 211 /* VSX Vector Minimum Double-Precision */ \ |
| 212 V(xvmindp, XVMINDP, 0xF0000740) \ |
| 213 /* VSX Vector Minimum Single-Precision */ \ |
| 214 V(xvminsp, XVMINSP, 0xF0000640) \ |
| 215 /* VSX Vector Multiply-Subtract Type-A Double-Precision */ \ |
| 216 V(xvmsubadp, XVMSUBADP, 0xF0000388) \ |
| 217 /* VSX Vector Multiply-Subtract Type-A Single-Precision */ \ |
| 218 V(xvmsubasp, XVMSUBASP, 0xF0000288) \ |
| 219 /* VSX Vector Multiply-Subtract Type-M Double-Precision */ \ |
| 220 V(xvmsubmdp, XVMSUBMDP, 0xF00003C8) \ |
| 221 /* VSX Vector Multiply-Subtract Type-M Single-Precision */ \ |
| 222 V(xvmsubmsp, XVMSUBMSP, 0xF00002C8) \ |
| 223 /* VSX Vector Multiply Double-Precision */ \ |
| 224 V(xvmuldp, XVMULDP, 0xF0000380) \ |
| 225 /* VSX Vector Multiply Single-Precision */ \ |
| 226 V(xvmulsp, XVMULSP, 0xF0000280) \ |
| 227 /* VSX Vector Negative Multiply-Add Type-A Double-Precision */ \ |
| 228 V(xvnmaddadp, XVNMADDADP, 0xF0000708) \ |
| 229 /* VSX Vector Negative Multiply-Add Type-A Single-Precision */ \ |
| 230 V(xvnmaddasp, XVNMADDASP, 0xF0000608) \ |
| 231 /* VSX Vector Negative Multiply-Add Type-M Double-Precision */ \ |
| 232 V(xvnmaddmdp, XVNMADDMDP, 0xF0000748) \ |
| 233 /* VSX Vector Negative Multiply-Add Type-M Single-Precision */ \ |
| 234 V(xvnmaddmsp, XVNMADDMSP, 0xF0000648) \ |
| 235 /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */ \ |
| 236 V(xvnmsubadp, XVNMSUBADP, 0xF0000788) \ |
| 237 /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */ \ |
| 238 V(xvnmsubasp, XVNMSUBASP, 0xF0000688) \ |
| 239 /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */ \ |
| 240 V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8) \ |
| 241 /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */ \ |
| 242 V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8) \ |
| 243 /* VSX Vector Reciprocal Estimate Double-Precision */ \ |
| 244 V(xvredp, XVREDP, 0xF0000368) \ |
| 245 /* VSX Vector Reciprocal Estimate Single-Precision */ \ |
| 246 V(xvresp, XVRESP, 0xF0000268) \ |
| 247 /* VSX Vector Subtract Double-Precision */ \ |
| 248 V(xvsubdp, XVSUBDP, 0xF0000340) \ |
| 249 /* VSX Vector Subtract Single-Precision */ \ |
| 250 V(xvsubsp, XVSUBSP, 0xF0000240) \ |
| 251 /* VSX Vector Test for software Divide Double-Precision */ \ |
| 252 V(xvtdivdp, XVTDIVDP, 0xF00003E8) \ |
| 253 /* VSX Vector Test for software Divide Single-Precision */ \ |
| 254 V(xvtdivsp, XVTDIVSP, 0xF00002E8) \ |
| 255 /* VSX Logical AND */ \ |
| 256 V(xxland, XXLAND, 0xF0000410) \ |
| 257 /* VSX Logical AND with Complement */ \ |
| 258 V(xxlandc, XXLANDC, 0xF0000450) \ |
| 259 /* VSX Logical Equivalence */ \ |
| 260 V(xxleqv, XXLEQV, 0xF00005D0) \ |
| 261 /* VSX Logical NAND */ \ |
| 262 V(xxlnand, XXLNAND, 0xF0000590) \ |
| 263 /* VSX Logical NOR */ \ |
| 264 V(xxlnor, XXLNOR, 0xF0000510) \ |
| 265 /* VSX Logical OR */ \ |
| 266 V(xxlor, XXLOR, 0xF0000490) \ |
| 267 /* VSX Logical OR with Complement */ \ |
| 268 V(xxlorc, XXLORC, 0xF0000550) \ |
| 269 /* VSX Logical XOR */ \ |
| 270 V(xxlxor, XXLXOR, 0xF00004D0) \ |
| 271 /* VSX Merge High Word */ \ |
| 272 V(xxmrghw, XXMRGHW, 0xF0000090) \ |
| 273 /* VSX Merge Low Word */ \ |
| 274 V(xxmrglw, XXMRGLW, 0xF0000190) \ |
| 275 /* VSX Permute Doubleword Immediate */ \ |
| 276 V(xxpermdi, XXPERMDI, 0xF0000050) \ |
| 277 /* VSX Shift Left Double by Word Immediate */ \ |
| 278 V(xxsldwi, XXSLDWI, 0xF0000010) \ |
| 279 /* VSX Splat Word */ \ |
| 280 V(xxspltw, XXSPLTW, 0xF0000290) |
| 281 |
| 282 #define PPC_Z23_OPCODE_LIST(V) \ |
| 283 /* Decimal Quantize */ \ |
| 284 V(dqua, DQUA, 0xEC000006) \ |
| 285 /* Decimal Quantize Immediate */ \ |
| 286 V(dquai, DQUAI, 0xEC000086) \ |
| 287 /* Decimal Quantize Immediate Quad */ \ |
| 288 V(dquaiq, DQUAIQ, 0xFC000086) \ |
| 289 /* Decimal Quantize Quad */ \ |
| 290 V(dquaq, DQUAQ, 0xFC000006) \ |
| 291 /* Decimal Floating Round To FP Integer Without Inexact */ \ |
| 292 V(drintn, DRINTN, 0xEC0001C6) \ |
| 293 /* Decimal Floating Round To FP Integer Without Inexact Quad */ \ |
| 294 V(drintnq, DRINTNQ, 0xFC0001C6) \ |
| 295 /* Decimal Floating Round To FP Integer With Inexact */ \ |
| 296 V(drintx, DRINTX, 0xEC0000C6) \ |
| 297 /* Decimal Floating Round To FP Integer With Inexact Quad */ \ |
| 298 V(drintxq, DRINTXQ, 0xFC0000C6) \ |
| 299 /* Decimal Floating Reround */ \ |
| 300 V(drrnd, DRRND, 0xEC000046) \ |
| 301 /* Decimal Floating Reround Quad */ \ |
| 302 V(drrndq, DRRNDQ, 0xFC000046) |
| 303 |
| 304 #define PPC_Z22_OPCODE_LIST(V) \ |
| 305 /* Decimal Floating Shift Coefficient Left Immediate */ \ |
| 306 V(dscli, DSCLI, 0xEC000084) \ |
| 307 /* Decimal Floating Shift Coefficient Left Immediate Quad */ \ |
| 308 V(dscliq, DSCLIQ, 0xFC000084) \ |
| 309 /* Decimal Floating Shift Coefficient Right Immediate */ \ |
| 310 V(dscri, DSCRI, 0xEC0000C4) \ |
| 311 /* Decimal Floating Shift Coefficient Right Immediate Quad */ \ |
| 312 V(dscriq, DSCRIQ, 0xFC0000C4) \ |
| 313 /* Decimal Floating Test Data Class */ \ |
| 314 V(dtstdc, DTSTDC, 0xEC000184) \ |
| 315 /* Decimal Floating Test Data Class Quad */ \ |
| 316 V(dtstdcq, DTSTDCQ, 0xFC000184) \ |
| 317 /* Decimal Floating Test Data Group */ \ |
| 318 V(dtstdg, DTSTDG, 0xEC0001C4) \ |
| 319 /* Decimal Floating Test Data Group Quad */ \ |
| 320 V(dtstdgq, DTSTDGQ, 0xFC0001C4) |
| 321 |
| 322 #define PPC_XX2_OPCODE_LIST(V) \ |
| 323 /* Move To VSR Doubleword */ \ |
| 324 V(mtvsrd, MTVSRD, 0x7C000166) \ |
| 325 /* Move To VSR Word Algebraic */ \ |
| 326 V(mtvsrwa, MTVSRWA, 0x7C0001A6) \ |
| 327 /* Move To VSR Word and Zero */ \ |
| 328 V(mtvsrwz, MTVSRWZ, 0x7C0001E6) \ |
| 329 /* VSX Scalar Absolute Value Double-Precision */ \ |
| 330 V(xsabsdp, XSABSDP, 0xF0000564) \ |
| 331 /* VSX Scalar Convert Double-Precision to Single-Precision */ \ |
| 332 V(xscvdpsp, XSCVDPSP, 0xF0000424) \ |
| 333 /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \ |
| 334 /* signalling */ \ |
| 335 V(xscvdpspn, XSCVDPSPN, 0xF000042C) \ |
| 336 /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \ |
| 337 /* Saturate */ \ |
| 338 V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \ |
| 339 /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */ \ |
| 340 /* Saturate */ \ |
| 341 V(xscvdpsxws, XSCVDPSXWS, 0xF0000160) \ |
| 342 /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */ \ |
| 343 /* Doubleword Saturate */ \ |
| 344 V(xscvdpuxds, XSCVDPUXDS, 0xF0000520) \ |
| 345 /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */ \ |
| 346 /* Saturate */ \ |
| 347 V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \ |
| 348 /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \ |
| 349 V(xscvspdp, XSCVSPDP, 0xF0000524) \ |
| 350 /* Scalar Convert Single-Precision to Double-Precision format Non- */ \ |
| 351 /* signalling */ \ |
| 352 V(xscvspdpn, XSCVSPDPN, 0xF000052C) \ |
| 353 /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \ |
| 354 V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \ |
| 355 /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \ |
| 356 V(xscvsxdsp, XSCVSXDSP, 0xF00004E0) \ |
| 357 /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */ \ |
| 358 /* Precision */ \ |
| 359 V(xscvuxddp, XSCVUXDDP, 0xF00005A0) \ |
| 360 /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */ \ |
| 361 /* Precision */ \ |
| 362 V(xscvuxdsp, XSCVUXDSP, 0xF00004A0) \ |
| 363 /* VSX Scalar Negative Absolute Value Double-Precision */ \ |
| 364 V(xsnabsdp, XSNABSDP, 0xF00005A4) \ |
| 365 /* VSX Scalar Negate Double-Precision */ \ |
| 366 V(xsnegdp, XSNEGDP, 0xF00005E4) \ |
| 367 /* VSX Scalar Round to Double-Precision Integer */ \ |
| 368 V(xsrdpi, XSRDPI, 0xF0000124) \ |
| 369 /* VSX Scalar Round to Double-Precision Integer using Current rounding */ \ |
| 370 /* mode */ \ |
| 371 V(xsrdpic, XSRDPIC, 0xF00001AC) \ |
| 372 /* VSX Scalar Round to Double-Precision Integer toward -Infinity */ \ |
| 373 V(xsrdpim, XSRDPIM, 0xF00001E4) \ |
| 374 /* VSX Scalar Round to Double-Precision Integer toward +Infinity */ \ |
| 375 V(xsrdpip, XSRDPIP, 0xF00001A4) \ |
| 376 /* VSX Scalar Round to Double-Precision Integer toward Zero */ \ |
| 377 V(xsrdpiz, XSRDPIZ, 0xF0000164) \ |
| 378 /* VSX Scalar Round to Single-Precision */ \ |
| 379 V(xsrsp, XSRSP, 0xF0000464) \ |
| 380 /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */ \ |
| 381 V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \ |
| 382 /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \ |
| 383 V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \ |
| 384 /* VSX Scalar Square Root Double-Precision */ \ |
| 385 V(xssqrtdp, XSSQRTDP, 0xF000012C) \ |
| 386 /* VSX Scalar Square Root Single-Precision */ \ |
| 387 V(xssqrtsp, XSSQRTSP, 0xF000002C) \ |
| 388 /* VSX Scalar Test for software Square Root Double-Precision */ \ |
| 389 V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \ |
| 390 /* VSX Vector Absolute Value Double-Precision */ \ |
| 391 V(xvabsdp, XVABSDP, 0xF0000764) \ |
| 392 /* VSX Vector Absolute Value Single-Precision */ \ |
| 393 V(xvabssp, XVABSSP, 0xF0000664) \ |
| 394 /* VSX Vector Convert Double-Precision to Single-Precision */ \ |
| 395 V(xvcvdpsp, XVCVDPSP, 0xF0000624) \ |
| 396 /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \ |
| 397 /* Saturate */ \ |
| 398 V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760) \ |
| 399 /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \ |
| 400 /* Saturate */ \ |
| 401 V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \ |
| 402 /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */ \ |
| 403 /* Doubleword Saturate */ \ |
| 404 V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720) \ |
| 405 /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \ |
| 406 /* Saturate */ \ |
| 407 V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320) \ |
| 408 /* VSX Vector Convert Single-Precision to Double-Precision */ \ |
| 409 V(xvcvspdp, XVCVSPDP, 0xF0000724) \ |
| 410 /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \ |
| 411 /* Saturate */ \ |
| 412 V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \ |
| 413 /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \ |
| 414 /* Saturate */ \ |
| 415 V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \ |
| 416 /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \ |
| 417 /* Doubleword Saturate */ \ |
| 418 V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \ |
| 419 /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \ |
| 420 /* Saturate */ \ |
| 421 V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \ |
| 422 /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \ |
| 423 V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \ |
| 424 /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \ |
| 425 V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \ |
| 426 /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \ |
| 427 V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \ |
| 428 /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \ |
| 429 V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \ |
| 430 /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \ |
| 431 /* Precision */ \ |
| 432 V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \ |
| 433 /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */ \ |
| 434 /* Precision */ \ |
| 435 V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \ |
| 436 /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */ \ |
| 437 V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0) \ |
| 438 /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \ |
| 439 V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \ |
| 440 /* VSX Vector Negative Absolute Value Double-Precision */ \ |
| 441 V(xvnabsdp, XVNABSDP, 0xF00007A4) \ |
| 442 /* VSX Vector Negative Absolute Value Single-Precision */ \ |
| 443 V(xvnabssp, XVNABSSP, 0xF00006A4) \ |
| 444 /* VSX Vector Negate Double-Precision */ \ |
| 445 V(xvnegdp, XVNEGDP, 0xF00007E4) \ |
| 446 /* VSX Vector Negate Single-Precision */ \ |
| 447 V(xvnegsp, XVNEGSP, 0xF00006E4) \ |
| 448 /* VSX Vector Round to Double-Precision Integer */ \ |
| 449 V(xvrdpi, XVRDPI, 0xF0000324) \ |
| 450 /* VSX Vector Round to Double-Precision Integer using Current rounding */ \ |
| 451 /* mode */ \ |
| 452 V(xvrdpic, XVRDPIC, 0xF00003AC) \ |
| 453 /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \ |
| 454 V(xvrdpim, XVRDPIM, 0xF00003E4) \ |
| 455 /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \ |
| 456 V(xvrdpip, XVRDPIP, 0xF00003A4) \ |
| 457 /* VSX Vector Round to Double-Precision Integer toward Zero */ \ |
| 458 V(xvrdpiz, XVRDPIZ, 0xF0000364) \ |
| 459 /* VSX Vector Round to Single-Precision Integer */ \ |
| 460 V(xvrspi, XVRSPI, 0xF0000224) \ |
| 461 /* VSX Vector Round to Single-Precision Integer using Current rounding */ \ |
| 462 /* mode */ \ |
| 463 V(xvrspic, XVRSPIC, 0xF00002AC) \ |
| 464 /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \ |
| 465 V(xvrspim, XVRSPIM, 0xF00002E4) \ |
| 466 /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \ |
| 467 V(xvrspip, XVRSPIP, 0xF00002A4) \ |
| 468 /* VSX Vector Round to Single-Precision Integer toward Zero */ \ |
| 469 V(xvrspiz, XVRSPIZ, 0xF0000264) \ |
| 470 /* VSX Vector Reciprocal Square Root Estimate Double-Precision */ \ |
| 471 V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328) \ |
| 472 /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \ |
| 473 V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \ |
| 474 /* VSX Vector Square Root Double-Precision */ \ |
| 475 V(xvsqrtdp, XVSQRTDP, 0xF000032C) \ |
| 476 /* VSX Vector Square Root Single-Precision */ \ |
| 477 V(xvsqrtsp, XVSQRTSP, 0xF000022C) \ |
| 478 /* VSX Vector Test for software Square Root Double-Precision */ \ |
| 479 V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8) \ |
| 480 /* VSX Vector Test for software Square Root Single-Precision */ \ |
| 481 V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8) |
| 482 |
| 483 #define PPC_EVX_OPCODE_LIST(V) \ |
| 484 /* Vector Load Double Word into Double Word by External PID Indexed */ \ |
| 485 V(evlddepx, EVLDDEPX, 0x7C00063E) \ |
| 486 /* Vector Store Double of Double by External PID Indexed */ \ |
| 487 V(evstddepx, EVSTDDEPX, 0x7C00073E) \ |
| 488 /* Bit Reversed Increment */ \ |
| 489 V(brinc, BRINC, 0x1000020F) \ |
| 490 /* Vector Absolute Value */ \ |
| 491 V(evabs, EVABS, 0x10000208) \ |
| 492 /* Vector Add Immediate Word */ \ |
| 493 V(evaddiw, EVADDIW, 0x10000202) \ |
| 494 /* Vector Add Signed, Modulo, Integer to Accumulator Word */ \ |
| 495 V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9) \ |
| 496 /* Vector Add Signed, Saturate, Integer to Accumulator Word */ \ |
| 497 V(evaddssiaaw, EVADDSSIAAW, 0x100004C1) \ |
| 498 /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */ \ |
| 499 V(evaddumiaaw, EVADDUMIAAW, 0x100004C8) \ |
| 500 /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */ \ |
| 501 V(evaddusiaaw, EVADDUSIAAW, 0x100004C0) \ |
| 502 /* Vector Add Word */ \ |
| 503 V(evaddw, EVADDW, 0x10000200) \ |
| 504 /* Vector AND */ \ |
| 505 V(evand, EVAND, 0x10000211) \ |
| 506 /* Vector AND with Complement */ \ |
| 507 V(evandc, EVANDC, 0x10000212) \ |
| 508 /* Vector Compare Equal */ \ |
| 509 V(evcmpeq, EVCMPEQ, 0x10000234) \ |
| 510 /* Vector Compare Greater Than Signed */ \ |
| 511 V(evcmpgts, EVCMPGTS, 0x10000231) \ |
| 512 /* Vector Compare Greater Than Unsigned */ \ |
| 513 V(evcmpgtu, EVCMPGTU, 0x10000230) \ |
| 514 /* Vector Compare Less Than Signed */ \ |
| 515 V(evcmplts, EVCMPLTS, 0x10000233) \ |
| 516 /* Vector Compare Less Than Unsigned */ \ |
| 517 V(evcmpltu, EVCMPLTU, 0x10000232) \ |
| 518 /* Vector Count Leading Signed Bits Word */ \ |
| 519 V(evcntlsw, EVCNTLSW, 0x1000020E) \ |
| 520 /* Vector Count Leading Zeros Word */ \ |
| 521 V(evcntlzw, EVCNTLZW, 0x1000020D) \ |
| 522 /* Vector Divide Word Signed */ \ |
| 523 V(evdivws, EVDIVWS, 0x100004C6) \ |
| 524 /* Vector Divide Word Unsigned */ \ |
| 525 V(evdivwu, EVDIVWU, 0x100004C7) \ |
| 526 /* Vector Equivalent */ \ |
| 527 V(eveqv, EVEQV, 0x10000219) \ |
| 528 /* Vector Extend Sign Byte */ \ |
| 529 V(evextsb, EVEXTSB, 0x1000020A) \ |
| 530 /* Vector Extend Sign Half Word */ \ |
| 531 V(evextsh, EVEXTSH, 0x1000020B) \ |
| 532 /* Vector Load Double Word into Double Word */ \ |
| 533 V(evldd, EVLDD, 0x10000301) \ |
| 534 /* Vector Load Double Word into Double Word Indexed */ \ |
| 535 V(evlddx, EVLDDX, 0x10000300) \ |
| 536 /* Vector Load Double into Four Half Words */ \ |
| 537 V(evldh, EVLDH, 0x10000305) \ |
| 538 /* Vector Load Double into Four Half Words Indexed */ \ |
| 539 V(evldhx, EVLDHX, 0x10000304) \ |
| 540 /* Vector Load Double into Two Words */ \ |
| 541 V(evldw, EVLDW, 0x10000303) \ |
| 542 /* Vector Load Double into Two Words Indexed */ \ |
| 543 V(evldwx, EVLDWX, 0x10000302) \ |
| 544 /* Vector Load Half Word into Half Words Even and Splat */ \ |
| 545 V(evlhhesplat, EVLHHESPLAT, 0x10000309) \ |
| 546 /* Vector Load Half Word into Half Words Even and Splat Indexed */ \ |
| 547 V(evlhhesplatx, EVLHHESPLATX, 0x10000308) \ |
| 548 /* Vector Load Half Word into Half Word Odd Signed and Splat */ \ |
| 549 V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F) \ |
| 550 /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */ \ |
| 551 V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E) \ |
| 552 /* Vector Load Half Word into Half Word Odd Unsigned and Splat */ \ |
| 553 V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D) \ |
| 554 /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */ \ |
| 555 V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \ |
| 556 /* Vector Load Word into Two Half Words Even */ \ |
| 557 V(evlwhe, EVLWHE, 0x10000311) \ |
| 558 /* Vector Load Word into Two Half Words Even Indexed */ \ |
| 559 V(evlwhex, EVLWHEX, 0x10000310) \ |
| 560 /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \ |
| 561 V(evlwhos, EVLWHOS, 0x10000317) \ |
| 562 /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \ |
| 563 /* extension) */ \ |
| 564 V(evlwhosx, EVLWHOSX, 0x10000316) \ |
| 565 /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */ \ |
| 566 V(evlwhou, EVLWHOU, 0x10000315) \ |
| 567 /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */ \ |
| 568 /* extended) */ \ |
| 569 V(evlwhoux, EVLWHOUX, 0x10000314) \ |
| 570 /* Vector Load Word into Two Half Words and Splat */ \ |
| 571 V(evlwhsplat, EVLWHSPLAT, 0x1000031D) \ |
| 572 /* Vector Load Word into Two Half Words and Splat Indexed */ \ |
| 573 V(evlwhsplatx, EVLWHSPLATX, 0x1000031C) \ |
| 574 /* Vector Load Word into Word and Splat */ \ |
| 575 V(evlwwsplat, EVLWWSPLAT, 0x10000319) \ |
| 576 /* Vector Load Word into Word and Splat Indexed */ \ |
| 577 V(evlwwsplatx, EVLWWSPLATX, 0x10000318) \ |
| 578 /* Vector Merge High */ \ |
| 579 V(evmergehi, EVMERGEHI, 0x1000022C) \ |
| 580 /* Vector Merge High/Low */ \ |
| 581 V(evmergehilo, EVMERGEHILO, 0x1000022E) \ |
| 582 /* Vector Merge Low */ \ |
| 583 V(evmergelo, EVMERGELO, 0x1000022D) \ |
| 584 /* Vector Merge Low/High */ \ |
| 585 V(evmergelohi, EVMERGELOHI, 0x1000022F) \ |
| 586 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \ |
| 587 /* and Accumulate */ \ |
| 588 V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B) \ |
| 589 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \ |
| 590 /* and Accumulate Negative */ \ |
| 591 V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB) \ |
| 592 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \ |
| 593 /* and Accumulate */ \ |
| 594 V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529) \ |
| 595 /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \ |
| 596 /* and Accumulate Negative */ \ |
| 597 V(evmhegsmian, EVMHEGSMIAN, 0x100005A9) \ |
| 598 /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \ |
| 599 /* and Accumulate */ \ |
| 600 V(evmhegumiaa, EVMHEGUMIAA, 0x10000528) \ |
| 601 /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \ |
| 602 /* and Accumulate Negative */ \ |
| 603 V(evmhegumian, EVMHEGUMIAN, 0x100005A8) \ |
| 604 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */ \ |
| 605 V(evmhesmf, EVMHESMF, 0x1000040B) \ |
| 606 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */ \ |
| 607 /* Accumulator */ \ |
| 608 V(evmhesmfa, EVMHESMFA, 0x1000042B) \ |
| 609 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \ |
| 610 /* Accumulate into Words */ \ |
| 611 V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B) \ |
| 612 /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \ |
| 613 /* Accumulate Negative into Words */ \ |
| 614 V(evmhesmfanw, EVMHESMFANW, 0x1000058B) \ |
| 615 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */ \ |
| 616 V(evmhesmi, EVMHESMI, 0x10000409) \ |
| 617 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */ \ |
| 618 /* Accumulator */ \ |
| 619 V(evmhesmia, EVMHESMIA, 0x10000429) \ |
| 620 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \ |
| 621 /* Accumulate into Words */ \ |
| 622 V(evmhesmiaaw, EVMHESMIAAW, 0x10000509) \ |
| 623 /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \ |
| 624 /* Accumulate Negative into Words */ \ |
| 625 V(evmhesmianw, EVMHESMIANW, 0x10000589) \ |
| 626 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */ \ |
| 627 V(evmhessf, EVMHESSF, 0x10000403) \ |
| 628 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */ \ |
| 629 /* Accumulator */ \ |
| 630 V(evmhessfa, EVMHESSFA, 0x10000423) \ |
| 631 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \ |
| 632 /* Accumulate into Words */ \ |
| 633 V(evmhessfaaw, EVMHESSFAAW, 0x10000503) \ |
| 634 /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \ |
| 635 /* Accumulate Negative into Words */ \ |
| 636 V(evmhessfanw, EVMHESSFANW, 0x10000583) \ |
| 637 /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \ |
| 638 /* Accumulate into Words */ \ |
| 639 V(evmhessiaaw, EVMHESSIAAW, 0x10000501) \ |
| 640 /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \ |
| 641 /* Accumulate Negative into Words */ \ |
| 642 V(evmhessianw, EVMHESSIANW, 0x10000581) \ |
| 643 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */ \ |
| 644 V(evmheumi, EVMHEUMI, 0x10000408) \ |
| 645 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */ \ |
| 646 /* Accumulator */ \ |
| 647 V(evmheumia, EVMHEUMIA, 0x10000428) \ |
| 648 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \ |
| 649 /* Accumulate into Words */ \ |
| 650 V(evmheumiaaw, EVMHEUMIAAW, 0x10000508) \ |
| 651 /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \ |
| 652 /* Accumulate Negative into Words */ \ |
| 653 V(evmheumianw, EVMHEUMIANW, 0x10000588) \ |
| 654 /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \ |
| 655 /* Accumulate into Words */ \ |
| 656 V(evmheusiaaw, EVMHEUSIAAW, 0x10000500) \ |
| 657 /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \ |
| 658 /* Accumulate Negative into Words */ \ |
| 659 V(evmheusianw, EVMHEUSIANW, 0x10000580) \ |
| 660 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \ |
| 661 /* and Accumulate */ \ |
| 662 V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F) \ |
| 663 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \ |
| 664 /* and Accumulate Negative */ \ |
| 665 V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF) \ |
  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer */     \
  /* and Accumulate */                                                        \
| 668 V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D) \ |
| 669 /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \ |
| 670 /* Accumulate Negative */ \ |
| 671 V(evmhogsmian, EVMHOGSMIAN, 0x100005AD) \ |
| 672 /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \ |
| 673 /* and Accumulate */ \ |
| 674 V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C) \ |
| 675 /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \ |
| 676 /* and Accumulate Negative */ \ |
| 677 V(evmhogumian, EVMHOGUMIAN, 0x100005AC) \ |
| 678 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */ \ |
| 679 V(evmhosmf, EVMHOSMF, 0x1000040F) \ |
| 680 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */ \ |
| 681 /* Accumulator */ \ |
| 682 V(evmhosmfa, EVMHOSMFA, 0x1000042F) \ |
| 683 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \ |
| 684 /* Accumulate into Words */ \ |
| 685 V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F) \ |
| 686 /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \ |
| 687 /* Accumulate Negative into Words */ \ |
| 688 V(evmhosmfanw, EVMHOSMFANW, 0x1000058F) \ |
| 689 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */ \ |
| 690 V(evmhosmi, EVMHOSMI, 0x1000040D) \ |
| 691 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */ \ |
| 692 /* Accumulator */ \ |
| 693 V(evmhosmia, EVMHOSMIA, 0x1000042D) \ |
| 694 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \ |
| 695 /* Accumulate into Words */ \ |
| 696 V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D) \ |
| 697 /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \ |
| 698 /* Accumulate Negative into Words */ \ |
| 699 V(evmhosmianw, EVMHOSMIANW, 0x1000058D) \ |
| 700 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */ \ |
| 701 V(evmhossf, EVMHOSSF, 0x10000407) \ |
| 702 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */ \ |
| 703 /* Accumulator */ \ |
| 704 V(evmhossfa, EVMHOSSFA, 0x10000427) \ |
| 705 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \ |
| 706 /* Accumulate into Words */ \ |
| 707 V(evmhossfaaw, EVMHOSSFAAW, 0x10000507) \ |
| 708 /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \ |
| 709 /* Accumulate Negative into Words */ \ |
| 710 V(evmhossfanw, EVMHOSSFANW, 0x10000587) \ |
| 711 /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \ |
| 712 /* Accumulate into Words */ \ |
| 713 V(evmhossiaaw, EVMHOSSIAAW, 0x10000505) \ |
| 714 /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \ |
| 715 /* Accumulate Negative into Words */ \ |
| 716 V(evmhossianw, EVMHOSSIANW, 0x10000585) \ |
| 717 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */ \ |
| 718 V(evmhoumi, EVMHOUMI, 0x1000040C) \ |
| 719 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */ \ |
| 720 /* Accumulator */ \ |
| 721 V(evmhoumia, EVMHOUMIA, 0x1000042C) \ |
| 722 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \ |
| 723 /* Accumulate into Words */ \ |
| 724 V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C) \ |
| 725 /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \ |
| 726 /* Accumulate Negative into Words */ \ |
| 727 V(evmhoumianw, EVMHOUMIANW, 0x1000058C) \ |
| 728 /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \ |
| 729 /* Accumulate into Words */ \ |
| 730 V(evmhousiaaw, EVMHOUSIAAW, 0x10000504) \ |
| 731 /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \ |
| 732 /* Accumulate Negative into Words */ \ |
| 733 V(evmhousianw, EVMHOUSIANW, 0x10000584) \ |
| 734 /* Initialize Accumulator */ \ |
| 735 V(evmra, EVMRA, 0x100004C4) \ |
| 736 /* Vector Multiply Word High Signed, Modulo, Fractional */ \ |
| 737 V(evmwhsmf, EVMWHSMF, 0x1000044F) \ |
| 738 /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */ \ |
| 739 V(evmwhsmfa, EVMWHSMFA, 0x1000046F) \ |
| 740 /* Vector Multiply Word High Signed, Modulo, Integer */ \ |
| 741 V(evmwhsmi, EVMWHSMI, 0x1000044D) \ |
| 742 /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */ \ |
| 743 V(evmwhsmia, EVMWHSMIA, 0x1000046D) \ |
| 744 /* Vector Multiply Word High Signed, Saturate, Fractional */ \ |
| 745 V(evmwhssf, EVMWHSSF, 0x10000447) \ |
| 746 /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \ |
| 747 V(evmwhssfa, EVMWHSSFA, 0x10000467) \ |
| 748 /* Vector Multiply Word High Unsigned, Modulo, Integer */ \ |
| 749 V(evmwhumi, EVMWHUMI, 0x1000044C) \ |
| 750 /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */ \ |
| 751 V(evmwhumia, EVMWHUMIA, 0x1000046C) \ |
| 752 /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */ \ |
| 753 /* Words */ \ |
| 754 V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549) \ |
| 755 /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */ \ |
| 756 /* Negative in Words */ \ |
| 757 V(evmwlsmianw, EVMWLSMIANW, 0x100005C9) \ |
| 758 /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */ \ |
| 759 /* Words */ \ |
| 760 V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541) \ |
| 761 /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */ \ |
| 762 /* Negative in Words */ \ |
| 763 V(evmwlssianw, EVMWLSSIANW, 0x100005C1) \ |
| 764 /* Vector Multiply Word Low Unsigned, Modulo, Integer */ \ |
| 765 V(evmwlumi, EVMWLUMI, 0x10000448) \ |
| 766 /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */ \ |
| 767 V(evmwlumia, EVMWLUMIA, 0x10000468) \ |
| 768 /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */ \ |
| 769 /* Words */ \ |
| 770 V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548) \ |
| 771 /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */ \ |
| 772 /* Negative in Words */ \ |
| 773 V(evmwlumianw, EVMWLUMIANW, 0x100005C8) \ |
| 774 /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \ |
| 775 /* in Words */ \ |
| 776 V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540) \ |
| 777 /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \ |
| 778 /* Negative in Words */ \ |
| 779 V(evmwlusianw, EVMWLUSIANW, 0x100005C0) \ |
| 780 /* Vector Multiply Word Signed, Modulo, Fractional */ \ |
| 781 V(evmwsmf, EVMWSMF, 0x1000045B) \ |
| 782 /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */ \ |
| 783 V(evmwsmfa, EVMWSMFA, 0x1000047B) \ |
| 784 /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \ |
| 785 V(evmwsmfaa, EVMWSMFAA, 0x1000055B) \ |
| 786 /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \ |
| 787 /* Negative */ \ |
| 788 V(evmwsmfan, EVMWSMFAN, 0x100005DB) \ |
| 789 /* Vector Multiply Word Signed, Modulo, Integer */ \ |
| 790 V(evmwsmi, EVMWSMI, 0x10000459) \ |
| 791 /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */ \ |
| 792 V(evmwsmia, EVMWSMIA, 0x10000479) \ |
| 793 /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */ \ |
| 794 V(evmwsmiaa, EVMWSMIAA, 0x10000559) \ |
| 795 /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */ \ |
| 796 V(evmwsmian, EVMWSMIAN, 0x100005D9) \ |
| 797 /* Vector Multiply Word Signed, Saturate, Fractional */ \ |
| 798 V(evmwssf, EVMWSSF, 0x10000453) \ |
| 799 /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */ \ |
| 800 V(evmwssfa, EVMWSSFA, 0x10000473) \ |
| 801 /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \ |
| 802 V(evmwssfaa, EVMWSSFAA, 0x10000553) \ |
| 803 /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \ |
| 804 /* Negative */ \ |
| 805 V(evmwssfan, EVMWSSFAN, 0x100005D3) \ |
| 806 /* Vector Multiply Word Unsigned, Modulo, Integer */ \ |
| 807 V(evmwumi, EVMWUMI, 0x10000458) \ |
| 808 /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */ \ |
| 809 V(evmwumia, EVMWUMIA, 0x10000478) \ |
| 810 /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \ |
| 811 V(evmwumiaa, EVMWUMIAA, 0x10000558) \ |
| 812 /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \ |
| 813 /* Negative */ \ |
| 814 V(evmwumian, EVMWUMIAN, 0x100005D8) \ |
| 815 /* Vector NAND */ \ |
| 816 V(evnand, EVNAND, 0x1000021E) \ |
| 817 /* Vector Negate */ \ |
| 818 V(evneg, EVNEG, 0x10000209) \ |
| 819 /* Vector NOR */ \ |
| 820 V(evnor, EVNOR, 0x10000218) \ |
| 821 /* Vector OR */ \ |
| 822 V(evor, EVOR, 0x10000217) \ |
| 823 /* Vector OR with Complement */ \ |
| 824 V(evorc, EVORC, 0x1000021B) \ |
| 825 /* Vector Rotate Left Word */ \ |
| 826 V(evrlw, EVRLW, 0x10000228) \ |
| 827 /* Vector Rotate Left Word Immediate */ \ |
| 828 V(evrlwi, EVRLWI, 0x1000022A) \ |
| 829 /* Vector Round Word */ \ |
| 830 V(evrndw, EVRNDW, 0x1000020C) \ |
| 831 /* Vector Shift Left Word */ \ |
| 832 V(evslw, EVSLW, 0x10000224) \ |
| 833 /* Vector Shift Left Word Immediate */ \ |
| 834 V(evslwi, EVSLWI, 0x10000226) \ |
| 835 /* Vector Splat Fractional Immediate */ \ |
| 836 V(evsplatfi, EVSPLATFI, 0x1000022B) \ |
| 837 /* Vector Splat Immediate */ \ |
| 838 V(evsplati, EVSPLATI, 0x10000229) \ |
| 839 /* Vector Shift Right Word Immediate Signed */ \ |
| 840 V(evsrwis, EVSRWIS, 0x10000223) \ |
| 841 /* Vector Shift Right Word Immediate Unsigned */ \ |
| 842 V(evsrwiu, EVSRWIU, 0x10000222) \ |
| 843 /* Vector Shift Right Word Signed */ \ |
| 844 V(evsrws, EVSRWS, 0x10000221) \ |
| 845 /* Vector Shift Right Word Unsigned */ \ |
| 846 V(evsrwu, EVSRWU, 0x10000220) \ |
| 847 /* Vector Store Double of Double */ \ |
| 848 V(evstdd, EVSTDD, 0x10000321) \ |
| 849 /* Vector Store Double of Double Indexed */ \ |
| 850 V(evstddx, EVSTDDX, 0x10000320) \ |
| 851 /* Vector Store Double of Four Half Words */ \ |
| 852 V(evstdh, EVSTDH, 0x10000325) \ |
| 853 /* Vector Store Double of Four Half Words Indexed */ \ |
| 854 V(evstdhx, EVSTDHX, 0x10000324) \ |
| 855 /* Vector Store Double of Two Words */ \ |
| 856 V(evstdw, EVSTDW, 0x10000323) \ |
| 857 /* Vector Store Double of Two Words Indexed */ \ |
| 858 V(evstdwx, EVSTDWX, 0x10000322) \ |
| 859 /* Vector Store Word of Two Half Words from Even */ \ |
| 860 V(evstwhe, EVSTWHE, 0x10000331) \ |
| 861 /* Vector Store Word of Two Half Words from Even Indexed */ \ |
| 862 V(evstwhex, EVSTWHEX, 0x10000330) \ |
| 863 /* Vector Store Word of Two Half Words from Odd */ \ |
| 864 V(evstwho, EVSTWHO, 0x10000335) \ |
| 865 /* Vector Store Word of Two Half Words from Odd Indexed */ \ |
| 866 V(evstwhox, EVSTWHOX, 0x10000334) \ |
| 867 /* Vector Store Word of Word from Even */ \ |
| 868 V(evstwwe, EVSTWWE, 0x10000339) \ |
| 869 /* Vector Store Word of Word from Even Indexed */ \ |
| 870 V(evstwwex, EVSTWWEX, 0x10000338) \ |
| 871 /* Vector Store Word of Word from Odd */ \ |
| 872 V(evstwwo, EVSTWWO, 0x1000033D) \ |
| 873 /* Vector Store Word of Word from Odd Indexed */ \ |
| 874 V(evstwwox, EVSTWWOX, 0x1000033C) \ |
| 875 /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */ \ |
| 876 V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB) \ |
| 877 /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */ \ |
| 878 V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3) \ |
| 879 /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */ \ |
| 880 V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA) \ |
| 881 /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */ \ |
| 882 V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2) \ |
| 883 /* Vector Subtract from Word */ \ |
| 884 V(evsubfw, EVSUBFW, 0x10000204) \ |
| 885 /* Vector Subtract Immediate from Word */ \ |
| 886 V(evsubifw, EVSUBIFW, 0x10000206) \ |
| 887 /* Vector XOR */ \ |
| 888 V(evxor, EVXOR, 0x10000216) \ |
| 889 /* Floating-Point Double-Precision Absolute Value */ \ |
| 890 V(efdabs, EFDABS, 0x100002E4) \ |
| 891 /* Floating-Point Double-Precision Add */ \ |
| 892 V(efdadd, EFDADD, 0x100002E0) \ |
| 893 /* Floating-Point Double-Precision Convert from Single-Precision */ \ |
| 894 V(efdcfs, EFDCFS, 0x100002EF) \ |
| 895 /* Convert Floating-Point Double-Precision from Signed Fraction */ \ |
| 896 V(efdcfsf, EFDCFSF, 0x100002F3) \ |
| 897 /* Convert Floating-Point Double-Precision from Signed Integer */ \ |
| 898 V(efdcfsi, EFDCFSI, 0x100002F1) \ |
| 899 /* Convert Floating-Point Double-Precision from Signed Integer */ \ |
| 900 /* Doubleword */ \ |
| 901 V(efdcfsid, EFDCFSID, 0x100002E3) \ |
| 902 /* Convert Floating-Point Double-Precision from Unsigned Fraction */ \ |
| 903 V(efdcfuf, EFDCFUF, 0x100002F2) \ |
| 904 /* Convert Floating-Point Double-Precision from Unsigned Integer */ \ |
| 905 V(efdcfui, EFDCFUI, 0x100002F0) \ |
  /* Convert Floating-Point Double-Precision from Unsigned Integer */         \
  /* Doubleword */                                                            \
| 908 V(efdcfuid, EFDCFUID, 0x100002E2) \ |
| 909 /* Floating-Point Double-Precision Compare Equal */ \ |
| 910 V(efdcmpeq, EFDCMPEQ, 0x100002EE) \ |
| 911 /* Floating-Point Double-Precision Compare Greater Than */ \ |
| 912 V(efdcmpgt, EFDCMPGT, 0x100002EC) \ |
| 913 /* Floating-Point Double-Precision Compare Less Than */ \ |
| 914 V(efdcmplt, EFDCMPLT, 0x100002ED) \ |
| 915 /* Convert Floating-Point Double-Precision to Signed Fraction */ \ |
| 916 V(efdctsf, EFDCTSF, 0x100002F7) \ |
| 917 /* Convert Floating-Point Double-Precision to Signed Integer */ \ |
| 918 V(efdctsi, EFDCTSI, 0x100002F5) \ |
| 919 /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */ \ |
| 920 /* with Round toward Zero */ \ |
| 921 V(efdctsidz, EFDCTSIDZ, 0x100002EB) \ |
| 922 /* Convert Floating-Point Double-Precision to Signed Integer with Round */ \ |
| 923 /* toward Zero */ \ |
| 924 V(efdctsiz, EFDCTSIZ, 0x100002FA) \ |
| 925 /* Convert Floating-Point Double-Precision to Unsigned Fraction */ \ |
| 926 V(efdctuf, EFDCTUF, 0x100002F6) \ |
| 927 /* Convert Floating-Point Double-Precision to Unsigned Integer */ \ |
| 928 V(efdctui, EFDCTUI, 0x100002F4) \ |
| 929 /* Convert Floating-Point Double-Precision to Unsigned Integer */ \ |
| 930 /* Doubleword with Round toward Zero */ \ |
| 931 V(efdctuidz, EFDCTUIDZ, 0x100002EA) \ |
| 932 /* Convert Floating-Point Double-Precision to Unsigned Integer with */ \ |
| 933 /* Round toward Zero */ \ |
| 934 V(efdctuiz, EFDCTUIZ, 0x100002F8) \ |
| 935 /* Floating-Point Double-Precision Divide */ \ |
| 936 V(efddiv, EFDDIV, 0x100002E9) \ |
| 937 /* Floating-Point Double-Precision Multiply */ \ |
| 938 V(efdmul, EFDMUL, 0x100002E8) \ |
| 939 /* Floating-Point Double-Precision Negative Absolute Value */ \ |
| 940 V(efdnabs, EFDNABS, 0x100002E5) \ |
| 941 /* Floating-Point Double-Precision Negate */ \ |
| 942 V(efdneg, EFDNEG, 0x100002E6) \ |
| 943 /* Floating-Point Double-Precision Subtract */ \ |
| 944 V(efdsub, EFDSUB, 0x100002E1) \ |
| 945 /* Floating-Point Double-Precision Test Equal */ \ |
| 946 V(efdtsteq, EFDTSTEQ, 0x100002FE) \ |
| 947 /* Floating-Point Double-Precision Test Greater Than */ \ |
| 948 V(efdtstgt, EFDTSTGT, 0x100002FC) \ |
| 949 /* Floating-Point Double-Precision Test Less Than */ \ |
| 950 V(efdtstlt, EFDTSTLT, 0x100002FD) \ |
| 951 /* Floating-Point Single-Precision Convert from Double-Precision */ \ |
| 952 V(efscfd, EFSCFD, 0x100002CF) \ |
| 953 /* Floating-Point Absolute Value */ \ |
| 954 V(efsabs, EFSABS, 0x100002C4) \ |
| 955 /* Floating-Point Add */ \ |
| 956 V(efsadd, EFSADD, 0x100002C0) \ |
| 957 /* Convert Floating-Point from Signed Fraction */ \ |
| 958 V(efscfsf, EFSCFSF, 0x100002D3) \ |
| 959 /* Convert Floating-Point from Signed Integer */ \ |
| 960 V(efscfsi, EFSCFSI, 0x100002D1) \ |
| 961 /* Convert Floating-Point from Unsigned Fraction */ \ |
| 962 V(efscfuf, EFSCFUF, 0x100002D2) \ |
| 963 /* Convert Floating-Point from Unsigned Integer */ \ |
| 964 V(efscfui, EFSCFUI, 0x100002D0) \ |
| 965 /* Floating-Point Compare Equal */ \ |
| 966 V(efscmpeq, EFSCMPEQ, 0x100002CE) \ |
| 967 /* Floating-Point Compare Greater Than */ \ |
| 968 V(efscmpgt, EFSCMPGT, 0x100002CC) \ |
| 969 /* Floating-Point Compare Less Than */ \ |
| 970 V(efscmplt, EFSCMPLT, 0x100002CD) \ |
| 971 /* Convert Floating-Point to Signed Fraction */ \ |
| 972 V(efsctsf, EFSCTSF, 0x100002D7) \ |
| 973 /* Convert Floating-Point to Signed Integer */ \ |
| 974 V(efsctsi, EFSCTSI, 0x100002D5) \ |
| 975 /* Convert Floating-Point to Signed Integer with Round toward Zero */ \ |
| 976 V(efsctsiz, EFSCTSIZ, 0x100002DA) \ |
| 977 /* Convert Floating-Point to Unsigned Fraction */ \ |
| 978 V(efsctuf, EFSCTUF, 0x100002D6) \ |
| 979 /* Convert Floating-Point to Unsigned Integer */ \ |
| 980 V(efsctui, EFSCTUI, 0x100002D4) \ |
| 981 /* Convert Floating-Point to Unsigned Integer with Round toward Zero */ \ |
| 982 V(efsctuiz, EFSCTUIZ, 0x100002D8) \ |
| 983 /* Floating-Point Divide */ \ |
| 984 V(efsdiv, EFSDIV, 0x100002C9) \ |
| 985 /* Floating-Point Multiply */ \ |
| 986 V(efsmul, EFSMUL, 0x100002C8) \ |
| 987 /* Floating-Point Negative Absolute Value */ \ |
| 988 V(efsnabs, EFSNABS, 0x100002C5) \ |
| 989 /* Floating-Point Negate */ \ |
| 990 V(efsneg, EFSNEG, 0x100002C6) \ |
| 991 /* Floating-Point Subtract */ \ |
| 992 V(efssub, EFSSUB, 0x100002C1) \ |
| 993 /* Floating-Point Test Equal */ \ |
| 994 V(efststeq, EFSTSTEQ, 0x100002DE) \ |
| 995 /* Floating-Point Test Greater Than */ \ |
| 996 V(efststgt, EFSTSTGT, 0x100002DC) \ |
| 997 /* Floating-Point Test Less Than */ \ |
| 998 V(efststlt, EFSTSTLT, 0x100002DD) \ |
| 999 /* Vector Floating-Point Absolute Value */ \ |
| 1000 V(evfsabs, EVFSABS, 0x10000284) \ |
| 1001 /* Vector Floating-Point Add */ \ |
| 1002 V(evfsadd, EVFSADD, 0x10000280) \ |
| 1003 /* Vector Convert Floating-Point from Signed Fraction */ \ |
| 1004 V(evfscfsf, EVFSCFSF, 0x10000293) \ |
| 1005 /* Vector Convert Floating-Point from Signed Integer */ \ |
| 1006 V(evfscfsi, EVFSCFSI, 0x10000291) \ |
| 1007 /* Vector Convert Floating-Point from Unsigned Fraction */ \ |
| 1008 V(evfscfuf, EVFSCFUF, 0x10000292) \ |
| 1009 /* Vector Convert Floating-Point from Unsigned Integer */ \ |
| 1010 V(evfscfui, EVFSCFUI, 0x10000290) \ |
| 1011 /* Vector Floating-Point Compare Equal */ \ |
| 1012 V(evfscmpeq, EVFSCMPEQ, 0x1000028E) \ |
| 1013 /* Vector Floating-Point Compare Greater Than */ \ |
| 1014 V(evfscmpgt, EVFSCMPGT, 0x1000028C) \ |
| 1015 /* Vector Floating-Point Compare Less Than */ \ |
| 1016 V(evfscmplt, EVFSCMPLT, 0x1000028D) \ |
| 1017 /* Vector Convert Floating-Point to Signed Fraction */ \ |
| 1018 V(evfsctsf, EVFSCTSF, 0x10000297) \ |
| 1019 /* Vector Convert Floating-Point to Signed Integer */ \ |
| 1020 V(evfsctsi, EVFSCTSI, 0x10000295) \ |
| 1021 /* Vector Convert Floating-Point to Signed Integer with Round toward */ \ |
| 1022 /* Zero */ \ |
| 1023 V(evfsctsiz, EVFSCTSIZ, 0x1000029A) \ |
| 1024 /* Vector Convert Floating-Point to Unsigned Fraction */ \ |
| 1025 V(evfsctuf, EVFSCTUF, 0x10000296) \ |
| 1026 /* Vector Convert Floating-Point to Unsigned Integer */ \ |
| 1027 V(evfsctui, EVFSCTUI, 0x10000294) \ |
| 1028 /* Vector Convert Floating-Point to Unsigned Integer with Round toward */ \ |
| 1029 /* Zero */ \ |
| 1030 V(evfsctuiz, EVFSCTUIZ, 0x10000298) \ |
| 1031 /* Vector Floating-Point Divide */ \ |
| 1032 V(evfsdiv, EVFSDIV, 0x10000289) \ |
| 1033 /* Vector Floating-Point Multiply */ \ |
| 1034 V(evfsmul, EVFSMUL, 0x10000288) \ |
| 1035 /* Vector Floating-Point Negative Absolute Value */ \ |
| 1036 V(evfsnabs, EVFSNABS, 0x10000285) \ |
| 1037 /* Vector Floating-Point Negate */ \ |
| 1038 V(evfsneg, EVFSNEG, 0x10000286) \ |
| 1039 /* Vector Floating-Point Subtract */ \ |
| 1040 V(evfssub, EVFSSUB, 0x10000281) \ |
| 1041 /* Vector Floating-Point Test Equal */ \ |
| 1042 V(evfststeq, EVFSTSTEQ, 0x1000029E) \ |
| 1043 /* Vector Floating-Point Test Greater Than */ \ |
| 1044 V(evfststgt, EVFSTSTGT, 0x1000029C) \ |
| 1045 /* Vector Floating-Point Test Less Than */ \ |
| 1046 V(evfststlt, EVFSTSTLT, 0x1000029D) |
| 1047 |
// Opcode table for the PPC Vector Compare (VC-form) instructions.
// Each V(name, NAME, encoding) entry supplies the assembler mnemonic, the
// enumerator identifier, and the 32-bit instruction encoding; all entries
// share the 0x10000000 primary-opcode bits, and the operand/register fields
// appear to be zeroed in the encoding (base pattern only) — consistent with
// the other opcode lists in this file.
#define PPC_VC_OPCODE_LIST(V)                                                 \
  /* Vector Compare Bounds Single-Precision */                                \
  V(vcmpbfp, VCMPBFP, 0x100003C6)                                             \
  /* Vector Compare Equal To Single-Precision */                              \
  V(vcmpeqfp, VCMPEQFP, 0x100000C6)                                           \
  /* Vector Compare Equal To Unsigned Byte */                                 \
  V(vcmpequb, VCMPEQUB, 0x10000006)                                           \
  /* Vector Compare Equal To Unsigned Doubleword */                           \
  V(vcmpequd, VCMPEQUD, 0x100000C7)                                           \
  /* Vector Compare Equal To Unsigned Halfword */                             \
  V(vcmpequh, VCMPEQUH, 0x10000046)                                           \
  /* Vector Compare Equal To Unsigned Word */                                 \
  V(vcmpequw, VCMPEQUW, 0x10000086)                                           \
  /* Vector Compare Greater Than or Equal To Single-Precision */              \
  V(vcmpgefp, VCMPGEFP, 0x100001C6)                                           \
  /* Vector Compare Greater Than Single-Precision */                          \
  V(vcmpgtfp, VCMPGTFP, 0x100002C6)                                           \
  /* Vector Compare Greater Than Signed Byte */                               \
  V(vcmpgtsb, VCMPGTSB, 0x10000306)                                           \
  /* Vector Compare Greater Than Signed Doubleword */                         \
  V(vcmpgtsd, VCMPGTSD, 0x100003C7)                                           \
  /* Vector Compare Greater Than Signed Halfword */                           \
  V(vcmpgtsh, VCMPGTSH, 0x10000346)                                           \
  /* Vector Compare Greater Than Signed Word */                               \
  V(vcmpgtsw, VCMPGTSW, 0x10000386)                                           \
  /* Vector Compare Greater Than Unsigned Byte */                             \
  V(vcmpgtub, VCMPGTUB, 0x10000206)                                           \
  /* Vector Compare Greater Than Unsigned Doubleword */                       \
  V(vcmpgtud, VCMPGTUD, 0x100002C7)                                           \
  /* Vector Compare Greater Than Unsigned Halfword */                         \
  V(vcmpgtuh, VCMPGTUH, 0x10000246)                                           \
  /* Vector Compare Greater Than Unsigned Word */                             \
  V(vcmpgtuw, VCMPGTUW, 0x10000286)
| 1081 |
| 1082 #define PPC_X_OPCODE_LIST(V) \ |
| 1083 /* Bit Permute Doubleword */ \ |
| 1084 V(bpermd, BPERMD, 0x7C0001F8) \ |
| 1085 /* Count Leading Zeros Doubleword */ \ |
| 1086 V(cntlzd, CNTLZDX, 0x7C000074) \ |
| 1087 /* Extend Sign Word */ \ |
| 1088 V(extsw, EXTSW, 0x7C0007B4) \ |
| 1089 /* Load Doubleword And Reserve Indexed */ \ |
| 1090 V(ldarx, LDARX, 0x7C0000A8) \ |
| 1091 /* Load Doubleword Byte-Reverse Indexed */ \ |
| 1092 V(ldbrx, LDBRX, 0x7C000428) \ |
| 1093 /* Load Doubleword with Update Indexed */ \ |
| 1094 V(ldux, LDUX, 0x7C00006A) \ |
| 1095 /* Load Doubleword Indexed */ \ |
| 1096 V(ldx, LDX, 0x7C00002A) \ |
| 1097 /* Load Word Algebraic with Update Indexed */ \ |
| 1098 V(lwaux, LWAUX, 0x7C0002EA) \ |
| 1099 /* Load Word Algebraic Indexed */ \ |
| 1100 V(lwax, LWAX, 0x7C0002AA) \ |
| 1101 /* Modulo Signed Dword */ \ |
| 1102 V(modsd, MODSD, 0x7C000612) \ |
| 1103 /* Modulo Unsigned Dword */ \ |
| 1104 V(modud, MODUD, 0x7C000212) \ |
| 1105 /* Population Count Doubleword */ \ |
| 1106 V(popcntd, POPCNTD, 0x7C0003F4) \ |
| 1107 /* Parity Doubleword */ \ |
| 1108 V(prtyd, PRTYD, 0x7C000174) \ |
| 1109 /* Shift Left Doubleword */ \ |
| 1110 V(sld, SLDX, 0x7C000036) \ |
| 1111 /* Shift Right Algebraic Doubleword */ \ |
| 1112 V(srad, SRAD, 0x7C000634) \ |
| 1113 /* Shift Right Doubleword */ \ |
| 1114 V(srd, SRDX, 0x7C000436) \ |
| 1115 /* Store Doubleword Byte-Reverse Indexed */ \ |
| 1116 V(stdbrx, STDBRX, 0x7C000528) \ |
| 1117 /* Store Doubleword Conditional Indexed & record CR0 */ \ |
| 1118 V(stdcx, STDCX, 0x7C0001AD) \ |
| 1119 /* Store Doubleword with Update Indexed */ \ |
| 1120 V(stdux, STDUX, 0x7C00016A) \ |
| 1121 /* Store Doubleword Indexed */ \ |
| 1122 V(stdx, STDX, 0x7C00012A) \ |
| 1123 /* Trap Doubleword */ \ |
| 1124 V(td, TD, 0x7C000088) \ |
| 1125 /* AND */ \ |
| 1126 V(andx, ANDX, 0x7C000038) \ |
| 1127 /* AND with Complement */ \ |
| 1128 V(andc, ANDCX, 0x7C000078) \ |
| 1129 /* Branch Conditional to Branch Target Address Register */ \ |
| 1130 V(bctar, BCTAR, 0x4C000460) \ |
| 1131 /* Compare */ \ |
| 1132 V(cmp, CMP, 0x7C000000) \ |
| 1133 /* Compare Byte */ \ |
| 1134 V(cmpb, CMPB, 0x7C0003F8) \ |
| 1135 /* Compare Logical */ \ |
| 1136 V(cmpl, CMPL, 0x7C000040) \ |
| 1137 /* Count Leading Zeros Word */ \ |
| 1138 V(cntlzw, CNTLZWX, 0x7C000034) \ |
| 1139 /* Data Cache Block Flush */ \ |
| 1140 V(dcbf, DCBF, 0x7C0000AC) \ |
| 1141 /* Data Cache Block Store */ \ |
| 1142 V(dcbst, DCBST, 0x7C00006C) \ |
| 1143 /* Data Cache Block Touch */ \ |
| 1144 V(dcbt, DCBT, 0x7C00022C) \ |
| 1145 /* Data Cache Block Touch for Store */ \ |
| 1146 V(dcbtst, DCBTST, 0x7C0001EC) \ |
| 1147 /* Data Cache Block Zero */ \ |
| 1148 V(dcbz, DCBZ, 0x7C0007EC) \ |
| 1149 /* Equivalent */ \ |
| 1150 V(eqv, EQV, 0x7C000238) \ |
| 1151 /* Extend Sign Byte */ \ |
| 1152 V(extsb, EXTSB, 0x7C000774) \ |
| 1153 /* Extend Sign Halfword */ \ |
| 1154 V(extsh, EXTSH, 0x7C000734) \ |
| 1155 /* Instruction Cache Block Invalidate */ \ |
| 1156 V(icbi, ICBI, 0x7C0007AC) \ |
| 1157 /* Load Byte And Reserve Indexed */ \ |
| 1158 V(lbarx, LBARX, 0x7C000068) \ |
| 1159 /* Load Byte and Zero with Update Indexed */ \ |
| 1160 V(lbzux, LBZUX, 0x7C0000EE) \ |
| 1161 /* Load Byte and Zero Indexed */ \ |
| 1162 V(lbzx, LBZX, 0x7C0000AE) \ |
| 1163 /* Load Halfword And Reserve Indexed Xform */ \ |
| 1164 V(lharx, LHARX, 0x7C0000E8) \ |
| 1165 /* Load Halfword Algebraic with Update Indexed */ \ |
| 1166 V(lhaux, LHAUX, 0x7C0002EE) \ |
| 1167 /* Load Halfword Algebraic Indexed */ \ |
| 1168 V(lhax, LHAX, 0x7C0002AE) \ |
| 1169 /* Load Halfword Byte-Reverse Indexed */ \ |
| 1170 V(lhbrx, LHBRX, 0x7C00062C) \ |
| 1171 /* Load Halfword and Zero with Update Indexed */ \ |
| 1172 V(lhzux, LHZUX, 0x7C00026E) \ |
| 1173 /* Load Halfword and Zero Indexed */ \ |
| 1174 V(lhzx, LHZX, 0x7C00022E) \ |
| 1175 /* Load Word and Reserve Indexed */ \ |
| 1176 V(lwarx, LWARX, 0x7C000028) \ |
| 1177 /* Load Word Byte-Reverse Indexed */ \ |
| 1178 V(lwbrx, LWBRX, 0x7C00042C) \ |
| 1179 /* Load Word and Zero with Update Indexed */ \ |
| 1180 V(lwzux, LWZUX, 0x7C00006E) \ |
| 1181 /* Load Word and Zero Indexed */ \ |
| 1182 V(lwzx, LWZX, 0x7C00002E) \ |
| 1183 /* Modulo Signed Word */ \ |
| 1184 V(mods, MODSW, 0x7C000616) \ |
| 1185 /* Modulo Unsigned Word */ \ |
| 1186 V(moduw, MODUW, 0x7C000216) \ |
| 1187 /* NAND */ \ |
| 1188 V(nand, NAND, 0x7C0003B8) \ |
| 1189 /* NOR */ \ |
| 1190 V(nor, NORX, 0x7C0000F8) \ |
| 1191 /* OR */ \ |
| 1192 V(orx, ORX, 0x7C000378) \ |
| 1193 /* OR with Complement */ \ |
| 1194 V(orc, ORC, 0x7C000338) \ |
| 1195 /* Population Count Byte-wise */ \ |
| 1196 V(popcntb, POPCNTB, 0x7C0000F4) \ |
| 1197 /* Population Count Words */ \ |
| 1198 V(popcntw, POPCNTW, 0x7C0002F4) \ |
| 1199 /* Parity Word */ \ |
| 1200 V(prtyw, PRTYW, 0x7C000134) \ |
| 1201 /* Shift Left Word */ \ |
| 1202 V(slw, SLWX, 0x7C000030) \ |
| 1203 /* Shift Right Algebraic Word */ \ |
| 1204 V(sraw, SRAW, 0x7C000630) \ |
| 1205 /* Shift Right Algebraic Word Immediate */ \ |
| 1206 V(srawi, SRAWIX, 0x7C000670) \ |
| 1207 /* Shift Right Word */ \ |
| 1208 V(srw, SRWX, 0x7C000430) \ |
| 1209 /* Store Byte Conditional Indexed */ \ |
| 1210 V(stbcx, STBCX, 0x7C00056D) \ |
| 1211 /* Store Byte with Update Indexed */ \ |
| 1212 V(stbux, STBUX, 0x7C0001EE) \ |
| 1213 /* Store Byte Indexed */ \ |
| 1214 V(stbx, STBX, 0x7C0001AE) \ |
| 1215 /* Store Halfword Byte-Reverse Indexed */ \ |
| 1216 V(sthbrx, STHBRX, 0x7C00072C) \ |
| 1217 /* Store Halfword Conditional Indexed Xform */ \ |
| 1218 V(sthcx, STHCX, 0x7C0005AD) \ |
| 1219 /* Store Halfword with Update Indexed */ \ |
| 1220 V(sthux, STHUX, 0x7C00036E) \ |
| 1221 /* Store Halfword Indexed */ \ |
| 1222 V(sthx, STHX, 0x7C00032E) \ |
| 1223 /* Store Word Byte-Reverse Indexed */ \ |
| 1224 V(stwbrx, STWBRX, 0x7C00052C) \ |
| 1225 /* Store Word Conditional Indexed & record CR0 */ \ |
| 1226 V(stwcx, STWCX, 0x7C00012D) \ |
| 1227 /* Store Word with Update Indexed */ \ |
| 1228 V(stwux, STWUX, 0x7C00016E) \ |
| 1229 /* Store Word Indexed */ \ |
| 1230 V(stwx, STWX, 0x7C00012E) \ |
| 1231 /* Synchronize */ \ |
| 1232 V(sync, SYNC, 0x7C0004AC) \ |
| 1233 /* Trap Word */ \ |
| 1234 V(tw, TW, 0x7C000008) \ |
  /* Executed No Operation */                                                 \
| 1236 V(xnop, XNOP, 0x68000000) \ |
| 1237 /* XOR */ \ |
| 1238 V(xorx, XORX, 0x7C000278) \ |
| 1239 /* Convert Binary Coded Decimal To Declets */ \ |
| 1240 V(cbcdtd, CBCDTD, 0x7C000274) \ |
| 1241 /* Convert Declets To Binary Coded Decimal */ \ |
| 1242 V(cdtbcd, CDTBCD, 0x7C000234) \ |
| 1243 /* Decimal Floating Add */ \ |
| 1244 V(dadd, DADD, 0xEC000004) \ |
| 1245 /* Decimal Floating Add Quad */ \ |
| 1246 V(daddq, DADDQ, 0xFC000004) \ |
| 1247 /* Decimal Floating Convert From Fixed */ \ |
| 1248 V(dcffix, DCFFIX, 0xEC000644) \ |
| 1249 /* Decimal Floating Convert From Fixed Quad */ \ |
| 1250 V(dcffixq, DCFFIXQ, 0xFC000644) \ |
| 1251 /* Decimal Floating Compare Ordered */ \ |
| 1252 V(dcmpo, DCMPO, 0xEC000104) \ |
| 1253 /* Decimal Floating Compare Ordered Quad */ \ |
| 1254 V(dcmpoq, DCMPOQ, 0xFC000104) \ |
| 1255 /* Decimal Floating Compare Unordered */ \ |
| 1256 V(dcmpu, DCMPU, 0xEC000504) \ |
| 1257 /* Decimal Floating Compare Unordered Quad */ \ |
| 1258 V(dcmpuq, DCMPUQ, 0xFC000504) \ |
| 1259 /* Decimal Floating Convert To DFP Long */ \ |
| 1260 V(dctdp, DCTDP, 0xEC000204) \ |
| 1261 /* Decimal Floating Convert To Fixed */ \ |
| 1262 V(dctfix, DCTFIX, 0xEC000244) \ |
| 1263 /* Decimal Floating Convert To Fixed Quad */ \ |
| 1264 V(dctfixq, DCTFIXQ, 0xFC000244) \ |
| 1265 /* Decimal Floating Convert To DFP Extended */ \ |
| 1266 V(dctqpq, DCTQPQ, 0xFC000204) \ |
| 1267 /* Decimal Floating Decode DPD To BCD */ \ |
| 1268 V(ddedpd, DDEDPD, 0xEC000284) \ |
| 1269 /* Decimal Floating Decode DPD To BCD Quad */ \ |
| 1270 V(ddedpdq, DDEDPDQ, 0xFC000284) \ |
| 1271 /* Decimal Floating Divide */ \ |
| 1272 V(ddiv, DDIV, 0xEC000444) \ |
| 1273 /* Decimal Floating Divide Quad */ \ |
| 1274 V(ddivq, DDIVQ, 0xFC000444) \ |
| 1275 /* Decimal Floating Encode BCD To DPD */ \ |
| 1276 V(denbcd, DENBCD, 0xEC000684) \ |
| 1277 /* Decimal Floating Encode BCD To DPD Quad */ \ |
| 1278 V(denbcdq, DENBCDQ, 0xFC000684) \ |
| 1279 /* Decimal Floating Insert Exponent */ \ |
| 1280 V(diex, DIEX, 0xEC0006C4) \ |
| 1281 /* Decimal Floating Insert Exponent Quad */ \ |
| 1282 V(diexq, DIEXQ, 0xFC0006C4) \ |
| 1283 /* Decimal Floating Multiply */ \ |
| 1284 V(dmul, DMUL, 0xEC000044) \ |
| 1285 /* Decimal Floating Multiply Quad */ \ |
| 1286 V(dmulq, DMULQ, 0xFC000044) \ |
| 1287 /* Decimal Floating Round To DFP Long */ \ |
| 1288 V(drdpq, DRDPQ, 0xFC000604) \ |
| 1289 /* Decimal Floating Round To DFP Short */ \ |
| 1290 V(drsp, DRSP, 0xEC000604) \ |
| 1291 /* Decimal Floating Subtract */ \ |
| 1292 V(dsub, DSUB, 0xEC000404) \ |
| 1293 /* Decimal Floating Subtract Quad */ \ |
| 1294 V(dsubq, DSUBQ, 0xFC000404) \ |
| 1295 /* Decimal Floating Test Exponent */ \ |
| 1296 V(dtstex, DTSTEX, 0xEC000144) \ |
| 1297 /* Decimal Floating Test Exponent Quad */ \ |
| 1298 V(dtstexq, DTSTEXQ, 0xFC000144) \ |
| 1299 /* Decimal Floating Test Significance */ \ |
| 1300 V(dtstsf, DTSTSF, 0xEC000544) \ |
| 1301 /* Decimal Floating Test Significance Quad */ \ |
| 1302 V(dtstsfq, DTSTSFQ, 0xFC000544) \ |
| 1303 /* Decimal Floating Extract Exponent */ \ |
| 1304 V(dxex, DXEX, 0xEC0002C4) \ |
| 1305 /* Decimal Floating Extract Exponent Quad */ \ |
| 1306 V(dxexq, DXEXQ, 0xFC0002C4) \ |
| 1307 /* Decorated Storage Notify */ \ |
| 1308 V(dsn, DSN, 0x7C0003C6) \ |
| 1309 /* Load Byte with Decoration Indexed */ \ |
| 1310 V(lbdx, LBDX, 0x7C000406) \ |
| 1311 /* Load Doubleword with Decoration Indexed */ \ |
| 1312 V(lddx, LDDX, 0x7C0004C6) \ |
| 1313 /* Load Floating Doubleword with Decoration Indexed */ \ |
| 1314 V(lfddx, LFDDX, 0x7C000646) \ |
| 1315 /* Load Halfword with Decoration Indexed */ \ |
| 1316 V(lhdx, LHDX, 0x7C000446) \ |
| 1317 /* Load Word with Decoration Indexed */ \ |
| 1318 V(lwdx, LWDX, 0x7C000486) \ |
| 1319 /* Store Byte with Decoration Indexed */ \ |
| 1320 V(stbdx, STBDX, 0x7C000506) \ |
| 1321 /* Store Doubleword with Decoration Indexed */ \ |
| 1322 V(stddx, STDDX, 0x7C0005C6) \ |
| 1323 /* Store Floating Doubleword with Decoration Indexed */ \ |
| 1324 V(stfddx, STFDDX, 0x7C000746) \ |
| 1325 /* Store Halfword with Decoration Indexed */ \ |
| 1326 V(sthdx, STHDX, 0x7C000546) \ |
| 1327 /* Store Word with Decoration Indexed */ \ |
| 1328 V(stwdx, STWDX, 0x7C000586) \ |
| 1329 /* Data Cache Block Allocate */ \ |
| 1330 V(dcba, DCBA, 0x7C0005EC) \ |
| 1331 /* Data Cache Block Invalidate */ \ |
| 1332 V(dcbi, DCBI, 0x7C0003AC) \ |
| 1333 /* Instruction Cache Block Touch */ \ |
| 1334 V(icbt, ICBT, 0x7C00002C) \ |
| 1335 /* Memory Barrier */ \ |
| 1336 V(mbar, MBAR, 0x7C0006AC) \ |
| 1337 /* Move to Condition Register from XER */ \ |
| 1338 V(mcrxr, MCRXR, 0x7C000400) \ |
| 1339 /* TLB Invalidate Local Indexed */ \ |
| 1340 V(tlbilx, TLBILX, 0x7C000024) \ |
| 1341 /* TLB Invalidate Virtual Address Indexed */ \ |
| 1342 V(tlbivax, TLBIVAX, 0x7C000624) \ |
| 1343 /* TLB Read Entry */ \ |
| 1344 V(tlbre, TLBRE, 0x7C000764) \ |
| 1345 /* TLB Search Indexed */ \ |
| 1346 V(tlbsx, TLBSX, 0x7C000724) \ |
| 1347 /* TLB Write Entry */ \ |
| 1348 V(tlbwe, TLBWE, 0x7C0007A4) \ |
| 1349 /* Write External Enable */ \ |
| 1350 V(wrtee, WRTEE, 0x7C000106) \ |
| 1351 /* Write External Enable Immediate */ \ |
| 1352 V(wrteei, WRTEEI, 0x7C000146) \ |
| 1353 /* Data Cache Read */ \ |
| 1354 V(dcread, DCREAD, 0x7C00028C) \ |
| 1355 /* Instruction Cache Read */ \ |
| 1356 V(icread, ICREAD, 0x7C0007CC) \ |
| 1357 /* Data Cache Invalidate */ \ |
| 1358 V(dci, DCI, 0x7C00038C) \ |
| 1359 /* Instruction Cache Invalidate */ \ |
| 1360 V(ici, ICI, 0x7C00078C) \ |
| 1361 /* Move From Device Control Register User Mode Indexed */ \ |
| 1362 V(mfdcrux, MFDCRUX, 0x7C000246) \ |
| 1363 /* Move From Device Control Register Indexed */ \ |
| 1364 V(mfdcrx, MFDCRX, 0x7C000206) \ |
| 1365 /* Move To Device Control Register User Mode Indexed */ \ |
| 1366 V(mtdcrux, MTDCRUX, 0x7C000346) \ |
| 1367 /* Move To Device Control Register Indexed */ \ |
| 1368 V(mtdcrx, MTDCRX, 0x7C000306) \ |
| 1369 /* Return From Debug Interrupt */ \ |
| 1370 V(rfdi, RFDI, 0x4C00004E) \ |
| 1371 /* Data Cache Block Flush by External PID */ \ |
| 1372 V(dcbfep, DCBFEP, 0x7C0000FE) \ |
| 1373 /* Data Cache Block Store by External PID */ \ |
| 1374 V(dcbstep, DCBSTEP, 0x7C00007E) \ |
| 1375 /* Data Cache Block Touch by External PID */ \ |
| 1376 V(dcbtep, DCBTEP, 0x7C00027E) \ |
| 1377 /* Data Cache Block Touch for Store by External PID */ \ |
| 1378 V(dcbtstep, DCBTSTEP, 0x7C0001FE) \ |
| 1379 /* Data Cache Block Zero by External PID */ \ |
| 1380 V(dcbzep, DCBZEP, 0x7C0007FE) \ |
| 1381 /* Instruction Cache Block Invalidate by External PID */ \ |
| 1382 V(icbiep, ICBIEP, 0x7C0007BE) \ |
| 1383 /* Load Byte and Zero by External PID Indexed */ \ |
| 1384 V(lbepx, LBEPX, 0x7C0000BE) \ |
| 1385 /* Load Floating-Point Double by External PID Indexed */ \ |
| 1386 V(lfdepx, LFDEPX, 0x7C0004BE) \ |
| 1387 /* Load Halfword and Zero by External PID Indexed */ \ |
| 1388 V(lhepx, LHEPX, 0x7C00023E) \ |
| 1389 /* Load Vector by External PID Indexed */ \ |
| 1390 V(lvepx, LVEPX, 0x7C00024E) \ |
| 1391 /* Load Vector by External PID Indexed Last */ \ |
| 1392 V(lvepxl, LVEPXL, 0x7C00020E) \ |
| 1393 /* Load Word and Zero by External PID Indexed */ \ |
| 1394 V(lwepx, LWEPX, 0x7C00003E) \ |
| 1395 /* Store Byte by External PID Indexed */ \ |
| 1396 V(stbepx, STBEPX, 0x7C0001BE) \ |
| 1397 /* Store Floating-Point Double by External PID Indexed */ \ |
| 1398 V(stfdepx, STFDEPX, 0x7C0005BE) \ |
| 1399 /* Store Halfword by External PID Indexed */ \ |
| 1400 V(sthepx, STHEPX, 0x7C00033E) \ |
| 1401 /* Store Vector by External PID Indexed */ \ |
| 1402 V(stvepx, STVEPX, 0x7C00064E) \ |
| 1403 /* Store Vector by External PID Indexed Last */ \ |
| 1404 V(stvepxl, STVEPXL, 0x7C00060E) \ |
| 1405 /* Store Word by External PID Indexed */ \ |
| 1406 V(stwepx, STWEPX, 0x7C00013E) \ |
| 1407 /* Load Doubleword by External PID Indexed */ \ |
| 1408 V(ldepx, LDEPX, 0x7C00003A) \ |
| 1409 /* Store Doubleword by External PID Indexed */ \ |
| 1410 V(stdepx, STDEPX, 0x7C00013A) \ |
| 1411 /* TLB Search and Reserve Indexed */ \ |
| 1412 V(tlbsrx, TLBSRX, 0x7C0006A5) \ |
| 1413 /* External Control In Word Indexed */ \ |
| 1414 V(eciwx, ECIWX, 0x7C00026C) \ |
| 1415 /* External Control Out Word Indexed */ \ |
| 1416 V(ecowx, ECOWX, 0x7C00036C) \ |
| 1417 /* Data Cache Block Lock Clear */ \ |
| 1418 V(dcblc, DCBLC, 0x7C00030C) \ |
| 1419 /* Data Cache Block Lock Query */ \ |
| 1420 V(dcblq, DCBLQ, 0x7C00034D) \ |
| 1421 /* Data Cache Block Touch and Lock Set */ \ |
| 1422 V(dcbtls, DCBTLS, 0x7C00014C) \ |
| 1423 /* Data Cache Block Touch for Store and Lock Set */ \ |
| 1424 V(dcbtstls, DCBTSTLS, 0x7C00010C) \ |
| 1425 /* Instruction Cache Block Lock Clear */ \ |
| 1426 V(icblc, ICBLC, 0x7C0001CC) \ |
| 1427 /* Instruction Cache Block Lock Query */ \ |
| 1428 V(icblq, ICBLQ, 0x7C00018D) \ |
| 1429 /* Instruction Cache Block Touch and Lock Set */ \ |
| 1430 V(icbtls, ICBTLS, 0x7C0003CC) \ |
| 1431 /* Floating Compare Ordered */ \ |
| 1432 V(fcmpo, FCMPO, 0xFC000040) \ |
| 1433 /* Floating Compare Unordered */ \ |
| 1434 V(fcmpu, FCMPU, 0xFC000000) \ |
| 1435 /* Floating Test for software Divide */ \ |
| 1436 V(ftdiv, FTDIV, 0xFC000100) \ |
| 1437 /* Floating Test for software Square Root */ \ |
| 1438 V(ftsqrt, FTSQRT, 0xFC000140) \ |
| 1439 /* Load Floating-Point Double with Update Indexed */ \ |
| 1440 V(lfdux, LFDUX, 0x7C0004EE) \ |
| 1441 /* Load Floating-Point Double Indexed */ \ |
| 1442 V(lfdx, LFDX, 0x7C0004AE) \ |
| 1443 /* Load Floating-Point as Integer Word Algebraic Indexed */ \ |
| 1444 V(lfiwax, LFIWAX, 0x7C0006AE) \ |
| 1445 /* Load Floating-Point as Integer Word and Zero Indexed */ \ |
| 1446 V(lfiwzx, LFIWZX, 0x7C0006EE) \ |
| 1447 /* Load Floating-Point Single with Update Indexed */ \ |
| 1448 V(lfsux, LFSUX, 0x7C00046E) \ |
| 1449 /* Load Floating-Point Single Indexed */ \ |
| 1450 V(lfsx, LFSX, 0x7C00042E) \ |
| 1451 /* Move To Condition Register from FPSCR */ \ |
| 1452 V(mcrfs, MCRFS, 0xFC000080) \ |
| 1453 /* Store Floating-Point Double with Update Indexed */ \ |
| 1454 V(stfdux, STFDUX, 0x7C0005EE) \ |
| 1455 /* Store Floating-Point Double Indexed */ \ |
| 1456 V(stfdx, STFDX, 0x7C0005AE) \ |
| 1457 /* Store Floating-Point as Integer Word Indexed */ \ |
| 1458 V(stfiwx, STFIWX, 0x7C0007AE) \ |
| 1459 /* Store Floating-Point Single with Update Indexed */ \ |
| 1460 V(stfsux, STFSUX, 0x7C00056E) \ |
| 1461 /* Store Floating-Point Single Indexed */ \ |
| 1462 V(stfsx, STFSX, 0x7C00052E) \ |
| 1463 /* Load Floating-Point Double Pair Indexed */ \ |
| 1464 V(lfdpx, LFDPX, 0x7C00062E) \ |
| 1465 /* Store Floating-Point Double Pair Indexed */ \ |
| 1466 V(stfdpx, STFDPX, 0x7C00072E) \ |
| 1467 /* Floating Absolute Value */ \ |
| 1468 V(fabs, FABS, 0xFC000210) \ |
| 1469 /* Floating Convert From Integer Doubleword */ \ |
| 1470 V(fcfid, FCFID, 0xFC00069C) \ |
| 1471 /* Floating Convert From Integer Doubleword Single */ \ |
| 1472 V(fcfids, FCFIDS, 0xEC00069C) \ |
| 1473 /* Floating Convert From Integer Doubleword Unsigned */ \ |
| 1474 V(fcfidu, FCFIDU, 0xFC00079C) \ |
| 1475 /* Floating Convert From Integer Doubleword Unsigned Single */ \ |
| 1476 V(fcfidus, FCFIDUS, 0xEC00079C) \ |
| 1477 /* Floating Copy Sign */ \ |
| 1478 V(fcpsgn, FCPSGN, 0xFC000010) \ |
| 1479 /* Floating Convert To Integer Doubleword */ \ |
| 1480 V(fctid, FCTID, 0xFC00065C) \ |
| 1481 /* Floating Convert To Integer Doubleword Unsigned */ \ |
| 1482 V(fctidu, FCTIDU, 0xFC00075C) \ |
| 1483 /* Floating Convert To Integer Doubleword Unsigned with round toward */ \ |
| 1484 /* Zero */ \ |
| 1485 V(fctiduz, FCTIDUZ, 0xFC00075E) \ |
| 1486 /* Floating Convert To Integer Doubleword with round toward Zero */ \ |
| 1487 V(fctidz, FCTIDZ, 0xFC00065E) \ |
| 1488 /* Floating Convert To Integer Word */ \ |
| 1489 V(fctiw, FCTIW, 0xFC00001C) \ |
| 1490 /* Floating Convert To Integer Word Unsigned */ \ |
| 1491 V(fctiwu, FCTIWU, 0xFC00011C) \ |
| 1492 /* Floating Convert To Integer Word Unsigned with round toward Zero */ \ |
| 1493 V(fctiwuz, FCTIWUZ, 0xFC00011E) \ |
| 1494 /* Floating Convert To Integer Word with round to Zero */ \ |
| 1495 V(fctiwz, FCTIWZ, 0xFC00001E) \ |
| 1496 /* Floating Move Register */ \ |
| 1497 V(fmr, FMR, 0xFC000090) \ |
| 1498 /* Floating Negative Absolute Value */ \ |
| 1499 V(fnabs, FNABS, 0xFC000110) \ |
| 1500 /* Floating Negate */ \ |
| 1501 V(fneg, FNEG, 0xFC000050) \ |
| 1502 /* Floating Round to Single-Precision */ \ |
| 1503 V(frsp, FRSP, 0xFC000018) \ |
| 1504 /* Move From FPSCR */ \ |
| 1505 V(mffs, MFFS, 0xFC00048E) \ |
| 1506 /* Move To FPSCR Bit 0 */ \ |
| 1507 V(mtfsb0, MTFSB0, 0xFC00008C) \ |
| 1508 /* Move To FPSCR Bit 1 */ \ |
| 1509 V(mtfsb1, MTFSB1, 0xFC00004C) \ |
| 1510 /* Move To FPSCR Field Immediate */ \ |
| 1511 V(mtfsfi, MTFSFI, 0xFC00010C) \ |
| 1512 /* Floating Round To Integer Minus */ \ |
| 1513 V(frim, FRIM, 0xFC0003D0) \ |
| 1514 /* Floating Round To Integer Nearest */ \ |
| 1515 V(frin, FRIN, 0xFC000310) \ |
| 1516 /* Floating Round To Integer Plus */ \ |
| 1517 V(frip, FRIP, 0xFC000390) \ |
| 1518 /* Floating Round To Integer toward Zero */ \ |
| 1519 V(friz, FRIZ, 0xFC000350) \ |
| 1520 /* Multiply Cross Halfword to Word Signed */ \ |
| 1521 V(mulchw, MULCHW, 0x10000150) \ |
| 1522 /* Multiply Cross Halfword to Word Unsigned */ \ |
| 1523 V(mulchwu, MULCHWU, 0x10000110) \ |
| 1524 /* Multiply High Halfword to Word Signed */ \ |
| 1525 V(mulhhw, MULHHW, 0x10000050) \ |
| 1526 /* Multiply High Halfword to Word Unsigned */ \ |
| 1527 V(mulhhwu, MULHHWU, 0x10000010) \ |
| 1528 /* Multiply Low Halfword to Word Signed */ \ |
| 1529 V(mullhw, MULLHW, 0x10000350) \ |
| 1530 /* Multiply Low Halfword to Word Unsigned */ \ |
| 1531 V(mullhwu, MULLHWU, 0x10000310) \ |
| 1532 /* Determine Leftmost Zero Byte DQ 56 E0000000 P 58 LSQ lq Load Quadword */ \ |
| 1533 V(dlmzb, DLMZB, 0x7C00009C) \ |
| 1534 /* Load Quadword And Reserve Indexed */ \ |
| 1535 V(lqarx, LQARX, 0x7C000228) \ |
| 1536 /* Store Quadword Conditional Indexed and record CR0 */ \ |
| 1537 V(stqcx, STQCX, 0x7C00016D) \ |
| 1538 /* Load String Word Immediate */ \ |
| 1539 V(lswi, LSWI, 0x7C0004AA) \ |
| 1540 /* Load String Word Indexed */ \ |
| 1541 V(lswx, LSWX, 0x7C00042A) \ |
| 1542 /* Store String Word Immediate */ \ |
| 1543 V(stswi, STSWI, 0x7C0005AA) \ |
| 1544 /* Store String Word Indexed */ \ |
| 1545 V(stswx, STSWX, 0x7C00052A) \ |
| 1546 /* Clear BHRB */ \ |
| 1547 V(clrbhrb, CLRBHRB, 0x7C00035C) \ |
| 1548 /* Enforce In-order Execution of I/O */ \ |
| 1549 V(eieio, EIEIO, 0x7C0006AC) \ |
| 1550 /* Load Byte and Zero Caching Inhibited Indexed */ \ |
| 1551 V(lbzcix, LBZCIX, 0x7C0006AA) \ |
| 1552 /* Load Doubleword Caching Inhibited Indexed */ \ |
| 1553 V(ldcix, LDCIX, 0x7C0006EA) \ |
| 1554 /* Load Halfword and Zero Caching Inhibited Indexed */ \ |
| 1555 V(lhzcix, LHZCIX, 0x7C00066A) \ |
| 1556 /* Load Word and Zero Caching Inhibited Indexed */ \ |
| 1557 V(lwzcix, LWZCIX, 0x7C00062A) \ |
| 1558 /* Move From Segment Register */ \ |
| 1559 V(mfsr, MFSR, 0x7C0004A6) \ |
| 1560 /* Move From Segment Register Indirect */ \ |
| 1561 V(mfsrin, MFSRIN, 0x7C000526) \ |
| 1562 /* Move To Machine State Register Doubleword */ \ |
| 1563 V(mtmsrd, MTMSRD, 0x7C000164) \ |
| 1564 /* Move To Split Little Endian */ \ |
| 1565 V(mtsle, MTSLE, 0x7C000126) \ |
| 1566 /* Move To Segment Register */ \ |
| 1567 V(mtsr, MTSR, 0x7C0001A4) \ |
| 1568 /* Move To Segment Register Indirect */ \ |
| 1569 V(mtsrin, MTSRIN, 0x7C0001E4) \ |
| 1570 /* SLB Find Entry ESID */ \ |
| 1571 V(slbfee, SLBFEE, 0x7C0007A7) \ |
| 1572 /* SLB Invalidate All */ \ |
| 1573 V(slbia, SLBIA, 0x7C0003E4) \ |
| 1574 /* SLB Invalidate Entry */ \ |
| 1575 V(slbie, SLBIE, 0x7C000364) \ |
| 1576 /* SLB Move From Entry ESID */ \ |
| 1577 V(slbmfee, SLBMFEE, 0x7C000726) \ |
| 1578 /* SLB Move From Entry VSID */ \ |
| 1579 V(slbmfev, SLBMFEV, 0x7C0006A6) \ |
| 1580 /* SLB Move To Entry */ \ |
| 1581 V(slbmte, SLBMTE, 0x7C000324) \ |
| 1582 /* Store Byte Caching Inhibited Indexed */ \ |
| 1583 V(stbcix, STBCIX, 0x7C0007AA) \ |
| 1584 /* Store Doubleword Caching Inhibited Indexed */ \ |
| 1585 V(stdcix, STDCIX, 0x7C0007EA) \ |
| 1586 /* Store Halfword and Zero Caching Inhibited Indexed */ \ |
| 1587 V(sthcix, STHCIX, 0x7C00076A) \ |
| 1588 /* Store Word and Zero Caching Inhibited Indexed */ \ |
| 1589 V(stwcix, STWCIX, 0x7C00072A) \ |
| 1590 /* TLB Invalidate All */ \ |
| 1591 V(tlbia, TLBIA, 0x7C0002E4) \ |
| 1592 /* TLB Invalidate Entry */ \ |
| 1593 V(tlbie, TLBIE, 0x7C000264) \ |
| 1594 /* TLB Invalidate Entry Local */ \ |
| 1595 V(tlbiel, TLBIEL, 0x7C000224) \ |
| 1596 /* Message Clear Privileged */ \ |
| 1597 V(msgclrp, MSGCLRP, 0x7C00015C) \ |
| 1598 /* Message Send Privileged */ \ |
| 1599 V(msgsndp, MSGSNDP, 0x7C00011C) \ |
| 1600 /* Message Clear */ \ |
| 1601 V(msgclr, MSGCLR, 0x7C0001DC) \ |
| 1602 /* Message Send */ \ |
| 1603 V(msgsnd, MSGSND, 0x7C00019C) \ |
| 1604 /* Move From Machine State Register */ \ |
| 1605 V(mfmsr, MFMSR, 0x7C0000A6) \ |
| 1606 /* Move To Machine State Register */ \ |
| 1607 V(mtmsr, MTMSR, 0x7C000124) \ |
| 1608 /* TLB Synchronize */ \ |
| 1609 V(tlbsync, TLBSYNC, 0x7C00046C) \ |
| 1610 /* Transaction Abort */ \ |
| 1611 V(tabort, TABORT, 0x7C00071D) \ |
| 1612 /* Transaction Abort Doubleword Conditional */ \ |
| 1613 V(tabortdc, TABORTDC, 0x7C00065D) \ |
| 1614 /* Transaction Abort Doubleword Conditional Immediate */ \ |
| 1615 V(tabortdci, TABORTDCI, 0x7C0006DD) \ |
| 1616 /* Transaction Abort Word Conditional */ \ |
| 1617 V(tabortwc, TABORTWC, 0x7C00061D) \ |
| 1618 /* Transaction Abort Word Conditional Immediate */ \ |
| 1619 V(tabortwci, TABORTWCI, 0x7C00069D) \ |
| 1620 /* Transaction Begin */ \ |
| 1621 V(tbegin, TBEGIN, 0x7C00051D) \ |
| 1622 /* Transaction Check */ \ |
| 1623 V(tcheck, TCHECK, 0x7C00059C) \ |
| 1624 /* Transaction End */ \ |
| 1625 V(tend, TEND, 0x7C00055C) \ |
| 1626 /* Transaction Recheckpoint */ \ |
| 1627 V(trechkpt, TRECHKPT, 0x7C0007DD) \ |
| 1628 /* Transaction Reclaim */ \ |
| 1629 V(treclaim, TRECLAIM, 0x7C00075D) \ |
| 1630 /* Transaction Suspend or Resume */ \ |
| 1631 V(tsr, TSR, 0x7C0005DC) \ |
| 1632 /* Load Vector Element Byte Indexed */ \ |
| 1633 V(lvebx, LVEBX, 0x7C00000E) \ |
| 1634 /* Load Vector Element Halfword Indexed */ \ |
| 1635 V(lvehx, LVEHX, 0x7C00004E) \ |
| 1636 /* Load Vector Element Word Indexed */ \ |
| 1637 V(lvewx, LVEWX, 0x7C00008E) \ |
| 1638 /* Load Vector for Shift Left */ \ |
| 1639 V(lvsl, LVSL, 0x7C00000C) \ |
| 1640 /* Load Vector for Shift Right */ \ |
| 1641 V(lvsr, LVSR, 0x7C00004C) \ |
| 1642 /* Load Vector Indexed */ \ |
| 1643 V(lvx, LVX, 0x7C0000CE) \ |
| 1644 /* Load Vector Indexed Last */ \ |
| 1645 V(lvxl, LVXL, 0x7C0002CE) \ |
| 1646 /* Store Vector Element Byte Indexed */ \ |
| 1647 V(stvebx, STVEBX, 0x7C00010E) \ |
| 1648 /* Store Vector Element Halfword Indexed */ \ |
| 1649 V(stvehx, STVEHX, 0x7C00014E) \ |
| 1650 /* Store Vector Element Word Indexed */ \ |
| 1651 V(stvewx, STVEWX, 0x7C00018E) \ |
| 1652 /* Store Vector Indexed */ \ |
| 1653 V(stvx, STVX, 0x7C0001CE) \ |
| 1654 /* Store Vector Indexed Last */ \ |
| 1655 V(stvxl, STVXL, 0x7C0003CE) \ |
| 1656 /* Vector Minimum Signed Doubleword */ \ |
| 1657 V(vminsd, VMINSD, 0x100003C2) \ |
| 1658 /* Floating Merge Even Word */ \ |
| 1659 V(fmrgew, FMRGEW, 0xFC00078C) \ |
| 1660 /* Floating Merge Odd Word */ \ |
| 1661 V(fmrgow, FMRGOW, 0xFC00068C) \ |
| 1662 /* Wait for Interrupt */ \ |
| 1663 V(wait, WAIT, 0x7C00007C) |
| 1664 |
| 1665 #define PPC_EVS_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1666 /* Vector Select */ \ |
| 1667 V(evsel, EVSEL, 0x10000278) |
| 1668 |
| 1669 #define PPC_DS_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1670 /* Load Doubleword */ \ |
| 1671 V(ld, LD, 0xE8000000) \ |
| 1672 /* Load Doubleword with Update */ \ |
| 1673 V(ldu, LDU, 0xE8000001) \ |
| 1674 /* Load Word Algebraic */ \ |
| 1675 V(lwa, LWA, 0xE8000002) \ |
| 1676 /* Store Doubleword */ \ |
| 1677 V(std, STD, 0xF8000000) \ |
| 1678 /* Store Doubleword with Update */ \ |
| 1679 V(stdu, STDU, 0xF8000001) \ |
| 1680 /* Load Floating-Point Double Pair */ \ |
| 1681 V(lfdp, LFDP, 0xE4000000) \ |
| 1682 /* Store Floating-Point Double Pair */ \ |
| 1683 V(stfdp, STFDP, 0xF4000000) \ |
| 1684 /* Store Quadword */ \ |
| 1685 V(stq, STQ, 0xF8000002) |
| 1686 |
| 1687 #define PPC_D_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1688 /* Trap Doubleword Immediate */ \ |
| 1689 V(tdi, TDI, 0x08000000) \ |
| 1690 /* Add Immediate */ \ |
| 1691 V(addi, ADDI, 0x38000000) \ |
| 1692 /* Add Immediate Carrying */ \ |
| 1693 V(addic, ADDIC, 0x30000000) \ |
| 1694 /* Add Immediate Carrying & record CR0 */ \ |
| 1695 V(addicx, ADDICx, 0x34000000) \ |
| 1696 /* Add Immediate Shifted */ \ |
| 1697 V(addis, ADDIS, 0x3C000000) \ |
| 1698 /* AND Immediate & record CR0 */ \ |
| 1699 V(andix, ANDIx, 0x70000000) \ |
| 1700 /* AND Immediate Shifted & record CR0 */ \ |
| 1701 V(andisx, ANDISx, 0x74000000) \ |
| 1702 /* Compare Immediate */ \ |
| 1703 V(cmpi, CMPI, 0x2C000000) \ |
| 1704 /* Compare Logical Immediate */ \ |
| 1705 V(cmpli, CMPLI, 0x28000000) \ |
| 1706 /* Load Byte and Zero */ \ |
| 1707 V(lbz, LBZ, 0x88000000) \ |
| 1708 /* Load Byte and Zero with Update */ \ |
| 1709 V(lbzu, LBZU, 0x8C000000) \ |
| 1710 /* Load Halfword Algebraic */ \ |
| 1711 V(lha, LHA, 0xA8000000) \ |
| 1712 /* Load Halfword Algebraic with Update */ \ |
| 1713 V(lhau, LHAU, 0xAC000000) \ |
| 1714 /* Load Halfword and Zero */ \ |
| 1715 V(lhz, LHZ, 0xA0000000) \ |
| 1716 /* Load Halfword and Zero with Update */ \ |
| 1717 V(lhzu, LHZU, 0xA4000000) \ |
| 1718 /* Load Multiple Word */ \ |
| 1719 V(lmw, LMW, 0xB8000000) \ |
| 1720 /* Load Word and Zero */ \ |
| 1721 V(lwz, LWZ, 0x80000000) \ |
| 1722 /* Load Word and Zero with Update */ \ |
| 1723 V(lwzu, LWZU, 0x84000000) \ |
| 1724 /* Multiply Low Immediate */ \ |
| 1725 V(mulli, MULLI, 0x1C000000) \ |
| 1726 /* OR Immediate */ \ |
| 1727 V(ori, ORI, 0x60000000) \ |
| 1728 /* OR Immediate Shifted */ \ |
| 1729 V(oris, ORIS, 0x64000000) \ |
| 1730 /* Store Byte */ \ |
| 1731 V(stb, STB, 0x98000000) \ |
| 1732 /* Store Byte with Update */ \ |
| 1733 V(stbu, STBU, 0x9C000000) \ |
| 1734 /* Store Halfword */ \ |
| 1735 V(sth, STH, 0xB0000000) \ |
| 1736 /* Store Halfword with Update */ \ |
| 1737 V(sthu, STHU, 0xB4000000) \ |
| 1738 /* Store Multiple Word */ \ |
| 1739 V(stmw, STMW, 0xBC000000) \ |
| 1740 /* Store Word */ \ |
| 1741 V(stw, STW, 0x90000000) \ |
| 1742 /* Store Word with Update */ \ |
| 1743 V(stwu, STWU, 0x94000000) \ |
| 1744 /* Subtract From Immediate Carrying */ \ |
| 1745 V(subfic, SUBFIC, 0x20000000) \ |
| 1746 /* Trap Word Immediate */ \ |
| 1747 V(twi, TWI, 0x0C000000) \ |
| 1748 /* XOR Immediate */ \ |
| 1749 V(xori, XORI, 0x68000000) \ |
| 1750 /* XOR Immediate Shifted */ \ |
| 1751 V(xoris, XORIS, 0x6C000000) \ |
| 1752 /* Load Floating-Point Double */ \ |
| 1753 V(lfd, LFD, 0xC8000000) \ |
| 1754 /* Load Floating-Point Double with Update */ \ |
| 1755 V(lfdu, LFDU, 0xCC000000) \ |
| 1756 /* Load Floating-Point Single */ \ |
| 1757 V(lfs, LFS, 0xC0000000) \ |
| 1758 /* Load Floating-Point Single with Update */ \ |
| 1759 V(lfsu, LFSU, 0xC4000000) \ |
| 1760 /* Store Floating-Point Double */ \ |
| 1761 V(stfd, STFD, 0xD8000000) \ |
| 1762 /* Store Floating-Point Double with Update */ \ |
| 1763 V(stfdu, STFDU, 0xDC000000) \ |
| 1764 /* Store Floating-Point Single */ \ |
| 1765 V(stfs, STFS, 0xD0000000) \ |
| 1766 /* Store Floating-Point Single with Update */ \ |
| 1767 V(stfsu, STFSU, 0xD4000000) |
| 1768 |
| 1769 #define PPC_XFL_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1770 /* Move To FPSCR Fields */ \ |
| 1771 V(mtfsf, MTFSF, 0xFC00058E) |
| 1772 |
| 1773 #define PPC_XFX_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1774 /* Move From Condition Register */ \ |
| 1775 V(mfcr, MFCR, 0x7C000026) \ |
| 1776 /* Move From One Condition Register Field */ \ |
| 1777 V(mfocrf, MFOCRF, 0x7C100026) \ |
| 1778 /* Move From Special Purpose Register */ \ |
| 1779 V(mfspr, MFSPR, 0x7C0002A6) \ |
| 1780 /* Move To Condition Register Fields */ \ |
| 1781 V(mtcrf, MTCRF, 0x7C000120) \ |
| 1782 /* Move To One Condition Register Field */ \ |
| 1783 V(mtocrf, MTOCRF, 0x7C100120) \ |
| 1784 /* Move To Special Purpose Register */ \ |
| 1785 V(mtspr, MTSPR, 0x7C0003A6) \ |
| 1786 /* Debugger Notify Halt */ \ |
| 1787 V(dnh, DNH, 0x4C00018C) \ |
| 1788 /* Move From Device Control Register */ \ |
| 1789 V(mfdcr, MFDCR, 0x7C000286) \ |
| 1790 /* Move To Device Control Register */ \ |
| 1791 V(mtdcr, MTDCR, 0x7C000386) \ |
| 1792 /* Move from Performance Monitor Register */ \ |
| 1793 V(mfpmr, MFPMR, 0x7C00029C) \ |
| 1794 /* Move To Performance Monitor Register */ \ |
| 1795 V(mtpmr, MTPMR, 0x7C00039C) \ |
| 1796 /* Move From Branch History Rolling Buffer */ \ |
| 1797 V(mfbhrbe, MFBHRBE, 0x7C00025C) \ |
| 1798 /* Move From Time Base */ \ |
| 1799 V(mftb, MFTB, 0x7C0002E6) |
| 1800 |
| 1801 #define PPC_MDS_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1802 /* Rotate Left Doubleword then Clear Left */ \ |
| 1803 V(rldcl, RLDCL, 0x78000010) \ |
| 1804 /* Rotate Left Doubleword then Clear Right */ \ |
| 1805 V(rldcr, RLDCR, 0x78000012) |
| 1806 |
| 1807 #define PPC_A_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1808 /* Integer Select */ \ |
| 1809 V(isel, ISEL, 0x7C00001E) \ |
| 1810 /* Floating Add */ \ |
| 1811 V(fadd, FADD, 0xFC00002A) \ |
| 1812 /* Floating Add Single */ \ |
| 1813 V(fadds, FADDS, 0xEC00002A) \ |
| 1814 /* Floating Divide */ \ |
| 1815 V(fdiv, FDIV, 0xFC000024) \ |
| 1816 /* Floating Divide Single */ \ |
| 1817 V(fdivs, FDIVS, 0xEC000024) \ |
| 1818 /* Floating Multiply-Add */ \ |
| 1819 V(fmadd, FMADD, 0xFC00003A) \ |
| 1820 /* Floating Multiply-Add Single */ \ |
| 1821 V(fmadds, FMADDS, 0xEC00003A) \ |
| 1822 /* Floating Multiply-Subtract */ \ |
| 1823 V(fmsub, FMSUB, 0xFC000038) \ |
| 1824 /* Floating Multiply-Subtract Single */ \ |
| 1825 V(fmsubs, FMSUBS, 0xEC000038) \ |
| 1826 /* Floating Multiply */ \ |
| 1827 V(fmul, FMUL, 0xFC000032) \ |
| 1828 /* Floating Multiply Single */ \ |
| 1829 V(fmuls, FMULS, 0xEC000032) \ |
| 1830 /* Floating Negative Multiply-Add */ \ |
| 1831 V(fnmadd, FNMADD, 0xFC00003E) \ |
| 1832 /* Floating Negative Multiply-Add Single */ \ |
| 1833 V(fnmadds, FNMADDS, 0xEC00003E) \ |
| 1834 /* Floating Negative Multiply-Subtract */ \ |
| 1835 V(fnmsub, FNMSUB, 0xFC00003C) \ |
| 1836 /* Floating Negative Multiply-Subtract Single */ \ |
| 1837 V(fnmsubs, FNMSUBS, 0xEC00003C) \ |
| 1838 /* Floating Reciprocal Estimate Single */ \ |
| 1839 V(fres, FRES, 0xEC000030) \ |
| 1840 /* Floating Reciprocal Square Root Estimate */ \ |
| 1841 V(frsqrte, FRSQRTE, 0xFC000034) \ |
| 1842 /* Floating Select */ \ |
| 1843 V(fsel, FSEL, 0xFC00002E) \ |
| 1844 /* Floating Square Root */ \ |
| 1845 V(fsqrt, FSQRT, 0xFC00002C) \ |
| 1846 /* Floating Square Root Single */ \ |
| 1847 V(fsqrts, FSQRTS, 0xEC00002C) \ |
| 1848 /* Floating Subtract */ \ |
| 1849 V(fsub, FSUB, 0xFC000028) \ |
| 1850 /* Floating Subtract Single */ \ |
| 1851 V(fsubs, FSUBS, 0xEC000028) \ |
| 1852 /* Floating Reciprocal Estimate */ \ |
| 1853 V(fre, FRE, 0xFC000030) \ |
| 1854 /* Floating Reciprocal Square Root Estimate Single */ \ |
| 1855 V(frsqrtes, FRSQRTES, 0xEC000034) |
| 1856 |
| 1857 #define PPC_VA_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1858 /* Vector Add Extended & write Carry Unsigned Quadword */ \ |
| 1859 V(vaddecuq, VADDECUQ, 0x1000003D) \ |
| 1860 /* Vector Add Extended Unsigned Quadword Modulo */ \ |
| 1861 V(vaddeuqm, VADDEUQM, 0x1000003C) \ |
| 1862 /* Vector Multiply-Add Single-Precision */ \ |
| 1863 V(vmaddfp, VMADDFP, 0x1000002E) \ |
| 1864 /* Vector Multiply-High-Add Signed Halfword Saturate */ \ |
| 1865 V(vmhaddshs, VMHADDSHS, 0x10000020) \ |
| 1866 /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \ |
| 1867 V(vmhraddshs, VMHRADDSHS, 0x10000021) \ |
| 1868 /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \ |
| 1869 V(vmladduhm, VMLADDUHM, 0x10000022) \ |
| 1870 /* Vector Multiply-Sum Mixed Byte Modulo */ \ |
| 1871 V(vmsummbm, VMSUMMBM, 0x10000025) \ |
| 1872 /* Vector Multiply-Sum Signed Halfword Modulo */ \ |
| 1873 V(vmsumshm, VMSUMSHM, 0x10000028) \ |
| 1874 /* Vector Multiply-Sum Signed Halfword Saturate */ \ |
| 1875 V(vmsumshs, VMSUMSHS, 0x10000029) \ |
| 1876 /* Vector Multiply-Sum Unsigned Byte Modulo */ \ |
| 1877 V(vmsumubm, VMSUMUBM, 0x10000024) \ |
| 1878 /* Vector Multiply-Sum Unsigned Halfword Modulo */ \ |
| 1879 V(vmsumuhm, VMSUMUHM, 0x10000026) \ |
| 1880 /* Vector Multiply-Sum Unsigned Halfword Saturate */ \ |
| 1881 V(vmsumuhs, VMSUMUHS, 0x10000027) \ |
| 1882 /* Vector Negative Multiply-Subtract Single-Precision */ \ |
| 1883 V(vnmsubfp, VNMSUBFP, 0x1000002F) \ |
| 1884 /* Vector Permute */ \ |
| 1885 V(vperm, VPERM, 0x1000002B) \ |
| 1886 /* Vector Select */ \ |
| 1887 V(vsel, VSEL, 0x1000002A) \ |
| 1888 /* Vector Shift Left Double by Octet Immediate */ \ |
| 1889 V(vsldoi, VSLDOI, 0x1000002C) \ |
| 1890 /* Vector Subtract Extended & write Carry Unsigned Quadword */ \ |
| 1891 V(vsubecuq, VSUBECUQ, 0x1000003F) \ |
| 1892 /* Vector Subtract Extended Unsigned Quadword Modulo */ \ |
| 1893 V(vsubeuqm, VSUBEUQM, 0x1000003E) \ |
| 1894 /* Vector Permute and Exclusive-OR */ \ |
| 1895 V(vpermxor, VPERMXOR, 0x1000002D) |
| 1896 |
| 1897 #define PPC_XX1_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1898 /* Load VSR Scalar Doubleword Indexed */ \ |
| 1899 V(lxsdx, LXSDX, 0x7C000498) \ |
| 1900 /* Load VSX Scalar as Integer Word Algebraic Indexed */ \ |
| 1901 V(lxsiwax, LXSIWAX, 0x7C000098) \ |
| 1902 /* Load VSX Scalar as Integer Word and Zero Indexed */ \ |
| 1903 V(lxsiwzx, LXSIWZX, 0x7C000018) \ |
| 1904 /* Load VSX Scalar Single-Precision Indexed */ \ |
| 1905 V(lxsspx, LXSSPX, 0x7C000418) \ |
| 1906 /* Load VSR Vector Doubleword*2 Indexed */ \ |
| 1907 V(lxvd, LXVD, 0x7C000698) \ |
| 1908 /* Load VSR Vector Doubleword & Splat Indexed */ \ |
| 1909 V(lxvdsx, LXVDSX, 0x7C000298) \ |
| 1910 /* Load VSR Vector Word*4 Indexed */ \ |
| 1911 V(lxvw, LXVW, 0x7C000618) \ |
| 1912 /* Move From VSR Doubleword */ \ |
| 1913 V(mfvsrd, MFVSRD, 0x7C000066) \ |
| 1914 /* Move From VSR Word and Zero */ \ |
| 1915 V(mfvsrwz, MFVSRWZ, 0x7C0000E6) \ |
| 1916 /* Store VSR Scalar Doubleword Indexed */ \ |
| 1917 V(stxsdx, STXSDX, 0x7C000598) \ |
| 1918 /* Store VSX Scalar as Integer Word Indexed */ \ |
| 1919 V(stxsiwx, STXSIWX, 0x7C000118) \ |
| 1920 /* Store VSR Scalar Word Indexed */ \ |
| 1921 V(stxsspx, STXSSPX, 0x7C000518) \ |
| 1922 /* Store VSR Vector Doubleword*2 Indexed */ \ |
| 1923 V(stxvd, STXVD, 0x7C000798) \ |
| 1924 /* Store VSR Vector Word*4 Indexed */ \ |
| 1925 V(stxvw, STXVW, 0x7C000718) |
| 1926 |
| 1927 #define PPC_B_OPCODE_LIST(V) \ |
| /* Entry format: V(assembler mnemonic, Opcode enum constant, encoding bits). */ \ |
| 1928 /* Branch Conditional */ \ |
| 1929 V(bc, BCX, 0x40000000) |
| 1930 |
| 1931 #define PPC_XO_OPCODE_LIST(V) \ |
| 1932 /* Divide Doubleword */ \ |
| 1933 V(divd, DIVD, 0x7C0003D2) \ |
| 1934 /* Divide Doubleword Extended */ \ |
| 1935 V(divde, DIVDE, 0x7C000352) \ |
| 1936 /* Divide Doubleword Extended & record OV */ \ |
| 1937 V(divdeo, DIVDEO, 0x7C000752) \ |
| 1938 /* Divide Doubleword Extended Unsigned */ \ |
| 1939 V(divdeu, DIVDEU, 0x7C000312) \ |
| 1940 /* Divide Doubleword Extended Unsigned & record OV */ \ |
| 1941 V(divdeuo, DIVDEUO, 0x7C000712) \ |
| 1942 /* Divide Doubleword & record OV */ \ |
| 1943 V(divdo, DIVDO, 0x7C0007D2) \ |
| 1944 /* Divide Doubleword Unsigned */ \ |
| 1945 V(divdu, DIVDU, 0x7C000392) \ |
| 1946 /* Divide Doubleword Unsigned & record OV */ \ |
| 1947 V(divduo, DIVDUO, 0x7C000792) \ |
| 1948 /* Multiply High Doubleword */ \ |
| 1949 V(mulhd, MULHD, 0x7C000092) \ |
| 1950 /* Multiply High Doubleword Unsigned */ \ |
| 1951 V(mulhdu, MULHDU, 0x7C000012) \ |
| 1952 /* Multiply Low Doubleword */ \ |
| 1953 V(mulld, MULLD, 0x7C0001D2) \ |
| 1954 /* Multiply Low Doubleword & record OV */ \ |
| 1955 V(mulldo, MULLDO, 0x7C0005D2) \ |
| 1956 /* Add */ \ |
| 1957 V(add, ADDX, 0x7C000214) \ |
| 1958 /* Add Carrying */ \ |
| 1959 V(addc, ADDCX, 0x7C000014) \ |
| 1960 /* Add Carrying & record OV */ \ |
| 1961 V(addco, ADDCO, 0x7C000414) \ |
| 1962 /* Add Extended */ \ |
| 1963 V(adde, ADDEX, 0x7C000114) \ |
| 1964 /* Add Extended & record OV & record OV */ \ |
| 1965 V(addeo, ADDEO, 0x7C000514) \ |
| 1966 /* Add to Minus One Extended */ \ |
| 1967 V(addme, ADDME, 0x7C0001D4) \ |
| 1968 /* Add to Minus One Extended & record OV */ \ |
| 1969 V(addmeo, ADDMEO, 0x7C0005D4) \ |
| 1970 /* Add & record OV */ \ |
| 1971 V(addo, ADDO, 0x7C000614) \ |
| 1972 /* Add to Zero Extended */ \ |
| 1973 V(addze, ADDZEX, 0x7C000194) \ |
| 1974 /* Add to Zero Extended & record OV */ \ |
| 1975 V(addzeo, ADDZEO, 0x7C000594) \ |
| 1976 /* Divide Word Format */ \ |
| 1977 V(divw, DIVW, 0x7C0003D6) \ |
| 1978 /* Divide Word Extended */ \ |
| 1979 V(divwe, DIVWE, 0x7C000356) \ |
| 1980 /* Divide Word Extended & record OV */ \ |
| 1981 V(divweo, DIVWEO, 0x7C000756) \ |
| 1982 /* Divide Word Extended Unsigned */ \ |
| 1983 V(divweu, DIVWEU, 0x7C000316) \ |
| 1984 /* Divide Word Extended Unsigned & record OV */ \ |
| 1985 V(divweuo, DIVWEUO, 0x7C000716) \ |
| 1986 /* Divide Word & record OV */ \ |
| 1987 V(divwo, DIVWO, 0x7C0007D6) \ |
| 1988 /* Divide Word Unsigned */ \ |
| 1989 V(divwu, DIVWU, 0x7C000396) \ |
| 1990 /* Divide Word Unsigned & record OV */ \ |
| 1991 V(divwuo, DIVWUO, 0x7C000796) \ |
| 1992 /* Multiply High Word */ \ |
| 1993 V(mulhw, MULHWX, 0x7C000096) \ |
| 1994 /* Multiply High Word Unsigned */ \ |
| 1995 V(mulhwu, MULHWUX, 0x7C000016) \ |
| 1996 /* Multiply Low Word */ \ |
| 1997 V(mullw, MULLW, 0x7C0001D6) \ |
| 1998 /* Multiply Low Word & record OV */ \ |
| 1999 V(mullwo, MULLWO, 0x7C0005D6) \ |
| 2000 /* Negate */ \ |
| 2001 V(neg, NEGX, 0x7C0000D0) \ |
| 2002 /* Negate & record OV */ \ |
| 2003 V(nego, NEGO, 0x7C0004D0) \ |
| 2004 /* Subtract From */ \ |
| 2005 V(subf, SUBFX, 0x7C000050) \ |
| 2006 /* Subtract From Carrying */ \ |
| 2007 V(subfc, SUBFCX, 0x7C000010) \ |
| 2008 /* Subtract From Carrying & record OV */ \ |
| 2009 V(subfco, SUBFCO, 0x7C000410) \ |
| 2010 /* Subtract From Extended */ \ |
| 2011 V(subfe, SUBFEX, 0x7C000110) \ |
| 2012 /* Subtract From Extended & record OV */ \ |
| 2013 V(subfeo, SUBFEO, 0x7C000510) \ |
| 2014 /* Subtract From Minus One Extended */ \ |
| 2015 V(subfme, SUBFME, 0x7C0001D0) \ |
| 2016 /* Subtract From Minus One Extended & record OV */ \ |
| 2017 V(subfmeo, SUBFMEO, 0x7C0005D0) \ |
| 2018 /* Subtract From & record OV */ \ |
| 2019 V(subfo, SUBFO, 0x7C000450) \ |
| 2020 /* Subtract From Zero Extended */ \ |
| 2021 V(subfze, SUBFZE, 0x7C000190) \ |
| 2022 /* Subtract From Zero Extended & record OV */ \ |
| 2023 V(subfzeo, SUBFZEO, 0x7C000590) \ |
| 2024 /* Add and Generate Sixes */ \ |
| 2025 V(addg, ADDG, 0x7C000094) \ |
| 2026 /* Multiply Accumulate Cross Halfword to Word Modulo Signed */ \ |
| 2027 V(macchw, MACCHW, 0x10000158) \ |
| 2028 /* Multiply Accumulate Cross Halfword to Word Modulo Signed & record OV */ \ |
| 2029 V(macchwo, MACCHWO, 0x10000158) \ |
| 2030 /* Multiply Accumulate Cross Halfword to Word Saturate Signed */ \ |
| 2031 V(macchws, MACCHWS, 0x100001D8) \ |
| 2032 /* Multiply Accumulate Cross Halfword to Word Saturate Signed & record */ \ |
| 2033 /* OV */ \ |
| 2034 V(macchwso, MACCHWSO, 0x100001D8) \ |
| 2035 /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */ \ |
| 2036 V(macchwsu, MACCHWSU, 0x10000198) \ |
| 2037 /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned & record */ \ |
| 2038 /* OV */ \ |
| 2039 V(macchwsuo, MACCHWSUO, 0x10000198) \ |
| 2040 /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */ \ |
| 2041 V(macchwu, MACCHWU, 0x10000118) \ |
| 2042 /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned & record */ \ |
| 2043 /* OV */ \ |
| 2044 V(macchwuo, MACCHWUO, 0x10000118) \ |
| 2045 /* Multiply Accumulate High Halfword to Word Modulo Signed */ \ |
| 2046 V(machhw, MACHHW, 0x10000058) \ |
| 2047 /* Multiply Accumulate High Halfword to Word Modulo Signed & record OV */ \ |
| 2048 V(machhwo, MACHHWO, 0x10000058) \ |
| 2049 /* Multiply Accumulate High Halfword to Word Saturate Signed */ \ |
| 2050 V(machhws, MACHHWS, 0x100000D8) \ |
| 2051 /* Multiply Accumulate High Halfword to Word Saturate Signed & record OV */ \ |
| 2052 V(machhwso, MACHHWSO, 0x100000D8) \ |
| 2053 /* Multiply Accumulate High Halfword to Word Saturate Unsigned */ \ |
| 2054 V(machhwsu, MACHHWSU, 0x10000098) \ |
| 2055 /* Multiply Accumulate High Halfword to Word Saturate Unsigned & record */ \ |
| 2056 /* OV */ \ |
| 2057 V(machhwsuo, MACHHWSUO, 0x10000098) \ |
| 2058 /* Multiply Accumulate High Halfword to Word Modulo Unsigned */ \ |
| 2059 V(machhwu, MACHHWU, 0x10000018) \ |
| 2060 /* Multiply Accumulate High Halfword to Word Modulo Unsigned & record OV */ \ |
| 2061 V(machhwuo, MACHHWUO, 0x10000018) \ |
| 2062 /* Multiply Accumulate Low Halfword to Word Modulo Signed */ \ |
| 2063 V(maclhw, MACLHW, 0x10000358) \ |
| 2064 /* Multiply Accumulate Low Halfword to Word Modulo Signed & record OV */ \ |
| 2065 V(maclhwo, MACLHWO, 0x10000358) \ |
| 2066 /* Multiply Accumulate Low Halfword to Word Saturate Signed */ \ |
| 2067 V(maclhws, MACLHWS, 0x100003D8) \ |
| 2068 /* Multiply Accumulate Low Halfword to Word Saturate Signed & record OV */ \ |
| 2069 V(maclhwso, MACLHWSO, 0x100003D8) \ |
| 2070 /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */ \ |
| 2071 V(maclhwsu, MACLHWSU, 0x10000398) \ |
| 2072 /* Multiply Accumulate Low Halfword to Word Saturate Unsigned & record */ \ |
| 2073 /* OV */ \ |
| 2074 V(maclhwsuo, MACLHWSUO, 0x10000398) \ |
| 2075 /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */ \ |
| 2076 V(maclhwu, MACLHWU, 0x10000318) \ |
| 2077 /* Multiply Accumulate Low Halfword to Word Modulo Unsigned & record OV */ \ |
| 2078 V(maclhwuo, MACLHWUO, 0x10000318) \ |
| 2079 /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */ \ |
| 2080 V(nmacchw, NMACCHW, 0x1000015C) \ |
| 2081 /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed & */ \ |
| 2082 /* record OV */ \ |
| 2083 V(nmacchwo, NMACCHWO, 0x1000015C) \ |
| 2084 /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \ |
| 2085 V(nmacchws, NMACCHWS, 0x100001DC) \ |
| 2086 /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed & */ \ |
| 2087 /* record OV */ \ |
| 2088 V(nmacchwso, NMACCHWSO, 0x100001DC) \ |
| 2089 /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */ \ |
| 2090 V(nmachhw, NMACHHW, 0x1000005C) \ |
| 2091 /* Negative Multiply Accumulate High Halfword to Word Modulo Signed & */ \ |
| 2092 /* record OV */ \ |
| 2093 V(nmachhwo, NMACHHWO, 0x1000005C) \ |
| 2094 /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */ \ |
| 2095 V(nmachhws, NMACHHWS, 0x100000DC) \ |
| 2096 /* Negative Multiply Accumulate High Halfword to Word Saturate Signed & */ \ |
| 2097 /* record OV */ \ |
| 2098 V(nmachhwso, NMACHHWSO, 0x100000DC) \ |
| 2099 /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */ \ |
| 2100 V(nmaclhw, NMACLHW, 0x1000035C) \ |
| 2101 /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed & */ \ |
| 2102 /* record OV */ \ |
| 2103 V(nmaclhwo, NMACLHWO, 0x1000035C) \ |
| 2104 /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */ \ |
| 2105 V(nmaclhws, NMACLHWS, 0x100003DC) \ |
| 2106 /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed & */ \ |
| 2107 /* record OV */ \ |
| 2108 V(nmaclhwso, NMACLHWSO, 0x100003DC) |
| 2109 |
// XL-form extended opcodes (primary opcode EXT1, 0x4C000000): condition-
// register logical operations, CR-based branches, and interrupt-return /
// power-management instructions.  The 10-bit extended opcode lives in
// instruction bits 10-1.
#define PPC_XL_OPCODE_LIST(V)                                                 \
  /* Branch Conditional to Count Register */                                  \
  V(bcctr, BCCTRX, 0x4C000420)                                                \
  /* Branch Conditional to Link Register */                                   \
  V(bclr, BCLRX, 0x4C000020)                                                  \
  /* Condition Register AND */                                                \
  V(crand, CRAND, 0x4C000202)                                                 \
  /* Condition Register AND with Complement */                                \
  V(crandc, CRANDC, 0x4C000102)                                               \
  /* Condition Register Equivalent */                                         \
  V(creqv, CREQV, 0x4C000242)                                                 \
  /* Condition Register NAND */                                               \
  V(crnand, CRNAND, 0x4C0001C2)                                               \
  /* Condition Register NOR */                                                \
  V(crnor, CRNOR, 0x4C000042)                                                 \
  /* Condition Register OR */                                                 \
  V(cror, CROR, 0x4C000382)                                                   \
  /* Condition Register OR with Complement */                                 \
  V(crorc, CRORC, 0x4C000342)                                                 \
  /* Condition Register XOR */                                                \
  V(crxor, CRXOR, 0x4C000182)                                                 \
  /* Instruction Synchronize */                                               \
  V(isync, ISYNC, 0x4C00012C)                                                 \
  /* Move Condition Register Field */                                         \
  V(mcrf, MCRF, 0x4C000000)                                                   \
  /* Return From Critical Interrupt */                                        \
  V(rfci, RFCI, 0x4C000066)                                                   \
  /* Return From Interrupt */                                                 \
  V(rfi, RFI, 0x4C000064)                                                     \
  /* Return From Machine Check Interrupt */                                   \
  V(rfmci, RFMCI, 0x4C00004C)                                                 \
  /* Embedded Hypervisor Privilege */                                         \
  /* NOTE(review): encoded with primary opcode 0x7C (EXT2), unlike every */   \
  /* other entry in this XL list -- confirm the value is intended. */         \
  V(ehpriv, EHPRIV, 0x7C00021C)                                               \
  /* Return From Guest Interrupt */                                           \
  V(rfgi, RFGI, 0x4C0000CC)                                                   \
  /* Doze */                                                                  \
  V(doze, DOZE, 0x4C000324)                                                   \
  /* Return From Interrupt Doubleword Hypervisor */                           \
  V(hrfid, HRFID, 0x4C000224)                                                 \
  /* Nap */                                                                   \
  V(nap, NAP, 0x4C000364)                                                     \
  /* Return from Event Based Branch */                                        \
  V(rfebb, RFEBB, 0x4C000124)                                                 \
  /* Return from Interrupt Doubleword */                                      \
  V(rfid, RFID, 0x4C000024)                                                   \
  /* Rip Van Winkle */                                                        \
  V(rvwinkle, RVWINKLE, 0x4C0003E4)                                           \
  /* Sleep */                                                                 \
  V(sleep, SLEEP, 0x4C0003A4)
| 2159 |
// XX4-form extended opcodes: VSX four-operand (quad-register) instructions.
#define PPC_XX4_OPCODE_LIST(V)                                                \
  /* VSX Select */                                                            \
  V(xxsel, XXSEL, 0xF0000030)
| 2163 |
// I-form opcodes: unconditional branch with a 24-bit LI displacement field
// plus the AA (absolute) and LK (link) bits in the low two bits.
#define PPC_I_OPCODE_LIST(V)                                                  \
  /* Branch */                                                                \
  V(b, BX, 0x48000000)
| 2167 |
// M-form opcodes: 32-bit rotate-with-mask instructions (rotate amount plus
// MB/ME mask-begin/mask-end fields).
#define PPC_M_OPCODE_LIST(V)                                                  \
  /* Rotate Left Word Immediate then Mask Insert */                           \
  V(rlwimi, RLWIMIX, 0x50000000)                                              \
  /* Rotate Left Word Immediate then AND with Mask */                         \
  V(rlwinm, RLWINMX, 0x54000000)                                              \
  /* Rotate Left Word then AND with Mask */                                   \
  V(rlwnm, RLWNMX, 0x5C000000)
| 2175 |
// VX-form extended opcodes (primary opcode 4 for the vector entries):
// Altivec/VMX arithmetic, average, min/max, merge, multiply, logical,
// pack/unpack, rotate/shift, splat, sum-across and in-core crypto
// instructions.  The extended opcode occupies instruction bits 10-0.
#define PPC_VX_OPCODE_LIST(V)                                                 \
  /* Decimal Add Modulo */                                                    \
  /* NOTE(review): Power ISA encodes bcdadd./bcdsub. under primary */         \
  /* opcode 4 (0x10000000-based); the 0xF0000000-based values below */        \
  /* look suspect -- confirm against the ISA opcode map. */                   \
  V(bcdadd, BCDADD, 0xF0000400)                                               \
  /* Decimal Subtract Modulo */                                               \
  V(bcdsub, BCDSUB, 0xF0000440)                                               \
  /* Move From Vector Status and Control Register */                         \
  V(mfvscr, MFVSCR, 0x10000604)                                               \
  /* Move To Vector Status and Control Register */                            \
  V(mtvscr, MTVSCR, 0x10000644)                                               \
  /* Vector Add & write Carry Unsigned Quadword */                            \
  V(vaddcuq, VADDCUQ, 0x10000140)                                             \
  /* Vector Add and Write Carry-Out Unsigned Word */                          \
  V(vaddcuw, VADDCUW, 0x10000180)                                             \
  /* Vector Add Single-Precision */                                           \
  V(vaddfp, VADDFP, 0x1000000A)                                               \
  /* Vector Add Signed Byte Saturate */                                       \
  V(vaddsbs, VADDSBS, 0x10000300)                                             \
  /* Vector Add Signed Halfword Saturate */                                   \
  V(vaddshs, VADDSHS, 0x10000340)                                             \
  /* Vector Add Signed Word Saturate */                                       \
  V(vaddsws, VADDSWS, 0x10000380)                                             \
  /* Vector Add Unsigned Byte Modulo */                                       \
  V(vaddubm, VADDUBM, 0x10000000)                                             \
  /* Vector Add Unsigned Byte Saturate */                                     \
  V(vaddubs, VADDUBS, 0x10000200)                                             \
  /* Vector Add Unsigned Doubleword Modulo */                                 \
  V(vaddudm, VADDUDM, 0x100000C0)                                             \
  /* Vector Add Unsigned Halfword Modulo */                                   \
  V(vadduhm, VADDUHM, 0x10000040)                                             \
  /* Vector Add Unsigned Halfword Saturate */                                 \
  V(vadduhs, VADDUHS, 0x10000240)                                             \
  /* Vector Add Unsigned Quadword Modulo */                                   \
  V(vadduqm, VADDUQM, 0x10000100)                                             \
  /* Vector Add Unsigned Word Modulo */                                       \
  V(vadduwm, VADDUWM, 0x10000080)                                             \
  /* Vector Add Unsigned Word Saturate */                                     \
  V(vadduws, VADDUWS, 0x10000280)                                             \
  /* Vector Logical AND */                                                    \
  V(vand, VAND, 0x10000404)                                                   \
  /* Vector Logical AND with Complement */                                    \
  V(vandc, VANDC, 0x10000444)                                                 \
  /* Vector Average Signed Byte */                                            \
  V(vavgsb, VAVGSB, 0x10000502)                                               \
  /* Vector Average Signed Halfword */                                        \
  V(vavgsh, VAVGSH, 0x10000542)                                               \
  /* Vector Average Signed Word */                                            \
  V(vavgsw, VAVGSW, 0x10000582)                                               \
  /* Vector Average Unsigned Byte */                                          \
  V(vavgub, VAVGUB, 0x10000402)                                               \
  /* Vector Average Unsigned Halfword */                                      \
  V(vavguh, VAVGUH, 0x10000442)                                               \
  /* Vector Average Unsigned Word */                                          \
  V(vavguw, VAVGUW, 0x10000482)                                               \
  /* Vector Bit Permute Quadword */                                           \
  V(vbpermq, VBPERMQ, 0x1000054C)                                             \
  /* Vector Convert From Signed Fixed-Point Word To Single-Precision */       \
  V(vcfsx, VCFSX, 0x1000034A)                                                 \
  /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */     \
  V(vcfux, VCFUX, 0x1000030A)                                                 \
  /* Vector Count Leading Zeros Byte */                                       \
  V(vclzb, VCLZB, 0x10000702)                                                 \
  /* Vector Count Leading Zeros Doubleword */                                 \
  V(vclzd, VCLZD, 0x100007C2)                                                 \
  /* Vector Count Leading Zeros Halfword */                                   \
  V(vclzh, VCLZH, 0x10000742)                                                 \
  /* Vector Count Leading Zeros Word */                                       \
  V(vclzw, VCLZW, 0x10000782)                                                 \
  /* Vector Convert From Single-Precision To Signed Fixed-Point Word */       \
  /* Saturate */                                                              \
  V(vctsxs, VCTSXS, 0x100003CA)                                               \
  /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */     \
  /* Saturate */                                                              \
  V(vctuxs, VCTUXS, 0x1000038A)                                               \
  /* Vector Equivalence */                                                    \
  V(veqv, VEQV, 0x10000684)                                                   \
  /* Vector 2 Raised to the Exponent Estimate Single-Precision */             \
  V(vexptefp, VEXPTEFP, 0x1000018A)                                           \
  /* Vector Gather Bits by Byte by Doubleword */                              \
  V(vgbbd, VGBBD, 0x1000050C)                                                 \
  /* Vector Log Base 2 Estimate Single-Precision */                           \
  V(vlogefp, VLOGEFP, 0x100001CA)                                             \
  /* Vector Maximum Single-Precision */                                       \
  V(vmaxfp, VMAXFP, 0x1000040A)                                               \
  /* Vector Maximum Signed Byte */                                            \
  V(vmaxsb, VMAXSB, 0x10000102)                                               \
  /* Vector Maximum Signed Doubleword */                                      \
  V(vmaxsd, VMAXSD, 0x100001C2)                                               \
  /* Vector Maximum Signed Halfword */                                        \
  V(vmaxsh, VMAXSH, 0x10000142)                                               \
  /* Vector Maximum Signed Word */                                            \
  V(vmaxsw, VMAXSW, 0x10000182)                                               \
  /* Vector Maximum Unsigned Byte */                                          \
  V(vmaxub, VMAXUB, 0x10000002)                                               \
  /* Vector Maximum Unsigned Doubleword */                                    \
  V(vmaxud, VMAXUD, 0x100000C2)                                               \
  /* Vector Maximum Unsigned Halfword */                                      \
  V(vmaxuh, VMAXUH, 0x10000042)                                               \
  /* Vector Maximum Unsigned Word */                                          \
  V(vmaxuw, VMAXUW, 0x10000082)                                               \
  /* Vector Minimum Single-Precision */                                       \
  V(vminfp, VMINFP, 0x1000044A)                                               \
  /* Vector Minimum Signed Byte */                                            \
  V(vminsb, VMINSB, 0x10000302)                                               \
  /* Vector Minimum Signed Halfword */                                        \
  V(vminsh, VMINSH, 0x10000342)                                               \
  /* Vector Minimum Signed Word */                                            \
  V(vminsw, VMINSW, 0x10000382)                                               \
  /* Vector Minimum Unsigned Byte */                                          \
  V(vminub, VMINUB, 0x10000202)                                               \
  /* Vector Minimum Unsigned Doubleword */                                    \
  V(vminud, VMINUD, 0x100002C2)                                               \
  /* Vector Minimum Unsigned Halfword */                                      \
  V(vminuh, VMINUH, 0x10000242)                                               \
  /* Vector Minimum Unsigned Word */                                          \
  V(vminuw, VMINUW, 0x10000282)                                               \
  /* Vector Merge High Byte */                                                \
  V(vmrghb, VMRGHB, 0x1000000C)                                               \
  /* Vector Merge High Halfword */                                            \
  V(vmrghh, VMRGHH, 0x1000004C)                                               \
  /* Vector Merge High Word */                                                \
  V(vmrghw, VMRGHW, 0x1000008C)                                               \
  /* Vector Merge Low Byte */                                                 \
  V(vmrglb, VMRGLB, 0x1000010C)                                               \
  /* Vector Merge Low Halfword */                                             \
  V(vmrglh, VMRGLH, 0x1000014C)                                               \
  /* Vector Merge Low Word */                                                 \
  V(vmrglw, VMRGLW, 0x1000018C)                                               \
  /* Vector Multiply Even Signed Byte */                                      \
  V(vmulesb, VMULESB, 0x10000308)                                             \
  /* Vector Multiply Even Signed Halfword */                                  \
  V(vmulesh, VMULESH, 0x10000348)                                             \
  /* Vector Multiply Even Signed Word */                                      \
  V(vmulesw, VMULESW, 0x10000388)                                             \
  /* Vector Multiply Even Unsigned Byte */                                    \
  V(vmuleub, VMULEUB, 0x10000208)                                             \
  /* Vector Multiply Even Unsigned Halfword */                                \
  V(vmuleuh, VMULEUH, 0x10000248)                                             \
  /* Vector Multiply Even Unsigned Word */                                    \
  V(vmuleuw, VMULEUW, 0x10000288)                                             \
  /* Vector Multiply Odd Signed Byte */                                       \
  V(vmulosb, VMULOSB, 0x10000108)                                             \
  /* Vector Multiply Odd Signed Halfword */                                   \
  V(vmulosh, VMULOSH, 0x10000148)                                             \
  /* Vector Multiply Odd Signed Word */                                       \
  V(vmulosw, VMULOSW, 0x10000188)                                             \
  /* Vector Multiply Odd Unsigned Byte */                                     \
  V(vmuloub, VMULOUB, 0x10000008)                                             \
  /* Vector Multiply Odd Unsigned Halfword */                                 \
  V(vmulouh, VMULOUH, 0x10000048)                                             \
  /* Vector Multiply Odd Unsigned Word */                                     \
  V(vmulouw, VMULOUW, 0x10000088)                                             \
  /* Vector Multiply Unsigned Word Modulo */                                  \
  V(vmuluwm, VMULUWM, 0x10000089)                                             \
  /* Vector NAND */                                                           \
  V(vnand, VNAND, 0x10000584)                                                 \
  /* Vector Logical NOR */                                                    \
  V(vnor, VNOR, 0x10000504)                                                   \
  /* Vector Logical OR */                                                     \
  V(vor, VOR, 0x10000484)                                                     \
  /* Vector OR with Complement */                                             \
  V(vorc, VORC, 0x10000544)                                                   \
  /* Vector Pack Pixel */                                                     \
  V(vpkpx, VPKPX, 0x1000030E)                                                 \
  /* Vector Pack Signed Doubleword Signed Saturate */                         \
  V(vpksdss, VPKSDSS, 0x100005CE)                                             \
  /* Vector Pack Signed Doubleword Unsigned Saturate */                       \
  V(vpksdus, VPKSDUS, 0x1000054E)                                             \
  /* Vector Pack Signed Halfword Signed Saturate */                           \
  V(vpkshss, VPKSHSS, 0x1000018E)                                             \
  /* Vector Pack Signed Halfword Unsigned Saturate */                         \
  V(vpkshus, VPKSHUS, 0x1000010E)                                             \
  /* Vector Pack Signed Word Signed Saturate */                               \
  V(vpkswss, VPKSWSS, 0x100001CE)                                             \
  /* Vector Pack Signed Word Unsigned Saturate */                             \
  V(vpkswus, VPKSWUS, 0x1000014E)                                             \
  /* Vector Pack Unsigned Doubleword Unsigned Modulo */                       \
  V(vpkudum, VPKUDUM, 0x1000044E)                                             \
  /* Vector Pack Unsigned Doubleword Unsigned Saturate */                     \
  V(vpkudus, VPKUDUS, 0x100004CE)                                             \
  /* Vector Pack Unsigned Halfword Unsigned Modulo */                         \
  V(vpkuhum, VPKUHUM, 0x1000000E)                                             \
  /* Vector Pack Unsigned Halfword Unsigned Saturate */                       \
  V(vpkuhus, VPKUHUS, 0x1000008E)                                             \
  /* Vector Pack Unsigned Word Unsigned Modulo */                             \
  V(vpkuwum, VPKUWUM, 0x1000004E)                                             \
  /* Vector Pack Unsigned Word Unsigned Saturate */                           \
  V(vpkuwus, VPKUWUS, 0x100000CE)                                             \
  /* Vector Polynomial Multiply-Sum Byte */                                   \
  V(vpmsumb, VPMSUMB, 0x10000408)                                             \
  /* Vector Polynomial Multiply-Sum Doubleword */                             \
  V(vpmsumd, VPMSUMD, 0x100004C8)                                             \
  /* Vector Polynomial Multiply-Sum Halfword */                               \
  V(vpmsumh, VPMSUMH, 0x10000448)                                             \
  /* Vector Polynomial Multiply-Sum Word */                                   \
  V(vpmsumw, VPMSUMW, 0x10000488)                                             \
  /* Vector Population Count Byte */                                          \
  V(vpopcntb, VPOPCNTB, 0x10000703)                                           \
  /* Vector Population Count Doubleword */                                    \
  V(vpopcntd, VPOPCNTD, 0x100007C3)                                           \
  /* Vector Population Count Halfword */                                      \
  V(vpopcnth, VPOPCNTH, 0x10000743)                                           \
  /* Vector Population Count Word */                                          \
  V(vpopcntw, VPOPCNTW, 0x10000783)                                           \
  /* Vector Reciprocal Estimate Single-Precision */                           \
  V(vrefp, VREFP, 0x1000010A)                                                 \
  /* Vector Round to Single-Precision Integer toward -Infinity */             \
  V(vrfim, VRFIM, 0x100002CA)                                                 \
  /* Vector Round to Single-Precision Integer Nearest */                      \
  V(vrfin, VRFIN, 0x1000020A)                                                 \
  /* Vector Round to Single-Precision Integer toward +Infinity */             \
  V(vrfip, VRFIP, 0x1000028A)                                                 \
  /* Vector Round to Single-Precision Integer toward Zero */                  \
  V(vrfiz, VRFIZ, 0x1000024A)                                                 \
  /* Vector Rotate Left Byte */                                               \
  V(vrlb, VRLB, 0x10000004)                                                   \
  /* Vector Rotate Left Doubleword */                                         \
  V(vrld, VRLD, 0x100000C4)                                                   \
  /* Vector Rotate Left Halfword */                                           \
  V(vrlh, VRLH, 0x10000044)                                                   \
  /* Vector Rotate Left Word */                                               \
  V(vrlw, VRLW, 0x10000084)                                                   \
  /* Vector Reciprocal Square Root Estimate Single-Precision */               \
  V(vrsqrtefp, VRSQRTEFP, 0x1000014A)                                         \
  /* Vector Shift Left */                                                     \
  V(vsl, VSL, 0x100001C4)                                                     \
  /* Vector Shift Left Byte */                                                \
  V(vslb, VSLB, 0x10000104)                                                   \
  /* Vector Shift Left Doubleword */                                          \
  V(vsld, VSLD, 0x100005C4)                                                   \
  /* Vector Shift Left Halfword */                                            \
  V(vslh, VSLH, 0x10000144)                                                   \
  /* Vector Shift Left by Octet */                                            \
  V(vslo, VSLO, 0x1000040C)                                                   \
  /* Vector Shift Left Word */                                                \
  V(vslw, VSLW, 0x10000184)                                                   \
  /* Vector Splat Byte */                                                     \
  V(vspltb, VSPLTB, 0x1000020C)                                               \
  /* Vector Splat Halfword */                                                 \
  V(vsplth, VSPLTH, 0x1000024C)                                               \
  /* Vector Splat Immediate Signed Byte */                                    \
  V(vspltisb, VSPLTISB, 0x1000030C)                                           \
  /* Vector Splat Immediate Signed Halfword */                                \
  V(vspltish, VSPLTISH, 0x1000034C)                                           \
  /* Vector Splat Immediate Signed Word */                                    \
  V(vspltisw, VSPLTISW, 0x1000038C)                                           \
  /* Vector Splat Word */                                                     \
  V(vspltw, VSPLTW, 0x1000028C)                                               \
  /* Vector Shift Right */                                                    \
  V(vsr, VSR, 0x100002C4)                                                     \
  /* Vector Shift Right Algebraic Byte */                                     \
  V(vsrab, VSRAB, 0x10000304)                                                 \
  /* Vector Shift Right Algebraic Doubleword */                               \
  V(vsrad, VSRAD, 0x100003C4)                                                 \
  /* Vector Shift Right Algebraic Halfword */                                 \
  V(vsrah, VSRAH, 0x10000344)                                                 \
  /* Vector Shift Right Algebraic Word */                                     \
  V(vsraw, VSRAW, 0x10000384)                                                 \
  /* Vector Shift Right Byte */                                               \
  V(vsrb, VSRB, 0x10000204)                                                   \
  /* Vector Shift Right Doubleword */                                         \
  V(vsrd, VSRD, 0x100006C4)                                                   \
  /* Vector Shift Right Halfword */                                           \
  V(vsrh, VSRH, 0x10000244)                                                   \
  /* Vector Shift Right by Octet */                                           \
  V(vsro, VSRO, 0x1000044C)                                                   \
  /* Vector Shift Right Word */                                               \
  V(vsrw, VSRW, 0x10000284)                                                   \
  /* Vector Subtract & write Carry Unsigned Quadword */                       \
  V(vsubcuq, VSUBCUQ, 0x10000540)                                             \
  /* Vector Subtract and Write Carry-Out Unsigned Word */                     \
  V(vsubcuw, VSUBCUW, 0x10000580)                                             \
  /* Vector Subtract Single-Precision */                                      \
  V(vsubfp, VSUBFP, 0x1000004A)                                               \
  /* Vector Subtract Signed Byte Saturate */                                  \
  V(vsubsbs, VSUBSBS, 0x10000700)                                             \
  /* Vector Subtract Signed Halfword Saturate */                              \
  V(vsubshs, VSUBSHS, 0x10000740)                                             \
  /* Vector Subtract Signed Word Saturate */                                  \
  V(vsubsws, VSUBSWS, 0x10000780)                                             \
  /* Vector Subtract Unsigned Byte Modulo */                                  \
  V(vsububm, VSUBUBM, 0x10000400)                                             \
  /* Vector Subtract Unsigned Byte Saturate */                                \
  V(vsububs, VSUBUBS, 0x10000600)                                             \
  /* Vector Subtract Unsigned Doubleword Modulo */                            \
  V(vsubudm, VSUBUDM, 0x100004C0)                                             \
  /* Vector Subtract Unsigned Halfword Modulo */                              \
  V(vsubuhm, VSUBUHM, 0x10000440)                                             \
  /* Vector Subtract Unsigned Halfword Saturate */                            \
  V(vsubuhs, VSUBUHS, 0x10000640)                                             \
  /* Vector Subtract Unsigned Quadword Modulo */                              \
  V(vsubuqm, VSUBUQM, 0x10000500)                                             \
  /* Vector Subtract Unsigned Word Modulo */                                  \
  V(vsubuwm, VSUBUWM, 0x10000480)                                             \
  /* Vector Subtract Unsigned Word Saturate */                                \
  V(vsubuws, VSUBUWS, 0x10000680)                                             \
  /* Vector Sum across Half Signed Word Saturate */                           \
  V(vsum2sws, VSUM2SWS, 0x10000688)                                           \
  /* Vector Sum across Quarter Signed Byte Saturate */                        \
  V(vsum4sbs, VSUM4SBS, 0x10000708)                                           \
  /* Vector Sum across Quarter Signed Halfword Saturate */                    \
  V(vsum4shs, VSUM4SHS, 0x10000648)                                           \
  /* Vector Sum across Quarter Unsigned Byte Saturate */                      \
  /* NOTE(review): the ISA spells this mnemonic "vsum4ubs" -- confirm */      \
  /* the spelling used here is intentional. */                                \
  V(vsum4bus, VSUM4BUS, 0x10000608)                                           \
  /* Vector Sum across Signed Word Saturate */                                \
  V(vsumsws, VSUMSWS, 0x10000788)                                             \
  /* Vector Unpack High Pixel */                                              \
  V(vupkhpx, VUPKHPX, 0x1000034E)                                             \
  /* Vector Unpack High Signed Byte */                                        \
  V(vupkhsb, VUPKHSB, 0x1000020E)                                             \
  /* Vector Unpack High Signed Halfword */                                    \
  V(vupkhsh, VUPKHSH, 0x1000024E)                                             \
  /* Vector Unpack High Signed Word */                                        \
  V(vupkhsw, VUPKHSW, 0x1000064E)                                             \
  /* Vector Unpack Low Pixel */                                               \
  V(vupklpx, VUPKLPX, 0x100003CE)                                             \
  /* Vector Unpack Low Signed Byte */                                         \
  V(vupklsb, VUPKLSB, 0x1000028E)                                             \
  /* Vector Unpack Low Signed Halfword */                                     \
  V(vupklsh, VUPKLSH, 0x100002CE)                                             \
  /* Vector Unpack Low Signed Word */                                         \
  V(vupklsw, VUPKLSW, 0x100006CE)                                             \
  /* Vector Logical XOR */                                                    \
  V(vxor, VXOR, 0x100004C4)                                                   \
  /* Vector AES Cipher */                                                     \
  V(vcipher, VCIPHER, 0x10000508)                                             \
  /* Vector AES Cipher Last */                                                \
  V(vcipherlast, VCIPHERLAST, 0x10000509)                                     \
  /* Vector AES Inverse Cipher */                                             \
  V(vncipher, VNCIPHER, 0x10000548)                                           \
  /* Vector AES Inverse Cipher Last */                                        \
  V(vncipherlast, VNCIPHERLAST, 0x10000549)                                   \
  /* Vector AES S-Box */                                                      \
  V(vsbox, VSBOX, 0x100005C8)                                                 \
  /* Vector SHA-512 Sigma Doubleword */                                       \
  V(vshasigmad, VSHASIGMAD, 0x100006C2)                                       \
  /* Vector SHA-256 Sigma Word */                                             \
  V(vshasigmaw, VSHASIGMAW, 0x10000682)                                       \
  /* Vector Merge Even Word */                                                \
  V(vmrgew, VMRGEW, 0x1000078C)                                               \
  /* Vector Merge Odd Word */                                                 \
  V(vmrgow, VMRGOW, 0x1000068C)
| 2517 |
// XS-form opcodes: doubleword shift-immediate instructions whose 6-bit
// shift amount is split across the SH field and an extra low-order bit.
#define PPC_XS_OPCODE_LIST(V)                                                 \
  /* Shift Right Algebraic Doubleword Immediate */                            \
  V(sradi, SRADIX, 0x7C000674)
| 2521 |
// MD-form opcodes (primary opcode EXT5, 0x78000000): 64-bit rotate
// instructions with split 6-bit immediate shift and mask fields.
#define PPC_MD_OPCODE_LIST(V)                                                 \
  /* Rotate Left Doubleword Immediate then Clear */                           \
  V(rldic, RLDIC, 0x78000008)                                                 \
  /* Rotate Left Doubleword Immediate then Clear Left */                      \
  V(rldicl, RLDICL, 0x78000000)                                               \
  /* Rotate Left Doubleword Immediate then Clear Right */                     \
  V(rldicr, RLDICR, 0x78000004)                                               \
  /* Rotate Left Doubleword Immediate then Mask Insert */                     \
  V(rldimi, RLDIMI, 0x7800000C)
| 2531 |
// SC-form opcode: system call.
#define PPC_SC_OPCODE_LIST(V)                                                 \
  /* System Call */                                                           \
  V(sc, SC, 0x44000002)
| 2535 |
| 2536 |
// Master opcode list: the concatenation of every per-instruction-format
// list above.  Invoke with a V(name, opcode_name, opcode_value) macro to
// generate code (enumerators, decoder tables, ...) covering all supported
// PPC instructions in one place.
#define PPC_OPCODE_LIST(V)  \
  PPC_X_OPCODE_LIST(V)      \
  PPC_XO_OPCODE_LIST(V)     \
  PPC_DS_OPCODE_LIST(V)     \
  PPC_MDS_OPCODE_LIST(V)    \
  PPC_MD_OPCODE_LIST(V)     \
  PPC_XS_OPCODE_LIST(V)     \
  PPC_D_OPCODE_LIST(V)      \
  PPC_I_OPCODE_LIST(V)      \
  PPC_B_OPCODE_LIST(V)      \
  PPC_XL_OPCODE_LIST(V)     \
  PPC_A_OPCODE_LIST(V)      \
  PPC_XFX_OPCODE_LIST(V)    \
  PPC_M_OPCODE_LIST(V)      \
  PPC_SC_OPCODE_LIST(V)     \
  PPC_Z23_OPCODE_LIST(V)    \
  PPC_Z22_OPCODE_LIST(V)    \
  PPC_EVX_OPCODE_LIST(V)    \
  PPC_XFL_OPCODE_LIST(V)    \
  PPC_EVS_OPCODE_LIST(V)    \
  PPC_VX_OPCODE_LIST(V)     \
  PPC_VA_OPCODE_LIST(V)     \
  PPC_VC_OPCODE_LIST(V)     \
  PPC_XX1_OPCODE_LIST(V)    \
  PPC_XX2_OPCODE_LIST(V)    \
  PPC_XX3_OPCODE_LIST(V)    \
  PPC_XX4_OPCODE_LIST(V)
| 2564 |
| 2565 |
// Flat enumeration of all instruction opcodes, generated from
// PPC_OPCODE_LIST.  Each enumerator value is the full 32-bit instruction
// template: primary opcode in the top 6 bits plus any extended-opcode
// bits, with all operand fields zero.  The EXTn values below are the bare
// primary-opcode patterns used when decoding instructions into their
// extended-opcode groups.
enum Opcode : uint32_t {
#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
  opcode_name = opcode_value,
  PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
  EXT1 = 0x4C000000,  // Extended code set 1
  EXT2 = 0x7C000000,  // Extended code set 2
  EXT3 = 0xEC000000,  // Extended code set 3
  EXT4 = 0xFC000000,  // Extended code set 4
  EXT5 = 0x78000000,  // Extended code set 5 - 64bit only
  EXT6 = 0xF0000000,  // Extended code set 6
};
152 | 2578 |
// Legacy extended-opcode table for primary opcode EXT1 (0x4C000000); the
// extended opcode occupies instruction bits 10-1.
// NOTE(review): enumerator names here (MCRF, BCLRX, ...) collide with the
// generated Opcode enum above -- presumably this enum is slated for
// removal in favor of PPC_XL_OPCODE_LIST; confirm before keeping both.
// Bits 10-1
enum OpcodeExt1 {
  MCRF = 0 << 1,      // Move Condition Register Field
  BCLRX = 16 << 1,    // Branch Conditional Link Register
  CRNOR = 33 << 1,    // Condition Register NOR
  RFI = 50 << 1,      // Return from Interrupt
  CRANDC = 129 << 1,  // Condition Register AND with Complement
  ISYNC = 150 << 1,   // Instruction Synchronize
  CRXOR = 193 << 1,   // Condition Register XOR
  CRNAND = 225 << 1,  // Condition Register NAND
  CRAND = 257 << 1,   // Condition Register AND
  CREQV = 289 << 1,   // Condition Register Equivalent
  CRORC = 417 << 1,   // Condition Register OR with Complement
  CROR = 449 << 1,    // Condition Register OR
  BCCTRX = 528 << 1   // Branch Conditional to Count Register
};
169 | |
// Legacy extended-opcode table for primary opcode EXT2 (0x7C000000).
// Most entries use a 9-bit extended opcode (bits 9-1); entries at or
// above 512 use the full 10-bit field (bits 10-1).
// Bits 9-1 or 10-1
enum OpcodeExt2 {
  CMP = 0 << 1,        // Compare
  TW = 4 << 1,         // Trap Word
  SUBFCX = 8 << 1,     // Subtract From Carrying
  ADDCX = 10 << 1,     // Add Carrying
  MULHWUX = 11 << 1,   // Multiply High Word Unsigned
  ISEL = 15 << 1,      // Integer Select
  MFCR = 19 << 1,      // Move From Condition Register
  LWARX = 20 << 1,     // Load Word And Reserve Indexed
  LDX = 21 << 1,       // Load Doubleword Indexed
  LWZX = 23 << 1,      // load word zero w/ x-form
  SLWX = 24 << 1,      // Shift Left Word
  CNTLZWX = 26 << 1,   // Count Leading Zeros Word
  SLDX = 27 << 1,      // Shift Left Doubleword
  ANDX = 28 << 1,      // AND
  CMPL = 32 << 1,      // Compare Logical
  SUBFX = 40 << 1,     // Subtract From
  MFVSRD = 51 << 1,    // Move From VSR Doubleword
  LDUX = 53 << 1,      // Load Doubleword with Update Indexed
  DCBST = 54 << 1,     // Data Cache Block Store
  LWZUX = 55 << 1,     // load word zero w/ update x-form
  CNTLZDX = 58 << 1,   // Count Leading Zeros Doubleword
  ANDCX = 60 << 1,     // AND with Complement
  MULHWX = 75 << 1,    // Multiply High Word
  DCBF = 86 << 1,      // Data Cache Block Flush
  LBZX = 87 << 1,      // load byte zero w/ x-form
  NEGX = 104 << 1,     // Negate
  MFVSRWZ = 115 << 1,  // Move From VSR Word And Zero
  LBZUX = 119 << 1,    // load byte zero w/ update x-form
  NORX = 124 << 1,     // NOR
  SUBFEX = 136 << 1,   // Subtract From Extended
  ADDEX = 138 << 1,    // Add Extended
  STDX = 149 << 1,     // Store Doubleword Indexed
  STWX = 151 << 1,     // store word w/ x-form
  MTVSRD = 179 << 1,   // Move To VSR Doubleword
  STDUX = 181 << 1,    // Store Doubleword with Update Indexed
  STWUX = 183 << 1,    // store word w/ update x-form
  /*
    MTCRF
    MTMSR
    STWCXx
    SUBFZEX
  */
  ADDZEX = 202 << 1,  // Add to Zero Extended
  /*
    MTSR
  */

  MTVSRWA = 211 << 1,  // Move To VSR Word Algebraic
  STBX = 215 << 1,     // store byte w/ x-form
  MULLD = 233 << 1,    // Multiply Low Double Word
  MULLW = 235 << 1,    // Multiply Low Word
  MTVSRWZ = 243 << 1,  // Move To VSR Word And Zero
  STBUX = 247 << 1,    // store byte w/ update x-form
  MODUD = 265 << 1,    // Modulo Unsigned Dword
  ADDX = 266 << 1,     // Add
  MODUW = 267 << 1,    // Modulo Unsigned Word
  LHZX = 279 << 1,     // load half-word zero w/ x-form
  LHZUX = 311 << 1,    // load half-word zero w/ update x-form
  LWAX = 341 << 1,     // load word algebraic w/ x-form
  LHAX = 343 << 1,     // load half-word algebraic w/ x-form
  LHAUX = 375 << 1,    // load half-word algebraic w/ update x-form
  XORX = 316 << 1,     // Exclusive OR
  MFSPR = 339 << 1,    // Move from Special-Purpose-Register
  POPCNTW = 378 << 1,  // Population Count Words
  STHX = 407 << 1,     // store half-word w/ x-form
  ORC = 412 << 1,      // Or with Complement
  STHUX = 439 << 1,    // store half-word w/ update x-form
  ORX = 444 << 1,      // Or
  DIVDU = 457 << 1,    // Divide Double Word Unsigned
  DIVWU = 459 << 1,    // Divide Word Unsigned
  MTSPR = 467 << 1,    // Move to Special-Purpose-Register
  DIVD = 489 << 1,     // Divide Double Word
  DIVW = 491 << 1,     // Divide Word
  POPCNTD = 506 << 1,  // Population Count Doubleword

  // Below represent bits 10-1 (any value >= 512)
  LDBRX = 532 << 1,   // load double word byte reversed w/ x-form
  LWBRX = 534 << 1,   // load word byte reversed w/ x-form
  LFSX = 535 << 1,    // load float-single w/ x-form
  SRWX = 536 << 1,    // Shift Right Word
  SRDX = 539 << 1,    // Shift Right Double Word
  LFSUX = 567 << 1,   // load float-single w/ update x-form
  SYNC = 598 << 1,    // Synchronize
  LFDX = 599 << 1,    // load float-double w/ x-form
  LFDUX = 631 << 1,   // load float-double w/ update X-form
  STFSX = 663 << 1,   // store float-single w/ x-form
  STFSUX = 695 << 1,  // store float-single w/ update x-form
  STFDX = 727 << 1,   // store float-double w/ x-form
  STFDUX = 759 << 1,  // store float-double w/ update x-form
  MODSD = 777 << 1,   // Modulo Signed Dword
  MODSW = 779 << 1,   // Modulo Signed Word
  LHBRX = 790 << 1,   // load half word byte reversed w/ x-form
  SRAW = 792 << 1,    // Shift Right Algebraic Word
  SRAD = 794 << 1,    // Shift Right Algebraic Double Word
  SRAWIX = 824 << 1,  // Shift Right Algebraic Word Immediate
  // XS-form: 9-bit extended opcode in bits 10-2; bit 1 carries the high
  // bit of the 6-bit shift amount, hence "<< 2" instead of "<< 1".
  SRADIX = 413 << 2,  // Shift Right Algebraic Double Word Immediate
  EXTSH = 922 << 1,   // Extend Sign Halfword
  EXTSB = 954 << 1,   // Extend Sign Byte
  ICBI = 982 << 1,    // Instruction Cache Block Invalidate
  EXTSW = 986 << 1    // Extend Sign Word
};
273 | |
// Legacy extended-opcode table for primary opcode EXT4 (0xFC000000): the
// double-precision floating-point instructions.  A-form arithmetic ops
// encode the extended opcode in bits 5-1; X-form ops use bits 10-1.
// Some use Bits 10-1 and other only 5-1 for the opcode
enum OpcodeExt4 {
  // Bits 5-1
  FDIV = 18 << 1,   // Floating Divide
  FSUB = 20 << 1,   // Floating Subtract
  FADD = 21 << 1,   // Floating Add
  FSQRT = 22 << 1,  // Floating Square Root
  FSEL = 23 << 1,   // Floating Select
  FMUL = 25 << 1,   // Floating Multiply
  FMSUB = 28 << 1,  // Floating Multiply-Subtract
  FMADD = 29 << 1,  // Floating Multiply-Add

  // Bits 10-1
  FCMPU = 0 << 1,     // Floating Compare Unordered
  FRSP = 12 << 1,     // Floating-Point Rounding
  FCTIW = 14 << 1,    // Floating Convert to Integer Word X-form
  FCTIWZ = 15 << 1,   // Floating Convert to Integer Word with Round to Zero
  MTFSB1 = 38 << 1,   // Move to FPSCR Bit 1
  FNEG = 40 << 1,     // Floating Negate
  MCRFS = 64 << 1,    // Move to Condition Register from FPSCR
  MTFSB0 = 70 << 1,   // Move to FPSCR Bit 0
  FMR = 72 << 1,      // Floating Move Register
  MTFSFI = 134 << 1,  // Move to FPSCR Field Immediate
  FABS = 264 << 1,    // Floating Absolute Value
  FRIN = 392 << 1,    // Floating Round to Integer Nearest
  FRIZ = 424 << 1,    // Floating Round to Integer Toward Zero
  FRIP = 456 << 1,    // Floating Round to Integer Plus
  FRIM = 488 << 1,    // Floating Round to Integer Minus
  MFFS = 583 << 1,    // move from FPSCR x-form
  MTFSF = 711 << 1,   // move to FPSCR fields XFL-form
  FCTID = 814 << 1,   // Floating convert to integer doubleword
  FCTIDZ = 815 << 1,  // Floating convert to integer doubleword, round to zero
  FCFID = 846 << 1,   // Floating convert from integer doubleword
  FCTIDU = 942 << 1,  // Floating convert to integer doubleword unsigned
  FCTIDUZ = 943 << 1,  // ... unsigned, with round toward zero
  FCFIDU = 974 << 1  // Floating convert from integer doubleword unsigned
};
311 | |
// Extended opcodes for the 64-bit rotate instructions
// (presumably primary opcode 30 — confirm against the ISA tables).
enum OpcodeExt5 {
  // Bits 4-2
  RLDICL = 0 << 1,  // Rotate Left Double Word Immediate then Clear Left
  RLDICR = 2 << 1,  // Rotate Left Double Word Immediate then Clear Right
  RLDIC = 4 << 1,   // Rotate Left Double Word Immediate then Clear
  RLDIMI = 6 << 1,  // Rotate Left Double Word Immediate then Mask Insert
  // Bits 4-1
  RLDCL = 8 << 1,  // Rotate Left Double Word then Clear Left
  RLDCR = 9 << 1   // Rotate Left Double Word then Clear Right
};
322 | |
// Bits 10-3: XX3-form VSX extended opcodes.
// Each entry is V(mnemonic, enum_name, encoded_value); values are
// pre-shifted into the extended-opcode field (bits 10-3).
#define XX3_OPCODE_LIST(V)                                \
  V(xsaddsp, XSADDSP, 0 << 3)   /* VSX Scalar Add SP */   \
  V(xssubsp, XSSUBSP, 8 << 3)   /* VSX Scalar Subtract SP */   \
  V(xsmulsp, XSMULSP, 16 << 3)  /* VSX Scalar Multiply SP */   \
  V(xsdivsp, XSDIVSP, 24 << 3)  /* VSX Scalar Divide SP */     \
  V(xsadddp, XSADDDP, 32 << 3)  /* VSX Scalar Add DP */        \
  V(xssubdp, XSSUBDP, 40 << 3)  /* VSX Scalar Subtract DP */   \
  V(xsmuldp, XSMULDP, 48 << 3)  /* VSX Scalar Multiply DP */   \
  V(xsdivdp, XSDIVDP, 56 << 3)  /* VSX Scalar Divide DP */     \
  V(xsmaxdp, XSMAXDP, 160 << 3) /* VSX Scalar Maximum DP */    \
  V(xsmindp, XSMINDP, 168 << 3) /* VSX Scalar Minimum DP */
335 | |
// Bits 10-2: XX2-form VSX extended opcodes.
// Each entry is V(mnemonic, enum_name, encoded_value); the first argument is
// the lowercase mnemonic, consistent with XX3_OPCODE_LIST (it was previously
// uppercase here, which broke that convention).
#define XX2_OPCODE_LIST(V)                                         \
  V(xscvdpsp, XSCVDPSP, 265 << 2) /* VSX Scalar Convert DP to SP */ \
  V(xscvspdp, XSCVSPDP, 329 << 2) /* VSX Scalar Convert SP to DP */
340 | |
// VSX extended opcodes, generated from the XX3/XX2 opcode lists above so the
// enum stays in sync with the per-instruction tables.
enum OpcodeExt6 {
#define DECLARE_OPCODES(name, opcode_name, opcode_value) \
  opcode_name = opcode_value,
  XX3_OPCODE_LIST(DECLARE_OPCODES) XX2_OPCODE_LIST(DECLARE_OPCODES)
#undef DECLARE_OPCODES
};
347 | |
348 // Instruction encoding bits and masks. | 2579 // Instruction encoding bits and masks. |
349 enum { | 2580 enum { |
350 // Instruction encoding bit | 2581 // Instruction encoding bit |
351 B1 = 1 << 1, | 2582 B1 = 1 << 1, |
352 B2 = 1 << 2, | 2583 B2 = 1 << 2, |
353 B3 = 1 << 3, | 2584 B3 = 1 << 3, |
354 B4 = 1 << 4, | 2585 B4 = 1 << 4, |
355 B5 = 1 << 5, | 2586 B5 = 1 << 5, |
356 B7 = 1 << 7, | 2587 B7 = 1 << 7, |
357 B8 = 1 << 8, | 2588 B8 = 1 << 8, |
(...skipping 191 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
549 | 2780 |
550 // Read one particular bit out of the instruction bits. | 2781 // Read one particular bit out of the instruction bits. |
551 inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; } | 2782 inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; } |
552 | 2783 |
553 // Read a bit field's value out of the instruction bits. | 2784 // Read a bit field's value out of the instruction bits. |
554 inline int Bits(int hi, int lo) const { | 2785 inline int Bits(int hi, int lo) const { |
555 return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1); | 2786 return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1); |
556 } | 2787 } |
557 | 2788 |
558 // Read a bit field out of the instruction bits. | 2789 // Read a bit field out of the instruction bits. |
559 inline int BitField(int hi, int lo) const { | 2790 inline uint32_t BitField(int hi, int lo) const { |
560 return InstructionBits() & (((2 << (hi - lo)) - 1) << lo); | 2791 return InstructionBits() & (((2 << (hi - lo)) - 1) << lo); |
561 } | 2792 } |
562 | 2793 |
563 // Static support. | 2794 // Static support. |
564 | 2795 |
565 // Read one particular bit out of the instruction bits. | 2796 // Read one particular bit out of the instruction bits. |
566 static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; } | 2797 static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; } |
567 | 2798 |
568 // Read the value of a bit field out of the instruction bits. | 2799 // Read the value of a bit field out of the instruction bits. |
569 static inline int Bits(Instr instr, int hi, int lo) { | 2800 static inline int Bits(Instr instr, int hi, int lo) { |
570 return (instr >> lo) & ((2 << (hi - lo)) - 1); | 2801 return (instr >> lo) & ((2 << (hi - lo)) - 1); |
571 } | 2802 } |
572 | 2803 |
573 | 2804 |
574 // Read a bit field out of the instruction bits. | 2805 // Read a bit field out of the instruction bits. |
575 static inline int BitField(Instr instr, int hi, int lo) { | 2806 static inline uint32_t BitField(Instr instr, int hi, int lo) { |
576 return instr & (((2 << (hi - lo)) - 1) << lo); | 2807 return instr & (((2 << (hi - lo)) - 1) << lo); |
577 } | 2808 } |
578 | 2809 |
  // Register-field accessors. RS and RT occupy the same bits (25-21); which
  // name applies depends on the instruction form.
  inline int RSValue() const { return Bits(25, 21); }
  inline int RTValue() const { return Bits(25, 21); }
  // RA field, bits 20-16.
  inline int RAValue() const { return Bits(20, 16); }
  DECLARE_STATIC_ACCESSOR(RAValue);
  // RB field, bits 15-11.
  inline int RBValue() const { return Bits(15, 11); }
  DECLARE_STATIC_ACCESSOR(RBValue);
  // RC field, bits 10-6.
  inline int RCValue() const { return Bits(10, 6); }
  DECLARE_STATIC_ACCESSOR(RCValue);
587 | 2818 |
588 inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); } | 2819 inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); } |
589 inline Opcode OpcodeField() const { | 2820 inline Opcode OpcodeField() const { |
590 return static_cast<Opcode>(BitField(24, 21)); | 2821 return static_cast<Opcode>(BitField(31, 26)); |
591 } | 2822 } |
592 | 2823 |
  // Fields used in Software interrupt instructions.
  // Returns the low 24-bit immediate (bits 23-0), interpreted as a
  // SoftwareInterruptCodes value.
  inline SoftwareInterruptCodes SvcValue() const {
    return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
  }
597 | 2828 |
598 // Instructions are read of out a code stream. The only way to get a | 2829 // Instructions are read of out a code stream. The only way to get a |
599 // reference to an instruction is to convert a pointer. There is no way | 2830 // reference to an instruction is to convert a pointer. There is no way |
600 // to allocate or create instances of class Instruction. | 2831 // to allocate or create instances of class Instruction. |
(...skipping 25 matching lines...) Expand all Loading... |
626 // Lookup the register number for the name provided. | 2857 // Lookup the register number for the name provided. |
627 static int Number(const char* name); | 2858 static int Number(const char* name); |
628 | 2859 |
629 private: | 2860 private: |
630 static const char* names_[kNumDoubleRegisters]; | 2861 static const char* names_[kNumDoubleRegisters]; |
631 }; | 2862 }; |
632 } // namespace internal | 2863 } // namespace internal |
633 } // namespace v8 | 2864 } // namespace v8 |
634 | 2865 |
635 #endif // V8_PPC_CONSTANTS_PPC_H_ | 2866 #endif // V8_PPC_CONSTANTS_PPC_H_ |
OLD | NEW |