OLD | NEW |
1 #! /usr/bin/env python | 1 #! /usr/bin/env python |
2 # x86 instructions and prefixes data and code generation | 2 # x86 instructions and prefixes data and code generation |
3 # | 3 # |
4 # Copyright (C) 2002-2007 Peter Johnson | 4 # Copyright (C) 2002-2007 Peter Johnson |
5 # | 5 # |
6 # Redistribution and use in source and binary forms, with or without | 6 # Redistribution and use in source and binary forms, with or without |
7 # modification, are permitted provided that the following conditions | 7 # modification, are permitted provided that the following conditions |
8 # are met: | 8 # are met: |
9 # 1. Redistributions of source code must retain the above copyright | 9 # 1. Redistributions of source code must retain the above copyright |
10 # notice, this list of conditions and the following disclaimer. | 10 # notice, this list of conditions and the following disclaimer. |
11 # 2. Redistributions in binary form must reproduce the above copyright | 11 # 2. Redistributions in binary form must reproduce the above copyright |
12 # notice, this list of conditions and the following disclaimer in the | 12 # notice, this list of conditions and the following disclaimer in the |
13 # documentation and/or other materials provided with the distribution. | 13 # documentation and/or other materials provided with the distribution. |
14 # | 14 # |
15 # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND OTHER CONTRIBUTORS ``AS IS'' | 15 # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND OTHER CONTRIBUTORS ``AS IS'' |
16 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 16 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
17 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | 17 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
18 # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR OTHER CONTRIBUTORS BE | 18 # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR OTHER CONTRIBUTORS BE |
19 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 19 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
20 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 20 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
21 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 21 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
22 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 22 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
23 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 23 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
24 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 24 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
25 # POSSIBILITY OF SUCH DAMAGE. | 25 # POSSIBILITY OF SUCH DAMAGE. |
26 # | 26 # |
27 # NOTE: operands are arranged in NASM / Intel order (e.g. dest, src) | 27 # NOTE: operands are arranged in NASM / Intel order (e.g. dest, src) |
28 | 28 |
29 from sys import stdout, version_info | |
30 | |
31 rcstag = "$Id: gen_x86_insn.py 2346 2010-08-01 01:37:37Z peter $" | |
32 | |
33 import os | 29 import os |
34 import sys | 30 import sys |
35 | 31 |
36 try: | 32 from sys import stdout, version_info |
37 scriptname = rcstag.split()[1] | 33 |
38 scriptrev = rcstag.split()[2] | 34 scriptname = "gen_x86_insn.py" |
39 except IndexError: | 35 scriptrev = "HEAD" |
40 scriptname = "gen_x86_insn.py" | |
41 scriptrev = "HEAD" | |
42 | 36 |
43 ordered_cpus = [ | 37 ordered_cpus = [ |
44 "086", "186", "286", "386", "486", "586", "686", "K6", "Athlon", "P3", | 38 "086", "186", "286", "386", "486", "586", "686", "K6", "Athlon", "P3", |
45 "P4", "IA64", "Hammer"] | 39 "P4", "IA64", "Hammer"] |
46 ordered_cpu_features = [ | 40 ordered_cpu_features = [ |
47 "FPU", "Cyrix", "AMD", "MMX", "3DNow", "SMM", "SSE", "SSE2", | 41 "FPU", "Cyrix", "AMD", "MMX", "3DNow", "SMM", "SSE", "SSE2", |
48 "SSE3", "SVM", "PadLock", "SSSE3", "SSE41", "SSE42", "SSE4a", "SSE5", | 42 "SSE3", "SVM", "PadLock", "SSSE3", "SSE41", "SSE42", "SSE4a", "SSE5", |
49 "AVX", "FMA", "AES", "CLMUL", "MOVBE", "XOP", "FMA4", "F16C", | 43 "AVX", "FMA", "AES", "CLMUL", "MOVBE", "XOP", "FMA4", "F16C", |
50 "FSGSBASE", "RDRAND", "XSAVEOPT", "EPTVPID", "SMX"] | 44 "FSGSBASE", "RDRAND", "XSAVEOPT", "EPTVPID", "SMX", "AVX2", "BMI1", |
| 45 "BMI2", "INVPCID", "LZCNT"] |
51 unordered_cpu_features = ["Priv", "Prot", "Undoc", "Obs"] | 46 unordered_cpu_features = ["Priv", "Prot", "Undoc", "Obs"] |
52 | 47 |
53 # Predefined VEX prefix field values | 48 # Predefined VEX prefix field values |
54 VEXW0 = 0xC0 | 49 VEXW0 = 0xC0 |
55 VEXW1 = 0xC8 | 50 VEXW1 = 0xC8 |
56 VEXL0 = 0xC0 | 51 VEXL0 = 0xC0 |
57 VEXL1 = 0xC4 | 52 VEXL1 = 0xC4 |
58 VEXpp = 0xC0 # OR with value | 53 VEXpp = 0xC0 # OR with value |
59 | 54 |
60 # Predefined XOP prefix field values | 55 # Predefined XOP prefix field values |
(...skipping 496 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
557 if form.gen_suffix and form.suffixes is not None: | 552 if form.gen_suffix and form.suffixes is not None: |
558 suffixes |= form.suffixes | 553 suffixes |= form.suffixes |
559 | 554 |
560 if not suffixes: | 555 if not suffixes: |
561 suffixes.add("Z") | 556 suffixes.add("Z") |
562 for suffix in suffixes: | 557 for suffix in suffixes: |
563 if suffix == "Z": | 558 if suffix == "Z": |
564 keyword = name | 559 keyword = name |
565 else: | 560 else: |
566 keyword = name+suffix | 561 keyword = name+suffix |
| 562 keyword = keyword.lower() |
567 if keyword in gas_insns: | 563 if keyword in gas_insns: |
568 raise ValueError("duplicate gas instruction %s" % | 564 raise ValueError("duplicate gas instruction %s" % |
569 keyword) | 565 keyword) |
570 newinsn = insn.copy() | 566 newinsn = insn.copy() |
571 newinsn.suffix = suffix | 567 if insn.suffix is None: |
| 568 newinsn.suffix = suffix |
572 newinsn.auto_cpu("gas") | 569 newinsn.auto_cpu("gas") |
573 newinsn.auto_misc_flags("gas") | 570 newinsn.auto_misc_flags("gas") |
574 gas_insns[keyword] = newinsn | 571 gas_insns[keyword] = newinsn |
575 | 572 |
576 if "nasm" in parsers: | 573 if "nasm" in parsers: |
577 keyword = name | 574 keyword = name |
578 if keyword in nasm_insns: | 575 if keyword in nasm_insns: |
579 raise ValueError("duplicate nasm instruction %s" % keyword) | 576 raise ValueError("duplicate nasm instruction %s" % keyword) |
580 newinsn = insn.copy() | 577 newinsn = insn.copy() |
581 newinsn.auto_cpu("nasm") | 578 newinsn.auto_cpu("nasm") |
(...skipping 546 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1128 operands=[Operand(type="Reg", size=64, dest="Spare"), | 1125 operands=[Operand(type="Reg", size=64, dest="Spare"), |
1129 Operand(type="RM", size=32, dest="EA")]) | 1126 Operand(type="RM", size=32, dest="EA")]) |
1130 | 1127 |
1131 add_insn("movslq", "movsxd", suffix="l") | 1128 add_insn("movslq", "movsxd", suffix="l") |
1132 add_insn("movsxd", "movsxd", parser="nasm") | 1129 add_insn("movsxd", "movsxd", parser="nasm") |
1133 | 1130 |
1134 # | 1131 # |
1135 # Push instructions | 1132 # Push instructions |
1136 # | 1133 # |
1137 add_group("push", | 1134 add_group("push", |
| 1135 def_opersize_64=64, |
| 1136 opcode=[0x50], |
| 1137 operands=[Operand(type="Reg", size="BITS", dest="Op0Add")]) |
| 1138 add_group("push", |
1138 suffix="w", | 1139 suffix="w", |
1139 opersize=16, | 1140 opersize=16, |
1140 def_opersize_64=64, | 1141 def_opersize_64=64, |
1141 opcode=[0x50], | 1142 opcode=[0x50], |
1142 operands=[Operand(type="Reg", size=16, dest="Op0Add")]) | 1143 operands=[Operand(type="Reg", size=16, dest="Op0Add")]) |
1143 add_group("push", | 1144 add_group("push", |
1144 suffix="l", | 1145 suffix="l", |
1145 not64=True, | 1146 not64=True, |
1146 opersize=32, | 1147 opersize=32, |
1147 opcode=[0x50], | 1148 opcode=[0x50], |
1148 operands=[Operand(type="Reg", size=32, dest="Op0Add")]) | 1149 operands=[Operand(type="Reg", size=32, dest="Op0Add")]) |
1149 add_group("push", | 1150 add_group("push", |
1150 suffix="q", | 1151 suffix="q", |
| 1152 only64=True, |
1151 def_opersize_64=64, | 1153 def_opersize_64=64, |
1152 opcode=[0x50], | 1154 opcode=[0x50], |
1153 operands=[Operand(type="Reg", size=64, dest="Op0Add")]) | 1155 operands=[Operand(type="Reg", size=64, dest="Op0Add")]) |
| 1156 |
| 1157 add_group("push", |
| 1158 def_opersize_64=64, |
| 1159 opcode=[0xFF], |
| 1160 spare=6, |
| 1161 operands=[Operand(type="RM", size="BITS", dest="EA")]) |
1154 add_group("push", | 1162 add_group("push", |
1155 suffix="w", | 1163 suffix="w", |
1156 opersize=16, | 1164 opersize=16, |
1157 def_opersize_64=64, | 1165 def_opersize_64=64, |
1158 opcode=[0xFF], | 1166 opcode=[0xFF], |
1159 spare=6, | 1167 spare=6, |
1160 operands=[Operand(type="RM", size=16, dest="EA")]) | 1168 operands=[Operand(type="RM", size=16, dest="EA")]) |
1161 add_group("push", | 1169 add_group("push", |
1162 suffix="l", | 1170 suffix="l", |
1163 not64=True, | 1171 not64=True, |
1164 opersize=32, | 1172 opersize=32, |
1165 opcode=[0xFF], | 1173 opcode=[0xFF], |
1166 spare=6, | 1174 spare=6, |
1167 operands=[Operand(type="RM", size=32, dest="EA")]) | 1175 operands=[Operand(type="RM", size=32, dest="EA")]) |
1168 add_group("push", | 1176 add_group("push", |
1169 suffix="q", | 1177 suffix="q", |
| 1178 only64=True, |
1170 def_opersize_64=64, | 1179 def_opersize_64=64, |
1171 opcode=[0xFF], | 1180 opcode=[0xFF], |
1172 spare=6, | 1181 spare=6, |
1173 operands=[Operand(type="RM", size=64, dest="EA")]) | 1182 operands=[Operand(type="RM", size=64, dest="EA")]) |
| 1183 |
1174 add_group("push", | 1184 add_group("push", |
1175 cpu=["186"], | 1185 cpu=["186"], |
1176 parsers=["nasm"], | 1186 parsers=["nasm"], |
1177 def_opersize_64=64, | 1187 def_opersize_64=64, |
1178 opcode=[0x6A], | 1188 opcode=[0x6A], |
1179 operands=[Operand(type="Imm", size=8, dest="SImm")]) | 1189 operands=[Operand(type="Imm", size=8, dest="SImm")]) |
1180 add_group("push", | 1190 add_group("push", |
1181 cpu=["186"], | 1191 cpu=["186"], |
1182 parsers=["gas"], | 1192 parsers=["gas"], |
1183 def_opersize_64=64, | 1193 def_opersize_64=64, |
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1335 add_insn("pushad", "onebyte", parser="nasm", modifiers=[0x60, 32], | 1345 add_insn("pushad", "onebyte", parser="nasm", modifiers=[0x60, 32], |
1336 cpu=["386"], not64=True) | 1346 cpu=["386"], not64=True) |
1337 add_insn("pushal", "onebyte", parser="gas", modifiers=[0x60, 32], | 1347 add_insn("pushal", "onebyte", parser="gas", modifiers=[0x60, 32], |
1338 cpu=["386"], not64=True) | 1348 cpu=["386"], not64=True) |
1339 add_insn("pushaw", "onebyte", modifiers=[0x60, 16], cpu=["186"], not64=True) | 1349 add_insn("pushaw", "onebyte", modifiers=[0x60, 16], cpu=["186"], not64=True) |
1340 | 1350 |
1341 # | 1351 # |
1342 # Pop instructions | 1352 # Pop instructions |
1343 # | 1353 # |
1344 add_group("pop", | 1354 add_group("pop", |
| 1355 def_opersize_64=64, |
| 1356 opcode=[0x58], |
| 1357 operands=[Operand(type="Reg", size="BITS", dest="Op0Add")]) |
| 1358 add_group("pop", |
1345 suffix="w", | 1359 suffix="w", |
1346 opersize=16, | 1360 opersize=16, |
1347 def_opersize_64=64, | 1361 def_opersize_64=64, |
1348 opcode=[0x58], | 1362 opcode=[0x58], |
1349 operands=[Operand(type="Reg", size=16, dest="Op0Add")]) | 1363 operands=[Operand(type="Reg", size=16, dest="Op0Add")]) |
1350 add_group("pop", | 1364 add_group("pop", |
1351 suffix="l", | 1365 suffix="l", |
1352 not64=True, | 1366 not64=True, |
1353 opersize=32, | 1367 opersize=32, |
1354 opcode=[0x58], | 1368 opcode=[0x58], |
1355 operands=[Operand(type="Reg", size=32, dest="Op0Add")]) | 1369 operands=[Operand(type="Reg", size=32, dest="Op0Add")]) |
1356 add_group("pop", | 1370 add_group("pop", |
1357 suffix="q", | 1371 suffix="q", |
| 1372 only64=True, |
1358 def_opersize_64=64, | 1373 def_opersize_64=64, |
1359 opcode=[0x58], | 1374 opcode=[0x58], |
1360 operands=[Operand(type="Reg", size=64, dest="Op0Add")]) | 1375 operands=[Operand(type="Reg", size=64, dest="Op0Add")]) |
| 1376 |
| 1377 add_group("pop", |
| 1378 def_opersize_64=64, |
| 1379 opcode=[0x8F], |
| 1380 operands=[Operand(type="RM", size="BITS", dest="EA")]) |
1361 add_group("pop", | 1381 add_group("pop", |
1362 suffix="w", | 1382 suffix="w", |
1363 opersize=16, | 1383 opersize=16, |
1364 def_opersize_64=64, | 1384 def_opersize_64=64, |
1365 opcode=[0x8F], | 1385 opcode=[0x8F], |
1366 operands=[Operand(type="RM", size=16, dest="EA")]) | 1386 operands=[Operand(type="RM", size=16, dest="EA")]) |
1367 add_group("pop", | 1387 add_group("pop", |
1368 suffix="l", | 1388 suffix="l", |
1369 not64=True, | 1389 not64=True, |
1370 opersize=32, | 1390 opersize=32, |
1371 opcode=[0x8F], | 1391 opcode=[0x8F], |
1372 operands=[Operand(type="RM", size=32, dest="EA")]) | 1392 operands=[Operand(type="RM", size=32, dest="EA")]) |
1373 add_group("pop", | 1393 add_group("pop", |
1374 suffix="q", | 1394 suffix="q", |
| 1395 only64=True, |
1375 def_opersize_64=64, | 1396 def_opersize_64=64, |
1376 opcode=[0x8F], | 1397 opcode=[0x8F], |
1377 operands=[Operand(type="RM", size=64, dest="EA")]) | 1398 operands=[Operand(type="RM", size=64, dest="EA")]) |
| 1399 |
1378 # POP CS is debatably valid on the 8086, if obsolete and undocumented. | 1400 # POP CS is debatably valid on the 8086, if obsolete and undocumented. |
1379 # We don't include it because it's VERY unlikely it will ever be used | 1401 # We don't include it because it's VERY unlikely it will ever be used |
1380 # anywhere. If someone really wants it they can db 0x0F it. | 1402 # anywhere. If someone really wants it they can db 0x0F it. |
1381 #add_group("pop", | 1403 #add_group("pop", |
1382 # cpu=["Undoc", "Obs"], | 1404 # cpu=["Undoc", "Obs"], |
1383 # opcode=[0x0F], | 1405 # opcode=[0x0F], |
1384 # operands=[Operand(type="CS", dest=None)]) | 1406 # operands=[Operand(type="CS", dest=None)]) |
1385 add_group("pop", | 1407 add_group("pop", |
1386 not64=True, | 1408 not64=True, |
1387 opcode=[0x17], | 1409 opcode=[0x17], |
(...skipping 336 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1724 not64=True, | 1746 not64=True, |
1725 modifiers=["Op0Add"], | 1747 modifiers=["Op0Add"], |
1726 opersize=sz, | 1748 opersize=sz, |
1727 opcode=[0x00], | 1749 opcode=[0x00], |
1728 operands=[Operand(type="Reg", size=sz, dest="Spare"), | 1750 operands=[Operand(type="Reg", size=sz, dest="Spare"), |
1729 Operand(type="Mem", relaxed=True, dest="EA")]) | 1751 Operand(type="Mem", relaxed=True, dest="EA")]) |
1730 | 1752 |
1731 add_insn("lds", "ldes", modifiers=[0xC5]) | 1753 add_insn("lds", "ldes", modifiers=[0xC5]) |
1732 add_insn("les", "ldes", modifiers=[0xC4]) | 1754 add_insn("les", "ldes", modifiers=[0xC4]) |
1733 | 1755 |
1734 for sfx, sz in zip("wl", [16, 32]): | 1756 for sfx, sz in zip("wlq", [16, 32, 64]): |
1735 add_group("lfgss", | 1757 add_group("lfgss", |
1736 suffix=sfx, | 1758 suffix=sfx, |
1737 cpu=["386"], | 1759 cpu=["386"], |
1738 modifiers=["Op1Add"], | 1760 modifiers=["Op1Add"], |
1739 opersize=sz, | 1761 opersize=sz, |
1740 opcode=[0x0F, 0x00], | 1762 opcode=[0x0F, 0x00], |
1741 operands=[Operand(type="Reg", size=sz, dest="Spare"), | 1763 operands=[Operand(type="Reg", size=sz, dest="Spare"), |
1742 Operand(type="Mem", relaxed=True, dest="EA")]) | 1764 Operand(type="Mem", relaxed=True, dest="EA")]) |
1743 | 1765 |
1744 add_insn("lfs", "lfgss", modifiers=[0xB4]) | 1766 add_insn("lfs", "lfgss", modifiers=[0xB4]) |
(...skipping 1331 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3076 operands=[Operand(type="Reg", size=sz, dest="Spare"), | 3098 operands=[Operand(type="Reg", size=sz, dest="Spare"), |
3077 Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) | 3099 Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) |
3078 | 3100 |
3079 add_insn("bound", "bound") | 3101 add_insn("bound", "bound") |
3080 add_insn("hlt", "onebyte", modifiers=[0xF4], cpu=["Priv"]) | 3102 add_insn("hlt", "onebyte", modifiers=[0xF4], cpu=["Priv"]) |
3081 add_insn("nop", "onebyte", modifiers=[0x90]) | 3103 add_insn("nop", "onebyte", modifiers=[0x90]) |
3082 | 3104 |
3083 # | 3105 # |
3084 # Protection control | 3106 # Protection control |
3085 # | 3107 # |
3086 add_insn("lar", "bsfr", modifiers=[0x02], cpu=["286", "Prot"]) | 3108 for sfx, sz, sz2 in zip("wlq", [16, 32, 64], [16, 32, 32]): |
3087 add_insn("lsl", "bsfr", modifiers=[0x03], cpu=["286", "Prot"]) | 3109 add_group("larlsl", |
| 3110 suffix=sfx, |
| 3111 modifiers=["Op1Add"], |
| 3112 opersize=sz, |
| 3113 opcode=[0x0F, 0x00], |
| 3114 operands=[Operand(type="Reg", size=sz, dest="Spare"), |
| 3115 Operand(type="Reg", size=sz2, dest="EA")]) |
| 3116 add_group("larlsl", |
| 3117 suffix=sfx, |
| 3118 modifiers=["Op1Add"], |
| 3119 opersize=sz, |
| 3120 opcode=[0x0F, 0x00], |
| 3121 operands=[Operand(type="Reg", size=sz, dest="Spare"), |
| 3122 Operand(type="RM", size=16, relaxed=True, dest="EA")]) |
| 3123 |
| 3124 add_insn("lar", "larlsl", modifiers=[0x02], cpu=["286", "Prot"]) |
| 3125 add_insn("lsl", "larlsl", modifiers=[0x03], cpu=["286", "Prot"]) |
3088 | 3126 |
3089 add_group("arpl", | 3127 add_group("arpl", |
3090 suffix="w", | 3128 suffix="w", |
3091 cpu=["Prot", "286"], | 3129 cpu=["Prot", "286"], |
3092 not64=True, | 3130 not64=True, |
3093 opcode=[0x63], | 3131 opcode=[0x63], |
3094 operands=[Operand(type="RM", size=16, relaxed=True, dest="EA"), | 3132 operands=[Operand(type="RM", size=16, relaxed=True, dest="EA"), |
3095 Operand(type="Reg", size=16, dest="Spare")]) | 3133 Operand(type="Reg", size=16, dest="Spare")]) |
3096 | 3134 |
3097 add_insn("arpl", "arpl") | 3135 add_insn("arpl", "arpl") |
(...skipping 852 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3950 add_insn("psubusw", "mmxsse2", modifiers=[0xD9]) | 3988 add_insn("psubusw", "mmxsse2", modifiers=[0xD9]) |
3951 add_insn("punpckhbw", "mmxsse2", modifiers=[0x68]) | 3989 add_insn("punpckhbw", "mmxsse2", modifiers=[0x68]) |
3952 add_insn("punpckhwd", "mmxsse2", modifiers=[0x69]) | 3990 add_insn("punpckhwd", "mmxsse2", modifiers=[0x69]) |
3953 add_insn("punpckhdq", "mmxsse2", modifiers=[0x6A]) | 3991 add_insn("punpckhdq", "mmxsse2", modifiers=[0x6A]) |
3954 add_insn("punpcklbw", "mmxsse2", modifiers=[0x60]) | 3992 add_insn("punpcklbw", "mmxsse2", modifiers=[0x60]) |
3955 add_insn("punpcklwd", "mmxsse2", modifiers=[0x61]) | 3993 add_insn("punpcklwd", "mmxsse2", modifiers=[0x61]) |
3956 add_insn("punpckldq", "mmxsse2", modifiers=[0x62]) | 3994 add_insn("punpckldq", "mmxsse2", modifiers=[0x62]) |
3957 add_insn("pxor", "mmxsse2", modifiers=[0xEF]) | 3995 add_insn("pxor", "mmxsse2", modifiers=[0xEF]) |
3958 | 3996 |
3959 # AVX versions don't support the MMX registers | 3997 # AVX versions don't support the MMX registers |
3960 add_insn("vpackssdw", "xmm_xmm128", modifiers=[0x66, 0x6B, VEXL0], avx=True) | 3998 add_insn("vpackssdw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x6B, VEXL0], avx=
True) |
3961 add_insn("vpacksswb", "xmm_xmm128", modifiers=[0x66, 0x63, VEXL0], avx=True) | 3999 add_insn("vpacksswb", "xmm_xmm128_256avx2", modifiers=[0x66, 0x63, VEXL0], avx=
True) |
3962 add_insn("vpackuswb", "xmm_xmm128", modifiers=[0x66, 0x67, VEXL0], avx=True) | 4000 add_insn("vpackuswb", "xmm_xmm128_256avx2", modifiers=[0x66, 0x67, VEXL0], avx=
True) |
3963 add_insn("vpaddb", "xmm_xmm128", modifiers=[0x66, 0xFC, VEXL0], avx=True) | 4001 add_insn("vpaddb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFC, VEXL0], avx=
True) |
3964 add_insn("vpaddw", "xmm_xmm128", modifiers=[0x66, 0xFD, VEXL0], avx=True) | 4002 add_insn("vpaddw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFD, VEXL0], avx=
True) |
3965 add_insn("vpaddd", "xmm_xmm128", modifiers=[0x66, 0xFE, VEXL0], avx=True) | 4003 add_insn("vpaddd", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFE, VEXL0], avx=
True) |
3966 add_insn("vpaddq", "xmm_xmm128", modifiers=[0x66, 0xD4, VEXL0], avx=True) | 4004 add_insn("vpaddq", "xmm_xmm128_256avx2", modifiers=[0x66, 0xD4, VEXL0], avx=
True) |
3967 add_insn("vpaddsb", "xmm_xmm128", modifiers=[0x66, 0xEC, VEXL0], avx=True) | 4005 add_insn("vpaddsb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEC, VEXL0], avx=
True) |
3968 add_insn("vpaddsw", "xmm_xmm128", modifiers=[0x66, 0xED, VEXL0], avx=True) | 4006 add_insn("vpaddsw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xED, VEXL0], avx=
True) |
3969 add_insn("vpaddusb", "xmm_xmm128", modifiers=[0x66, 0xDC, VEXL0], avx=True) | 4007 add_insn("vpaddusb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDC, VEXL0], avx=
True) |
3970 add_insn("vpaddusw", "xmm_xmm128", modifiers=[0x66, 0xDD, VEXL0], avx=True) | 4008 add_insn("vpaddusw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDD, VEXL0], avx=
True) |
3971 add_insn("vpand", "xmm_xmm128", modifiers=[0x66, 0xDB, VEXL0], avx=True) | 4009 add_insn("vpand", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDB, VEXL0], avx=
True) |
3972 add_insn("vpandn", "xmm_xmm128", modifiers=[0x66, 0xDF, VEXL0], avx=True) | 4010 add_insn("vpandn", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDF, VEXL0], avx=
True) |
3973 add_insn("vpcmpeqb", "xmm_xmm128", modifiers=[0x66, 0x74, VEXL0], avx=True) | 4011 add_insn("vpcmpeqb", "xmm_xmm128_256avx2", modifiers=[0x66, 0x74, VEXL0], avx=
True) |
3974 add_insn("vpcmpeqw", "xmm_xmm128", modifiers=[0x66, 0x75, VEXL0], avx=True) | 4012 add_insn("vpcmpeqw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x75, VEXL0], avx=
True) |
3975 add_insn("vpcmpeqd", "xmm_xmm128", modifiers=[0x66, 0x76, VEXL0], avx=True) | 4013 add_insn("vpcmpeqd", "xmm_xmm128_256avx2", modifiers=[0x66, 0x76, VEXL0], avx=
True) |
3976 add_insn("vpcmpgtb", "xmm_xmm128", modifiers=[0x66, 0x64, VEXL0], avx=True) | 4014 add_insn("vpcmpgtb", "xmm_xmm128_256avx2", modifiers=[0x66, 0x64, VEXL0], avx=
True) |
3977 add_insn("vpcmpgtw", "xmm_xmm128", modifiers=[0x66, 0x65, VEXL0], avx=True) | 4015 add_insn("vpcmpgtw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x65, VEXL0], avx=
True) |
3978 add_insn("vpcmpgtd", "xmm_xmm128", modifiers=[0x66, 0x66, VEXL0], avx=True) | 4016 add_insn("vpcmpgtd", "xmm_xmm128_256avx2", modifiers=[0x66, 0x66, VEXL0], avx=
True) |
3979 add_insn("vpmaddwd", "xmm_xmm128", modifiers=[0x66, 0xF5, VEXL0], avx=True) | 4017 add_insn("vpmaddwd", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF5, VEXL0], avx=
True) |
3980 add_insn("vpmulhw", "xmm_xmm128", modifiers=[0x66, 0xE5, VEXL0], avx=True) | 4018 add_insn("vpmulhw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE5, VEXL0], avx=
True) |
3981 add_insn("vpmullw", "xmm_xmm128", modifiers=[0x66, 0xD5, VEXL0], avx=True) | 4019 add_insn("vpmullw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xD5, VEXL0], avx=
True) |
3982 add_insn("vpor", "xmm_xmm128", modifiers=[0x66, 0xEB, VEXL0], avx=True) | 4020 add_insn("vpor", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEB, VEXL0], avx=
True) |
3983 add_insn("vpsubb", "xmm_xmm128", modifiers=[0x66, 0xF8, VEXL0], avx=True) | 4021 add_insn("vpsubb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF8, VEXL0], avx=
True) |
3984 add_insn("vpsubw", "xmm_xmm128", modifiers=[0x66, 0xF9, VEXL0], avx=True) | 4022 add_insn("vpsubw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF9, VEXL0], avx=
True) |
3985 add_insn("vpsubd", "xmm_xmm128", modifiers=[0x66, 0xFA, VEXL0], avx=True) | 4023 add_insn("vpsubd", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFA, VEXL0], avx=
True) |
3986 add_insn("vpsubq", "xmm_xmm128", modifiers=[0x66, 0xFB, VEXL0], avx=True) | 4024 add_insn("vpsubq", "xmm_xmm128_256avx2", modifiers=[0x66, 0xFB, VEXL0], avx=
True) |
3987 add_insn("vpsubsb", "xmm_xmm128", modifiers=[0x66, 0xE8, VEXL0], avx=True) | 4025 add_insn("vpsubsb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE8, VEXL0], avx=
True) |
3988 add_insn("vpsubsw", "xmm_xmm128", modifiers=[0x66, 0xE9, VEXL0], avx=True) | 4026 add_insn("vpsubsw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE9, VEXL0], avx=
True) |
3989 add_insn("vpsubusb", "xmm_xmm128", modifiers=[0x66, 0xD8, VEXL0], avx=True) | 4027 add_insn("vpsubusb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xD8, VEXL0], avx=
True) |
3990 add_insn("vpsubusw", "xmm_xmm128", modifiers=[0x66, 0xD9, VEXL0], avx=True) | 4028 add_insn("vpsubusw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xD9, VEXL0], avx=
True) |
3991 add_insn("vpunpckhbw", "xmm_xmm128", modifiers=[0x66, 0x68, VEXL0], avx=True) | 4029 add_insn("vpunpckhbw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x68, VEXL0], avx=
True) |
3992 add_insn("vpunpckhwd", "xmm_xmm128", modifiers=[0x66, 0x69, VEXL0], avx=True) | 4030 add_insn("vpunpckhwd", "xmm_xmm128_256avx2", modifiers=[0x66, 0x69, VEXL0], avx=
True) |
3993 add_insn("vpunpckhdq", "xmm_xmm128", modifiers=[0x66, 0x6A, VEXL0], avx=True) | 4031 add_insn("vpunpckhdq", "xmm_xmm128_256avx2", modifiers=[0x66, 0x6A, VEXL0], avx=
True) |
3994 add_insn("vpunpcklbw", "xmm_xmm128", modifiers=[0x66, 0x60, VEXL0], avx=True) | 4032 add_insn("vpunpcklbw", "xmm_xmm128_256avx2", modifiers=[0x66, 0x60, VEXL0], avx=
True) |
3995 add_insn("vpunpcklwd", "xmm_xmm128", modifiers=[0x66, 0x61, VEXL0], avx=True) | 4033 add_insn("vpunpcklwd", "xmm_xmm128_256avx2", modifiers=[0x66, 0x61, VEXL0], avx=
True) |
3996 add_insn("vpunpckldq", "xmm_xmm128", modifiers=[0x66, 0x62, VEXL0], avx=True) | 4034 add_insn("vpunpckldq", "xmm_xmm128_256avx2", modifiers=[0x66, 0x62, VEXL0], avx=
True) |
3997 add_insn("vpxor", "xmm_xmm128", modifiers=[0x66, 0xEF, VEXL0], avx=True) | 4035 add_insn("vpxor", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEF, VEXL0], avx=
True) |
3998 | 4036 |
3999 add_group("pshift", | 4037 add_group("pshift", |
4000 cpu=["MMX"], | 4038 cpu=["MMX"], |
4001 modifiers=["Op1Add"], | 4039 modifiers=["Op1Add"], |
4002 opcode=[0x0F, 0x00], | 4040 opcode=[0x0F, 0x00], |
4003 operands=[Operand(type="SIMDReg", size=64, dest="Spare"), | 4041 operands=[Operand(type="SIMDReg", size=64, dest="Spare"), |
4004 Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) | 4042 Operand(type="SIMDRM", size=64, relaxed=True, dest="EA")]) |
4005 add_group("pshift", | 4043 add_group("pshift", |
4006 cpu=["MMX"], | 4044 cpu=["MMX"], |
4007 modifiers=["Gap", "Op1Add", "SpAdd"], | 4045 modifiers=["Gap", "Op1Add", "SpAdd"], |
(...skipping 20 matching lines...) Expand all Loading... |
4028 add_insn("psllw", "pshift", modifiers=[0xF1, 0x71, 6]) | 4066 add_insn("psllw", "pshift", modifiers=[0xF1, 0x71, 6]) |
4029 add_insn("pslld", "pshift", modifiers=[0xF2, 0x72, 6]) | 4067 add_insn("pslld", "pshift", modifiers=[0xF2, 0x72, 6]) |
4030 add_insn("psllq", "pshift", modifiers=[0xF3, 0x73, 6]) | 4068 add_insn("psllq", "pshift", modifiers=[0xF3, 0x73, 6]) |
4031 add_insn("psraw", "pshift", modifiers=[0xE1, 0x71, 4]) | 4069 add_insn("psraw", "pshift", modifiers=[0xE1, 0x71, 4]) |
4032 add_insn("psrad", "pshift", modifiers=[0xE2, 0x72, 4]) | 4070 add_insn("psrad", "pshift", modifiers=[0xE2, 0x72, 4]) |
4033 add_insn("psrlw", "pshift", modifiers=[0xD1, 0x71, 2]) | 4071 add_insn("psrlw", "pshift", modifiers=[0xD1, 0x71, 2]) |
4034 add_insn("psrld", "pshift", modifiers=[0xD2, 0x72, 2]) | 4072 add_insn("psrld", "pshift", modifiers=[0xD2, 0x72, 2]) |
4035 add_insn("psrlq", "pshift", modifiers=[0xD3, 0x73, 2]) | 4073 add_insn("psrlq", "pshift", modifiers=[0xD3, 0x73, 2]) |
4036 | 4074 |
4037 # Ran out of modifiers, so AVX has to be separate | 4075 # Ran out of modifiers, so AVX has to be separate |
4038 add_group("vpshift", | 4076 for cpu, sz in zip(["AVX", "AVX2"], [128, 256]): |
4039 cpu=["AVX"], | 4077 add_group("vpshift", |
4040 modifiers=["Op1Add"], | 4078 cpu=[cpu], |
4041 vex=128, | 4079 modifiers=["Op1Add"], |
4042 prefix=0x66, | 4080 vex=sz, |
4043 opcode=[0x0F, 0x00], | 4081 prefix=0x66, |
4044 operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), | 4082 opcode=[0x0F, 0x00], |
4045 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 4083 operands=[Operand(type="SIMDReg", size=sz, dest="SpareVEX"), |
4046 add_group("vpshift", | 4084 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
4047 cpu=["AVX"], | 4085 add_group("vpshift", |
4048 modifiers=["Gap", "Op1Add", "SpAdd"], | 4086 cpu=[cpu], |
4049 vex=128, | 4087 modifiers=["Gap", "Op1Add", "SpAdd"], |
4050 prefix=0x66, | 4088 vex=sz, |
4051 opcode=[0x0F, 0x00], | 4089 prefix=0x66, |
4052 spare=0, | 4090 opcode=[0x0F, 0x00], |
4053 operands=[Operand(type="SIMDReg", size=128, dest="EAVEX"), | 4091 spare=0, |
4054 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 4092 operands=[Operand(type="SIMDReg", size=sz, dest="EAVEX"), |
4055 add_group("vpshift", | 4093 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
4056 cpu=["AVX"], | 4094 add_group("vpshift", |
4057 modifiers=["Op1Add"], | 4095 cpu=[cpu], |
4058 vex=128, | 4096 modifiers=["Op1Add"], |
4059 prefix=0x66, | 4097 vex=sz, |
4060 opcode=[0x0F, 0x00], | 4098 prefix=0x66, |
4061 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 4099 opcode=[0x0F, 0x00], |
4062 Operand(type="SIMDReg", size=128, dest="VEX"), | 4100 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
4063 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 4101 Operand(type="SIMDReg", size=sz, dest="VEX"), |
4064 add_group("vpshift", | 4102 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
4065 cpu=["AVX"], | 4103 add_group("vpshift", |
4066 modifiers=["Gap", "Op1Add", "SpAdd"], | 4104 cpu=[cpu], |
4067 vex=128, | 4105 modifiers=["Gap", "Op1Add", "SpAdd"], |
4068 prefix=0x66, | 4106 vex=sz, |
4069 opcode=[0x0F, 0x00], | 4107 prefix=0x66, |
4070 spare=0, | 4108 opcode=[0x0F, 0x00], |
4071 operands=[Operand(type="SIMDReg", size=128, dest="VEX"), | 4109 spare=0, |
4072 Operand(type="SIMDReg", size=128, dest="EA"), | 4110 operands=[Operand(type="SIMDReg", size=sz, dest="VEX"), |
4073 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 4111 Operand(type="SIMDReg", size=sz, dest="EA"), |
| 4112 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
4074 | 4113 |
4075 add_insn("vpsllw", "vpshift", modifiers=[0xF1, 0x71, 6]) | 4114 add_insn("vpsllw", "vpshift", modifiers=[0xF1, 0x71, 6]) |
4076 add_insn("vpslld", "vpshift", modifiers=[0xF2, 0x72, 6]) | 4115 add_insn("vpslld", "vpshift", modifiers=[0xF2, 0x72, 6]) |
4077 add_insn("vpsllq", "vpshift", modifiers=[0xF3, 0x73, 6]) | 4116 add_insn("vpsllq", "vpshift", modifiers=[0xF3, 0x73, 6]) |
4078 add_insn("vpsraw", "vpshift", modifiers=[0xE1, 0x71, 4]) | 4117 add_insn("vpsraw", "vpshift", modifiers=[0xE1, 0x71, 4]) |
4079 add_insn("vpsrad", "vpshift", modifiers=[0xE2, 0x72, 4]) | 4118 add_insn("vpsrad", "vpshift", modifiers=[0xE2, 0x72, 4]) |
4080 add_insn("vpsrlw", "vpshift", modifiers=[0xD1, 0x71, 2]) | 4119 add_insn("vpsrlw", "vpshift", modifiers=[0xD1, 0x71, 2]) |
4081 add_insn("vpsrld", "vpshift", modifiers=[0xD2, 0x72, 2]) | 4120 add_insn("vpsrld", "vpshift", modifiers=[0xD2, 0x72, 2]) |
4082 add_insn("vpsrlq", "vpshift", modifiers=[0xD3, 0x73, 2]) | 4121 add_insn("vpsrlq", "vpshift", modifiers=[0xD3, 0x73, 2]) |
4083 | 4122 |
4084 # | 4123 # |
4085 # PIII (Katmai) new instructions / SIMD instructions | 4124 # PIII (Katmai) new instructions / SIMD instructions |
4086 # | 4125 # |
4087 add_insn("pavgb", "mmxsse2", modifiers=[0xE0], cpu=["P3", "MMX"]) | 4126 add_insn("pavgb", "mmxsse2", modifiers=[0xE0], cpu=["P3", "MMX"]) |
4088 add_insn("pavgw", "mmxsse2", modifiers=[0xE3], cpu=["P3", "MMX"]) | 4127 add_insn("pavgw", "mmxsse2", modifiers=[0xE3], cpu=["P3", "MMX"]) |
4089 add_insn("pmaxsw", "mmxsse2", modifiers=[0xEE], cpu=["P3", "MMX"]) | 4128 add_insn("pmaxsw", "mmxsse2", modifiers=[0xEE], cpu=["P3", "MMX"]) |
4090 add_insn("pmaxub", "mmxsse2", modifiers=[0xDE], cpu=["P3", "MMX"]) | 4129 add_insn("pmaxub", "mmxsse2", modifiers=[0xDE], cpu=["P3", "MMX"]) |
4091 add_insn("pminsw", "mmxsse2", modifiers=[0xEA], cpu=["P3", "MMX"]) | 4130 add_insn("pminsw", "mmxsse2", modifiers=[0xEA], cpu=["P3", "MMX"]) |
4092 add_insn("pminub", "mmxsse2", modifiers=[0xDA], cpu=["P3", "MMX"]) | 4131 add_insn("pminub", "mmxsse2", modifiers=[0xDA], cpu=["P3", "MMX"]) |
4093 add_insn("pmulhuw", "mmxsse2", modifiers=[0xE4], cpu=["P3", "MMX"]) | 4132 add_insn("pmulhuw", "mmxsse2", modifiers=[0xE4], cpu=["P3", "MMX"]) |
4094 add_insn("psadbw", "mmxsse2", modifiers=[0xF6], cpu=["P3", "MMX"]) | 4133 add_insn("psadbw", "mmxsse2", modifiers=[0xF6], cpu=["P3", "MMX"]) |
4095 | 4134 |
4096 # AVX versions don't support MMX register | 4135 # AVX versions don't support MMX register |
4097 add_insn("vpavgb", "xmm_xmm128", modifiers=[0x66, 0xE0, VEXL0], avx=True) | 4136 add_insn("vpavgb", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE0, VEXL0], avx=Tr
ue) |
4098 add_insn("vpavgw", "xmm_xmm128", modifiers=[0x66, 0xE3, VEXL0], avx=True) | 4137 add_insn("vpavgw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE3, VEXL0], avx=Tr
ue) |
4099 add_insn("vpmaxsw", "xmm_xmm128", modifiers=[0x66, 0xEE, VEXL0], avx=True) | 4138 add_insn("vpmaxsw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEE, VEXL0], avx=Tr
ue) |
4100 add_insn("vpmaxub", "xmm_xmm128", modifiers=[0x66, 0xDE, VEXL0], avx=True) | 4139 add_insn("vpmaxub", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDE, VEXL0], avx=Tr
ue) |
4101 add_insn("vpminsw", "xmm_xmm128", modifiers=[0x66, 0xEA, VEXL0], avx=True) | 4140 add_insn("vpminsw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xEA, VEXL0], avx=Tr
ue) |
4102 add_insn("vpminub", "xmm_xmm128", modifiers=[0x66, 0xDA, VEXL0], avx=True) | 4141 add_insn("vpminub", "xmm_xmm128_256avx2", modifiers=[0x66, 0xDA, VEXL0], avx=Tr
ue) |
4103 add_insn("vpmulhuw", "xmm_xmm128", modifiers=[0x66, 0xE4, VEXL0], avx=True) | 4142 add_insn("vpmulhuw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xE4, VEXL0], avx=Tr
ue) |
4104 add_insn("vpsadbw", "xmm_xmm128", modifiers=[0x66, 0xF6, VEXL0], avx=True) | 4143 add_insn("vpsadbw", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF6, VEXL0], avx=Tr
ue) |
4105 | 4144 |
4106 add_insn("prefetchnta", "twobytemem", modifiers=[0, 0x0F, 0x18], cpu=["P3"]) | 4145 add_insn("prefetchnta", "twobytemem", modifiers=[0, 0x0F, 0x18], cpu=["P3"]) |
4107 add_insn("prefetcht0", "twobytemem", modifiers=[1, 0x0F, 0x18], cpu=["P3"]) | 4146 add_insn("prefetcht0", "twobytemem", modifiers=[1, 0x0F, 0x18], cpu=["P3"]) |
4108 add_insn("prefetcht1", "twobytemem", modifiers=[2, 0x0F, 0x18], cpu=["P3"]) | 4147 add_insn("prefetcht1", "twobytemem", modifiers=[2, 0x0F, 0x18], cpu=["P3"]) |
4109 add_insn("prefetcht2", "twobytemem", modifiers=[3, 0x0F, 0x18], cpu=["P3"]) | 4148 add_insn("prefetcht2", "twobytemem", modifiers=[3, 0x0F, 0x18], cpu=["P3"]) |
4110 | 4149 |
4111 add_insn("sfence", "threebyte", modifiers=[0x0F, 0xAE, 0xF8], cpu=["P3"]) | 4150 add_insn("sfence", "threebyte", modifiers=[0x0F, 0xAE, 0xF8], cpu=["P3"]) |
4112 | 4151 |
4113 add_group("xmm_xmm128_256", | 4152 add_group("xmm_xmm128_256", |
4114 cpu=["SSE"], | 4153 cpu=["SSE"], |
(...skipping 10 matching lines...) Expand all Loading... |
4125 opcode=[0x0F, 0x00], | 4164 opcode=[0x0F, 0x00], |
4126 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 4165 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
4127 Operand(type="SIMDReg", size=128, dest="VEX"), | 4166 Operand(type="SIMDReg", size=128, dest="VEX"), |
4128 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 4167 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
4129 add_group("xmm_xmm128_256", | 4168 add_group("xmm_xmm128_256", |
4130 cpu=["AVX"], | 4169 cpu=["AVX"], |
4131 modifiers=["PreAdd", "Op1Add"], | 4170 modifiers=["PreAdd", "Op1Add"], |
4132 vex=256, | 4171 vex=256, |
4133 prefix=0x00, | 4172 prefix=0x00, |
4134 opcode=[0x0F, 0x00], | 4173 opcode=[0x0F, 0x00], |
| 4174 operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), |
| 4175 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
| 4176 add_group("xmm_xmm128_256", |
| 4177 cpu=["AVX"], |
| 4178 modifiers=["PreAdd", "Op1Add"], |
| 4179 vex=256, |
| 4180 prefix=0x00, |
| 4181 opcode=[0x0F, 0x00], |
| 4182 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 4183 Operand(type="SIMDReg", size=256, dest="VEX"), |
| 4184 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
| 4185 |
| 4186 # Same as above, except 256-bit version only available in AVX2 |
| 4187 add_group("xmm_xmm128_256avx2", |
| 4188 cpu=["SSE"], |
| 4189 modifiers=["PreAdd", "Op1Add", "SetVEX"], |
| 4190 prefix=0x00, |
| 4191 opcode=[0x0F, 0x00], |
| 4192 operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), |
| 4193 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
| 4194 add_group("xmm_xmm128_256avx2", |
| 4195 cpu=["AVX"], |
| 4196 modifiers=["PreAdd", "Op1Add"], |
| 4197 vex=128, |
| 4198 prefix=0x00, |
| 4199 opcode=[0x0F, 0x00], |
| 4200 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 4201 Operand(type="SIMDReg", size=128, dest="VEX"), |
| 4202 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
| 4203 add_group("xmm_xmm128_256avx2", |
| 4204 cpu=["AVX2"], |
| 4205 modifiers=["PreAdd", "Op1Add"], |
| 4206 vex=256, |
| 4207 prefix=0x00, |
| 4208 opcode=[0x0F, 0x00], |
| 4209 operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), |
| 4210 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
| 4211 add_group("xmm_xmm128_256avx2", |
| 4212 cpu=["AVX2"], |
| 4213 modifiers=["PreAdd", "Op1Add"], |
| 4214 vex=256, |
| 4215 prefix=0x00, |
| 4216 opcode=[0x0F, 0x00], |
4135 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 4217 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
4136 Operand(type="SIMDReg", size=256, dest="VEX"), | 4218 Operand(type="SIMDReg", size=256, dest="VEX"), |
4137 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) | 4219 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
4138 | 4220 |
4139 # Version that does not allow YMM registers | 4221 # Version that does not allow YMM registers |
4140 add_group("xmm_xmm128", | 4222 add_group("xmm_xmm128", |
4141 cpu=["SSE"], | 4223 cpu=["SSE"], |
4142 modifiers=["PreAdd", "Op1Add", "SetVEX"], | 4224 modifiers=["PreAdd", "Op1Add", "SetVEX"], |
4143 prefix=0x00, | 4225 prefix=0x00, |
4144 opcode=[0x0F, 0x00], | 4226 opcode=[0x0F, 0x00], |
(...skipping 334 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4479 cpu=["SSE"], | 4561 cpu=["SSE"], |
4480 modifiers=["PreAdd", "Op1Add", "SetVEX"], | 4562 modifiers=["PreAdd", "Op1Add", "SetVEX"], |
4481 opcode=[0x0F, 0x00], | 4563 opcode=[0x0F, 0x00], |
4482 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 4564 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
4483 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), | 4565 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
4484 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 4566 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
4485 | 4567 |
4486 add_insn("cmpps", "xmm_xmm128_imm", modifiers=[0, 0xC2]) | 4568 add_insn("cmpps", "xmm_xmm128_imm", modifiers=[0, 0xC2]) |
4487 add_insn("shufps", "xmm_xmm128_imm", modifiers=[0, 0xC6]) | 4569 add_insn("shufps", "xmm_xmm128_imm", modifiers=[0, 0xC6]) |
4488 | 4570 |
| 4571 # YMM register AVX2 version of above |
| 4572 add_group("xmm_xmm128_imm_256avx2", |
| 4573 cpu=["SSE"], |
| 4574 modifiers=["PreAdd", "Op1Add", "SetVEX"], |
| 4575 opcode=[0x0F, 0x00], |
| 4576 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 4577 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
| 4578 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 4579 add_group("xmm_xmm128_imm_256avx2", |
| 4580 cpu=["AVX2"], |
| 4581 modifiers=["PreAdd", "Op1Add"], |
| 4582 vex=256, |
| 4583 opcode=[0x0F, 0x00], |
| 4584 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 4585 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
| 4586 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 4587 |
4489 # YMM register and 4-operand version of above | 4588 # YMM register and 4-operand version of above |
4490 add_group("xmm_xmm128_imm_256", | 4589 add_group("xmm_xmm128_imm_256", |
4491 cpu=["SSE"], | 4590 cpu=["SSE"], |
4492 modifiers=["PreAdd", "Op1Add", "SetVEX"], | 4591 modifiers=["PreAdd", "Op1Add", "SetVEX"], |
4493 opcode=[0x0F, 0x00], | 4592 opcode=[0x0F, 0x00], |
4494 operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), | 4593 operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), |
4495 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), | 4594 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
4496 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 4595 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
4497 add_group("xmm_xmm128_imm_256", | 4596 add_group("xmm_xmm128_imm_256", |
4498 cpu=["AVX"], | 4597 cpu=["AVX"], |
(...skipping 445 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4944 Operand(type="SIMDReg", size=64, dest="EA")]) | 5043 Operand(type="SIMDReg", size=64, dest="EA")]) |
4945 add_group("pmovmskb", | 5044 add_group("pmovmskb", |
4946 suffix="l", | 5045 suffix="l", |
4947 cpu=["SSE2"], | 5046 cpu=["SSE2"], |
4948 modifiers=["SetVEX"], | 5047 modifiers=["SetVEX"], |
4949 prefix=0x66, | 5048 prefix=0x66, |
4950 opcode=[0x0F, 0xD7], | 5049 opcode=[0x0F, 0xD7], |
4951 operands=[Operand(type="Reg", size=32, dest="Spare"), | 5050 operands=[Operand(type="Reg", size=32, dest="Spare"), |
4952 Operand(type="SIMDReg", size=128, dest="EA")]) | 5051 Operand(type="SIMDReg", size=128, dest="EA")]) |
4953 add_group("pmovmskb", | 5052 add_group("pmovmskb", |
| 5053 suffix="l", |
| 5054 cpu=["AVX2"], |
| 5055 vex=256, |
| 5056 prefix=0x66, |
| 5057 opcode=[0x0F, 0xD7], |
| 5058 operands=[Operand(type="Reg", size=32, dest="Spare"), |
| 5059 Operand(type="SIMDReg", size=256, dest="EA")]) |
| 5060 add_group("pmovmskb", |
4954 suffix="q", | 5061 suffix="q", |
4955 cpu=["MMX", "P3"], | 5062 cpu=["MMX", "P3"], |
4956 notavx=True, | 5063 notavx=True, |
4957 opersize=64, | 5064 opersize=64, |
| 5065 def_opersize_64=64, |
4958 opcode=[0x0F, 0xD7], | 5066 opcode=[0x0F, 0xD7], |
4959 operands=[Operand(type="Reg", size=64, dest="Spare"), | 5067 operands=[Operand(type="Reg", size=64, dest="Spare"), |
4960 Operand(type="SIMDReg", size=64, dest="EA")]) | 5068 Operand(type="SIMDReg", size=64, dest="EA")]) |
4961 add_group("pmovmskb", | 5069 add_group("pmovmskb", |
4962 suffix="q", | 5070 suffix="q", |
4963 cpu=["SSE2"], | 5071 cpu=["SSE2"], |
4964 modifiers=["SetVEX"], | 5072 modifiers=["SetVEX"], |
4965 opersize=64, | 5073 opersize=64, |
| 5074 def_opersize_64=64, |
4966 prefix=0x66, | 5075 prefix=0x66, |
4967 opcode=[0x0F, 0xD7], | 5076 opcode=[0x0F, 0xD7], |
4968 operands=[Operand(type="Reg", size=64, dest="Spare"), | 5077 operands=[Operand(type="Reg", size=64, dest="Spare"), |
4969 Operand(type="SIMDReg", size=128, dest="EA")]) | 5078 Operand(type="SIMDReg", size=128, dest="EA")]) |
| 5079 add_group("pmovmskb", |
| 5080 suffix="q", |
| 5081 cpu=["SSE2"], |
| 5082 vex=256, |
| 5083 opersize=64, |
| 5084 def_opersize_64=64, |
| 5085 prefix=0x66, |
| 5086 opcode=[0x0F, 0xD7], |
| 5087 operands=[Operand(type="Reg", size=64, dest="Spare"), |
| 5088 Operand(type="SIMDReg", size=256, dest="EA")]) |
4970 | 5089 |
4971 add_insn("pmovmskb", "pmovmskb") | 5090 add_insn("pmovmskb", "pmovmskb") |
4972 add_insn("vpmovmskb", "pmovmskb", modifiers=[VEXL0], avx=True) | 5091 add_insn("vpmovmskb", "pmovmskb", modifiers=[VEXL0], avx=True) |
4973 | 5092 |
4974 add_group("pshufw", | 5093 add_group("pshufw", |
4975 cpu=["MMX", "P3"], | 5094 cpu=["MMX", "P3"], |
4976 opcode=[0x0F, 0x70], | 5095 opcode=[0x0F, 0x70], |
4977 operands=[Operand(type="SIMDReg", size=64, dest="Spare"), | 5096 operands=[Operand(type="SIMDReg", size=64, dest="Spare"), |
4978 Operand(type="SIMDRM", size=64, relaxed=True, dest="EA"), | 5097 Operand(type="SIMDRM", size=64, relaxed=True, dest="EA"), |
4979 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 5098 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
(...skipping 390 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5370 add_insn("pmuludq", "mmxsse2", modifiers=[0xF4], cpu=["SSE2"]) | 5489 add_insn("pmuludq", "mmxsse2", modifiers=[0xF4], cpu=["SSE2"]) |
5371 add_insn("pshufd", "xmm_xmm128_imm", modifiers=[0x66, 0x70], cpu=["SSE2"]) | 5490 add_insn("pshufd", "xmm_xmm128_imm", modifiers=[0x66, 0x70], cpu=["SSE2"]) |
5372 add_insn("pshufhw", "xmm_xmm128_imm", modifiers=[0xF3, 0x70], cpu=["SSE2"]) | 5491 add_insn("pshufhw", "xmm_xmm128_imm", modifiers=[0xF3, 0x70], cpu=["SSE2"]) |
5373 add_insn("pshuflw", "xmm_xmm128_imm", modifiers=[0xF2, 0x70], cpu=["SSE2"]) | 5492 add_insn("pshuflw", "xmm_xmm128_imm", modifiers=[0xF2, 0x70], cpu=["SSE2"]) |
5374 add_insn("punpckhqdq", "xmm_xmm128", modifiers=[0x66, 0x6D], cpu=["SSE2"]) | 5493 add_insn("punpckhqdq", "xmm_xmm128", modifiers=[0x66, 0x6D], cpu=["SSE2"]) |
5375 add_insn("punpcklqdq", "xmm_xmm128", modifiers=[0x66, 0x6C], cpu=["SSE2"]) | 5494 add_insn("punpcklqdq", "xmm_xmm128", modifiers=[0x66, 0x6C], cpu=["SSE2"]) |
5376 | 5495 |
5377 add_insn("vcvttsd2si", "cvt_rx_xmm64", modifiers=[0xF2, 0x2C, VEXL0], avx=True) | 5496 add_insn("vcvttsd2si", "cvt_rx_xmm64", modifiers=[0xF2, 0x2C, VEXL0], avx=True) |
5378 # vcvttpd2dq takes xmm, ymm combination | 5497 # vcvttpd2dq takes xmm, ymm combination |
5379 # vcvttps2dq is two-operand | 5498 # vcvttps2dq is two-operand |
5380 add_insn("vpmuludq", "xmm_xmm128", modifiers=[0x66, 0xF4, VEXL0], avx=True) | 5499 add_insn("vpmuludq", "xmm_xmm128_256avx2", modifiers=[0x66, 0xF4, VEXL0], avx=Tr
ue) |
5381 add_insn("vpshufd", "xmm_xmm128_imm", modifiers=[0x66, 0x70, VEXL0], avx=True) | 5500 add_insn("vpshufd", "xmm_xmm128_imm_256avx2", modifiers=[0x66, 0x70, VEXL0], avx
=True) |
5382 add_insn("vpshufhw", "xmm_xmm128_imm", modifiers=[0xF3, 0x70, VEXL0], avx=True) | 5501 add_insn("vpshufhw", "xmm_xmm128_imm_256avx2", modifiers=[0xF3, 0x70, VEXL0], av
x=True) |
5383 add_insn("vpshuflw", "xmm_xmm128_imm", modifiers=[0xF2, 0x70, VEXL0], avx=True) | 5502 add_insn("vpshuflw", "xmm_xmm128_imm_256avx2", modifiers=[0xF2, 0x70, VEXL0], av
x=True) |
5384 add_insn("vpunpckhqdq", "xmm_xmm128", modifiers=[0x66, 0x6D, VEXL0], avx=True) | 5503 add_insn("vpunpckhqdq", "xmm_xmm128_256avx2", modifiers=[0x66, 0x6D, VEXL0], avx
=True) |
5385 add_insn("vpunpcklqdq", "xmm_xmm128", modifiers=[0x66, 0x6C, VEXL0], avx=True) | 5504 add_insn("vpunpcklqdq", "xmm_xmm128_256avx2", modifiers=[0x66, 0x6C, VEXL0], avx
=True) |
5386 | 5505 |
5387 add_insn("cvtss2sd", "xmm_xmm32", modifiers=[0xF3, 0x5A], cpu=["SSE2"]) | 5506 add_insn("cvtss2sd", "xmm_xmm32", modifiers=[0xF3, 0x5A], cpu=["SSE2"]) |
5388 add_insn("vcvtss2sd", "xmm_xmm32", modifiers=[0xF3, 0x5A, VEXL0], avx=True) | 5507 add_insn("vcvtss2sd", "xmm_xmm32", modifiers=[0xF3, 0x5A, VEXL0], avx=True) |
5389 | 5508 |
5390 add_group("maskmovdqu", | 5509 add_group("maskmovdqu", |
5391 cpu=["SSE2"], | 5510 cpu=["SSE2"], |
5392 modifiers=["SetVEX"], | 5511 modifiers=["SetVEX"], |
5393 prefix=0x66, | 5512 prefix=0x66, |
5394 opcode=[0x0F, 0xF7], | 5513 opcode=[0x0F, 0xF7], |
5395 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 5514 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5431 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 5550 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
5432 add_group("pslrldq", | 5551 add_group("pslrldq", |
5433 cpu=["SSE2"], | 5552 cpu=["SSE2"], |
5434 modifiers=["SpAdd", "SetVEX"], | 5553 modifiers=["SpAdd", "SetVEX"], |
5435 prefix=0x66, | 5554 prefix=0x66, |
5436 opcode=[0x0F, 0x73], | 5555 opcode=[0x0F, 0x73], |
5437 spare=0, | 5556 spare=0, |
5438 operands=[Operand(type="SIMDReg", size=128, dest="VEX"), | 5557 operands=[Operand(type="SIMDReg", size=128, dest="VEX"), |
5439 Operand(type="SIMDReg", size=128, dest="EA"), | 5558 Operand(type="SIMDReg", size=128, dest="EA"), |
5440 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 5559 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 5560 add_group("pslrldq", |
| 5561 cpu=["AVX2"], |
| 5562 modifiers=["SpAdd"], |
| 5563 vex=256, |
| 5564 prefix=0x66, |
| 5565 opcode=[0x0F, 0x73], |
| 5566 spare=0, |
| 5567 operands=[Operand(type="SIMDReg", size=256, dest="EAVEX"), |
| 5568 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 5569 add_group("pslrldq", |
| 5570 cpu=["AVX2"], |
| 5571 modifiers=["SpAdd"], |
| 5572 vex=256, |
| 5573 prefix=0x66, |
| 5574 opcode=[0x0F, 0x73], |
| 5575 spare=0, |
| 5576 operands=[Operand(type="SIMDReg", size=256, dest="VEX"), |
| 5577 Operand(type="SIMDReg", size=256, dest="EA"), |
| 5578 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
5441 | 5579 |
5442 add_insn("pslldq", "pslrldq", modifiers=[7]) | 5580 add_insn("pslldq", "pslrldq", modifiers=[7]) |
5443 add_insn("psrldq", "pslrldq", modifiers=[3]) | 5581 add_insn("psrldq", "pslrldq", modifiers=[3]) |
5444 add_insn("vpslldq", "pslrldq", modifiers=[7, VEXL0], avx=True) | 5582 add_insn("vpslldq", "pslrldq", modifiers=[7, VEXL0], avx=True) |
5445 add_insn("vpsrldq", "pslrldq", modifiers=[3, VEXL0], avx=True) | 5583 add_insn("vpsrldq", "pslrldq", modifiers=[3, VEXL0], avx=True) |
5446 | 5584 |
5447 ##################################################################### | 5585 ##################################################################### |
5448 # SSE3 / PNI Prescott New Instructions instructions | 5586 # SSE3 / PNI Prescott New Instructions instructions |
5449 ##################################################################### | 5587 ##################################################################### |
5450 add_insn("addsubpd", "xmm_xmm128", modifiers=[0x66, 0xD0], cpu=["SSE3"]) | 5588 add_insn("addsubpd", "xmm_xmm128", modifiers=[0x66, 0xD0], cpu=["SSE3"]) |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5507 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 5645 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
5508 add_group("ssse3", | 5646 add_group("ssse3", |
5509 cpu=["AVX"], | 5647 cpu=["AVX"], |
5510 modifiers=["Op2Add"], | 5648 modifiers=["Op2Add"], |
5511 vex=128, | 5649 vex=128, |
5512 prefix=0x66, | 5650 prefix=0x66, |
5513 opcode=[0x0F, 0x38, 0x00], | 5651 opcode=[0x0F, 0x38, 0x00], |
5514 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 5652 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
5515 Operand(type="SIMDReg", size=128, dest="VEX"), | 5653 Operand(type="SIMDReg", size=128, dest="VEX"), |
5516 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 5654 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
| 5655 add_group("ssse3", |
| 5656 cpu=["AVX2"], |
| 5657 modifiers=["Op2Add"], |
| 5658 vex=256, |
| 5659 prefix=0x66, |
| 5660 opcode=[0x0F, 0x38, 0x00], |
| 5661 operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), |
| 5662 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
| 5663 add_group("ssse3", |
| 5664 cpu=["AVX2"], |
| 5665 modifiers=["Op2Add"], |
| 5666 vex=256, |
| 5667 prefix=0x66, |
| 5668 opcode=[0x0F, 0x38, 0x00], |
| 5669 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 5670 Operand(type="SIMDReg", size=256, dest="VEX"), |
| 5671 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
5517 | 5672 |
5518 add_insn("pshufb", "ssse3", modifiers=[0x00]) | 5673 add_insn("pshufb", "ssse3", modifiers=[0x00]) |
5519 add_insn("phaddw", "ssse3", modifiers=[0x01]) | 5674 add_insn("phaddw", "ssse3", modifiers=[0x01]) |
5520 add_insn("phaddd", "ssse3", modifiers=[0x02]) | 5675 add_insn("phaddd", "ssse3", modifiers=[0x02]) |
5521 add_insn("phaddsw", "ssse3", modifiers=[0x03]) | 5676 add_insn("phaddsw", "ssse3", modifiers=[0x03]) |
5522 add_insn("pmaddubsw", "ssse3", modifiers=[0x04]) | 5677 add_insn("pmaddubsw", "ssse3", modifiers=[0x04]) |
5523 add_insn("phsubw", "ssse3", modifiers=[0x05]) | 5678 add_insn("phsubw", "ssse3", modifiers=[0x05]) |
5524 add_insn("phsubd", "ssse3", modifiers=[0x06]) | 5679 add_insn("phsubd", "ssse3", modifiers=[0x06]) |
5525 add_insn("phsubsw", "ssse3", modifiers=[0x07]) | 5680 add_insn("phsubsw", "ssse3", modifiers=[0x07]) |
5526 add_insn("psignb", "ssse3", modifiers=[0x08]) | 5681 add_insn("psignb", "ssse3", modifiers=[0x08]) |
(...skipping 28 matching lines...) Expand all Loading... |
5555 add_group("ssse3imm", | 5710 add_group("ssse3imm", |
5556 cpu=["SSSE3"], | 5711 cpu=["SSSE3"], |
5557 modifiers=["Op2Add"], | 5712 modifiers=["Op2Add"], |
5558 prefix=0x66, | 5713 prefix=0x66, |
5559 opcode=[0x0F, 0x3A, 0x00], | 5714 opcode=[0x0F, 0x3A, 0x00], |
5560 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 5715 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
5561 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), | 5716 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
5562 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 5717 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
5563 | 5718 |
5564 add_insn("palignr", "ssse3imm", modifiers=[0x0F]) | 5719 add_insn("palignr", "ssse3imm", modifiers=[0x0F]) |
5565 add_insn("vpalignr", "sse4imm", modifiers=[0x0F, VEXL0], avx=True) | 5720 add_insn("vpalignr", "sse4imm_256avx2", modifiers=[0x0F, VEXL0], avx=True) |
5566 | 5721 |
5567 ##################################################################### | 5722 ##################################################################### |
5568 # SSE4.1 / SSE4.2 instructions | 5723 # SSE4.1 / SSE4.2 instructions |
5569 ##################################################################### | 5724 ##################################################################### |
5570 | 5725 |
5571 add_group("sse4", | 5726 add_group("sse4", |
5572 cpu=["SSE41"], | 5727 cpu=["SSE41"], |
5573 modifiers=["Op2Add", "SetVEX"], | 5728 modifiers=["Op2Add", "SetVEX"], |
5574 prefix=0x66, | 5729 prefix=0x66, |
5575 opcode=[0x0F, 0x38, 0x00], | 5730 opcode=[0x0F, 0x38, 0x00], |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5635 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 5790 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
5636 Operand(type="SIMDReg", size=128, dest="VEX"), | 5791 Operand(type="SIMDReg", size=128, dest="VEX"), |
5637 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), | 5792 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
5638 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 5793 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
5639 add_group("sse4imm_256", | 5794 add_group("sse4imm_256", |
5640 cpu=["AVX"], | 5795 cpu=["AVX"], |
5641 modifiers=["Op2Add"], | 5796 modifiers=["Op2Add"], |
5642 vex=256, | 5797 vex=256, |
5643 prefix=0x66, | 5798 prefix=0x66, |
5644 opcode=[0x0F, 0x3A, 0x00], | 5799 opcode=[0x0F, 0x3A, 0x00], |
| 5800 operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), |
| 5801 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
| 5802 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 5803 add_group("sse4imm_256", |
| 5804 cpu=["AVX"], |
| 5805 modifiers=["Op2Add"], |
| 5806 vex=256, |
| 5807 prefix=0x66, |
| 5808 opcode=[0x0F, 0x3A, 0x00], |
| 5809 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 5810 Operand(type="SIMDReg", size=256, dest="VEX"), |
| 5811 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
| 5812 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 5813 |
| 5814 # Same as above except AVX2 required for 256-bit. |
| 5815 add_group("sse4imm_256avx2", |
| 5816 cpu=["SSE41"], |
| 5817 modifiers=["Op2Add", "SetVEX"], |
| 5818 prefix=0x66, |
| 5819 opcode=[0x0F, 0x3A, 0x00], |
| 5820 operands=[Operand(type="SIMDReg", size=128, dest="SpareVEX"), |
| 5821 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
| 5822 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 5823 add_group("sse4imm_256avx2", |
| 5824 cpu=["AVX"], |
| 5825 modifiers=["Op2Add"], |
| 5826 vex=128, |
| 5827 prefix=0x66, |
| 5828 opcode=[0x0F, 0x3A, 0x00], |
| 5829 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 5830 Operand(type="SIMDReg", size=128, dest="VEX"), |
| 5831 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
| 5832 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 5833 add_group("sse4imm_256avx2", |
| 5834 cpu=["AVX2"], |
| 5835 modifiers=["Op2Add"], |
| 5836 vex=256, |
| 5837 prefix=0x66, |
| 5838 opcode=[0x0F, 0x3A, 0x00], |
| 5839 operands=[Operand(type="SIMDReg", size=256, dest="SpareVEX"), |
| 5840 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
| 5841 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 5842 add_group("sse4imm_256avx2", |
| 5843 cpu=["AVX2"], |
| 5844 modifiers=["Op2Add"], |
| 5845 vex=256, |
| 5846 prefix=0x66, |
| 5847 opcode=[0x0F, 0x3A, 0x00], |
5645 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 5848 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
5646 Operand(type="SIMDReg", size=256, dest="VEX"), | 5849 Operand(type="SIMDReg", size=256, dest="VEX"), |
5647 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), | 5850 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
5648 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 5851 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
5649 | 5852 |
5650 # Version that does not allow YMM registers | 5853 # Version that does not allow YMM registers |
5651 add_group("sse4imm", | 5854 add_group("sse4imm", |
5652 cpu=["SSE41"], | 5855 cpu=["SSE41"], |
5653 modifiers=["Op2Add", "SetVEX"], | 5856 modifiers=["Op2Add", "SetVEX"], |
5654 prefix=0x66, | 5857 prefix=0x66, |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5709 add_insn("blendps", "sse4imm", modifiers=[0x0C]) | 5912 add_insn("blendps", "sse4imm", modifiers=[0x0C]) |
5710 add_insn("dppd", "sse4imm", modifiers=[0x41]) | 5913 add_insn("dppd", "sse4imm", modifiers=[0x41]) |
5711 add_insn("dpps", "sse4imm", modifiers=[0x40]) | 5914 add_insn("dpps", "sse4imm", modifiers=[0x40]) |
5712 add_insn("mpsadbw", "sse4imm", modifiers=[0x42]) | 5915 add_insn("mpsadbw", "sse4imm", modifiers=[0x42]) |
5713 add_insn("pblendw", "sse4imm", modifiers=[0x0E]) | 5916 add_insn("pblendw", "sse4imm", modifiers=[0x0E]) |
5714 add_insn("roundpd", "sse4imm", modifiers=[0x09]) | 5917 add_insn("roundpd", "sse4imm", modifiers=[0x09]) |
5715 add_insn("roundps", "sse4imm", modifiers=[0x08]) | 5918 add_insn("roundps", "sse4imm", modifiers=[0x08]) |
5716 add_insn("roundsd", "sse4m64imm", modifiers=[0x0B]) | 5919 add_insn("roundsd", "sse4m64imm", modifiers=[0x0B]) |
5717 add_insn("roundss", "sse4m32imm", modifiers=[0x0A]) | 5920 add_insn("roundss", "sse4m32imm", modifiers=[0x0A]) |
5718 | 5921 |
5719 # vdppd, vmpsadbw, and vpblendw do not allow YMM registers | 5922 # vdppd does not allow YMM registers |
| 5923 # vmpsadbw and vpblendw do not allow YMM registers unless AVX2 |
5720 add_insn("vblendpd", "sse4imm_256", modifiers=[0x0D, VEXL0], avx=True) | 5924 add_insn("vblendpd", "sse4imm_256", modifiers=[0x0D, VEXL0], avx=True) |
5721 add_insn("vblendps", "sse4imm_256", modifiers=[0x0C, VEXL0], avx=True) | 5925 add_insn("vblendps", "sse4imm_256", modifiers=[0x0C, VEXL0], avx=True) |
5722 add_insn("vdppd", "sse4imm", modifiers=[0x41, VEXL0], avx=True) | 5926 add_insn("vdppd", "sse4imm", modifiers=[0x41, VEXL0], avx=True) |
5723 add_insn("vdpps", "sse4imm_256", modifiers=[0x40, VEXL0], avx=True) | 5927 add_insn("vdpps", "sse4imm_256", modifiers=[0x40, VEXL0], avx=True) |
5724 add_insn("vmpsadbw", "sse4imm", modifiers=[0x42, VEXL0], avx=True) | 5928 add_insn("vmpsadbw", "sse4imm_256avx2", modifiers=[0x42, VEXL0], avx=True) |
5725 add_insn("vpblendw", "sse4imm", modifiers=[0x0E, VEXL0], avx=True) | 5929 add_insn("vpblendw", "sse4imm_256avx2", modifiers=[0x0E, VEXL0], avx=True) |
5726 # vroundpd and vroundps don't add another register operand | 5930 # vroundpd and vroundps don't add another register operand |
5727 add_insn("vroundsd", "sse4m64imm", modifiers=[0x0B, VEXL0], avx=True) | 5931 add_insn("vroundsd", "sse4m64imm", modifiers=[0x0B, VEXL0], avx=True) |
5728 add_insn("vroundss", "sse4m32imm", modifiers=[0x0A, VEXL0], avx=True) | 5932 add_insn("vroundss", "sse4m32imm", modifiers=[0x0A, VEXL0], avx=True) |
5729 | 5933 |
5730 add_group("sse4xmm0", | 5934 add_group("sse4xmm0", |
5731 cpu=["SSE41"], | 5935 cpu=["SSE41"], |
5732 modifiers=["Op2Add"], | 5936 modifiers=["Op2Add"], |
5733 prefix=0x66, | 5937 prefix=0x66, |
5734 opcode=[0x0F, 0x38, 0x00], | 5938 opcode=[0x0F, 0x38, 0x00], |
5735 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 5939 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
(...skipping 29 matching lines...) Expand all Loading... |
5765 prefix=0x66, | 5969 prefix=0x66, |
5766 opcode=[0x0F, 0x3A, 0x00], | 5970 opcode=[0x0F, 0x3A, 0x00], |
5767 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 5971 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
5768 Operand(type="SIMDReg", size=256, dest="VEX"), | 5972 Operand(type="SIMDReg", size=256, dest="VEX"), |
5769 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), | 5973 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
5770 Operand(type="SIMDReg", size=256, dest="VEXImmSrc")]) | 5974 Operand(type="SIMDReg", size=256, dest="VEXImmSrc")]) |
5771 | 5975 |
5772 add_insn("vblendvpd", "avx_sse4xmm0", modifiers=[0x4B]) | 5976 add_insn("vblendvpd", "avx_sse4xmm0", modifiers=[0x4B]) |
5773 add_insn("vblendvps", "avx_sse4xmm0", modifiers=[0x4A]) | 5977 add_insn("vblendvps", "avx_sse4xmm0", modifiers=[0x4A]) |
5774 | 5978 |
5775 # vpblendvb doesn't have a 256-bit form | 5979 # vpblendvb didn't have a 256-bit form until AVX2 |
5776 add_group("avx_sse4xmm0_128", | 5980 add_group("avx2_sse4xmm0", |
5777 cpu=["AVX"], | 5981 cpu=["AVX2"], |
5778 modifiers=["Op2Add"], | 5982 modifiers=["Op2Add"], |
5779 vex=128, | 5983 vex=128, |
5780 prefix=0x66, | 5984 prefix=0x66, |
5781 opcode=[0x0F, 0x3A, 0x00], | 5985 opcode=[0x0F, 0x3A, 0x00], |
5782 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 5986 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
5783 Operand(type="SIMDReg", size=128, dest="VEX"), | 5987 Operand(type="SIMDReg", size=128, dest="VEX"), |
5784 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), | 5988 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
5785 Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) | 5989 Operand(type="SIMDReg", size=128, dest="VEXImmSrc")]) |
| 5990 add_group("avx2_sse4xmm0", |
| 5991 cpu=["AVX2"], |
| 5992 modifiers=["Op2Add"], |
| 5993 vex=256, |
| 5994 prefix=0x66, |
| 5995 opcode=[0x0F, 0x3A, 0x00], |
| 5996 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 5997 Operand(type="SIMDReg", size=256, dest="VEX"), |
| 5998 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
| 5999 Operand(type="SIMDReg", size=256, dest="VEXImmSrc")]) |
5786 | 6000 |
5787 add_insn("vpblendvb", "avx_sse4xmm0_128", modifiers=[0x4C]) | 6001 add_insn("vpblendvb", "avx2_sse4xmm0", modifiers=[0x4C]) |
5788 | 6002 |
5789 for sfx, sz in zip("bwl", [8, 16, 32]): | 6003 for sfx, sz in zip("bwl", [8, 16, 32]): |
5790 add_group("crc32", | 6004 add_group("crc32", |
5791 suffix=sfx, | 6005 suffix=sfx, |
5792 cpu=["SSE42"], | 6006 cpu=["SSE42"], |
5793 opersize=sz, | 6007 opersize=sz, |
5794 prefix=0xF2, | 6008 prefix=0xF2, |
5795 opcode=[0x0F, 0x38, 0xF0+(sz!=8)], | 6009 opcode=[0x0F, 0x38, 0xF0+(sz!=8)], |
5796 operands=[Operand(type="Reg", size=32, dest="Spare"), | 6010 operands=[Operand(type="Reg", size=32, dest="Spare"), |
5797 Operand(type="RM", size=sz, relaxed=(sz==32), dest="EA")]) | 6011 Operand(type="RM", size=sz, relaxed=(sz==32), dest="EA")]) |
(...skipping 68 matching lines...)
5866 add_insn("insertps", "insertps") | 6080 add_insn("insertps", "insertps") |
5867 add_insn("vinsertps", "insertps", modifiers=[VEXL0], avx=True) | 6081 add_insn("vinsertps", "insertps", modifiers=[VEXL0], avx=True) |
5868 | 6082 |
5869 add_group("movntdqa", | 6083 add_group("movntdqa", |
5870 cpu=["SSE41"], | 6084 cpu=["SSE41"], |
5871 modifiers=["SetVEX"], | 6085 modifiers=["SetVEX"], |
5872 prefix=0x66, | 6086 prefix=0x66, |
5873 opcode=[0x0F, 0x38, 0x2A], | 6087 opcode=[0x0F, 0x38, 0x2A], |
5874 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6088 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
5875 Operand(type="Mem", size=128, relaxed=True, dest="EA")]) | 6089 Operand(type="Mem", size=128, relaxed=True, dest="EA")]) |
| 6090 add_group("movntdqa", |
| 6091 cpu=["AVX2"], |
| 6092 vex=256, |
| 6093 prefix=0x66, |
| 6094 opcode=[0x0F, 0x38, 0x2A], |
| 6095 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 6096 Operand(type="Mem", size=256, relaxed=True, dest="EA")]) |
5876 | 6097 |
5877 add_insn("movntdqa", "movntdqa") | 6098 add_insn("movntdqa", "movntdqa") |
5878 add_insn("vmovntdqa", "movntdqa", modifiers=[VEXL0], avx=True) | 6099 add_insn("vmovntdqa", "movntdqa", modifiers=[VEXL0], avx=True) |
5879 | 6100 |
5880 add_group("sse4pcmpstr", | 6101 add_group("sse4pcmpstr", |
5881 cpu=["SSE42"], | 6102 cpu=["SSE42"], |
5882 modifiers=["Op2Add", "SetVEX"], | 6103 modifiers=["Op2Add", "SetVEX"], |
5883 prefix=0x66, | 6104 prefix=0x66, |
5884 opcode=[0x0F, 0x3A, 0x00], | 6105 opcode=[0x0F, 0x3A, 0x00], |
5885 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6106 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
(...skipping 154 matching lines...)
6040 opcode=[0x0F, 0x38, 0x00], | 6261 opcode=[0x0F, 0x38, 0x00], |
6041 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6262 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
6042 Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) | 6263 Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) |
6043 add_group("sse4m%d" % sz, | 6264 add_group("sse4m%d" % sz, |
6044 cpu=["SSE41"], | 6265 cpu=["SSE41"], |
6045 modifiers=["Op2Add", "SetVEX"], | 6266 modifiers=["Op2Add", "SetVEX"], |
6046 prefix=0x66, | 6267 prefix=0x66, |
6047 opcode=[0x0F, 0x38, 0x00], | 6268 opcode=[0x0F, 0x38, 0x00], |
6048 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6269 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
6049 Operand(type="SIMDReg", size=128, dest="EA")]) | 6270 Operand(type="SIMDReg", size=128, dest="EA")]) |
| 6271 add_group("sse4m%d" % sz, |
| 6272 cpu=["AVX2"], |
| 6273 modifiers=["Op2Add"], |
| 6274 vex=256, |
| 6275 prefix=0x66, |
| 6276 opcode=[0x0F, 0x38, 0x00], |
| 6277 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 6278 Operand(type="Mem", size=sz*2, relaxed=True, dest="EA")]) |
| 6279 add_group("sse4m%d" % sz, |
| 6280 cpu=["AVX2"], |
| 6281 modifiers=["Op2Add"], |
| 6282 vex=256, |
| 6283 prefix=0x66, |
| 6284 opcode=[0x0F, 0x38, 0x00], |
| 6285 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 6286 Operand(type="SIMDReg", size=128, dest="EA")]) |
6050 | 6287 |
6051 add_insn("pmovsxbw", "sse4m64", modifiers=[0x20]) | 6288 add_insn("pmovsxbw", "sse4m64", modifiers=[0x20]) |
6052 add_insn("pmovsxwd", "sse4m64", modifiers=[0x23]) | 6289 add_insn("pmovsxwd", "sse4m64", modifiers=[0x23]) |
6053 add_insn("pmovsxdq", "sse4m64", modifiers=[0x25]) | 6290 add_insn("pmovsxdq", "sse4m64", modifiers=[0x25]) |
6054 add_insn("pmovzxbw", "sse4m64", modifiers=[0x30]) | 6291 add_insn("pmovzxbw", "sse4m64", modifiers=[0x30]) |
6055 add_insn("pmovzxwd", "sse4m64", modifiers=[0x33]) | 6292 add_insn("pmovzxwd", "sse4m64", modifiers=[0x33]) |
6056 add_insn("pmovzxdq", "sse4m64", modifiers=[0x35]) | 6293 add_insn("pmovzxdq", "sse4m64", modifiers=[0x35]) |
6057 | 6294 |
6058 add_insn("vpmovsxbw", "sse4m64", modifiers=[0x20, VEXL0], avx=True) | 6295 add_insn("vpmovsxbw", "sse4m64", modifiers=[0x20, VEXL0], avx=True) |
6059 add_insn("vpmovsxwd", "sse4m64", modifiers=[0x23, VEXL0], avx=True) | 6296 add_insn("vpmovsxwd", "sse4m64", modifiers=[0x23, VEXL0], avx=True) |
(...skipping 256 matching lines...)
6316 # Some SSE3 opcodes are only two operand in AVX | 6553 # Some SSE3 opcodes are only two operand in AVX |
6317 # (VEX.vvvv must be 1111b) | 6554 # (VEX.vvvv must be 1111b) |
6318 add_group("avx_ssse3_2op", | 6555 add_group("avx_ssse3_2op", |
6319 cpu=["AVX"], | 6556 cpu=["AVX"], |
6320 modifiers=["Op2Add"], | 6557 modifiers=["Op2Add"], |
6321 vex=128, | 6558 vex=128, |
6322 prefix=0x66, | 6559 prefix=0x66, |
6323 opcode=[0x0F, 0x38, 0x00], | 6560 opcode=[0x0F, 0x38, 0x00], |
6324 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6561 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
6325 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 6562 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
| 6563 add_insn("vphminposuw", "avx_ssse3_2op", modifiers=[0x41], avx=True) |
6326 | 6564 |
6327 add_insn("vpabsb", "avx_ssse3_2op", modifiers=[0x1C], avx=True) | 6565 # VPABS* are extended to 256-bit in AVX2 |
6328 add_insn("vpabsw", "avx_ssse3_2op", modifiers=[0x1D], avx=True) | 6566 for cpu, sz in zip(["AVX", "AVX2"], [128, 256]): |
6329 add_insn("vpabsd", "avx_ssse3_2op", modifiers=[0x1E], avx=True) | 6567 add_group("avx2_ssse3_2op", |
6330 add_insn("vphminposuw", "avx_ssse3_2op", modifiers=[0x41], avx=True) | 6568 cpu=[cpu], |
| 6569 modifiers=["Op2Add"], |
| 6570 vex=sz, |
| 6571 prefix=0x66, |
| 6572 opcode=[0x0F, 0x38, 0x00], |
| 6573 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6574 Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA")]) |
| 6575 add_insn("vpabsb", "avx2_ssse3_2op", modifiers=[0x1C], avx=True) |
| 6576 add_insn("vpabsw", "avx2_ssse3_2op", modifiers=[0x1D], avx=True) |
| 6577 add_insn("vpabsd", "avx2_ssse3_2op", modifiers=[0x1E], avx=True) |
6331 | 6578 |
6332 # Some conversion functions take xmm, ymm combination | 6579 # Some conversion functions take xmm, ymm combination |
6333 # Need separate x and y versions for gas mode | 6580 # Need separate x and y versions for gas mode |
6334 add_group("avx_cvt_xmm128_x", | 6581 add_group("avx_cvt_xmm128_x", |
6335 cpu=["AVX"], | 6582 cpu=["AVX"], |
6336 modifiers=["PreAdd", "Op1Add"], | 6583 modifiers=["PreAdd", "Op1Add"], |
6337 vex=128, | 6584 vex=128, |
6338 prefix=0x00, | 6585 prefix=0x00, |
6339 opcode=[0x0F, 0x00], | 6586 opcode=[0x0F, 0x00], |
6340 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6587 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
(...skipping 47 matching lines...)
6388 opcode=[0x0F, 0x38, 0x18], | 6635 opcode=[0x0F, 0x38, 0x18], |
6389 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6636 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
6390 Operand(type="Mem", size=32, relaxed=True, dest="EA")]) | 6637 Operand(type="Mem", size=32, relaxed=True, dest="EA")]) |
6391 add_group("vbroadcastss", | 6638 add_group("vbroadcastss", |
6392 cpu=["AVX"], | 6639 cpu=["AVX"], |
6393 vex=256, | 6640 vex=256, |
6394 prefix=0x66, | 6641 prefix=0x66, |
6395 opcode=[0x0F, 0x38, 0x18], | 6642 opcode=[0x0F, 0x38, 0x18], |
6396 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 6643 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
6397 Operand(type="Mem", size=32, relaxed=True, dest="EA")]) | 6644 Operand(type="Mem", size=32, relaxed=True, dest="EA")]) |
| 6645 add_group("vbroadcastss", |
| 6646 cpu=["AVX2"], |
| 6647 vex=128, |
| 6648 prefix=0x66, |
| 6649 opcode=[0x0F, 0x38, 0x18], |
| 6650 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 6651 Operand(type="SIMDReg", size=128, dest="EA")]) |
| 6652 add_group("vbroadcastss", |
| 6653 cpu=["AVX2"], |
| 6654 vex=256, |
| 6655 prefix=0x66, |
| 6656 opcode=[0x0F, 0x38, 0x18], |
| 6657 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 6658 Operand(type="SIMDReg", size=128, dest="EA")]) |
6398 | 6659 |
6399 add_insn("vbroadcastss", "vbroadcastss") | 6660 add_insn("vbroadcastss", "vbroadcastss") |
6400 | 6661 |
6401 add_group("vbroadcastsd", | 6662 add_group("vbroadcastsd", |
6402 cpu=["AVX"], | 6663 cpu=["AVX"], |
6403 vex=256, | 6664 vex=256, |
6404 prefix=0x66, | 6665 prefix=0x66, |
6405 opcode=[0x0F, 0x38, 0x19], | 6666 opcode=[0x0F, 0x38, 0x19], |
6406 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 6667 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
6407 Operand(type="Mem", size=64, relaxed=True, dest="EA")]) | 6668 Operand(type="Mem", size=64, relaxed=True, dest="EA")]) |
| 6669 add_group("vbroadcastsd", |
| 6670 cpu=["AVX2"], |
| 6671 vex=256, |
| 6672 prefix=0x66, |
| 6673 opcode=[0x0F, 0x38, 0x19], |
| 6674 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 6675 Operand(type="SIMDReg", size=128, dest="EA")]) |
6408 | 6676 |
6409 add_insn("vbroadcastsd", "vbroadcastsd") | 6677 add_insn("vbroadcastsd", "vbroadcastsd") |
6410 | 6678 |
6411 add_group("vbroadcastf128", | 6679 add_group("vbroadcastif128", |
6412 cpu=["AVX"], | 6680 modifiers=["Op2Add"], |
6413 vex=256, | 6681 vex=256, |
6414 prefix=0x66, | 6682 prefix=0x66, |
6415 opcode=[0x0F, 0x38, 0x1A], | 6683 opcode=[0x0F, 0x38, 0x00], |
6416 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 6684 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
6417 Operand(type="Mem", size=128, relaxed=True, dest="EA")]) | 6685 Operand(type="Mem", size=128, relaxed=True, dest="EA")]) |
6418 | 6686 |
6419 add_insn("vbroadcastf128", "vbroadcastf128") | 6687 add_insn("vbroadcastf128", "vbroadcastif128", modifiers=[0x1A], cpu=["AVX"]) |
| 6688 add_insn("vbroadcasti128", "vbroadcastif128", modifiers=[0x5A], cpu=["AVX2"]) |
6420 | 6689 |
6421 add_group("vextractf128", | 6690 add_group("vextractif128", |
6422 cpu=["AVX"], | 6691 modifiers=["Op2Add"], |
6423 vex=256, | 6692 vex=256, |
6424 prefix=0x66, | 6693 prefix=0x66, |
6425 opcode=[0x0F, 0x3A, 0x19], | 6694 opcode=[0x0F, 0x3A, 0x00], |
6426 operands=[Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), | 6695 operands=[Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
6427 Operand(type="SIMDReg", size=256, dest="Spare"), | 6696 Operand(type="SIMDReg", size=256, dest="Spare"), |
6428 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 6697 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
6429 | 6698 |
6430 add_insn("vextractf128", "vextractf128") | 6699 add_insn("vextractf128", "vextractif128", modifiers=[0x19], cpu=["AVX"]) |
| 6700 add_insn("vextracti128", "vextractif128", modifiers=[0x39], cpu=["AVX2"]) |
6431 | 6701 |
6432 add_group("vinsertf128", | 6702 add_group("vinsertif128", |
6433 cpu=["AVX"], | 6703 modifiers=["Op2Add"], |
6434 vex=256, | 6704 vex=256, |
6435 prefix=0x66, | 6705 prefix=0x66, |
6436 opcode=[0x0F, 0x3A, 0x18], | 6706 opcode=[0x0F, 0x3A, 0x00], |
6437 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 6707 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
6438 Operand(type="SIMDReg", size=256, dest="VEX"), | 6708 Operand(type="SIMDReg", size=256, dest="VEX"), |
6439 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), | 6709 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
6440 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 6710 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
6441 | 6711 |
6442 add_insn("vinsertf128", "vinsertf128") | 6712 add_insn("vinsertf128", "vinsertif128", modifiers=[0x18], cpu=["AVX"]) |
| 6713 add_insn("vinserti128", "vinsertif128", modifiers=[0x38], cpu=["AVX2"]) |
6443 | 6714 |
6444 add_group("vzero", | 6715 add_group("vzero", |
6445 cpu=["AVX"], | 6716 cpu=["AVX"], |
6446 modifiers=["SetVEX"], | 6717 modifiers=["SetVEX"], |
6447 opcode=[0x0F, 0x77], | 6718 opcode=[0x0F, 0x77], |
6448 operands=[]) | 6719 operands=[]) |
6449 | 6720 |
6450 add_insn("vzeroall", "vzero", modifiers=[VEXL1]) | 6721 add_insn("vzeroall", "vzero", modifiers=[VEXL1]) |
6451 add_insn("vzeroupper", "vzero", modifiers=[VEXL0]) | 6722 add_insn("vzeroupper", "vzero", modifiers=[VEXL0]) |
6452 | 6723 |
6453 add_group("vmaskmov", | 6724 add_group("vmaskmov", |
6454 cpu=["AVX"], | |
6455 modifiers=["Op2Add"], | 6725 modifiers=["Op2Add"], |
6456 vex=128, | 6726 vex=128, |
6457 prefix=0x66, | 6727 prefix=0x66, |
6458 opcode=[0x0F, 0x38, 0x00], | 6728 opcode=[0x0F, 0x38, 0x00], |
6459 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6729 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
6460 Operand(type="SIMDReg", size=128, dest="VEX"), | 6730 Operand(type="SIMDReg", size=128, dest="VEX"), |
6461 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 6731 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
6462 add_group("vmaskmov", | 6732 add_group("vmaskmov", |
6463 cpu=["AVX"], | |
6464 modifiers=["Op2Add"], | 6733 modifiers=["Op2Add"], |
6465 vex=256, | 6734 vex=256, |
6466 prefix=0x66, | 6735 prefix=0x66, |
6467 opcode=[0x0F, 0x38, 0x00], | 6736 opcode=[0x0F, 0x38, 0x00], |
6468 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 6737 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
6469 Operand(type="SIMDReg", size=256, dest="VEX"), | 6738 Operand(type="SIMDReg", size=256, dest="VEX"), |
6470 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) | 6739 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
6471 add_group("vmaskmov", | 6740 add_group("vmaskmov", |
6472 cpu=["AVX"], | |
6473 modifiers=["Op2Add"], | 6741 modifiers=["Op2Add"], |
6474 vex=128, | 6742 vex=128, |
6475 prefix=0x66, | 6743 prefix=0x66, |
6476 opcode=[0x0F, 0x38, 0x02], | 6744 opcode=[0x0F, 0x38, 0x02], |
6477 operands=[Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), | 6745 operands=[Operand(type="SIMDRM", size=128, relaxed=True, dest="EA"), |
6478 Operand(type="SIMDReg", size=128, dest="VEX"), | 6746 Operand(type="SIMDReg", size=128, dest="VEX"), |
6479 Operand(type="SIMDReg", size=128, dest="Spare")]) | 6747 Operand(type="SIMDReg", size=128, dest="Spare")]) |
6480 add_group("vmaskmov", | 6748 add_group("vmaskmov", |
6481 cpu=["AVX"], | |
6482 modifiers=["Op2Add"], | 6749 modifiers=["Op2Add"], |
6483 vex=256, | 6750 vex=256, |
6484 prefix=0x66, | 6751 prefix=0x66, |
6485 opcode=[0x0F, 0x38, 0x02], | 6752 opcode=[0x0F, 0x38, 0x02], |
6486 operands=[Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), | 6753 operands=[Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
6487 Operand(type="SIMDReg", size=256, dest="VEX"), | 6754 Operand(type="SIMDReg", size=256, dest="VEX"), |
6488 Operand(type="SIMDReg", size=256, dest="Spare")]) | 6755 Operand(type="SIMDReg", size=256, dest="Spare")]) |
6489 | 6756 |
6490 add_insn("vmaskmovps", "vmaskmov", modifiers=[0x2C]) | 6757 add_insn("vmaskmovps", "vmaskmov", modifiers=[0x2C], cpu=["AVX"]) |
6491 add_insn("vmaskmovpd", "vmaskmov", modifiers=[0x2D]) | 6758 add_insn("vmaskmovpd", "vmaskmov", modifiers=[0x2D], cpu=["AVX"]) |
6492 | 6759 |
6493 add_group("vpermil", | 6760 add_group("vpermil", |
6494 cpu=["AVX"], | 6761 cpu=["AVX"], |
6495 modifiers=["Op2Add"], | 6762 modifiers=["Op2Add"], |
6496 vex=128, | 6763 vex=128, |
6497 prefix=0x66, | 6764 prefix=0x66, |
6498 opcode=[0x0F, 0x38, 0x08], | 6765 opcode=[0x0F, 0x38, 0x08], |
6499 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 6766 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
6500 Operand(type="SIMDReg", size=128, dest="VEX"), | 6767 Operand(type="SIMDReg", size=128, dest="VEX"), |
6501 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 6768 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
(...skipping 34 matching lines...)
6536 prefix=0x66, | 6803 prefix=0x66, |
6537 opcode=[0x0F, 0x3A, 0x06], | 6804 opcode=[0x0F, 0x3A, 0x06], |
6538 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 6805 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
6539 Operand(type="SIMDReg", size=256, dest="VEX"), | 6806 Operand(type="SIMDReg", size=256, dest="VEX"), |
6540 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), | 6807 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
6541 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) | 6808 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
6542 | 6809 |
6543 add_insn("vperm2f128", "vperm2f128") | 6810 add_insn("vperm2f128", "vperm2f128") |
6544 | 6811 |
6545 ##################################################################### | 6812 ##################################################################### |
| 6813 # Intel AVX2 instructions |
| 6814 ##################################################################### |
| 6815 |
| 6816 # Most AVX2 instructions are mixed in with above SSEx/AVX groups. |
| 6817 # Some make more sense to have separate groups. |
| 6818 |
| 6819 # vex.vvvv=1111b |
| 6820 add_group("vperm_var_avx2", |
| 6821 cpu=["AVX2"], |
| 6822 modifiers=["Op2Add"], |
| 6823 vex=256, |
| 6824 vexw=0, |
| 6825 prefix=0x66, |
| 6826 opcode=[0x0F, 0x38, 0x00], |
| 6827 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 6828 Operand(type="SIMDReg", size=256, dest="VEX"), |
| 6829 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
| 6830 |
| 6831 add_insn("vpermd", "vperm_var_avx2", modifiers=[0x36]) |
| 6832 add_insn("vpermps", "vperm_var_avx2", modifiers=[0x16]) |
| 6833 |
| 6834 # vex.vvvv=1111b |
| 6835 add_group("vperm_imm_avx2", |
| 6836 cpu=["AVX2"], |
| 6837 modifiers=["Op2Add"], |
| 6838 vex=256, |
| 6839 vexw=1, |
| 6840 prefix=0x66, |
| 6841 opcode=[0x0F, 0x3A, 0x00], |
| 6842 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 6843 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
| 6844 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 6845 |
| 6846 add_insn("vpermq", "vperm_imm_avx2", modifiers=[0x00]) |
| 6847 add_insn("vpermpd", "vperm_imm_avx2", modifiers=[0x01]) |
| 6848 |
| 6849 add_group("vperm2i128_avx2", |
| 6850 cpu=["AVX2"], |
| 6851 vex=256, |
| 6852 prefix=0x66, |
| 6853 opcode=[0x0F, 0x3A, 0x46], |
| 6854 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 6855 Operand(type="SIMDReg", size=256, dest="VEX"), |
| 6856 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA"), |
| 6857 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 6858 |
| 6859 add_insn("vperm2i128", "vperm2i128_avx2") |
| 6860 |
| 6861 # vex.vvvv=1111b |
| 6862 for sz in [128, 256]: |
| 6863 add_group("vpbroadcastb_avx2", |
| 6864 cpu=["AVX2"], |
| 6865 vex=sz, |
| 6866 vexw=0, |
| 6867 prefix=0x66, |
| 6868 opcode=[0x0F, 0x38, 0x78], |
| 6869 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6870 Operand(type="SIMDReg", size=128, relaxed=True, dest="EA")]) |
| 6871 # vex.vvvv=1111b |
| 6872 for sz in [128, 256]: |
| 6873 add_group("vpbroadcastb_avx2", |
| 6874 cpu=["AVX2"], |
| 6875 vex=sz, |
| 6876 vexw=0, |
| 6877 prefix=0x66, |
| 6878 opcode=[0x0F, 0x38, 0x78], |
| 6879 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6880 Operand(type="RM", size=8, relaxed=True, dest="EA")]) |
| 6881 |
| 6882 add_insn("vpbroadcastb", "vpbroadcastb_avx2") |
| 6883 |
| 6884 # vex.vvvv=1111b |
| 6885 for sz in [128, 256]: |
| 6886 add_group("vpbroadcastw_avx2", |
| 6887 cpu=["AVX2"], |
| 6888 vex=sz, |
| 6889 vexw=0, |
| 6890 prefix=0x66, |
| 6891 opcode=[0x0F, 0x38, 0x79], |
| 6892 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6893 Operand(type="SIMDReg", size=128, relaxed=True, dest="EA")]) |
| 6894 # vex.vvvv=1111b |
| 6895 for sz in [128, 256]: |
| 6896 add_group("vpbroadcastw_avx2", |
| 6897 cpu=["AVX2"], |
| 6898 vex=sz, |
| 6899 vexw=0, |
| 6900 prefix=0x66, |
| 6901 opcode=[0x0F, 0x38, 0x79], |
| 6902 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6903 Operand(type="RM", size=16, relaxed=True, dest="EA")]) |
| 6904 |
| 6905 add_insn("vpbroadcastw", "vpbroadcastw_avx2") |
| 6906 |
| 6907 # vex.vvvv=1111b |
| 6908 for sz in [128, 256]: |
| 6909 add_group("vpbroadcastd_avx2", |
| 6910 cpu=["AVX2"], |
| 6911 vex=sz, |
| 6912 vexw=0, |
| 6913 prefix=0x66, |
| 6914 opcode=[0x0F, 0x38, 0x58], |
| 6915 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6916 Operand(type="SIMDReg", size=128, relaxed=True, dest="EA")]) |
| 6917 # vex.vvvv=1111b |
| 6918 for sz in [128, 256]: |
| 6919 add_group("vpbroadcastd_avx2", |
| 6920 cpu=["AVX2"], |
| 6921 vex=sz, |
| 6922 vexw=0, |
| 6923 prefix=0x66, |
| 6924 opcode=[0x0F, 0x38, 0x58], |
| 6925 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6926 Operand(type="RM", size=32, relaxed=True, dest="EA")]) |
| 6927 |
| 6928 add_insn("vpbroadcastd", "vpbroadcastd_avx2") |
| 6929 |
| 6930 # vex.vvvv=1111b |
| 6931 for sz in [128, 256]: |
| 6932 add_group("vpbroadcastq_avx2", |
| 6933 cpu=["AVX2"], |
| 6934 vex=sz, |
| 6935 vexw=0, |
| 6936 prefix=0x66, |
| 6937 opcode=[0x0F, 0x38, 0x59], |
| 6938 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6939 Operand(type="SIMDReg", size=128, relaxed=True, dest="EA")]) |
| 6940 # vex.vvvv=1111b |
| 6941 for sz in [128, 256]: |
| 6942 add_group("vpbroadcastq_avx2", |
| 6943 cpu=["AVX2"], |
| 6944 vex=sz, |
| 6945 vexw=0, |
| 6946 prefix=0x66, |
| 6947 opcode=[0x0F, 0x38, 0x59], |
| 6948 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6949 Operand(type="RM", size=64, relaxed=True, dest="EA")]) |
| 6950 |
| 6951 add_insn("vpbroadcastq", "vpbroadcastq_avx2") |
| 6952 |
| 6953 for sz in [128, 256]: |
| 6954 add_group("vpshiftv_vexw0_avx2", |
| 6955 cpu=["AVX2"], |
| 6956 modifiers=["Op2Add"], |
| 6957 vex=sz, |
| 6958 vexw=0, |
| 6959 prefix=0x66, |
| 6960 opcode=[0x0F, 0x38, 0x00], |
| 6961 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6962 Operand(type="SIMDReg", size=sz, dest="VEX"), |
| 6963 Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA")]) |
| 6964 |
| 6965 for sz in [128, 256]: |
| 6966 add_group("vpshiftv_vexw1_avx2", |
| 6967 cpu=["AVX2"], |
| 6968 modifiers=["Op2Add"], |
| 6969 vex=sz, |
| 6970 vexw=1, |
| 6971 prefix=0x66, |
| 6972 opcode=[0x0F, 0x38, 0x00], |
| 6973 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6974 Operand(type="SIMDReg", size=sz, dest="VEX"), |
| 6975 Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA")]) |
| 6976 |
| 6977 add_insn("vpsrlvd", "vpshiftv_vexw0_avx2", modifiers=[0x45]) |
| 6978 add_insn("vpsrlvq", "vpshiftv_vexw1_avx2", modifiers=[0x45]) |
| 6979 add_insn("vpsravd", "vpshiftv_vexw0_avx2", modifiers=[0x46]) |
| 6980 |
| 6981 add_insn("vpsllvd", "vpshiftv_vexw0_avx2", modifiers=[0x47]) |
| 6982 add_insn("vpsllvq", "vpshiftv_vexw1_avx2", modifiers=[0x47]) |
| 6983 |
| 6984 add_insn("vpmaskmovd", "vmaskmov", modifiers=[0x8C], cpu=["AVX2"]) |
| 6985 |
| 6986 # vex.vvvv=1111b |
| 6987 for sz in [128, 256]: |
| 6988 add_group("vmaskmov_vexw1_avx2", |
| 6989 cpu=["AVX2"], |
| 6990 modifiers=["Op2Add"], |
| 6991 vex=sz, |
| 6992 vexw=1, |
| 6993 prefix=0x66, |
| 6994 opcode=[0x0F, 0x38, 0x00], |
| 6995 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 6996 Operand(type="SIMDReg", size=sz, dest="VEX"), |
| 6997 Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA")]) |
| 6998 |
| 6999 for sz in [128, 256]: |
| 7000 add_group("vmaskmov_vexw1_avx2", |
| 7001 cpu=["AVX2"], |
| 7002 modifiers=["Op2Add"], |
| 7003 vex=sz, |
| 7004 vexw=1, |
| 7005 prefix=0x66, |
| 7006 opcode=[0x0F, 0x38, 0x02], |
| 7007 operands=[Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA"), |
| 7008 Operand(type="SIMDReg", size=sz, dest="VEX"), |
| 7009 Operand(type="SIMDReg", size=sz, dest="Spare")]) |
| 7010 |
| 7011 add_insn("vpmaskmovq", "vmaskmov_vexw1_avx2", modifiers=[0x8C]) |
| 7012 |
| 7013 for sz in [128, 256]: |
| 7014 add_group("vex_66_0F3A_imm8_avx2", |
| 7015 cpu=["AVX2"], |
| 7016 modifiers=["Op2Add"], |
| 7017 vex=sz, |
| 7018 vexw=0, |
| 7019 prefix=0x66, |
| 7020 opcode=[0x0F, 0x3A, 0x00], |
| 7021 operands=[Operand(type="SIMDReg", size=sz, dest="Spare"), |
| 7022 Operand(type="SIMDReg", size=sz, dest="VEX"), |
| 7023 Operand(type="SIMDRM", size=sz, relaxed=True, dest="EA"), |
| 7024 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 7025 |
| 7026 add_insn("vpblendd", "vex_66_0F3A_imm8_avx2", modifiers=[0x02]) |
| 7027 |
| 7028 # Vector register in EA. |
| 7029 add_group("gather_64x_64x", |
| 7030 cpu=["AVX2"], |
| 7031 modifiers=["Op2Add"], |
| 7032 vex=128, |
| 7033 vexw=1, |
| 7034 prefix=0x66, |
| 7035 opcode=[0x0F, 0x38, 0x00], |
| 7036 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 7037 Operand(type="MemXMMIndex", size=64, relaxed=True, dest="EA"), |
| 7038 Operand(type="SIMDReg", size=128, dest="VEX")]) |
| 7039 add_group("gather_64x_64x", |
| 7040 cpu=["AVX2"], |
| 7041 modifiers=["Op2Add"], |
| 7042 vex=256, |
| 7043 vexw=1, |
| 7044 prefix=0x66, |
| 7045 opcode=[0x0F, 0x38, 0x00], |
| 7046 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 7047 Operand(type="MemXMMIndex", size=64, relaxed=True, dest="EA"), |
| 7048 Operand(type="SIMDReg", size=256, dest="VEX")]) |
| 7049 add_insn("vgatherdpd", "gather_64x_64x", modifiers=[0x92]) |
| 7050 add_insn("vpgatherdq", "gather_64x_64x", modifiers=[0x90]) |
| 7051 |
| 7052 add_group("gather_64x_64y", |
| 7053 cpu=["AVX2"], |
| 7054 modifiers=["Op2Add"], |
| 7055 vex=128, |
| 7056 vexw=1, |
| 7057 prefix=0x66, |
| 7058 opcode=[0x0F, 0x38, 0x00], |
| 7059 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 7060 Operand(type="MemXMMIndex", size=64, relaxed=True, dest="EA"), |
| 7061 Operand(type="SIMDReg", size=128, dest="VEX")]) |
| 7062 add_group("gather_64x_64y", |
| 7063 cpu=["AVX2"], |
| 7064 modifiers=["Op2Add"], |
| 7065 vex=256, |
| 7066 vexw=1, |
| 7067 prefix=0x66, |
| 7068 opcode=[0x0F, 0x38, 0x00], |
| 7069 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 7070 Operand(type="MemYMMIndex", size=64, relaxed=True, dest="EA"), |
| 7071 Operand(type="SIMDReg", size=256, dest="VEX")]) |
| 7072 add_insn("vgatherqpd", "gather_64x_64y", modifiers=[0x93]) |
| 7073 add_insn("vpgatherqq", "gather_64x_64y", modifiers=[0x91]) |
| 7074 |
| 7075 add_group("gather_32x_32y", |
| 7076 cpu=["AVX2"], |
| 7077 modifiers=["Op2Add"], |
| 7078 vex=128, |
| 7079 vexw=0, |
| 7080 prefix=0x66, |
| 7081 opcode=[0x0F, 0x38, 0x00], |
| 7082 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 7083 Operand(type="MemXMMIndex", size=32, relaxed=True, dest="EA"), |
| 7084 Operand(type="SIMDReg", size=128, dest="VEX")]) |
| 7085 add_group("gather_32x_32y", |
| 7086 cpu=["AVX2"], |
| 7087 modifiers=["Op2Add"], |
| 7088 vex=256, |
| 7089 vexw=0, |
| 7090 prefix=0x66, |
| 7091 opcode=[0x0F, 0x38, 0x00], |
| 7092 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
| 7093 Operand(type="MemYMMIndex", size=32, relaxed=True, dest="EA"), |
| 7094 Operand(type="SIMDReg", size=256, dest="VEX")]) |
| 7095 add_insn("vgatherdps", "gather_32x_32y", modifiers=[0x92]) |
| 7096 add_insn("vpgatherdd", "gather_32x_32y", modifiers=[0x90]) |
| 7097 |
| 7098 add_group("gather_32x_32y_128", |
| 7099 cpu=["AVX2"], |
| 7100 modifiers=["Op2Add"], |
| 7101 vex=128, |
| 7102 vexw=0, |
| 7103 prefix=0x66, |
| 7104 opcode=[0x0F, 0x38, 0x00], |
| 7105 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 7106 Operand(type="MemXMMIndex", size=32, relaxed=True, dest="EA"), |
| 7107 Operand(type="SIMDReg", size=128, dest="VEX")]) |
| 7108 add_group("gather_32x_32y_128", |
| 7109 cpu=["AVX2"], |
| 7110 modifiers=["Op2Add"], |
| 7111 vex=256, |
| 7112 vexw=0, |
| 7113 prefix=0x66, |
| 7114 opcode=[0x0F, 0x38, 0x00], |
| 7115 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
| 7116 Operand(type="MemYMMIndex", size=32, relaxed=True, dest="EA"), |
| 7117 Operand(type="SIMDReg", size=128, dest="VEX")]) |
| 7118 add_insn("vgatherqps", "gather_32x_32y_128", modifiers=[0x93]) |
| 7119 add_insn("vpgatherqd", "gather_32x_32y_128", modifiers=[0x91]) |
| 7120 |
| 7121 ##################################################################### |
6546 # Intel FMA instructions | 7122 # Intel FMA instructions |
6547 ##################################################################### | 7123 ##################################################################### |
6548 | 7124 |
6549 ### 128/256b FMA PS | 7125 ### 128/256b FMA PS |
6550 add_group("vfma_ps", | 7126 add_group("vfma_ps", |
6551 cpu=["FMA"], | 7127 cpu=["FMA"], |
6552 modifiers=["Op2Add"], | 7128 modifiers=["Op2Add"], |
6553 vex=128, | 7129 vex=128, |
6554 vexw=0, # single precision | 7130 vexw=0, # single precision |
6555 prefix=0x66, | 7131 prefix=0x66, |
6556 opcode=[0x0F, 0x38, 0x00], | 7132 opcode=[0x0F, 0x38, 0x00], |
6557 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 7133 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
6558 Operand(type="SIMDReg", size=128, dest="VEX"), | 7134 Operand(type="SIMDReg", size=128, dest="VEX"), |
6559 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 7135 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
6560 add_group("vfma_ps", | 7136 add_group("vfma_ps", |
6561 cpu=["FMA"], | 7137 cpu=["FMA"], |
6562 modifiers=["Op2Add"], | 7138 modifiers=["Op2Add"], |
6563 vex=256, | 7139 vex=256, |
6564 vexw=0, # single precision | 7140 vexw=0, # single precision |
6565 prefix=0x66, | 7141 prefix=0x66, |
6566 opcode=[0x0F, 0x38, 0x00], | 7142 opcode=[0x0F, 0x38, 0x00], |
6567 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), | 7143 operands=[Operand(type="SIMDReg", size=256, dest="Spare"), |
6568 Operand(type="SIMDReg", size=256, dest="VEX"), | 7144 Operand(type="SIMDReg", size=256, dest="VEX"), |
6569 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) | 7145 Operand(type="SIMDRM", size=256, relaxed=True, dest="EA")]) |
6570 | 7146 |
6571 ### 128/256b FMA PD(W=1) | 7147 ### 128/256b FMA PD(W=1) |
6572 add_group("vfma_pd", | 7148 add_group("vfma_pd", |
6573 cpu=["FMA"], | 7149 cpu=["FMA"], |
6574 modifiers=["Op2Add"], | 7150 modifiers=["Op2Add"], |
6575 vex=128, | 7151 vex=128, |
6576 vexw=1, # double precision | 7152 vexw=1, # double precision |
6577 prefix=0x66, | 7153 prefix=0x66, |
6578 opcode=[0x0F, 0x38, 0x00], | 7154 opcode=[0x0F, 0x38, 0x00], |
6579 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), | 7155 operands=[Operand(type="SIMDReg", size=128, dest="Spare"), |
6580 Operand(type="SIMDReg", size=128, dest="VEX"), | 7156 Operand(type="SIMDReg", size=128, dest="VEX"), |
6581 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) | 7157 Operand(type="SIMDRM", size=128, relaxed=True, dest="EA")]) |
(...skipping 725 matching lines...)
7307 Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) | 7883 Operand(type="Mem", size=sz, relaxed=True, dest="EA")]) |
7308 add_group("movbe", | 7884 add_group("movbe", |
7309 cpu=["MOVBE"], | 7885 cpu=["MOVBE"], |
7310 opersize=sz, | 7886 opersize=sz, |
7311 opcode=[0x0F, 0x38, 0xF1], | 7887 opcode=[0x0F, 0x38, 0xF1], |
7312 operands=[Operand(type="Mem", size=sz, relaxed=True, dest="EA"), | 7888 operands=[Operand(type="Mem", size=sz, relaxed=True, dest="EA"), |
7313 Operand(type="Reg", size=sz, dest="Spare")]) | 7889 Operand(type="Reg", size=sz, dest="Spare")]) |
7314 add_insn("movbe", "movbe") | 7890 add_insn("movbe", "movbe") |
7315 | 7891 |
7316 ##################################################################### | 7892 ##################################################################### |
| 7893 # Intel advanced bit manipulations (BMI1/2) |
| 7894 ##################################################################### |
| 7895 |
| 7896 add_insn("tzcnt", "cnt", modifiers=[0xBC], cpu=["BMI1"]) |
 | 7897 # LZCNT is added below under "AMD extensions", gated on the LZCNT cpu flag |
| 7898 |
 | 7899 for sfx, sz in zip("lq", [32, 64]): # no 16-bit forms |
| 7900 add_group("vex_gpr_ndd_rm_0F38_regext", |
| 7901 suffix=sfx, |
| 7902 modifiers=["PreAdd", "Op2Add", "SpAdd" ], |
| 7903 opersize=sz, |
| 7904 prefix=0x00, |
| 7905 opcode=[0x0F, 0x38, 0x00], |
| 7906 vex=0, ## VEX.L=0 |
| 7907 operands=[Operand(type="Reg", size=sz, dest="VEX"), |
| 7908 Operand(type="RM", size=sz, relaxed=True, dest="EA")]) |
| 7909 |
| 7910 |
| 7911 add_insn("blsr", "vex_gpr_ndd_rm_0F38_regext", modifiers=[0x00, 0xF3, 1], |
| 7912 cpu=["BMI1"]) |
| 7913 add_insn("blsmsk", "vex_gpr_ndd_rm_0F38_regext", modifiers=[0x00, 0xF3, 2], |
| 7914 cpu=["BMI1"]) |
| 7915 add_insn("blsi", "vex_gpr_ndd_rm_0F38_regext", modifiers=[0x00, 0xF3, 3], |
| 7916 cpu=["BMI1"]) |
| 7917 |
 | 7918 for sfx, sz in zip("lq", [32, 64]): # no 16-bit forms |
| 7919 add_group("vex_gpr_reg_rm_0F_imm8", |
| 7920 suffix=sfx, |
| 7921 modifiers=["PreAdd", "Op1Add", "Op2Add"], |
| 7922 opersize=sz, |
| 7923 prefix=0x00, |
| 7924 opcode=[0x0F, 0x00, 0x00], |
| 7925 vex=0, ## VEX.L=0 |
| 7926 operands=[Operand(type="Reg", size=sz, dest="Spare"), |
| 7927 Operand(type="RM", size=sz, relaxed=True, dest="EA"), |
| 7928 Operand(type="Imm", size=8, relaxed=True, dest="Imm")]) |
| 7929 |
| 7930 add_insn("rorx", "vex_gpr_reg_rm_0F_imm8", modifiers=[0xF2, 0x3A, 0xF0], |
| 7931 cpu=["BMI2"]) |
| 7932 |
| 7933 for sfx, sz in zip("lq", [32, 64]): # no 16-bit forms |
| 7934 add_group("vex_gpr_reg_nds_rm_0F", |
| 7935 suffix=sfx, |
| 7936 modifiers=["PreAdd", "Op1Add", "Op2Add"], |
| 7937 opersize=sz, |
| 7938 prefix=0x00, |
| 7939 opcode=[0x0F, 0x00, 0x00], |
| 7940 vex=0, |
| 7941 operands=[Operand(type="Reg", size=sz, dest="Spare"), |
| 7942 Operand(type="Reg", size=sz, dest="VEX"), |
| 7943 Operand(type="RM", size=sz, relaxed=True, dest="EA")]) |
| 7944 |
| 7945 add_insn("andn", "vex_gpr_reg_nds_rm_0F", modifiers=[0x00, 0x38, 0xF2], |
| 7946 cpu=["BMI1"]) |
| 7947 |
| 7948 add_insn("pdep", "vex_gpr_reg_nds_rm_0F", modifiers=[0xF2, 0x38, 0xF5], |
| 7949 cpu=["BMI2"]) |
| 7950 add_insn("pext", "vex_gpr_reg_nds_rm_0F", modifiers=[0xF3, 0x38, 0xF5], |
| 7951 cpu=["BMI2"]) |
| 7952 |
| 7953 for sfx, sz in zip("lq", [32, 64]): # no 16-bit forms |
| 7954 add_group("vex_gpr_reg_rm_nds_0F", |
| 7955 suffix=sfx, |
| 7956 modifiers=["PreAdd", "Op1Add", "Op2Add"], |
| 7957 opersize=sz, |
| 7958 prefix=0x00, |
| 7959 opcode=[0x0F, 0x00, 0x00], |
| 7960 vex=0, |
| 7961 operands=[Operand(type="Reg", size=sz, dest="Spare"), |
| 7962 Operand(type="RM", size=sz, relaxed=True, dest="EA"), |
| 7963 Operand(type="Reg", size=sz, dest="VEX")]) |
| 7964 |
| 7965 add_insn("bzhi", "vex_gpr_reg_rm_nds_0F", modifiers=[0x00, 0x38, 0xF5], |
| 7966 cpu=["BMI2"]) |
 | 7967 add_insn("bextr", "vex_gpr_reg_rm_nds_0F", modifiers=[0x00, 0x38, 0xF7], |
 | 7968 cpu=["BMI1"]) |
| 7969 add_insn("shlx", "vex_gpr_reg_rm_nds_0F", modifiers=[0x66, 0x38, 0xF7], |
| 7970 cpu=["BMI2"]) |
| 7971 add_insn("shrx", "vex_gpr_reg_rm_nds_0F", modifiers=[0xF2, 0x38, 0xF7], |
| 7972 cpu=["BMI2"]) |
| 7973 add_insn("sarx", "vex_gpr_reg_rm_nds_0F", modifiers=[0xF3, 0x38, 0xF7], |
| 7974 cpu=["BMI2"]) |
| 7975 |
| 7976 add_insn("mulx", "vex_gpr_reg_nds_rm_0F", modifiers=[0xF2, 0x38, 0xF6], |
| 7977 cpu=["BMI2"]) |
| 7978 |
| 7979 |
| 7980 |
| 7981 ##################################################################### |
| 7982 # Intel INVPCID instruction |
| 7983 ##################################################################### |
| 7984 add_group("invpcid", |
| 7985 cpu=["INVPCID", "Priv"], |
| 7986 not64=True, |
| 7987 prefix=0x66, |
| 7988 opcode=[0x0F, 0x38, 0x82], |
| 7989 operands=[Operand(type="Reg", size=32, dest="Spare"), |
| 7990 Operand(type="Mem", size=128, relaxed=True, dest="EA")]) |
| 7991 add_group("invpcid", |
| 7992 cpu=["INVPCID", "Priv"], |
| 7993 only64=True, |
| 7994 def_opersize_64=64, |
| 7995 prefix=0x66, |
| 7996 opcode=[0x0F, 0x38, 0x82], |
| 7997 operands=[Operand(type="Reg", size=64, dest="Spare"), |
| 7998 Operand(type="Mem", size=128, relaxed=True, dest="EA")]) |
| 7999 add_insn("invpcid", "invpcid") |
| 8000 |
| 8001 ##################################################################### |
7317 # AMD 3DNow! instructions | 8002 # AMD 3DNow! instructions |
7318 ##################################################################### | 8003 ##################################################################### |
7319 | 8004 |
7320 add_insn("prefetch", "twobytemem", modifiers=[0x00, 0x0F, 0x0D], cpu=["3DNow"]) | 8005 add_insn("prefetch", "twobytemem", modifiers=[0x00, 0x0F, 0x0D], cpu=["3DNow"]) |
7321 add_insn("prefetchw", "twobytemem", modifiers=[0x01, 0x0F, 0x0D], cpu=["3DNow"]) | 8006 add_insn("prefetchw", "twobytemem", modifiers=[0x01, 0x0F, 0x0D], cpu=["3DNow"]) |
7322 add_insn("femms", "twobyte", modifiers=[0x0F, 0x0E], cpu=["3DNow"]) | 8007 add_insn("femms", "twobyte", modifiers=[0x0F, 0x0E], cpu=["3DNow"]) |
7323 | 8008 |
7324 add_group("now3d", | 8009 add_group("now3d", |
7325 cpu=["3DNow"], | 8010 cpu=["3DNow"], |
7326 modifiers=["Imm8"], | 8011 modifiers=["Imm8"], |
(...skipping 27 matching lines...) Expand all Loading... |
7354 add_insn("pswapd", "now3d", modifiers=[0xBB], cpu=["Athlon", "3DNow"]) | 8039 add_insn("pswapd", "now3d", modifiers=[0xBB], cpu=["Athlon", "3DNow"]) |
7355 | 8040 |
7356 ##################################################################### | 8041 ##################################################################### |
7357 # AMD extensions | 8042 # AMD extensions |
7358 ##################################################################### | 8043 ##################################################################### |
7359 | 8044 |
7360 add_insn("syscall", "twobyte", modifiers=[0x0F, 0x05], cpu=["686", "AMD"]) | 8045 add_insn("syscall", "twobyte", modifiers=[0x0F, 0x05], cpu=["686", "AMD"]) |
7361 for sfx in [None, "l", "q"]: | 8046 for sfx in [None, "l", "q"]: |
7362 add_insn("sysret"+(sfx or ""), "twobyte", suffix=sfx, modifiers=[0x0F, 0x07]
, | 8047 add_insn("sysret"+(sfx or ""), "twobyte", suffix=sfx, modifiers=[0x0F, 0x07]
, |
7363 cpu=["686", "AMD", "Priv"]) | 8048 cpu=["686", "AMD", "Priv"]) |
7364 add_insn("lzcnt", "cnt", modifiers=[0xBD], cpu=["686", "AMD"]) | 8049 add_insn("lzcnt", "cnt", modifiers=[0xBD], cpu=["LZCNT"]) |
7365 | 8050 |
7366 ##################################################################### | 8051 ##################################################################### |
7367 # AMD x86-64 extensions | 8052 # AMD x86-64 extensions |
7368 ##################################################################### | 8053 ##################################################################### |
7369 | 8054 |
7370 add_insn("swapgs", "threebyte", modifiers=[0x0F, 0x01, 0xF8], only64=True) | 8055 add_insn("swapgs", "threebyte", modifiers=[0x0F, 0x01, 0xF8], only64=True) |
7371 add_insn("rdtscp", "threebyte", modifiers=[0x0F, 0x01, 0xF9], | 8056 add_insn("rdtscp", "threebyte", modifiers=[0x0F, 0x01, 0xF9], |
7372 cpu=["686", "AMD", "Priv"]) | 8057 cpu=["686", "AMD", "Priv"]) |
7373 | 8058 |
7374 add_group("cmpxchg16b", | 8059 add_group("cmpxchg16b", |
(...skipping 267 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
7642 # Output generation | 8327 # Output generation |
7643 ##################################################################### | 8328 ##################################################################### |
7644 | 8329 |
7645 out_dir = "" | 8330 out_dir = "" |
7646 if len(sys.argv) > 1: | 8331 if len(sys.argv) > 1: |
7647 out_dir = sys.argv[1] | 8332 out_dir = sys.argv[1] |
7648 | 8333 |
7649 output_groups(file(os.path.join(out_dir, "x86insns.c"), "wt")) | 8334 output_groups(file(os.path.join(out_dir, "x86insns.c"), "wt")) |
7650 output_gas_insns(file(os.path.join(out_dir, "x86insn_gas.gperf"), "wt")) | 8335 output_gas_insns(file(os.path.join(out_dir, "x86insn_gas.gperf"), "wt")) |
7651 output_nasm_insns(file(os.path.join(out_dir, "x86insn_nasm.gperf"), "wt")) | 8336 output_nasm_insns(file(os.path.join(out_dir, "x86insn_nasm.gperf"), "wt")) |
| 8337 |
OLD | NEW |