/*
 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "native_client/src/trusted/validator/x86/nacl_cpuid.h"

/*
 * nacl_cpuid.c
 * Retrieve and decode CPU model specific feature mask.
 */
#if NACL_WINDOWS
#include <intrin.h> /* __cpuid intrinsic */
#endif /* NACL_WINDOWS */

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>

#include "native_client/src/include/portability_io.h"
#include "native_client/src/shared/platform/nacl_log.h"

/*
 * TODO(bradchen): consolidate to use one debug print mechanism.
 */

#define CPUID_EDX_x87 0x00000001 /* x87 FPU support */
#define CPUID_EDX_VME 0x00000002 /* Virtual 8086 Mode Enhancement */
#define CPUID_EDX_DEB 0x00000004 /* Debugging Extensions */
#define CPUID_EDX_PSE 0x00000008 /* Page Size Extensions */
#define CPUID_EDX_TSC 0x00000010 /* Time Stamp Counter */
#define CPUID_EDX_MSR 0x00000020 /* RDMSR and WRMSR */
#define CPUID_EDX_PAE 0x00000040 /* Physical Address Extensions */
#define CPUID_EDX_MCE 0x00000080 /* Machine Check Exception */
#define CPUID_EDX_CX8 0x00000100 /* CMPXCHG8B Instruction */
#define CPUID_EDX_APIC 0x00000200 /* APIC on chip */
/* 0x00000400 reserved */
#define CPUID_EDX_SEP 0x00000800 /* SYSENTER and SYSEXIT */
#define CPUID_EDX_MTRR 0x00001000 /* Memory Type Range Registers */
#define CPUID_EDX_PGE 0x00002000 /* PTE Global Bit */
#define CPUID_EDX_MCA 0x00004000 /* Machine Check Architecture */
#define CPUID_EDX_CMOV 0x00008000 /* CMOV instruction */
#define CPUID_EDX_PAT 0x00010000 /* Page Attribute Table */
#define CPUID_EDX_PSE36 0x00020000 /* Page Size Extension bis */
#define CPUID_EDX_PSN 0x00040000 /* Processor Serial Number */
#define CPUID_EDX_CLFLUSH 0x00080000 /* CLFLUSH instruction */
/* 0x00100000 reserved */
#define CPUID_EDX_DS 0x00200000 /* Debug Store */
#define CPUID_EDX_ACPI 0x00400000 /* Thermal Monitor and Clock Ctrl */
#define CPUID_EDX_MMX 0x00800000 /* MMX extensions */
#define CPUID_EDX_FXSR 0x01000000 /* FXSAVE/FXRSTOR instructions */
#define CPUID_EDX_SSE 0x02000000 /* SSE extensions */
#define CPUID_EDX_SSE2 0x04000000 /* SSE2 extensions */
#define CPUID_EDX_SS 0x08000000 /* Self snoop */
#define CPUID_EDX_HTT 0x10000000 /* Hyper-threading */
#define CPUID_EDX_TM 0x20000000 /* Thermal monitor */
/* 0x40000000 reserved */
#define CPUID_EDX_PBE 0x80000000 /* Pending Break Enable */

#define CPUID_ECX_SSE3 0x00000001 /* SSE3 extensions */
#define CPUID_ECX_CLMUL 0x00000002 /* PCLMULQDQ instruction */
#define CPUID_ECX_DTES64 0x00000004 /* 64-bit DS Area */
#define CPUID_ECX_MON 0x00000008 /* MONITOR/MWAIT instructions */
#define CPUID_ECX_DSCPL 0x00000010 /* CPL Qualified Debug Store */
#define CPUID_ECX_VMX 0x00000020 /* Virtual Machine Extensions */
#define CPUID_ECX_SMX 0x00000040 /* Safer Mode Extensions */
#define CPUID_ECX_EST 0x00000080 /* Enhanced SpeedStep */
#define CPUID_ECX_TM2 0x00000100 /* Thermal Monitor 2 */
#define CPUID_ECX_SSSE3 0x00000200 /* SSSE3 extensions */
#define CPUID_ECX_CXID 0x00000400 /* L1 context ID */
/* 0x00000800 reserved */
#define CPUID_ECX_FMA 0x00001000 /* FMA instructions */
#define CPUID_ECX_CX16 0x00002000 /* CMPXCHG16B instruction */
#define CPUID_ECX_XTPR 0x00004000 /* xTPR update control */
#define CPUID_ECX_PDCM 0x00008000 /* Perf/Debug Capability MSR */
/* 0x00010000 reserved */
#define CPUID_ECX_PCID 0x00020000 /* Process-context identifiers */
#define CPUID_ECX_DCA 0x00040000 /* Direct Cache Access */
#define CPUID_ECX_SSE41 0x00080000 /* SSE4.1 extensions */
#define CPUID_ECX_SSE42 0x00100000 /* SSE4.2 extensions */
#define CPUID_ECX_x2APIC 0x00200000 /* x2APIC feature */
#define CPUID_ECX_MOVBE 0x00400000 /* MOVBE instruction */
#define CPUID_ECX_POPCNT 0x00800000 /* POPCNT instruction */
#define CPUID_ECX_TSCDDLN 0x01000000 /* TSC-Deadline */
#define CPUID_ECX_AES 0x02000000 /* AES instructions */
#define CPUID_ECX_XSAVE 0x04000000 /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define CPUID_ECX_OSXSAVE 0x08000000 /* XSAVE et al enabled by OS */
#define CPUID_ECX_AVX 0x10000000 /* AVX instructions */
#define CPUID_ECX_F16C 0x20000000 /* 16bit floating-point instructions */
#define CPUID_ECX_RDRAND 0x40000000 /* RDRAND instruction */
/* 0x80000000 reserved */

/* Function 07h main leaf (ecx = 0) */
#define CPUID_EBX_FSGSBASE 0x00000001 /* RD/WR FS/GS BASE instructions */
/* 0x00000002 reserved */
/* 0x00000004 reserved */
#define CPUID_EBX_BMI1 0x00000008 /* BMI1 instructions */
#define CPUID_EBX_HLE 0x00000010 /* HLE instructions */
#define CPUID_EBX_AVX2 0x00000020 /* AVX2 instructions */
#define CPUID_EBX_SMEP 0x00000040 /* Supervisor Mode Exec-Protection */
/* 0x00000080 reserved */
#define CPUID_EBX_BMI2 0x00000100 /* BMI2 instructions */
#define CPUID_EBX_ERMS 0x00000200 /* Enhanced REP MOVSB/STOSB */
#define CPUID_EBX_INVPCID 0x00000400 /* Invalidate Processor Context ID */
#define CPUID_EBX_RTM 0x00000800 /* Restricted Transactional Memory */
/* 0xFFFFF000 reserved */

/* AMD-specific masks - most are the same as in eax == 1 subfunction */
/* 0x00000001 duplicates CPUID_EDX_x87 */
/* 0x00000002 duplicates CPUID_EDX_VME */
/* 0x00000004 duplicates CPUID_EDX_DEB */
/* 0x00000008 duplicates CPUID_EDX_PSE */
/* 0x00000010 duplicates CPUID_EDX_TSC */
/* 0x00000020 duplicates CPUID_EDX_MSR */
/* 0x00000040 duplicates CPUID_EDX_PAE */
/* 0x00000080 duplicates CPUID_EDX_MCE */
/* 0x00000100 duplicates CPUID_EDX_CX8 */
/* 0x00000200 duplicates CPUID_EDX_APIC */
/* 0x00000400 reserved */
#define CPUID_EDX_SYSCALL 0x00000800 /* SYSCALL/SYSRET instructions */
/* 0x00001000 duplicates CPUID_EDX_MTRR */
/* 0x00002000 duplicates CPUID_EDX_PGE */
/* 0x00004000 duplicates CPUID_EDX_MCA */
/* 0x00008000 duplicates CPUID_EDX_CMOV */
/* 0x00010000 duplicates CPUID_EDX_PAT */
/* 0x00020000 duplicates CPUID_EDX_PSE36 */
/* 0x00040000 reserved */
/* 0x00080000 reserved */
#define CPUID_EDX_NX 0x00100000 /* Execute Disable Bit available */
/* 0x00200000 reserved */
#define CPUID_EDX_EMMX 0x00400000 /* Extensions to MMX instructions */
/* 0x00800000 duplicates CPUID_EDX_MMX */
/* 0x01000000 duplicates CPUID_EDX_FXSR */
#define CPUID_EDX_FFFXSR 0x02000000 /* FXSAVE/FXRSTOR optimizations */
#define CPUID_EDX_1GBPAGES 0x04000000 /* 1-GB large page support */
#define CPUID_EDX_TSCP 0x08000000 /* RDTSCP instruction */
/* 0x10000000 reserved */
#define CPUID_EDX_LM 0x20000000 /* Longmode (AKA x86-64 mode) */
#define CPUID_EDX_E3DN 0x40000000 /* Extensions to 3DNow! instructions */
#define CPUID_EDX_3DN 0x80000000 /* 3DNow! instructions */

#define CPUID_ECX_LAHF 0x00000001 /* LAHF/SAHF in x86-64 mode */
#define CPUID_ECX_CMPLEGACY 0x00000002 /* Core Multi-Processing legacy mode */
#define CPUID_ECX_SVM 0x00000004 /* Secure Virtual Machine */
#define CPUID_ECX_EXTAPIC 0x00000008 /* Extended APIC space */
#define CPUID_ECX_ALTMOVCR8 0x00000010 /* LOCK MOV CR0 means MOV CR8 */
#define CPUID_ECX_ABM 0x00000020 /* LZCNT instruction */
#define CPUID_ECX_SSE4A 0x00000040 /* SSE4A instructions */
#define CPUID_ECX_MISALGNSSE 0x00000080 /* Misalign SSE mode */
#define CPUID_ECX_PRE 0x00000100 /* 3DNow! prefetch */
#define CPUID_ECX_OSVW 0x00000200 /* OS visible workaround */
#define CPUID_ECX_IBS 0x00000400 /* Instruction Based Sampling */
#define CPUID_ECX_XOP 0x00000800 /* XOP instructions */
#define CPUID_ECX_SKINIT 0x00001000 /* SKINIT/STGI are always supported */
#define CPUID_ECX_WDT 0x00002000 /* Watchdog timer support */
/* 0x00004000 reserved */
#define CPUID_ECX_LWP 0x00008000 /* Lightweight profiling support */
#define CPUID_ECX_FMA4 0x00010000 /* FMA4 instructions */
/* 0x00020000 reserved */
/* 0x00040000 reserved */
#define CPUID_ECX_NODEID 0x00080000 /* MSRC001_100C[NodeId, NodesPerCPU] */
/* 0x00100000 reserved */
#define CPUID_ECX_TBM 0x00200000 /* Trailing bit manipulations */
#define CPUID_ECX_TOPOLOGY 0x00400000 /* Topology extensions support */
/* 0xFF800000 reserved */


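/*
 * Index of each cached CPUID output register within the _featurev array of
 * NaClCPUData.  CacheCPUFeatureVector() below stores the four registers
 * returned by each CPUID query consecutively, in eax, ebx, ecx, edx order.
 */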
typedef enum {
  CFReg_EAX_I=0, /* eax == 1 */
  CFReg_EBX_I, /* eax == 1 */
  CFReg_ECX_I, /* eax == 1 */
  CFReg_EDX_I, /* eax == 1 */
  CFReg_EAX_7, /* eax == 7; ecx == 0 */
  CFReg_EBX_7, /* eax == 7; ecx == 0 */
  CFReg_ECX_7, /* eax == 7; ecx == 0 */
  CFReg_EDX_7, /* eax == 7; ecx == 0 */
  CFReg_EAX_A, /* eax == 0x80000001 */
  CFReg_EBX_A, /* eax == 0x80000001 */
  CFReg_ECX_A, /* eax == 0x80000001 */
  CFReg_EDX_A /* eax == 0x80000001 */
} CPUFeatureReg;

typedef struct cpufeature {
  CPUFeatureReg reg;
  uint32_t mask;
  const char *name;
} CPUFeature;

static const CPUFeature CPUFeatureDescriptions[(int)NaClCPUFeatureX86_Max] = {
  {0, 0, "CPUID supported"},
  {0, 0, "CPU supported"},
  {CFReg_EDX_A, CPUID_EDX_3DN, "3DNow"},
  {CFReg_ECX_I, CPUID_ECX_AES, "AES"},
  {CFReg_ECX_I, CPUID_ECX_AVX, "AVX"},
  {CFReg_EBX_7, CPUID_EBX_BMI1, "BMI1"},
  {CFReg_EDX_I, CPUID_EDX_CLFLUSH, "CLFLUSH"},
  {CFReg_ECX_I, CPUID_ECX_CLMUL, "CLMUL"},
  {CFReg_EDX_I, CPUID_EDX_CMOV, "CMOV"},
  {CFReg_ECX_I, CPUID_ECX_CX16, "CMPXCHG16B"},
  {CFReg_EDX_I, CPUID_EDX_CX8, "CMPXCHG8B"},
  {CFReg_EDX_A, CPUID_EDX_E3DN, "E3DNow"},
  {CFReg_EDX_A, CPUID_EDX_EMMX, "EMMX"},
  {CFReg_ECX_I, CPUID_ECX_F16C, "F16C"},
  {CFReg_ECX_I, CPUID_ECX_FMA, "FMA"},
  {CFReg_ECX_A, CPUID_ECX_FMA4, "FMA4"},
  {CFReg_EDX_I, CPUID_EDX_FXSR, "FXSAVE/FXRSTOR"},
  {CFReg_ECX_A, CPUID_ECX_LAHF, "LAHF"},
  {CFReg_EDX_A, CPUID_EDX_LM, "LongMode"},
  {CFReg_ECX_A, CPUID_ECX_LWP, "LWP"},
  {CFReg_ECX_A, CPUID_ECX_ABM, "LZCNT"},
  {CFReg_EDX_I, CPUID_EDX_MMX, "MMX"},
  {CFReg_ECX_I, CPUID_ECX_MON, "MONITOR/MWAIT"},
  {CFReg_ECX_I, CPUID_ECX_MOVBE, "MOVBE"},
  {CFReg_ECX_I, CPUID_ECX_OSXSAVE, "OSXSAVE"},
  {CFReg_ECX_I, CPUID_ECX_POPCNT, "POPCNT"},
  {CFReg_ECX_A, CPUID_ECX_PRE, "3DNowPrefetch"},
  {CFReg_EDX_I, CPUID_EDX_SSE, "SSE"},
  {CFReg_EDX_I, CPUID_EDX_SSE2, "SSE2"},
  {CFReg_ECX_I, CPUID_ECX_SSE3, "SSE3"},
  {CFReg_ECX_I, CPUID_ECX_SSE41, "SSE41"},
  {CFReg_ECX_I, CPUID_ECX_SSE42, "SSE42"},
  {CFReg_ECX_A, CPUID_ECX_SSE4A, "SSE4A"},
  {CFReg_ECX_I, CPUID_ECX_SSSE3, "SSSE3"},
  {CFReg_ECX_A, CPUID_ECX_TBM, "TBM"},
  {CFReg_EDX_I, CPUID_EDX_TSC, "RDTSC"},
  {CFReg_EDX_I, CPUID_EDX_x87, "x87"},
  {CFReg_ECX_A, CPUID_ECX_XOP, "XOP"},
};

#define /* static const int */ kVendorIDLength 13
static const char Intel_CPUID0[kVendorIDLength] = "GenuineIntel";
static const char AMD_CPUID0[kVendorIDLength] = "AuthenticAMD";
#ifdef NOTYET
static const char UMC_CPUID0[kVendorIDLength] = "UMC UMC UMC ";
static const char Cyrix_CPUID0[kVendorIDLength] = "CyrixInstead";
static const char NexGen_CPUID0[kVendorIDLength] = "NexGenDriven";
static const char Cantaur_CPUID0[kVendorIDLength] = "CentaurHauls";
static const char Rise_CPUID0[kVendorIDLength] = "RiseRiseRise";
static const char SiS_CPUID0[kVendorIDLength] = "SiS SiS SiS ";
static const char TM_CPUID0[kVendorIDLength] = "GenuineTMx86";
static const char NSC_CPUID0[kVendorIDLength] = "Geode by NSC";
#endif

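/*
 * Returns non-zero if the CPUID instruction is available.  On 32-bit x86 this
 * is detected by attempting to toggle the ID flag (bit 21) of EFLAGS: the bit
 * can only be flipped on processors that implement CPUID.
 */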
static int asm_HasCPUID(void) {
  volatile int before, after, result;
#if NACL_BUILD_SUBARCH == 64
  /* Note: If we are running in x86-64, then cpuid must be defined,
   * since CPUID dates from DX2 486, and x86-64 was added after this.
   */
  return 1;
  /* TODO(bradchen): split into separate Windows, etc., files */
#elif defined(__GNUC__)
  __asm__ volatile("pushfl                 \n\t" /* save EFLAGS to eax */
                   "pop %%eax              \n\t"
                   "movl %%eax, %0         \n\t" /* remember EFLAGS in %0 */
                   "xor $0x00200000, %%eax \n\t" /* toggle bit 21 */
                   "push %%eax             \n\t" /* write eax to EFLAGS */
                   "popfl                  \n\t"
                   "pushfl                 \n\t" /* save EFLAGS to %1 */
                   "pop %1                 \n\t"
                   /*
                    * We use "r" constraints here, forcing registers,
                    * because a memory reference using the stack
                    * pointer wouldn't be safe since we're moving the
                    * stack pointer around in between the
                    * instructions.  We need to inform the compiler
                    * that we're clobbering %eax as a scratch register.
                    */
                   : "=r" (before), "=r" (after) : : "eax");
#elif NACL_WINDOWS
  __asm {
    pushfd
    pop eax
    mov before, eax
    xor eax, 0x00200000
    push eax
    popfd
    pushfd
    pop after
  }
#else
# error Unsupported platform
#endif
  result = (before ^ after) & 0x0200000;
  return result;
}

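/*
 * Executes CPUID with the given value in eax.  Note that %ebx is saved and
 * restored by hand and its CPUID result is returned through %esi (the "=S"
 * constraint); this pattern is typically used because the compiler may
 * reserve %ebx for its own purposes (e.g. as the PIC base register on 32-bit
 * builds), so it cannot simply be listed as an output or clobber.
 */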
static void asm_CPUID(uint32_t op, volatile uint32_t reg[4]) {
#if defined(__GNUC__)
#if NACL_BUILD_SUBARCH == 64
  __asm__ volatile("push %%rbx      \n\t" /* save %ebx */
#else
  __asm__ volatile("pushl %%ebx     \n\t"
#endif
                   "cpuid           \n\t"
                   "movl %%ebx, %1  \n\t"
                   /* save what cpuid just put in %ebx */
#if NACL_BUILD_SUBARCH == 64
                   "pop %%rbx       \n\t"
#else
                   "popl %%ebx      \n\t" /* restore the old %ebx */
#endif
                   : "=a"(reg[0]), "=S"(reg[1]), "=c"(reg[2]), "=d"(reg[3])
                   : "a"(op)
                   : "cc");
#elif NACL_WINDOWS
  __cpuid((uint32_t*)reg, op);
#else
# error Unsupported platform
#endif
}

/*
 * Historically CPUID only used eax to select the "CPUID function".  Function
 * 07h broke this tradition: it requires a "leaf" to be specified in the ECX
 * register as well.
 *
 * We could specify the leaf in all cases (older "CPUID functions" simply
 * ignore it), but there is a catch: MSVC 2005 and below do not provide the
 * __cpuidex intrinsic required to call CPUID with leaf support!
 *
 * Thus we have two functions: asm_CPUID (for "CPUID functions" without
 * leaves) and asm_CPUIDx (for "CPUID functions" with leaves).  If the code is
 * compiled with MSVC 2005 or MSVC 2008, then features detected via function
 * 07h will not be available.
 *
 * Note: MSVC 2008 is particularly problematic: MSVC 2008 does not support
 * __cpuidex while MSVC 2008 SP1 does.  Unfortunately there is no easy way to
 * distinguish MSVC 2008 SP1 from MSVC 2008 using ifdefs, so we disable
 * __cpuidex for MSVC 2008 unconditionally.
 */
static void asm_CPUIDx(uint32_t op, volatile uint32_t reg[4], uint32_t ecx) {
#if defined(__GNUC__)
#if NACL_BUILD_SUBARCH == 64
  __asm__ volatile("push %%rbx      \n\t" /* save %ebx */
#else
  __asm__ volatile("pushl %%ebx     \n\t"
#endif
                   "cpuid           \n\t"
                   "movl %%ebx, %1  \n\t"
                   /* save what cpuid just put in %ebx */
#if NACL_BUILD_SUBARCH == 64
                   "pop %%rbx       \n\t"
#else
                   "popl %%ebx      \n\t" /* restore the old %ebx */
#endif
                   : "=a"(reg[0]), "=S"(reg[1]), "=c"(reg[2]), "=d"(reg[3])
                   : "a"(op), "c"(ecx)
                   : "cc");
#elif NACL_WINDOWS
#ifdef _MSC_VER
#if _MSC_VER < 1600
  reg[0] = 0;
  reg[1] = 0;
  reg[2] = 0;
  reg[3] = 0;
#else
  __cpuidex((uint32_t*)reg, op, ecx);
#endif
#else /* NACL_WINDOWS, but _MSC_VER is not defined */
  /* This is Windows but not MSVC: who knows if __cpuidex is available? */
# error Unsupported compiler
#endif
#else
# error Unsupported platform
#endif
}

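/*
 * CPUID function 0 returns the 12-byte vendor identification string in the
 * order ebx, edx, ecx, which is why the words are reassembled in that order
 * below.
 */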
static void CacheCPUVersionID(NaClCPUData* data) {
  uint32_t reg[4] = {0, 0, 0, 0 };
  asm_CPUID(0, reg);
  data->_vidwords[0] = reg[1];
  data->_vidwords[1] = reg[3];
  data->_vidwords[2] = reg[2];
  data->_vidwords[3] = 0;
}

/* Defines the (cached) cpu version id */
#define CPUVersionID(data) ((char*) (data)->_vidwords)


/* Cache feature vector as array of uint32_t [ecx, edx] */
static void CacheCPUFeatureVector(NaClCPUData* data) {
  int i;
  for (i = 0; i < kMaxCPUFeatureReg; ++i) {
    data->_featurev[i] = 0;
  }
  asm_CPUID(1, data->_featurev);
  /* This is for model-specific instructions from AMD and Intel after
     AMD's Bulldozer (which introduced the BMI1 set) and Intel's Haswell
     (which introduced AVX2, BMI2, HLE, RTM, and RD/WR FS/GS BASE
     instructions). */
  asm_CPUIDx(7, &data->_featurev[CFReg_EAX_7], 0);
  /* this is for AMD CPUs */
  asm_CPUID(0x80000001, &data->_featurev[CFReg_EAX_A]);
#if 0
  /* print feature vector */
  printf("CPUID: %08x %08x %08x %08x\n",
         data->_featurev[0],
         data->_featurev[1],
         data->_featurev[2],
         data->_featurev[3]);
  printf("CPUID: %08x %08x %08x %08x\n",
         data->_featurev[4],
         data->_featurev[5],
         data->_featurev[6],
         data->_featurev[7]);
#endif
}

/* CacheGetCPUIDString creates an ASCII string that identifies this CPU's */
/* vendor ID, family, model, and stepping, as per the CPUID instruction */
static void CacheGetCPUIDString(NaClCPUData* data) {
  char *cpuversionid = CPUVersionID(data);
  uint32_t *fv = data->_featurev;
  char* wlid = data->_wlid;
  /* Subtract 1 in this assert to avoid counting two null characters. */
  assert(9 + kVendorIDLength - 1 == kCPUIDStringLength);
  memcpy(wlid, cpuversionid, kVendorIDLength-1);
  SNPRINTF(&(wlid[kVendorIDLength-1]), 9, "%08x", (int)fv[CFReg_EAX_I]);
}

char *GetCPUIDString(NaClCPUData* data) {
  return data->_wlid;
}

/* Returns true if the given feature is defined by the CPUID. */
static int CheckCPUFeature(NaClCPUData* data, NaClCPUFeatureX86ID fid) {
  const CPUFeature *f = &CPUFeatureDescriptions[fid];
  uint32_t *fv = data->_featurev;
  if ((fid == NaClCPUFeatureX86_CPUIDSupported) ||
      (fid == NaClCPUFeatureX86_CPUSupported)) {
    /* CPUIDSupported and CPUSupported aren't actually in CPUID,
       CPUFeatureDescriptions therefore doesn't contain actual reg/mask for
       them. */
    return 1;
  }
  if (fv[f->reg] & f->mask) {
    return 1;
  } else {
    return 0;
  }
}

uint64_t NaClXGETBV(uint32_t);

/* Cache XCR vector */
static void CacheCPUXCRVector(NaClCPUData* data) {
  if (CheckCPUFeature(data, NaClCPUFeatureX86_OSXSAVE)) {
    int i;
    for (i = 0; i < kMaxCPUXCRReg; ++i) {
      data->_xcrv[i] = NaClXGETBV(i);
    }
  } else {
    int i;
    for (i = 0; i < kMaxCPUXCRReg; ++i) {
      data->_xcrv[i] = 0;
    }
  }
}

/* Check that we have a supported 386 architecture. NOTE:
 * As a side effect, the given cpu features are cleared before
 * setting the appropriate fields.
 */
static void CheckNaClArchFeatures(NaClCPUData *data,
                                  NaClCPUFeaturesX86 *features) {
  const size_t kCPUID0Length = 12;
  char *cpuversionid;
  if (data->_has_CPUID) {
    NaClSetCPUFeatureX86(features, NaClCPUFeatureX86_CPUIDSupported, 1);
  }
  cpuversionid = CPUVersionID(data);
  if (strncmp(cpuversionid, Intel_CPUID0, kCPUID0Length) == 0) {
    NaClSetCPUFeatureX86(features, NaClCPUFeatureX86_CPUSupported, 1);
  } else if (strncmp(cpuversionid, AMD_CPUID0, kCPUID0Length) == 0) {
    NaClSetCPUFeatureX86(features, NaClCPUFeatureX86_CPUSupported, 1);
  }
}

void NaClSetAllCPUFeaturesX86(NaClCPUFeatures *f) {
  /* TODO(jfb) Use a safe cast in this interface. */
  NaClCPUFeaturesX86 *features = (NaClCPUFeaturesX86 *) f;
  /* Be a little more pedantic than using memset because we don't know exactly
   * how the structure is laid out. If we use memset, fields may be initialized
   * to 0xff instead of 1 ... this isn't the end of the world but it can
   * create a skew if the structure is hashed, etc.
   */
  int id;
  /* Ensure any padding is zeroed. */
  NaClClearCPUFeaturesX86(features);
  for (id = 0; id < NaClCPUFeatureX86_Max; ++id) {
    NaClSetCPUFeatureX86(features, id, 1);
  }
}

/* WARNING: This routine and subroutines it uses are not threadsafe.
 * However, if races occur, they are short lived, and at worst, will
 * result in defining fewer features than are actually supported by
 * the hardware. Hence, if a race occurs, the validator may reject
 * some features that should not be rejected.
 */
static void GetCPUFeatures(NaClCPUData* data, NaClCPUFeaturesX86 *cpuf) {
  int id;
  NaClClearCPUFeaturesX86(cpuf);
  CheckNaClArchFeatures(data, cpuf);
  if (!NaClGetCPUFeatureX86(cpuf, NaClCPUFeatureX86_CPUIDSupported)) {
    return;
  }

  for (id = 0; id < NaClCPUFeatureX86_Max; ++id) {
    NaClSetCPUFeatureX86(cpuf, id, CheckCPUFeature(data, id));
  }

  /*
   * If the operating system doesn't maintain the YMM state,
   * pretend we don't have the instructions available at all.
   */
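  /*
   * XCR0 bit 1 covers SSE (XMM) state and bit 2 covers AVX (YMM) state, so
   * the mask 6 below requires the OS to save and restore both via
   * XSAVE/XRSTOR.
   */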
  if (!(NaClGetCPUFeatureX86(cpuf, NaClCPUFeatureX86_OSXSAVE)
        && (data->_xcrv[0] & 6) == 6)) {
    NaClSetCPUFeatureX86(cpuf, NaClCPUFeatureX86_AVX, 0);
    NaClSetCPUFeatureX86(cpuf, NaClCPUFeatureX86_F16C, 0);
    NaClSetCPUFeatureX86(cpuf, NaClCPUFeatureX86_FMA, 0);
    NaClSetCPUFeatureX86(cpuf, NaClCPUFeatureX86_FMA4, 0);
  }
}

void NaClCPUDataGet(NaClCPUData* data) {
  data->_has_CPUID = asm_HasCPUID();
  CacheCPUVersionID(data);
  CacheCPUFeatureVector(data);
  CacheCPUXCRVector(data);
  CacheGetCPUIDString(data);
}

void NaClGetCurrentCPUFeaturesX86(NaClCPUFeatures *f) {
  /* TODO(jfb) Use a safe cast in this interface. */
  NaClCPUFeaturesX86 *features = (NaClCPUFeaturesX86 *) f;
  NaClCPUData cpu_data;
  NaClCPUDataGet(&cpu_data);
  GetCPUFeatures(&cpu_data, features);
}
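
/*
 * Minimal usage sketch (illustrative only; assumes the NaClCPUFeatures and
 * NaClCPUFeaturesX86 declarations from nacl_cpuid.h are in scope):
 *
 *   NaClCPUFeaturesX86 features;
 *   NaClGetCurrentCPUFeaturesX86((NaClCPUFeatures *) &features);
 *   if (NaClGetCPUFeatureX86(&features, NaClCPUFeatureX86_SSE41)) {
 *     (the caller may rely on SSE4.1 instructions here)
 *   }
 */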

/* This array defines the CPU feature model for fixed-feature CPU
 * mode. We currently require the same set of features for both
 * 32- and 64-bit x86 CPUs, intended to be supported by most/all
 * post-Pentium III CPUs. This set may be something we need to
 * revisit in the future.
 */
const int kFixedFeatureCPUModel[NaClCPUFeatureX86_Max] = {
  1, /* NaClCPUFeatureX86_CPUIDSupported */
  1, /* NaClCPUFeatureX86_CPUSupported */
  0, /* NaClCPUFeatureX86_3DNOW */ /* AMD-specific */
  0, /* NaClCPUFeatureX86_AES */
  0, /* NaClCPUFeatureX86_AVX */
  0, /* NaClCPUFeatureX86_BMI1 */
  1, /* NaClCPUFeatureX86_CLFLUSH */
  0, /* NaClCPUFeatureX86_CLMUL */
  1, /* NaClCPUFeatureX86_CMOV */
  1, /* NaClCPUFeatureX86_CX16 */
  1, /* NaClCPUFeatureX86_CX8 */
  0, /* NaClCPUFeatureX86_E3DNOW */ /* AMD-specific */
  0, /* NaClCPUFeatureX86_EMMX */ /* AMD-specific */
  0, /* NaClCPUFeatureX86_F16C */
  0, /* NaClCPUFeatureX86_FMA */
  0, /* NaClCPUFeatureX86_FMA4 */ /* AMD-specific */
  1, /* NaClCPUFeatureX86_FXSR */
  0, /* NaClCPUFeatureX86_LAHF */
  0, /* NaClCPUFeatureX86_LM */
  0, /* NaClCPUFeatureX86_LWP */ /* AMD-specific */
  0, /* NaClCPUFeatureX86_LZCNT */ /* AMD-specific */
  0, /* NaClCPUFeatureX86_MMX */
  0, /* NaClCPUFeatureX86_MON */
  0, /* NaClCPUFeatureX86_MOVBE */
  0, /* NaClCPUFeatureX86_OSXSAVE */
  0, /* NaClCPUFeatureX86_POPCNT */
  0, /* NaClCPUFeatureX86_PRE */ /* AMD-specific */
  1, /* NaClCPUFeatureX86_SSE */
  1, /* NaClCPUFeatureX86_SSE2 */
  1, /* NaClCPUFeatureX86_SSE3 */
  0, /* NaClCPUFeatureX86_SSE41 */
  0, /* NaClCPUFeatureX86_SSE42 */
  0, /* NaClCPUFeatureX86_SSE4A */ /* AMD-specific */
  0, /* NaClCPUFeatureX86_SSSE3 */
  0, /* NaClCPUFeatureX86_TBM */ /* AMD-specific */
  1, /* NaClCPUFeatureX86_TSC */
  0, /* NaClCPUFeatureX86_x87 */
  0  /* NaClCPUFeatureX86_XOP */ /* AMD-specific */
};

int NaClFixCPUFeaturesX86(NaClCPUFeatures *f) {
  /* TODO(jfb) Use a safe cast in this interface. */
  NaClCPUFeaturesX86 *features = (NaClCPUFeaturesX86 *) f;
  NaClCPUFeatureX86ID fid;
  int rvalue = 1;

  for (fid = 0; fid < NaClCPUFeatureX86_Max; fid++) {
    if (kFixedFeatureCPUModel[fid]) {
      if (!NaClGetCPUFeatureX86(features, fid)) {
        /* This CPU is missing a required feature. */
        NaClLog(LOG_ERROR,
                "This CPU is missing a feature required by fixed-mode: %s\n",
                NaClGetCPUFeatureX86Name(fid));
        rvalue = 0; /* set return value to indicate failure */
      }
    } else {
      /* Feature is not in the fixed model.
       * Ensure features does not have it either.
       */
      NaClSetCPUFeatureX86(features, fid, 0);
    }
  }
  return rvalue;
}

const char* NaClGetCPUFeatureX86Name(NaClCPUFeatureX86ID id) {
  return CPUFeatureDescriptions[id].name;
}

void NaClSetCPUFeatureX86(NaClCPUFeaturesX86 *f, NaClCPUFeatureX86ID id,
                          int state) {
  f->data[id] = (char) state;
}

void NaClClearCPUFeaturesX86(NaClCPUFeaturesX86 *features) {
  memset(features, 0, sizeof(*features));
}

void NaClCopyCPUFeaturesX86(NaClCPUFeaturesX86 *target,
                            const NaClCPUFeaturesX86 *source) {
  memcpy(target, source, sizeof(*target));
}

int NaClArchSupportedX86(const NaClCPUFeaturesX86 *f) {
  return (NaClGetCPUFeatureX86(f, NaClCPUFeatureX86_CPUIDSupported) &&
          NaClGetCPUFeatureX86(f, NaClCPUFeatureX86_CPUSupported));
}