OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 39 matching lines...)
50 #elif defined(__x86_64__) | 50 #elif defined(__x86_64__) |
51 #define cpuid(a, b, c, d, inp) \ | 51 #define cpuid(a, b, c, d, inp) \ |
52 asm("mov %%rbx, %%rdi\n" \ | 52 asm("mov %%rbx, %%rdi\n" \ |
53 "cpuid\n" \ | 53 "cpuid\n" \ |
54 "xchg %%rdi, %%rbx\n" \ | 54 "xchg %%rdi, %%rbx\n" \ |
55 : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) | 55 : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) |
56 #endif | 56 #endif |
57 | 57 |
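The x86_64 variant of the macro parks rbx in rdi around the cpuid instruction because GCC may be using rbx itself (on ia32 it is the PIC register), while cpuid unconditionally writes ebx; the caller then reads the ebx result back through the "=D" (rdi) output. A minimal standalone sketch, assuming an x86_64 GCC build, of how the macro is exercised; it mirrors the vendor-string query done in the init function below and is not part of this CL:

// Standalone sketch: query CPUID leaf 0 and print the vendor string.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define cpuid(a, b, c, d, inp) \
  asm("mov %%rbx, %%rdi\n"     \
      "cpuid\n"                \
      "xchg %%rdi, %%rbx\n"    \
      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))

int main() {
  uint32_t eax, ebx, ecx, edx;
  cpuid(eax, ebx, ecx, edx, 0);   // leaf 0: max leaf in eax, vendor in ebx/edx/ecx
  char vendor[13];
  memcpy(vendor, &ebx, 4);
  memcpy(vendor + 4, &edx, 4);
  memcpy(vendor + 8, &ecx, 4);
  vendor[12] = '\0';
  printf("%s\n", vendor);         // e.g. "GenuineIntel" or "AuthenticAMD"
  return 0;
}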
58 #if defined(cpuid) // initialize the struct only on x86 | 58 #if defined(cpuid) // initialize the struct only on x86 |
59 | 59 |
| 60 namespace v8 { |
| 61 namespace internal { |
| 62 |
60 // Set the flags so that code will run correctly and conservatively, so even | 63 // Set the flags so that code will run correctly and conservatively, so even |
61 // if we haven't been initialized yet, we're probably single threaded, and our | 64 // if we haven't been initialized yet, we're probably single threaded, and our |
62 // default values should hopefully be pretty safe. | 65 // default values should hopefully be pretty safe. |
63 struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = { | 66 struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = { |
64 false, // bug can't exist before process spawns multiple threads | 67 false, // bug can't exist before process spawns multiple threads |
65 false, // no SSE2 | 68 false, // no SSE2 |
66 }; | 69 }; |
67 | 70 |
| 71 } } // namespace v8::internal |
| 72 |
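For context, the two fields defaulted above, in declaration order, are the ones assigned in the init function below. A sketch of the declaration expected in atomicops_internals_x86_gcc.h, inferred from the aggregate initializer and the assignments in this file:

namespace v8 {
namespace internal {

// Sketch only; field order inferred from the initializer above.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // True on AMD parts where a locked instruction
                             // needs an extra fence (see the family/model
                             // check below).
  bool has_sse2;             // True if mfence and other SSE2 instructions
                             // are available.
};

extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;

} }  // namespace v8::internal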
| 73 namespace { |
| 74 |
68 // Initialize the AtomicOps_Internalx86CPUFeatures struct. | 75 // Initialize the AtomicOps_Internalx86CPUFeatures struct. |
69 static void AtomicOps_Internalx86CPUFeaturesInit() { | 76 void AtomicOps_Internalx86CPUFeaturesInit() { |
| 77 using v8::internal::AtomicOps_Internalx86CPUFeatures; |
| 78 |
70 uint32_t eax; | 79 uint32_t eax; |
71 uint32_t ebx; | 80 uint32_t ebx; |
72 uint32_t ecx; | 81 uint32_t ecx; |
73 uint32_t edx; | 82 uint32_t edx; |
74 | 83 |
75 // Get vendor string (issue CPUID with eax = 0) | 84 // Get vendor string (issue CPUID with eax = 0) |
76 cpuid(eax, ebx, ecx, edx, 0); | 85 cpuid(eax, ebx, ecx, edx, 0); |
77 char vendor[13]; | 86 char vendor[13]; |
78 memcpy(vendor, &ebx, 4); | 87 memcpy(vendor, &ebx, 4); |
79 memcpy(vendor + 4, &edx, 4); | 88 memcpy(vendor + 4, &edx, 4); |
(...skipping 20 matching lines...)
100 32 <= model && model <= 63) { | 109 32 <= model && model <= 63) { |
101 AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true; | 110 AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true; |
102 } else { | 111 } else { |
103 AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false; | 112 AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false; |
104 } | 113 } |
105 | 114 |
106 // edx bit 26 is SSE2 which we use to tell us whether we can use mfence | 115 // edx bit 26 is SSE2 which we use to tell us whether we can use mfence |
107 AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1); | 116 AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1); |
108 } | 117 } |
109 | 118 |
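To make the two flags concrete, here is a hedged sketch, not code quoted from this CL, of how the barrier helpers in the matching header typically consult them: has_sse2 selects mfence over a locked dummy operation for a full barrier, and has_amd_lock_mb_bug requests an extra lfence after locked read-modify-write instructions on the affected Opteron parts.

// Illustrative only; assumes the declarations from
// atomicops_internals_x86_gcc.h are in scope.
inline void SketchMemoryBarrier() {
  if (v8::internal::AtomicOps_Internalx86CPUFeatures.has_sse2) {
    // SSE2 is present: mfence is a full barrier.
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    // Pre-SSE2 fallback: a locked no-op on a dummy location also acts as a
    // full barrier on x86.
    int dummy = 0;
    __asm__ __volatile__("lock; addl $0, %0" : "+m" (dummy) : : "memory");
  }
}

inline void SketchFenceAfterLockedOp() {
  if (v8::internal::AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    // On the affected AMD models a locked instruction is not a sufficient
    // barrier on its own, so an lfence is issued after it.
    __asm__ __volatile__("lfence" : : : "memory");
  }
}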
110 namespace { | |
111 | |
112 class AtomicOpsx86Initializer { | 119 class AtomicOpsx86Initializer { |
113 public: | 120 public: |
114 AtomicOpsx86Initializer() { | 121 AtomicOpsx86Initializer() { |
115 AtomicOps_Internalx86CPUFeaturesInit(); | 122 AtomicOps_Internalx86CPUFeaturesInit(); |
116 } | 123 } |
117 }; | 124 }; |
118 | 125 |
119 // A global to get us initialized on startup via static initialization :/ | 126 // A global to get us initialized on startup via static initialization :/ |
120 AtomicOpsx86Initializer g_initer; | 127 AtomicOpsx86Initializer g_initer; |
121 | 128 |
122 } // namespace | 129 } // namespace |
123 | 130 |
124 #endif // if x86 | 131 #endif // if x86 |
125 | 132 |
126 #endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ | 133 #endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_ |