OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 26 matching lines...) Expand all Loading... |
37 // the platform dependent classes could have been implemented using abstract | 37 // the platform dependent classes could have been implemented using abstract |
38 // superclasses with virtual methods and having specializations for each | 38 // superclasses with virtual methods and having specializations for each |
39 // platform. This design was rejected because it was more complicated and | 39 // platform. This design was rejected because it was more complicated and |
40 // slower. It would require factory methods for selecting the right | 40 // slower. It would require factory methods for selecting the right |
41 // implementation and the overhead of virtual methods for performance | 41 // implementation and the overhead of virtual methods for performance |
42 // sensitive operations like mutex locking/unlocking. | 42 // sensitive operations like mutex locking/unlocking. |
43 | 43 |
44 #ifndef V8_PLATFORM_H_ | 44 #ifndef V8_PLATFORM_H_ |
45 #define V8_PLATFORM_H_ | 45 #define V8_PLATFORM_H_ |
46 | 46 |
47 #include <stdarg.h> | |
48 | |
49 #ifdef __sun | 47 #ifdef __sun |
50 # ifndef signbit | 48 # ifndef signbit |
51 namespace std { | 49 namespace std { |
52 int signbit(double x); | 50 int signbit(double x); |
53 } | 51 } |
54 # endif | 52 # endif |
55 #endif | 53 #endif |
56 | 54 |
| 55 // GCC specific stuff |
| 56 #ifdef __GNUC__ |
| 57 |
| 58 // Needed for va_list on at least MinGW and Android. |
| 59 #include <stdarg.h> |
| 60 |
| 61 #define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) |
| 62 |
| 63 #endif // __GNUC__ |
| 64 |
57 | 65 |
58 // Windows specific stuff. | 66 // Windows specific stuff. |
59 #ifdef WIN32 | 67 #ifdef WIN32 |
60 | 68 |
61 // Microsoft Visual C++ specific stuff. | 69 // Microsoft Visual C++ specific stuff. |
62 #ifdef _MSC_VER | 70 #ifdef _MSC_VER |
63 | 71 |
64 #include "win32-headers.h" | 72 #include "win32-headers.h" |
65 #include "win32-math.h" | 73 #include "win32-math.h" |
66 | 74 |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
117 void lazily_initialize_fast_exp(); | 125 void lazily_initialize_fast_exp(); |
118 | 126 |
119 // Forward declarations. | 127 // Forward declarations. |
120 class Socket; | 128 class Socket; |
121 | 129 |
122 // ---------------------------------------------------------------------------- | 130 // ---------------------------------------------------------------------------- |
123 // Fast TLS support | 131 // Fast TLS support |
124 | 132 |
125 #ifndef V8_NO_FAST_TLS | 133 #ifndef V8_NO_FAST_TLS |
126 | 134 |
127 #if V8_CC_MSVC && V8_HOST_ARCH_IA32 | 135 #if defined(_MSC_VER) && V8_HOST_ARCH_IA32 |
128 | 136 |
129 #define V8_FAST_TLS_SUPPORTED 1 | 137 #define V8_FAST_TLS_SUPPORTED 1 |
130 | 138 |
131 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); | 139 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); |
132 | 140 |
133 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { | 141 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { |
134 const intptr_t kTibInlineTlsOffset = 0xE10; | 142 const intptr_t kTibInlineTlsOffset = 0xE10; |
135 const intptr_t kTibExtraTlsOffset = 0xF94; | 143 const intptr_t kTibExtraTlsOffset = 0xF94; |
136 const intptr_t kMaxInlineSlots = 64; | 144 const intptr_t kMaxInlineSlots = 64; |
137 const intptr_t kMaxSlots = kMaxInlineSlots + 1024; | 145 const intptr_t kMaxSlots = kMaxInlineSlots + 1024; |
138 ASSERT(0 <= index && index < kMaxSlots); | 146 ASSERT(0 <= index && index < kMaxSlots); |
139 if (index < kMaxInlineSlots) { | 147 if (index < kMaxInlineSlots) { |
140 return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset + | 148 return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset + |
141 kPointerSize * index)); | 149 kPointerSize * index)); |
142 } | 150 } |
143 intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset)); | 151 intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset)); |
144 ASSERT(extra != 0); | 152 ASSERT(extra != 0); |
145 return *reinterpret_cast<intptr_t*>(extra + | 153 return *reinterpret_cast<intptr_t*>(extra + |
146 kPointerSize * (index - kMaxInlineSlots)); | 154 kPointerSize * (index - kMaxInlineSlots)); |
147 } | 155 } |
148 | 156 |
149 #elif V8_OS_DARWIN && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) | 157 #elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) |
150 | 158 |
151 #define V8_FAST_TLS_SUPPORTED 1 | 159 #define V8_FAST_TLS_SUPPORTED 1 |
152 | 160 |
153 extern intptr_t kMacTlsBaseOffset; | 161 extern intptr_t kMacTlsBaseOffset; |
154 | 162 |
155 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); | 163 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); |
156 | 164 |
157 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { | 165 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { |
158 intptr_t result; | 166 intptr_t result; |
159 #if V8_HOST_ARCH_IA32 | 167 #if V8_HOST_ARCH_IA32 |
(...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
370 // EABI is used. | 378 // EABI is used. |
371 static bool ArmUsingHardFloat(); | 379 static bool ArmUsingHardFloat(); |
372 | 380 |
373 // Support runtime detection of FPU on MIPS CPUs. | 381 // Support runtime detection of FPU on MIPS CPUs. |
374 static bool MipsCpuHasFeature(CpuFeature feature); | 382 static bool MipsCpuHasFeature(CpuFeature feature); |
375 | 383 |
376 // Returns the activation frame alignment constraint or zero if | 384 // Returns the activation frame alignment constraint or zero if |
377 // the platform doesn't care. Guaranteed to be a power of two. | 385 // the platform doesn't care. Guaranteed to be a power of two. |
378 static int ActivationFrameAlignment(); | 386 static int ActivationFrameAlignment(); |
379 | 387 |
380 #if V8_TARGET_ARCH_IA32 | 388 #if defined(V8_TARGET_ARCH_IA32) |
381 // Limit below which the extra overhead of the MemCopy function is likely | 389 // Limit below which the extra overhead of the MemCopy function is likely |
382 // to outweigh the benefits of faster copying. | 390 // to outweigh the benefits of faster copying. |
383 static const int kMinComplexMemCopy = 64; | 391 static const int kMinComplexMemCopy = 64; |
384 | 392 |
385 // Copy memory area. No restrictions. | 393 // Copy memory area. No restrictions. |
386 static void MemMove(void* dest, const void* src, size_t size); | 394 static void MemMove(void* dest, const void* src, size_t size); |
387 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); | 395 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); |
388 | 396 |
389 // Keep the distinction of "move" vs. "copy" for the benefit of other | 397 // Keep the distinction of "move" vs. "copy" for the benefit of other |
390 // architectures. | 398 // architectures. |
391 static void MemCopy(void* dest, const void* src, size_t size) { | 399 static void MemCopy(void* dest, const void* src, size_t size) { |
392 MemMove(dest, src, size); | 400 MemMove(dest, src, size); |
393 } | 401 } |
394 #elif V8_HOST_ARCH_ARM | 402 #elif defined(V8_HOST_ARCH_ARM) |
395 typedef void (*MemCopyUint8Function)(uint8_t* dest, | 403 typedef void (*MemCopyUint8Function)(uint8_t* dest, |
396 const uint8_t* src, | 404 const uint8_t* src, |
397 size_t size); | 405 size_t size); |
398 static MemCopyUint8Function memcopy_uint8_function; | 406 static MemCopyUint8Function memcopy_uint8_function; |
399 static void MemCopyUint8Wrapper(uint8_t* dest, | 407 static void MemCopyUint8Wrapper(uint8_t* dest, |
400 const uint8_t* src, | 408 const uint8_t* src, |
401 size_t chars) { | 409 size_t chars) { |
402 memcpy(dest, src, chars); | 410 memcpy(dest, src, chars); |
403 } | 411 } |
404 // For values < 16, the assembler function is slower than the inlined C code. | 412 // For values < 16, the assembler function is slower than the inlined C code. |
(...skipping 404 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
809 static uint16_t HToN(uint16_t value); | 817 static uint16_t HToN(uint16_t value); |
810 static uint16_t NToH(uint16_t value); | 818 static uint16_t NToH(uint16_t value); |
811 static uint32_t HToN(uint32_t value); | 819 static uint32_t HToN(uint32_t value); |
812 static uint32_t NToH(uint32_t value); | 820 static uint32_t NToH(uint32_t value); |
813 }; | 821 }; |
814 | 822 |
815 | 823 |
816 } } // namespace v8::internal | 824 } } // namespace v8::internal |
817 | 825 |
818 #endif // V8_PLATFORM_H_ | 826 #endif // V8_PLATFORM_H_ |
OLD | NEW |