OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 26 matching lines...) |
37 // the platform dependent classes could have been implemented using abstract | 37 // the platform dependent classes could have been implemented using abstract |
38 // superclasses with virtual methods and having specializations for each | 38 // superclasses with virtual methods and having specializations for each |
39 // platform. This design was rejected because it was more complicated and | 39 // platform. This design was rejected because it was more complicated and |
40 // slower. It would require factory methods for selecting the right | 40 // slower. It would require factory methods for selecting the right |
41 // implementation and the overhead of virtual methods for performance | 41 // implementation and the overhead of virtual methods for performance |
42 // sensitive methods like mutex locking/unlocking. | 42 // sensitive methods like mutex locking/unlocking. |
43 | 43 |
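A minimal sketch of the statically-dispatched pattern the comment above argues for (the class and method below are illustrative stand-ins, not the exact V8 declarations): one header declares the class, and the build system links exactly one platform-specific definition, so no factory methods or vtables are involved.

    // Sketch only: each platform file (e.g. platform-win32.cc,
    // platform-posix.cc) supplies the single definition the linker sees.
    class OS {
     public:
      static double TimeCurrentMillis();  // direct call, no virtual dispatch
    };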
44 #ifndef V8_PLATFORM_H_ | 44 #ifndef V8_PLATFORM_H_ |
45 #define V8_PLATFORM_H_ | 45 #define V8_PLATFORM_H_ |
46 | 46 |
| 47 #include <stdarg.h> |
| 48 |
47 #ifdef __sun | 49 #ifdef __sun |
48 # ifndef signbit | 50 # ifndef signbit |
49 namespace std { | 51 namespace std { |
50 int signbit(double x); | 52 int signbit(double x); |
51 } | 53 } |
52 # endif | 54 # endif |
53 #endif | 55 #endif |
54 | 56 |
55 // GCC specific stuff | |
56 #ifdef __GNUC__ | |
57 | |
58 // Needed for va_list on at least MinGW and Android. | |
59 #include <stdarg.h> | |
60 | |
61 #define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) | |
62 | |
63 #endif // __GNUC__ | |
64 | |
65 | 57 |
66 // Windows specific stuff. | 58 // Windows specific stuff. |
67 #ifdef WIN32 | 59 #ifdef WIN32 |
68 | 60 |
69 // Microsoft Visual C++ specific stuff. | 61 // Microsoft Visual C++ specific stuff. |
70 #ifdef _MSC_VER | 62 #ifdef _MSC_VER |
71 | 63 |
72 #include "win32-headers.h" | 64 #include "win32-headers.h" |
73 #include "win32-math.h" | 65 #include "win32-math.h" |
74 | 66 |
(...skipping 50 matching lines...) |
125 void lazily_initialize_fast_exp(); | 117 void lazily_initialize_fast_exp(); |
126 | 118 |
127 // Forward declarations. | 119 // Forward declarations. |
128 class Socket; | 120 class Socket; |
129 | 121 |
130 // ---------------------------------------------------------------------------- | 122 // ---------------------------------------------------------------------------- |
131 // Fast TLS support | 123 // Fast TLS support |
132 | 124 |
133 #ifndef V8_NO_FAST_TLS | 125 #ifndef V8_NO_FAST_TLS |
134 | 126 |
135 #if defined(_MSC_VER) && V8_HOST_ARCH_IA32 | 127 #if V8_CC_MSVC && V8_HOST_ARCH_IA32 |
136 | 128 |
137 #define V8_FAST_TLS_SUPPORTED 1 | 129 #define V8_FAST_TLS_SUPPORTED 1 |
138 | 130 |
139 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); | 131 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); |
140 | 132 |
141 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { | 133 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { |
142 const intptr_t kTibInlineTlsOffset = 0xE10; | 134 const intptr_t kTibInlineTlsOffset = 0xE10; |
143 const intptr_t kTibExtraTlsOffset = 0xF94; | 135 const intptr_t kTibExtraTlsOffset = 0xF94; |
144 const intptr_t kMaxInlineSlots = 64; | 136 const intptr_t kMaxInlineSlots = 64; |
145 const intptr_t kMaxSlots = kMaxInlineSlots + 1024; | 137 const intptr_t kMaxSlots = kMaxInlineSlots + 1024; |
146 ASSERT(0 <= index && index < kMaxSlots); | 138 ASSERT(0 <= index && index < kMaxSlots); |
147 if (index < kMaxInlineSlots) { | 139 if (index < kMaxInlineSlots) { |
148 return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset + | 140 return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset + |
149 kPointerSize * index)); | 141 kPointerSize * index)); |
150 } | 142 } |
151 intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset)); | 143 intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset)); |
152 ASSERT(extra != 0); | 144 ASSERT(extra != 0); |
153 return *reinterpret_cast<intptr_t*>(extra + | 145 return *reinterpret_cast<intptr_t*>(extra + |
154 kPointerSize * (index - kMaxInlineSlots)); | 146 kPointerSize * (index - kMaxInlineSlots)); |
155 } | 147 } |
156 | 148 |
157 #elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) | 149 #elif V8_OS_DARWIN && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) |
158 | 150 |
159 #define V8_FAST_TLS_SUPPORTED 1 | 151 #define V8_FAST_TLS_SUPPORTED 1 |
160 | 152 |
161 extern intptr_t kMacTlsBaseOffset; | 153 extern intptr_t kMacTlsBaseOffset; |
162 | 154 |
163 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); | 155 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); |
164 | 156 |
165 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { | 157 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { |
166 intptr_t result; | 158 intptr_t result; |
167 #if V8_HOST_ARCH_IA32 | 159 #if V8_HOST_ARCH_IA32 |
(...skipping 210 matching lines...) |
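A hedged usage sketch of the MSVC/IA32 fast path above (FastTlsExample is hypothetical; it assumes <windows.h> and the declaration of InternalGetExistingThreadLocal are visible): the function reads the Thread Information Block directly through the fs segment, so a slot written with the ordinary Win32 TLS API can be read back without a TlsGetValue call.

    #include <windows.h>
    #include <assert.h>

    void FastTlsExample() {
      DWORD slot = TlsAlloc();  // ordinary Win32 slot allocation
      TlsSetValue(slot, reinterpret_cast<void*>(0x1234));
      // The first 64 slots live inline in the TIB at fs:[0xE10]; larger
      // indices go through the expansion array pointer at fs:[0xF94].
      intptr_t value = InternalGetExistingThreadLocal(slot);
      assert(value == 0x1234);
      TlsFree(slot);
    }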
378 // EABI is used. | 370 // EABI is used. |
379 static bool ArmUsingHardFloat(); | 371 static bool ArmUsingHardFloat(); |
380 | 372 |
381 // Support runtime detection of FPU on MIPS CPUs. | 373 // Support runtime detection of FPU on MIPS CPUs. |
382 static bool MipsCpuHasFeature(CpuFeature feature); | 374 static bool MipsCpuHasFeature(CpuFeature feature); |
383 | 375 |
384 // Returns the activation frame alignment constraint or zero if | 376 // Returns the activation frame alignment constraint or zero if |
385 // the platform doesn't care; otherwise guaranteed to be a power of two. | 377 // the platform doesn't care; otherwise guaranteed to be a power of two. |
386 static int ActivationFrameAlignment(); | 378 static int ActivationFrameAlignment(); |
387 | 379 |
388 #if defined(V8_TARGET_ARCH_IA32) | 380 #if V8_TARGET_ARCH_IA32 |
389 // Limit below which the extra overhead of the MemCopy function is likely | 381 // Limit below which the extra overhead of the MemCopy function is likely |
390 // to outweigh the benefits of faster copying. | 382 // to outweigh the benefits of faster copying. |
391 static const int kMinComplexMemCopy = 64; | 383 static const int kMinComplexMemCopy = 64; |
392 | 384 |
393 // Copy memory area. No restrictions. | 385 // Copy memory area. No restrictions. |
394 static void MemMove(void* dest, const void* src, size_t size); | 386 static void MemMove(void* dest, const void* src, size_t size); |
395 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); | 387 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); |
396 | 388 |
397 // Keep the distinction of "move" vs. "copy" for the benefit of other | 389 // Keep the distinction of "move" vs. "copy" for the benefit of other |
398 // architectures. | 390 // architectures. |
399 static void MemCopy(void* dest, const void* src, size_t size) { | 391 static void MemCopy(void* dest, const void* src, size_t size) { |
400 MemMove(dest, src, size); | 392 MemMove(dest, src, size); |
401 } | 393 } |
402 #elif defined(V8_HOST_ARCH_ARM) | 394 #elif V8_HOST_ARCH_ARM |
403 typedef void (*MemCopyUint8Function)(uint8_t* dest, | 395 typedef void (*MemCopyUint8Function)(uint8_t* dest, |
404 const uint8_t* src, | 396 const uint8_t* src, |
405 size_t size); | 397 size_t size); |
406 static MemCopyUint8Function memcopy_uint8_function; | 398 static MemCopyUint8Function memcopy_uint8_function; |
407 static void MemCopyUint8Wrapper(uint8_t* dest, | 399 static void MemCopyUint8Wrapper(uint8_t* dest, |
408 const uint8_t* src, | 400 const uint8_t* src, |
409 size_t chars) { | 401 size_t chars) { |
410 memcpy(dest, src, chars); | 402 memcpy(dest, src, chars); |
411 } | 403 } |
412 // For values < 16, the assembler function is slower than the inlined C code. | 404 // For values < 16, the assembler function is slower than the inlined C code. |
(...skipping 404 matching lines...) |
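The ARM branch above uses a plain function pointer so the generic memcpy wrapper can be replaced at startup by a generated assembler stub, while very small copies (the "< 16" comment) stay with the inlined C code. A minimal sketch of that pattern, with hypothetical names and assuming nothing beyond the C library:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef void (*CopyFn)(uint8_t* dest, const uint8_t* src, size_t size);

    static void PortableCopy(uint8_t* dest, const uint8_t* src, size_t size) {
      memcpy(dest, src, size);  // fallback until a faster stub is installed
    }

    static CopyFn copy_fn = &PortableCopy;  // rebound once at initialization

    void CopyBytes(uint8_t* dest, const uint8_t* src, size_t size) {
      copy_fn(dest, src, size);  // one indirect call; no per-call probing
    }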
817 static uint16_t HToN(uint16_t value); | 809 static uint16_t HToN(uint16_t value); |
818 static uint16_t NToH(uint16_t value); | 810 static uint16_t NToH(uint16_t value); |
819 static uint32_t HToN(uint32_t value); | 811 static uint32_t HToN(uint32_t value); |
820 static uint32_t NToH(uint32_t value); | 812 static uint32_t NToH(uint32_t value); |
821 }; | 813 }; |
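The HToN/NToH overloads above mirror the BSD htons/ntohl family: identity on a big-endian host, a byte swap on a little-endian one, overloaded per integer width. A small sketch of the 16-bit case under that assumption (ByteSwap16 is a hypothetical helper, not part of this header):

    #include <stdint.h>

    static uint16_t ByteSwap16(uint16_t v) {
      return static_cast<uint16_t>((v << 8) | (v >> 8));
    }
    // On x86 (little-endian), HToN(uint16_t{0x1234}) would yield 0x3412,
    // matching htons(); NToH applies the same swap in the other direction.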
822 | 814 |
823 | 815 |
824 } } // namespace v8::internal | 816 } } // namespace v8::internal |
825 | 817 |
826 #endif // V8_PLATFORM_H_ | 818 #endif // V8_PLATFORM_H_ |