| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This module contains the platform-specific code. This makes the rest of the | 5 // This module contains the platform-specific code. This makes the rest of the |
| 6 // code less dependent on operating system, compilers and runtime libraries. | 6 // code less dependent on operating system, compilers and runtime libraries. |
| 7 // This module specifically does not deal with differences between | 7 // This module specifically does not deal with differences between |
| 8 // processor architectures. | 8 // processor architectures. |
| 9 // The platform classes have the same definition for all platforms. The | 9 // The platform classes have the same definition for all platforms. The |
| 10 // implementation for a particular platform is put in platform_<os>.cc. | 10 // implementation for a particular platform is put in platform_<os>.cc. |
| (...skipping 73 matching lines...) | (...skipping 73 matching lines...) |
| 84 #define V8_FAST_TLS_SUPPORTED 1 | 84 #define V8_FAST_TLS_SUPPORTED 1 |
| 85 | 85 |
| 86 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); | 86 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index)); |
| 87 | 87 |
| 88 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { | 88 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { |
| 89 const intptr_t kTibInlineTlsOffset = 0xE10; | 89 const intptr_t kTibInlineTlsOffset = 0xE10; |
| 90 const intptr_t kTibExtraTlsOffset = 0xF94; | 90 const intptr_t kTibExtraTlsOffset = 0xF94; |
| 91 const intptr_t kMaxInlineSlots = 64; | 91 const intptr_t kMaxInlineSlots = 64; |
| 92 const intptr_t kMaxSlots = kMaxInlineSlots + 1024; | 92 const intptr_t kMaxSlots = kMaxInlineSlots + 1024; |
| 93 const intptr_t kPointerSize = sizeof(void*); | 93 const intptr_t kPointerSize = sizeof(void*); |
| 94 ASSERT(0 <= index && index < kMaxSlots); | 94 DCHECK(0 <= index && index < kMaxSlots); |
| 95 if (index < kMaxInlineSlots) { | 95 if (index < kMaxInlineSlots) { |
| 96 return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset + | 96 return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset + |
| 97 kPointerSize * index)); | 97 kPointerSize * index)); |
| 98 } | 98 } |
| 99 intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset)); | 99 intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset)); |
| 100 ASSERT(extra != 0); | 100 DCHECK(extra != 0); |
| 101 return *reinterpret_cast<intptr_t*>(extra + | 101 return *reinterpret_cast<intptr_t*>(extra + |
| 102 kPointerSize * (index - kMaxInlineSlots)); | 102 kPointerSize * (index - kMaxInlineSlots)); |
| 103 } | 103 } |
| 104 | 104 |
| 105 #elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) | 105 #elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) |
| 106 | 106 |
| 107 #define V8_FAST_TLS_SUPPORTED 1 | 107 #define V8_FAST_TLS_SUPPORTED 1 |
| 108 | 108 |
| 109 extern intptr_t kMacTlsBaseOffset; | 109 extern intptr_t kMacTlsBaseOffset; |
| 110 | 110 |
| (...skipping 233 matching lines...) | (...skipping 233 matching lines...) |
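The Windows fast path above bypasses TlsGetValue and reads the slot straight out of the Thread Information Block (TEB): slots 0-63 sit in the inline TlsSlots array at offset 0xE10, and higher indices go through the TlsExpansionSlots pointer at 0xF94. Below is a minimal standalone sketch of that lookup, assuming MSVC targeting 32-bit x86 (the __readfsdword intrinsic); it only illustrates the layout the inline function relies on and is not V8 code.

```cpp
// Sketch only: demonstrates the slot layout InternalGetExistingThreadLocal
// relies on. Requires MSVC targeting 32-bit x86 (__readfsdword intrinsic).
#include <windows.h>
#include <intrin.h>
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  DWORD index = TlsAlloc();  // OS-allocated TLS slot
  assert(index != TLS_OUT_OF_INDEXES);
  TlsSetValue(index, reinterpret_cast<void*>(0x1234));

  // Portable path: the documented API.
  void* via_api = TlsGetValue(index);

  // Fast path: read the slot straight out of the TEB, as the inline
  // function above does. 0xE10 is the TlsSlots array (64 entries);
  // indices >= 64 live in the array pointed to by TlsExpansionSlots at 0xF94.
  const intptr_t kTibInlineTlsOffset = 0xE10;
  const intptr_t kTibExtraTlsOffset = 0xF94;
  intptr_t via_teb;
  if (index < 64) {
    via_teb = static_cast<intptr_t>(
        __readfsdword(kTibInlineTlsOffset + sizeof(void*) * index));
  } else {
    intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
    via_teb = *reinterpret_cast<intptr_t*>(
        extra + sizeof(void*) * (index - 64));
  }
  assert(reinterpret_cast<intptr_t>(via_api) == via_teb);
  std::printf("slot %lu -> %p\n", index, via_api);
  TlsFree(index);
  return 0;
}
```

The gain is skipping the call into kernel32 and TlsGetValue's error handling; since the caller guarantees the slot has been allocated (hence "GetExistingThreadLocal"), the raw read is safe.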
| 344 bool IsReserved(); | 344 bool IsReserved(); |
| 345 | 345 |
| 346 // Initializes or resets an embedded VirtualMemory object. | 346 // Initializes or resets an embedded VirtualMemory object. |
| 347 void Reset(); | 347 void Reset(); |
| 348 | 348 |
| 349 // Returns the start address of the reserved memory. | 349 // Returns the start address of the reserved memory. |
| 350 // If the memory was reserved with an alignment, this address is not | 350 // If the memory was reserved with an alignment, this address is not |
| 351 // necessarily aligned. The user might need to round it up to a multiple of | 351 // necessarily aligned. The user might need to round it up to a multiple of |
| 352 // the alignment to get the start of the aligned block. | 352 // the alignment to get the start of the aligned block. |
| 353 void* address() { | 353 void* address() { |
| 354 ASSERT(IsReserved()); | 354 DCHECK(IsReserved()); |
| 355 return address_; | 355 return address_; |
| 356 } | 356 } |
| 357 | 357 |
| 358 // Returns the size of the reserved memory. The returned value is only | 358 // Returns the size of the reserved memory. The returned value is only |
| 359 // meaningful when IsReserved() returns true. | 359 // meaningful when IsReserved() returns true. |
| 360 // If the memory was reserved with an alignment, this size may be larger | 360 // If the memory was reserved with an alignment, this size may be larger |
| 361 // than the requested size. | 361 // than the requested size. |
| 362 size_t size() { return size_; } | 362 size_t size() { return size_; } |
| 363 | 363 |
| 364 // Commits real memory. Returns whether the operation succeeded. | 364 // Commits real memory. Returns whether the operation succeeded. |
| 365 bool Commit(void* address, size_t size, bool is_executable); | 365 bool Commit(void* address, size_t size, bool is_executable); |
| 366 | 366 |
| 367 // Uncommits real memory. Returns whether the operation succeeded. | 367 // Uncommits real memory. Returns whether the operation succeeded. |
| 368 bool Uncommit(void* address, size_t size); | 368 bool Uncommit(void* address, size_t size); |
| 369 | 369 |
| 370 // Creates a single guard page at the given address. | 370 // Creates a single guard page at the given address. |
| 371 bool Guard(void* address); | 371 bool Guard(void* address); |
| 372 | 372 |
| 373 void Release() { | 373 void Release() { |
| 374 ASSERT(IsReserved()); | 374 DCHECK(IsReserved()); |
| 375 // Notice: Order is important here. The VirtualMemory object might live | 375 // Notice: Order is important here. The VirtualMemory object might live |
| 376 // inside the allocated region. | 376 // inside the allocated region. |
| 377 void* address = address_; | 377 void* address = address_; |
| 378 size_t size = size_; | 378 size_t size = size_; |
| 379 Reset(); | 379 Reset(); |
| 380 bool result = ReleaseRegion(address, size); | 380 bool result = ReleaseRegion(address, size); |
| 381 USE(result); | 381 USE(result); |
| 382 ASSERT(result); | 382 DCHECK(result); |
| 383 } | 383 } |
| 384 | 384 |
| 385 // Assign control of the reserved region to a different VirtualMemory object. | 385 // Assign control of the reserved region to a different VirtualMemory object. |
| 386 // The old object is no longer functional (IsReserved() returns false). | 386 // The old object is no longer functional (IsReserved() returns false). |
| 387 void TakeControl(VirtualMemory* from) { | 387 void TakeControl(VirtualMemory* from) { |
| 388 ASSERT(!IsReserved()); | 388 DCHECK(!IsReserved()); |
| 389 address_ = from->address_; | 389 address_ = from->address_; |
| 390 size_ = from->size_; | 390 size_ = from->size_; |
| 391 from->Reset(); | 391 from->Reset(); |
| 392 } | 392 } |
| 393 | 393 |
| 394 static void* ReserveRegion(size_t size); | 394 static void* ReserveRegion(size_t size); |
| 395 | 395 |
| 396 static bool CommitRegion(void* base, size_t size, bool is_executable); | 396 static bool CommitRegion(void* base, size_t size, bool is_executable); |
| 397 | 397 |
| 398 static bool UncommitRegion(void* base, size_t size); | 398 static bool UncommitRegion(void* base, size_t size); |
| (...skipping 78 matching lines...) | (...skipping 78 matching lines...) |
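The visible part of VirtualMemory follows the usual reserve-then-commit pattern: reserve a large span of address space up front, commit pages only as they are needed, and release the whole region at the end. A hedged usage sketch follows; the VirtualMemory(size_t) constructor is assumed here, since the constructors themselves are in the elided lines above.

```cpp
// Sketch only: a plausible reserve/commit/release flow against the members
// shown above. The VirtualMemory(size_t) constructor is an assumption; the
// actual constructors live in the elided part of the header.
#include "src/base/platform/platform.h"

void UseScratchRegion() {
  const size_t kReservationSize = 1u << 20;  // 1 MB of address space
  v8::base::VirtualMemory reservation(kReservationSize);
  if (!reservation.IsReserved()) return;

  // Commit only what is needed; the rest of the reservation stays virtual.
  void* start = reservation.address();
  const size_t kCommitSize = 4096;
  if (!reservation.Commit(start, kCommitSize, false /* not executable */)) {
    return;
  }

  // ... use the committed range ...

  reservation.Uncommit(start, kCommitSize);
  reservation.Release();
}
```

Note also why Release() copies address_ and size_ into locals and calls Reset() before ReleaseRegion(): as the comment warns, the VirtualMemory object may itself live inside the region it manages, so touching members after the unmap would be a use-after-free.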
| 477 SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value))); | 477 SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value))); |
| 478 } | 478 } |
| 479 static bool HasThreadLocal(LocalStorageKey key) { | 479 static bool HasThreadLocal(LocalStorageKey key) { |
| 480 return GetThreadLocal(key) != NULL; | 480 return GetThreadLocal(key) != NULL; |
| 481 } | 481 } |
| 482 | 482 |
| 483 #ifdef V8_FAST_TLS_SUPPORTED | 483 #ifdef V8_FAST_TLS_SUPPORTED |
| 484 static inline void* GetExistingThreadLocal(LocalStorageKey key) { | 484 static inline void* GetExistingThreadLocal(LocalStorageKey key) { |
| 485 void* result = reinterpret_cast<void*>( | 485 void* result = reinterpret_cast<void*>( |
| 486 InternalGetExistingThreadLocal(static_cast<intptr_t>(key))); | 486 InternalGetExistingThreadLocal(static_cast<intptr_t>(key))); |
| 487 ASSERT(result == GetThreadLocal(key)); | 487 DCHECK(result == GetThreadLocal(key)); |
| 488 return result; | 488 return result; |
| 489 } | 489 } |
| 490 #else | 490 #else |
| 491 static inline void* GetExistingThreadLocal(LocalStorageKey key) { | 491 static inline void* GetExistingThreadLocal(LocalStorageKey key) { |
| 492 return GetThreadLocal(key); | 492 return GetThreadLocal(key); |
| 493 } | 493 } |
| 494 #endif | 494 #endif |
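The two GetExistingThreadLocal() definitions capture the pattern: when V8_FAST_TLS_SUPPORTED is defined, the fast inline read is used and, in debug builds, the DCHECK cross-checks it against the portable GetThreadLocal(); otherwise the portable lookup is the only path. Below is a usage sketch under the assumption that Thread::CreateThreadLocalKey() exists in the elided part of the class; only the accessors are visible in this hunk.

```cpp
// Sketch only: caching a per-thread object behind the Thread TLS accessors
// shown above. Thread::CreateThreadLocalKey() is not visible in this hunk
// and is assumed.
#include "src/base/platform/platform.h"

struct PerThreadScratch { char buffer[256]; };

static v8::base::Thread::LocalStorageKey scratch_key =
    v8::base::Thread::CreateThreadLocalKey();

PerThreadScratch* GetScratch() {
  if (!v8::base::Thread::HasThreadLocal(scratch_key)) {
    v8::base::Thread::SetThreadLocal(scratch_key, new PerThreadScratch());
  }
  // On hot paths where the slot is known to be populated, the fast variant
  // skips the portable lookup (and, in debug builds, DCHECKs that both agree).
  return static_cast<PerThreadScratch*>(
      v8::base::Thread::GetExistingThreadLocal(scratch_key));
}
```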
| 495 | 495 |
| 496 // A hint to the scheduler to let another thread run. | 496 // A hint to the scheduler to let another thread run. |
| 497 static void YieldCPU(); | 497 static void YieldCPU(); |
| (...skipping 19 matching lines...) | (...skipping 19 matching lines...) |
| 517 char name_[kMaxThreadNameLength]; | 517 char name_[kMaxThreadNameLength]; |
| 518 int stack_size_; | 518 int stack_size_; |
| 519 Semaphore* start_semaphore_; | 519 Semaphore* start_semaphore_; |
| 520 | 520 |
| 521 DISALLOW_COPY_AND_ASSIGN(Thread); | 521 DISALLOW_COPY_AND_ASSIGN(Thread); |
| 522 }; | 522 }; |
| 523 | 523 |
| 524 } } // namespace v8::base | 524 } } // namespace v8::base |
| 525 | 525 |
| 526 #endif // V8_BASE_PLATFORM_PLATFORM_H_ | 526 #endif // V8_BASE_PLATFORM_PLATFORM_H_ |
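The only substantive change in this hunk is the ASSERT to DCHECK rename; everything else is context. DCHECK in V8's base library is a debug-only check, so the renamed assertions still compile away in release builds. The following is a rough sketch of the general shape of such a macro, for orientation only; the real definition lives in src/base/logging.h and reports considerably more detail.

```cpp
// Sketch only: the general shape of a debug-only check macro like DCHECK.
// V8's real macro prints file, line, and the failed condition before
// aborting; this just illustrates the debug/release split.
#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define DCHECK(condition)                                           \
  do {                                                              \
    if (!(condition)) {                                             \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition); \
      std::abort();                                                 \
    }                                                               \
  } while (false)
#else
#define DCHECK(condition) ((void)0)
#endif
```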