Index: runtime/vm/thread.h
diff --git a/runtime/vm/thread.h b/runtime/vm/thread.h
index d4a3ea14a0f7203ff3176890bdcbe8c2f034bc81..07360ea8dcc6f431300ec52acfbc1c34355be7a5 100644
--- a/runtime/vm/thread.h
+++ b/runtime/vm/thread.h
@@ -28,9 +28,9 @@ class Heap;
 class Instance;
 class Isolate;
 class Library;
-class Log;
 class LongJumpScope;
 class Object;
+class OSThread;
 class PcDescriptors;
 class RawBool;
 class RawObject;
@@ -40,7 +40,6 @@ class RawString;
 class RuntimeEntry;
 class StackResource;
 class String;
-class TimelineEventBlock;
 class TypeArguments;
 class TypeParameter;
 class Zone;
@@ -97,16 +96,19 @@ class Zone;
 // a thread is allocated by EnsureInit before entering an isolate, and destroyed
 // automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
 // must currently be called manually (issue 23474).
-class Thread {
+class Thread : public BaseThread {
  public:
+  ~Thread();
+
   // The currently executing thread, or NULL if not yet initialized.
   static Thread* Current() {
-    return reinterpret_cast<Thread*>(OSThread::GetThreadLocal(thread_key_));
+    BaseThread* thread = OSThread::GetCurrentTLS();
+    if (thread == NULL || thread->is_os_thread()) {
+      return NULL;
+    }
+    return reinterpret_cast<Thread*>(thread);
   }
 
-  // Initializes the current thread as a VM thread, if not already done.
-  static void EnsureInit();
-
   // Makes the current thread enter 'isolate'.
   static void EnterIsolate(Isolate* isolate);
   // Makes the current thread exit its isolate.
@@ -125,16 +127,14 @@
   // TODO(koda): Always run GC in separate thread.
   static void PrepareForGC();
 
-  // Called at VM startup.
-  static void InitOnceBeforeIsolate();
-  static void InitOnceAfterObjectAndStubCode();
-
-  // Called at VM shutdown
-  static void Shutdown();
-  ~Thread();
+  // OSThread corresponding to this thread.
+  OSThread* os_thread() const { return os_thread_; }
+  void set_os_thread(OSThread* os_thread) {
+    os_thread_ = os_thread;
+  }
 
   // The topmost zone used for allocation in this thread.
-  Zone* zone() const { return state_.zone; }
+  Zone* zone() const { return zone_; }
 
   // The isolate that this thread is operating on, or NULL if none.
   Isolate* isolate() const { return isolate_; }
@@ -150,8 +150,15 @@
   bool HasExitedDartCode() const;
 
   // The (topmost) CHA for the compilation in this thread.
-  CHA* cha() const;
-  void set_cha(CHA* value);
+  CHA* cha() const {
+    ASSERT(isolate_ != NULL);
+    return cha_;
+  }
+
+  void set_cha(CHA* value) {
+    ASSERT(isolate_ != NULL);
+    cha_ = value;
+  }
 
   int32_t no_callback_scope_depth() const {
     return no_callback_scope_depth_;
@@ -179,17 +186,17 @@
     return OFFSET_OF(Thread, store_buffer_block_);
   }
 
-  uword top_exit_frame_info() const { return state_.top_exit_frame_info; }
+  uword top_exit_frame_info() const { return top_exit_frame_info_; }
   static intptr_t top_exit_frame_info_offset() {
-    return OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_exit_frame_info);
+    return OFFSET_OF(Thread, top_exit_frame_info_);
   }
 
-  StackResource* top_resource() const { return state_.top_resource; }
+  StackResource* top_resource() const { return top_resource_; }
   void set_top_resource(StackResource* value) {
-    state_.top_resource = value;
+    top_resource_ = value;
   }
   static intptr_t top_resource_offset() {
-    return OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_resource);
+    return OFFSET_OF(Thread, top_resource_);
   }
 
   static intptr_t heap_offset() {
@@ -198,7 +205,7 @@
 
   int32_t no_handle_scope_depth() const {
 #if defined(DEBUG)
-    return state_.no_handle_scope_depth;
+    return no_handle_scope_depth_;
 #else
     return 0;
 #endif
@@ -206,21 +213,21 @@
 
   void IncrementNoHandleScopeDepth() {
 #if defined(DEBUG)
-    ASSERT(state_.no_handle_scope_depth < INT_MAX);
-    state_.no_handle_scope_depth += 1;
+    ASSERT(no_handle_scope_depth_ < INT_MAX);
+    no_handle_scope_depth_ += 1;
 #endif
   }
 
   void DecrementNoHandleScopeDepth() {
 #if defined(DEBUG)
-    ASSERT(state_.no_handle_scope_depth > 0);
-    state_.no_handle_scope_depth -= 1;
+    ASSERT(no_handle_scope_depth_ > 0);
+    no_handle_scope_depth_ -= 1;
 #endif
   }
 
   HandleScope* top_handle_scope() const {
 #if defined(DEBUG)
-    return state_.top_handle_scope;
+    return top_handle_scope_;
 #else
     return 0;
 #endif
@@ -228,13 +235,13 @@
 
   void set_top_handle_scope(HandleScope* handle_scope) {
 #if defined(DEBUG)
-    state_.top_handle_scope = handle_scope;
+    top_handle_scope_ = handle_scope;
 #endif
   }
 
   int32_t no_safepoint_scope_depth() const {
 #if defined(DEBUG)
-    return state_.no_safepoint_scope_depth;
+    return no_safepoint_scope_depth_;
 #else
     return 0;
 #endif
@@ -242,32 +249,18 @@
 
   void IncrementNoSafepointScopeDepth() {
 #if defined(DEBUG)
-    ASSERT(state_.no_safepoint_scope_depth < INT_MAX);
-    state_.no_safepoint_scope_depth += 1;
+    ASSERT(no_safepoint_scope_depth_ < INT_MAX);
+    no_safepoint_scope_depth_ += 1;
 #endif
   }
 
   void DecrementNoSafepointScopeDepth() {
 #if defined(DEBUG)
-    ASSERT(state_.no_safepoint_scope_depth > 0);
-    state_.no_safepoint_scope_depth -= 1;
+    ASSERT(no_safepoint_scope_depth_ > 0);
+    no_safepoint_scope_depth_ -= 1;
 #endif
   }
 
-  // Collection of isolate-specific state of a thread that is saved/restored
-  // on isolate exit/re-entry.
-  struct State {
-    Zone* zone;
-    uword top_exit_frame_info;
-    StackResource* top_resource;
-    LongJumpScope* long_jump_base;
-#if defined(DEBUG)
-    HandleScope* top_handle_scope;
-    intptr_t no_handle_scope_depth;
-    int32_t no_safepoint_scope_depth;
-#endif
-  };
-
 #define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
   static intptr_t member_name##offset() { \
     return OFFSET_OF(Thread, member_name); \
@@ -294,22 +287,6 @@ LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
   static bool ObjectAtOffset(intptr_t offset, Object* object);
   static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);
 
-  Mutex* timeline_block_lock() {
-    return &timeline_block_lock_;
-  }
-
-  // Only safe to access when holding |timeline_block_lock_|.
-  TimelineEventBlock* timeline_block() const {
-    return timeline_block_;
-  }
-
-  // Only safe to access when holding |timeline_block_lock_|.
-  void set_timeline_block(TimelineEventBlock* block) {
-    timeline_block_ = block;
-  }
-
-  class Log* log() const;
-
   static const intptr_t kNoDeoptId = -1;
   static const intptr_t kDeoptIdStep = 2;
   static const intptr_t kDeoptIdBeforeOffset = 0;
@@ -339,9 +316,9 @@ LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
     return (deopt_id % kDeoptIdStep) == kDeoptIdAfterOffset;
   }
 
-  LongJumpScope* long_jump_base() const { return state_.long_jump_base; }
+  LongJumpScope* long_jump_base() const { return long_jump_base_; }
   void set_long_jump_base(LongJumpScope* value) {
-    state_.long_jump_base = value;
+    long_jump_base_ = value;
   }
 
   uword vm_tag() const {
@@ -354,36 +331,6 @@ LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
     return OFFSET_OF(Thread, vm_tag_);
   }
 
-  ThreadId id() const {
-    ASSERT(id_ != OSThread::kInvalidThreadId);
-    return id_;
-  }
-
-  ThreadId join_id() const {
-    ASSERT(join_id_ != OSThread::kInvalidThreadJoinId);
-    return join_id_;
-  }
-
-  ThreadId trace_id() const {
-    ASSERT(trace_id_ != OSThread::kInvalidThreadJoinId);
-    return trace_id_;
-  }
-
-  const char* name() const {
-    return name_;
-  }
-
-  void set_name(const char* name) {
-    ASSERT(Thread::Current() == this);
-    ASSERT(name_ == NULL);
-    name_ = name;
-  }
-
-  // Used to temporarily disable or enable thread interrupts.
-  void DisableThreadInterrupts();
-  void EnableThreadInterrupts();
-  bool ThreadInterruptsEnabled();
-
 #if defined(DEBUG)
 #define REUSABLE_HANDLE_SCOPE_ACCESSORS(object) \
   void set_reusable_##object##_handle_scope_active(bool value) { \
@@ -416,25 +363,34 @@ LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
   RawGrowableObjectArray* pending_functions();
 
   void VisitObjectPointers(ObjectPointerVisitor* visitor);
-
-  static bool IsThreadInList(ThreadId join_id);
+  void InitVMConstants();
 
  private:
  template<class T> T* AllocateReusableHandle();
 
-  static ThreadLocalKey thread_key_;
-
-  const ThreadId id_;
-  const ThreadId join_id_;
-  const ThreadId trace_id_;
-  uintptr_t thread_interrupt_disabled_;
+  OSThread* os_thread_;
   Isolate* isolate_;
   Heap* heap_;
-  State state_;
-  Mutex timeline_block_lock_;
-  TimelineEventBlock* timeline_block_;
+  Zone* zone_;
+  uword top_exit_frame_info_;
+  StackResource* top_resource_;
+  LongJumpScope* long_jump_base_;
   StoreBufferBlock* store_buffer_block_;
-  class Log* log_;
+  int32_t no_callback_scope_depth_;
+#if defined(DEBUG)
+  HandleScope* top_handle_scope_;
+  intptr_t no_handle_scope_depth_;
+  int32_t no_safepoint_scope_depth_;
+#endif
+  VMHandles reusable_handles_;
+
+  // Compiler state:
+  CHA* cha_;
+  intptr_t deopt_id_;  // Compilation specific counter.
+  uword vm_tag_;
+  RawGrowableObjectArray* pending_functions_;
+
+  // State that is cached in the TLS for fast access in generated code.
 #define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
   type_name member_name;
 CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
@@ -463,50 +419,25 @@ LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
 #undef REUSABLE_HANDLE_SCOPE_VARIABLE
 #endif  // defined(DEBUG)
 
-  VMHandles reusable_handles_;
-
-  // Compiler state:
-  CHA* cha_;
-  intptr_t deopt_id_;  // Compilation specific counter.
-  uword vm_tag_;
-  RawGrowableObjectArray* pending_functions_;
-
-  int32_t no_callback_scope_depth_;
-
-  // All |Thread|s are registered in the thread list.
-  Thread* thread_list_next_;
-
-  // A name for this thread.
-  const char* name_;
-
-  static Thread* thread_list_head_;
-  static Mutex* thread_list_lock_;
-
-  static void AddThreadToList(Thread* thread);
-  static void RemoveThreadFromList(Thread* thread);
+  Thread* next_;  // Used to chain the thread structures in an isolate.
 
-  explicit Thread(bool init_vm_constants = true);
-
-  void InitVMConstants();
-
-  void ClearState();
+  explicit Thread(Isolate* isolate);
 
   void StoreBufferRelease(
       StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
   void StoreBufferAcquire();
 
   void set_zone(Zone* zone) {
-    state_.zone = zone;
+    zone_ = zone;
   }
 
   void set_top_exit_frame_info(uword top_exit_frame_info) {
-    state_.top_exit_frame_info = top_exit_frame_info;
+    top_exit_frame_info_ = top_exit_frame_info;
  }
 
-  static void SetCurrent(Thread* current);
-
-  void Schedule(Isolate* isolate, bool bypass_safepoint = false);
-  void Unschedule(bool bypass_safepoint = false);
+  static void SetCurrent(Thread* current) {
+    OSThread::SetCurrentTLS(reinterpret_cast<uword>(current));
+  }
 
 #define REUSABLE_FRIEND_DECLARATION(name) \
   friend class Reusable##name##HandleScope;
@@ -517,31 +448,12 @@ REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
   friend class Isolate;
   friend class Simulator;
   friend class StackZone;
-  friend class ThreadIterator;
-  friend class ThreadIteratorTestHelper;
   friend class ThreadRegistry;
 
   DISALLOW_COPY_AND_ASSIGN(Thread);
 };
 
 
-// Note that this takes the thread list lock, prohibiting threads from coming
-// on- or off-line.
-class ThreadIterator : public ValueObject {
- public:
-  ThreadIterator();
-  ~ThreadIterator();
-
-  // Returns false when there are no more threads left.
-  bool HasNext() const;
-
-  // Returns the current thread and moves forward.
-  Thread* Next();
-
- private:
-  Thread* next_;
-};
-
 #if defined(TARGET_OS_WINDOWS)
 // Clears the state of the current thread and frees the allocation.
 void WindowsThreadCleanUp();
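
Note on the Thread::Current()/SetCurrent() change above: the thread-local slot now stores a BaseThread*, and Thread derives from BaseThread alongside the newly forward-declared OSThread, so Current() must check is_os_thread() before downcasting and returns NULL whenever the slot holds only a bare OSThread (i.e. the thread has not entered an isolate). The following is a minimal, self-contained C++ sketch of that dispatch pattern; BaseThread, OSThread, and Thread here are simplified stand-ins for the VM classes of the same names, and the thread_local member stands in for the VM's real TLS plumbing. It is illustrative only, not the VM's implementation.

// Sketch of the two-level TLS dispatch introduced by this patch (stand-in
// classes; only the shape of the logic mirrors the real code).
#include <cassert>
#include <cstdint>

class BaseThread {
 public:
  virtual ~BaseThread() {}
  bool is_os_thread() const { return is_os_thread_; }

 protected:
  explicit BaseThread(bool is_os_thread) : is_os_thread_(is_os_thread) {}

 private:
  bool is_os_thread_;
};

// Every OS thread gets one of these; it owns the thread-local slot.
class OSThread : public BaseThread {
 public:
  OSThread() : BaseThread(true) {}

  static BaseThread* GetCurrentTLS() { return current_; }
  static void SetCurrentTLS(uintptr_t value) {
    current_ = reinterpret_cast<BaseThread*>(value);
  }

 private:
  static thread_local BaseThread* current_;
};

thread_local BaseThread* OSThread::current_ = nullptr;

// A VM thread. Current() only yields a Thread* when the TLS slot actually
// holds a Thread, never when it holds a bare OSThread.
class Thread : public BaseThread {
 public:
  Thread() : BaseThread(false) {}

  static Thread* Current() {
    BaseThread* thread = OSThread::GetCurrentTLS();
    if (thread == nullptr || thread->is_os_thread()) {
      return nullptr;
    }
    return static_cast<Thread*>(thread);
  }
};

int main() {
  // Before entering an isolate the slot holds only the OSThread, so
  // Thread::Current() reports "no VM thread".
  OSThread os_thread;
  OSThread::SetCurrentTLS(reinterpret_cast<uintptr_t>(&os_thread));
  assert(Thread::Current() == nullptr);

  // Once a Thread is installed (the patch does this in SetCurrent()),
  // Thread::Current() returns it.
  Thread vm_thread;
  OSThread::SetCurrentTLS(reinterpret_cast<uintptr_t>(&vm_thread));
  assert(Thread::Current() == &vm_thread);
  return 0;
}

A related design point in the patch: flattening the old nested State struct into direct Thread members turns two-step offset computations such as OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_exit_frame_info) into a single OFFSET_OF(Thread, top_exit_frame_info_), so generated code can address each field with one constant offset from the thread pointer.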