Chromium Code Reviews

Unified Diff: runtime/vm/thread.h

Issue 1439483003: - Add an OSThread structure which is the generic TLS structure for all C++ (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: code-review | Created 5 years, 1 month ago
 // Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #ifndef VM_THREAD_H_
 #define VM_THREAD_H_

 #include "vm/globals.h"
 #include "vm/handles.h"
 #include "vm/os_thread.h"
 (...skipping 10 matching lines...)
 class Error;
 class ExceptionHandlers;
 class Field;
 class Function;
 class GrowableObjectArray;
 class HandleScope;
 class Heap;
 class Instance;
 class Isolate;
 class Library;
-class Log;
 class LongJumpScope;
 class Object;
+class OSThread;
 class PcDescriptors;
 class RawBool;
 class RawObject;
 class RawCode;
 class RawGrowableObjectArray;
 class RawString;
 class RuntimeEntry;
 class StackResource;
 class String;
-class TimelineEventBlock;
 class TypeArguments;
 class TypeParameter;
 class Zone;

 #define REUSABLE_HANDLE_LIST(V) \
   V(AbstractType) \
   V(Array) \
   V(Class) \
   V(Code) \
   V(Error) \
 (...skipping 36 matching lines...)
 #define CACHED_CONSTANTS_LIST(V) \
   CACHED_VM_OBJECTS_LIST(V) \
   CACHED_ADDRESSES_LIST(V) \

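The V-lists above (REUSABLE_HANDLE_LIST, CACHED_CONSTANTS_LIST) use the X-macro pattern: each expansion site passes its own macro as V, so a single list of names can stamp out fields, accessors, and friend declarations. A minimal standalone sketch of the same pattern (names here are illustrative, not from this file):

#include <cstdio>

// A toy list macro analogous to REUSABLE_HANDLE_LIST: it applies the
// caller-supplied macro V to each entry in the list.
#define MY_LIST(V)  \
  V(Array)          \
  V(Class)          \
  V(Code)

// Expansion 1: declare one counter field per entry (Array_count_, ...).
#define DECLARE_FIELD(name) int name##_count_ = 0;
struct Counters {
  MY_LIST(DECLARE_FIELD)
};
#undef DECLARE_FIELD

int main() {
  Counters c;
  c.Array_count_ = 3;
  // Expansion 2: generate one print statement per entry.
#define PRINT_ENTRY(name) std::printf(#name " = %d\n", c.name##_count_);
  MY_LIST(PRINT_ENTRY)
#undef PRINT_ENTRY
  return 0;
}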
 // A VM thread; may be executing Dart code or performing helper tasks like
 // garbage collection or compilation. The Thread structure associated with
 // a thread is allocated by EnsureInit before entering an isolate, and destroyed
 // automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
 // must currently be called manually (issue 23474).
-class Thread {
+class Thread : public BaseThread {
  public:
+  ~Thread();
+
   // The currently executing thread, or NULL if not yet initialized.
   static Thread* Current() {
-    return reinterpret_cast<Thread*>(OSThread::GetThreadLocal(thread_key_));
+    BaseThread* thread = OSThread::GetCurrentTLS();
+    if (thread == NULL || thread->is_os_thread()) {
+      return NULL;
+    }
+    return reinterpret_cast<Thread*>(thread);
   }

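With this patch a single TLS slot holds a BaseThread*, and is_os_thread() distinguishes a bare OSThread from a VM Thread that has entered an isolate. A standalone sketch of that discrimination, with simplified class shapes (the real BaseThread/OSThread definitions live in os_thread.h):

#include <cassert>
#include <cstddef>

// Simplified stand-ins for the real classes in os_thread.h.
class BaseThread {
 public:
  bool is_os_thread() const { return is_os_thread_; }
 protected:
  explicit BaseThread(bool is_os_thread) : is_os_thread_(is_os_thread) {}
 private:
  bool is_os_thread_;
};

class OSThread : public BaseThread {
 public:
  OSThread() : BaseThread(true) {}
};

class Thread : public BaseThread {
 public:
  Thread() : BaseThread(false) {}
};

// Mirrors the logic of Thread::Current() above: the slot may hold either
// kind of object, or nothing at all.
Thread* CurrentThread(BaseThread* tls_value) {
  if (tls_value == NULL || tls_value->is_os_thread()) {
    return NULL;
  }
  return static_cast<Thread*>(tls_value);
}

int main() {
  OSThread os;
  Thread vm;
  assert(CurrentThread(&os) == NULL);   // OS thread only: no VM Thread yet
  assert(CurrentThread(&vm) == &vm);    // thread running inside the VM
  assert(CurrentThread(NULL) == NULL);  // TLS never initialized
  return 0;
}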
-  // Initializes the current thread as a VM thread, if not already done.
-  static void EnsureInit();
-
   // Makes the current thread enter 'isolate'.
   static void EnterIsolate(Isolate* isolate);
   // Makes the current thread exit its isolate.
   static void ExitIsolate();

   // A VM thread other than the main mutator thread can enter an isolate as a
   // "helper" to gain limited concurrent access to the isolate. One example is
   // SweeperTask (which uses the class table, which is copy-on-write).
   // TODO(koda): Properly synchronize heap access to expand allowed operations.
   static void EnterIsolateAsHelper(Isolate* isolate,
                                    bool bypass_safepoint = false);
   static void ExitIsolateAsHelper(bool bypass_safepoint = false);

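EnterIsolateAsHelper and ExitIsolateAsHelper are meant to bracket a background task's access to the isolate. A hypothetical RAII wrapper (not part of this header) makes the pairing explicit:

// Hypothetical convenience wrapper; SweeperTask-style code would make the
// equivalent enter/exit calls around its work.
class ScopedIsolateHelper {
 public:
  explicit ScopedIsolateHelper(Isolate* isolate, bool bypass_safepoint = false)
      : bypass_safepoint_(bypass_safepoint) {
    Thread::EnterIsolateAsHelper(isolate, bypass_safepoint_);
  }
  ~ScopedIsolateHelper() {
    Thread::ExitIsolateAsHelper(bypass_safepoint_);
  }

 private:
  const bool bypass_safepoint_;
};

// Usage inside a helper task:
//   ScopedIsolateHelper helper(isolate);
//   // ... limited, synchronized work against the isolate ...
//   // destructor exits the isolate, even on early return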
   // Called when the current thread transitions from mutator to collector.
   // Empties the store buffer block into the isolate.
   // TODO(koda): Always run GC in separate thread.
   static void PrepareForGC();

-  // Called at VM startup.
-  static void InitOnceBeforeIsolate();
-  static void InitOnceAfterObjectAndStubCode();
-
-  // Called at VM shutdown
-  static void Shutdown();
-  ~Thread();
+  // OSThread corresponding to this thread.
+  OSThread* os_thread() const { return os_thread_; }
+  void set_os_thread(OSThread* os_thread) {
+    os_thread_ = os_thread;
+  }

   // The topmost zone used for allocation in this thread.
-  Zone* zone() const { return state_.zone; }
+  Zone* zone() const { return zone_; }

   // The isolate that this thread is operating on, or NULL if none.
   Isolate* isolate() const { return isolate_; }
   static intptr_t isolate_offset() {
     return OFFSET_OF(Thread, isolate_);
   }
   bool IsMutatorThread() const;

   // Is |this| executing Dart code?
   bool IsExecutingDartCode() const;

   // Has |this| exited Dart code?
   bool HasExitedDartCode() const;

   // The (topmost) CHA for the compilation in this thread.
-  CHA* cha() const;
-  void set_cha(CHA* value);
+  CHA* cha() const {
+    ASSERT(isolate_ != NULL);
+    return cha_;
+  }
+
+  void set_cha(CHA* value) {
+    ASSERT(isolate_ != NULL);
+    cha_ = value;
+  }

   int32_t no_callback_scope_depth() const {
     return no_callback_scope_depth_;
   }

   void IncrementNoCallbackScopeDepth() {
     ASSERT(no_callback_scope_depth_ < INT_MAX);
     no_callback_scope_depth_ += 1;
   }

   void DecrementNoCallbackScopeDepth() {
     ASSERT(no_callback_scope_depth_ > 0);
     no_callback_scope_depth_ -= 1;
   }

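The Increment/Decrement pair above is designed to be driven by a stack-allocated guard, so the depth cannot leak on early returns. A hypothetical guard (not defined in this file) would look like:

// Hypothetical RAII guard for the callback-scope counter above.
class NoCallbackScopeSketch {
 public:
  explicit NoCallbackScopeSketch(Thread* thread) : thread_(thread) {
    thread_->IncrementNoCallbackScopeDepth();  // depth > 0: callbacks barred
  }
  ~NoCallbackScopeSketch() {
    thread_->DecrementNoCallbackScopeDepth();  // restored on scope exit
  }

 private:
  Thread* const thread_;
};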
   void StoreBufferAddObject(RawObject* obj);
   void StoreBufferAddObjectGC(RawObject* obj);
 #if defined(TESTING)
   bool StoreBufferContains(RawObject* obj) const {
     return store_buffer_block_->Contains(obj);
   }
 #endif
   void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
   static intptr_t store_buffer_block_offset() {
     return OFFSET_OF(Thread, store_buffer_block_);
   }

-  uword top_exit_frame_info() const { return state_.top_exit_frame_info; }
+  uword top_exit_frame_info() const { return top_exit_frame_info_; }
   static intptr_t top_exit_frame_info_offset() {
-    return OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_exit_frame_info);
+    return OFFSET_OF(Thread, top_exit_frame_info_);
   }

-  StackResource* top_resource() const { return state_.top_resource; }
+  StackResource* top_resource() const { return top_resource_; }
   void set_top_resource(StackResource* value) {
-    state_.top_resource = value;
+    top_resource_ = value;
   }
   static intptr_t top_resource_offset() {
-    return OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_resource);
+    return OFFSET_OF(Thread, top_resource_);
   }

   static intptr_t heap_offset() {
     return OFFSET_OF(Thread, heap_);
   }

   int32_t no_handle_scope_depth() const {
 #if defined(DEBUG)
-    return state_.no_handle_scope_depth;
+    return no_handle_scope_depth_;
 #else
     return 0;
 #endif
   }

   void IncrementNoHandleScopeDepth() {
 #if defined(DEBUG)
-    ASSERT(state_.no_handle_scope_depth < INT_MAX);
-    state_.no_handle_scope_depth += 1;
+    ASSERT(no_handle_scope_depth_ < INT_MAX);
+    no_handle_scope_depth_ += 1;
 #endif
   }

   void DecrementNoHandleScopeDepth() {
 #if defined(DEBUG)
-    ASSERT(state_.no_handle_scope_depth > 0);
-    state_.no_handle_scope_depth -= 1;
+    ASSERT(no_handle_scope_depth_ > 0);
+    no_handle_scope_depth_ -= 1;
 #endif
   }

   HandleScope* top_handle_scope() const {
 #if defined(DEBUG)
-    return state_.top_handle_scope;
+    return top_handle_scope_;
 #else
     return 0;
 #endif
   }

   void set_top_handle_scope(HandleScope* handle_scope) {
 #if defined(DEBUG)
-    state_.top_handle_scope = handle_scope;
+    top_handle_scope_ = handle_scope;
 #endif
   }

   int32_t no_safepoint_scope_depth() const {
 #if defined(DEBUG)
-    return state_.no_safepoint_scope_depth;
+    return no_safepoint_scope_depth_;
 #else
     return 0;
 #endif
   }

   void IncrementNoSafepointScopeDepth() {
 #if defined(DEBUG)
-    ASSERT(state_.no_safepoint_scope_depth < INT_MAX);
-    state_.no_safepoint_scope_depth += 1;
+    ASSERT(no_safepoint_scope_depth_ < INT_MAX);
+    no_safepoint_scope_depth_ += 1;
 #endif
   }

   void DecrementNoSafepointScopeDepth() {
 #if defined(DEBUG)
-    ASSERT(state_.no_safepoint_scope_depth > 0);
-    state_.no_safepoint_scope_depth -= 1;
+    ASSERT(no_safepoint_scope_depth_ > 0);
+    no_safepoint_scope_depth_ -= 1;
 #endif
   }

-  // Collection of isolate-specific state of a thread that is saved/restored
-  // on isolate exit/re-entry.
-  struct State {
-    Zone* zone;
-    uword top_exit_frame_info;
-    StackResource* top_resource;
-    LongJumpScope* long_jump_base;
-#if defined(DEBUG)
-    HandleScope* top_handle_scope;
-    intptr_t no_handle_scope_depth;
-    int32_t no_safepoint_scope_depth;
-#endif
-  };
-
 #define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
   static intptr_t member_name##offset() { \
     return OFFSET_OF(Thread, member_name); \
   }
   CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
 #undef DEFINE_OFFSET_METHOD

 #define DEFINE_OFFSET_METHOD(name) \
   static intptr_t name##_entry_point_offset() { \
     return OFFSET_OF(Thread, name##_entry_point_); \
   }
   RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
 #undef DEFINE_OFFSET_METHOD

 #define DEFINE_OFFSET_METHOD(returntype, name, ...) \
   static intptr_t name##_entry_point_offset() { \
     return OFFSET_OF(Thread, name##_entry_point_); \
   }
   LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
 #undef DEFINE_OFFSET_METHOD

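All of the *_offset() accessors exist because JIT-generated code addresses thread state as a raw [base + offset] load or store rather than as a C++ member access. The same idea in standalone form, using the standard offsetof in place of the VM's OFFSET_OF, with an illustrative ToyThread struct:

#include <cassert>
#include <cstddef>
#include <cstdint>

// A stand-in for Thread with one field we want to reach by offset.
struct ToyThread {
  void* zone;
  uintptr_t vm_tag;
};

int main() {
  ToyThread t;
  t.vm_tag = 42;
  // The compiler bakes this constant into generated code, which then
  // emits a load from [thread_register + offset].
  size_t offset = offsetof(ToyThread, vm_tag);
  uintptr_t* slot = reinterpret_cast<uintptr_t*>(
      reinterpret_cast<uint8_t*>(&t) + offset);
  assert(*slot == 42);
  *slot = 7;  // a store through the same addressing mode
  assert(t.vm_tag == 7);
  return 0;
}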
   static bool CanLoadFromThread(const Object& object);
   static intptr_t OffsetFromThread(const Object& object);
   static bool ObjectAtOffset(intptr_t offset, Object* object);
   static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);

-  Mutex* timeline_block_lock() {
-    return &timeline_block_lock_;
-  }
-
-  // Only safe to access when holding |timeline_block_lock_|.
-  TimelineEventBlock* timeline_block() const {
-    return timeline_block_;
-  }
-
-  // Only safe to access when holding |timeline_block_lock_|.
-  void set_timeline_block(TimelineEventBlock* block) {
-    timeline_block_ = block;
-  }
-
-  class Log* log() const;
-
   static const intptr_t kNoDeoptId = -1;
   static const intptr_t kDeoptIdStep = 2;
   static const intptr_t kDeoptIdBeforeOffset = 0;
   static const intptr_t kDeoptIdAfterOffset = 1;
   intptr_t deopt_id() const { return deopt_id_; }
   void set_deopt_id(int value) {
     ASSERT(value >= 0);
     deopt_id_ = value;
   }
   intptr_t GetNextDeoptId() {
     ASSERT(deopt_id_ != kNoDeoptId);
     const intptr_t id = deopt_id_;
     deopt_id_ += kDeoptIdStep;
     return id;
   }

   static intptr_t ToDeoptAfter(intptr_t deopt_id) {
     ASSERT(IsDeoptBefore(deopt_id));
     return deopt_id + kDeoptIdAfterOffset;
   }

   static bool IsDeoptBefore(intptr_t deopt_id) {
     return (deopt_id % kDeoptIdStep) == kDeoptIdBeforeOffset;
   }

   static bool IsDeoptAfter(intptr_t deopt_id) {
     return (deopt_id % kDeoptIdStep) == kDeoptIdAfterOffset;
   }

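A worked example of the deopt-id scheme: ids advance by kDeoptIdStep (2), so every id handed out by GetNextDeoptId is a "before" id (even) and the matching "after" id is its odd successor. The counter's starting value of 0 is assumed here for illustration:

#include <cassert>

// The same constants as above, restated for a standalone check.
static const int kDeoptIdStep = 2;
static const int kDeoptIdBeforeOffset = 0;
static const int kDeoptIdAfterOffset = 1;

int main() {
  int deopt_id = 0;  // assume the counter starts at 0 for a compilation

  // First GetNextDeoptId(): yields 0, counter advances to 2.
  int first = deopt_id;
  deopt_id += kDeoptIdStep;
  // Second GetNextDeoptId(): yields 2, counter advances to 4.
  int second = deopt_id;
  deopt_id += kDeoptIdStep;

  assert(first == 0 && second == 2);
  assert(first % kDeoptIdStep == kDeoptIdBeforeOffset);  // IsDeoptBefore(0)
  int after = first + kDeoptIdAfterOffset;               // ToDeoptAfter(0) == 1
  assert(after % kDeoptIdStep == kDeoptIdAfterOffset);   // IsDeoptAfter(1)
  return 0;
}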
-  LongJumpScope* long_jump_base() const { return state_.long_jump_base; }
+  LongJumpScope* long_jump_base() const { return long_jump_base_; }
   void set_long_jump_base(LongJumpScope* value) {
-    state_.long_jump_base = value;
+    long_jump_base_ = value;
   }

   uword vm_tag() const {
     return vm_tag_;
   }
   void set_vm_tag(uword tag) {
     vm_tag_ = tag;
   }
   static intptr_t vm_tag_offset() {
     return OFFSET_OF(Thread, vm_tag_);
   }

-  ThreadId id() const {
-    ASSERT(id_ != OSThread::kInvalidThreadId);
-    return id_;
-  }
-
-  ThreadId join_id() const {
-    ASSERT(join_id_ != OSThread::kInvalidThreadJoinId);
-    return join_id_;
-  }
-
-  ThreadId trace_id() const {
-    ASSERT(trace_id_ != OSThread::kInvalidThreadJoinId);
-    return trace_id_;
-  }
-
-  const char* name() const {
-    return name_;
-  }
-
-  void set_name(const char* name) {
-    ASSERT(Thread::Current() == this);
-    ASSERT(name_ == NULL);
-    name_ = name;
-  }
-
-  // Used to temporarily disable or enable thread interrupts.
-  void DisableThreadInterrupts();
-  void EnableThreadInterrupts();
-  bool ThreadInterruptsEnabled();
-
 #if defined(DEBUG)
 #define REUSABLE_HANDLE_SCOPE_ACCESSORS(object) \
   void set_reusable_##object##_handle_scope_active(bool value) { \
     reusable_##object##_handle_scope_active_ = value; \
   } \
   bool reusable_##object##_handle_scope_active() const { \
     return reusable_##object##_handle_scope_active_; \
   }
   REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
 #undef REUSABLE_HANDLE_SCOPE_ACCESSORS
 (...skipping 12 matching lines...)
 #define REUSABLE_HANDLE(object) \
   object& object##Handle() const { \
     return *object##_handle_; \
   }
   REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
 #undef REUSABLE_HANDLE

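Each entry in REUSABLE_HANDLE_LIST therefore gets an accessor that returns a cached, pre-allocated handle instead of allocating a fresh one. An illustrative use, assuming the caller has the matching reusable-handle scope active:

// Sketch of how a caller uses a reusable handle; ArrayHandle() is generated
// by REUSABLE_HANDLE(Array) from the list above.
void UseReusableArrayHandle(Thread* thread) {
  Array& array = thread->ArrayHandle();  // cached handle, no new allocation
  // ... use |array| while the ReusableArrayHandleScope is active ...
  (void)array;
}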
   RawGrowableObjectArray* pending_functions();

   void VisitObjectPointers(ObjectPointerVisitor* visitor);
-
-  static bool IsThreadInList(ThreadId join_id);
+  void InitVMConstants();

  private:
   template<class T> T* AllocateReusableHandle();

-  static ThreadLocalKey thread_key_;
-
-  const ThreadId id_;
-  const ThreadId join_id_;
-  const ThreadId trace_id_;
-  uintptr_t thread_interrupt_disabled_;
+  OSThread* os_thread_;
   Isolate* isolate_;
   Heap* heap_;
-  State state_;
-  Mutex timeline_block_lock_;
-  TimelineEventBlock* timeline_block_;
+  Zone* zone_;
+  uword top_exit_frame_info_;
+  StackResource* top_resource_;
+  LongJumpScope* long_jump_base_;
   StoreBufferBlock* store_buffer_block_;
-  class Log* log_;
+  int32_t no_callback_scope_depth_;
+#if defined(DEBUG)
+  HandleScope* top_handle_scope_;
+  intptr_t no_handle_scope_depth_;
+  int32_t no_safepoint_scope_depth_;
+#endif
+  VMHandles reusable_handles_;
+
+  // Compiler state:
+  CHA* cha_;
+  intptr_t deopt_id_;  // Compilation specific counter.
+  uword vm_tag_;
+  RawGrowableObjectArray* pending_functions_;
+
+  // State that is cached in the TLS for fast access in generated code.
 #define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
   type_name member_name;
   CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
 #undef DECLARE_MEMBERS

 #define DECLARE_MEMBERS(name) \
   uword name##_entry_point_;
   RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
 #undef DECLARE_MEMBERS

 #define DECLARE_MEMBERS(returntype, name, ...) \
   uword name##_entry_point_;
   LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
 #undef DECLARE_MEMBERS

   // Reusable handles support.
 #define REUSABLE_HANDLE_FIELDS(object) \
   object* object##_handle_;
   REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
 #undef REUSABLE_HANDLE_FIELDS

 #if defined(DEBUG)
 #define REUSABLE_HANDLE_SCOPE_VARIABLE(object) \
   bool reusable_##object##_handle_scope_active_;
   REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
 #undef REUSABLE_HANDLE_SCOPE_VARIABLE
 #endif  // defined(DEBUG)

-  VMHandles reusable_handles_;
-
-  // Compiler state:
-  CHA* cha_;
-  intptr_t deopt_id_;  // Compilation specific counter.
-  uword vm_tag_;
-  RawGrowableObjectArray* pending_functions_;
-
-  int32_t no_callback_scope_depth_;
-
-  // All |Thread|s are registered in the thread list.
-  Thread* thread_list_next_;
-
-  // A name for this thread.
-  const char* name_;
-
-  static Thread* thread_list_head_;
-  static Mutex* thread_list_lock_;
-
-  static void AddThreadToList(Thread* thread);
-  static void RemoveThreadFromList(Thread* thread);
-
-  explicit Thread(bool init_vm_constants = true);
-
-  void InitVMConstants();
-
-  void ClearState();
+  Thread* next_;  // Used to chain the thread structures in an isolate.
+
+  explicit Thread(Isolate* isolate);

   void StoreBufferRelease(
       StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
   void StoreBufferAcquire();

   void set_zone(Zone* zone) {
-    state_.zone = zone;
+    zone_ = zone;
   }

   void set_top_exit_frame_info(uword top_exit_frame_info) {
-    state_.top_exit_frame_info = top_exit_frame_info;
+    top_exit_frame_info_ = top_exit_frame_info;
   }

-  static void SetCurrent(Thread* current);
-
-  void Schedule(Isolate* isolate, bool bypass_safepoint = false);
-  void Unschedule(bool bypass_safepoint = false);
+  static void SetCurrent(Thread* current) {
+    OSThread::SetCurrentTLS(reinterpret_cast<uword>(current));
+  }

 #define REUSABLE_FRIEND_DECLARATION(name) \
   friend class Reusable##name##HandleScope;
 REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
 #undef REUSABLE_FRIEND_DECLARATION

   friend class ApiZone;
   friend class Isolate;
   friend class Simulator;
   friend class StackZone;
-  friend class ThreadIterator;
-  friend class ThreadIteratorTestHelper;
   friend class ThreadRegistry;

   DISALLOW_COPY_AND_ASSIGN(Thread);
 };


-// Note that this takes the thread list lock, prohibiting threads from coming
-// on- or off-line.
-class ThreadIterator : public ValueObject {
- public:
-  ThreadIterator();
-  ~ThreadIterator();
-
-  // Returns false when there are no more threads left.
-  bool HasNext() const;
-
-  // Returns the current thread and moves forward.
-  Thread* Next();
-
- private:
-  Thread* next_;
-};
-
 #if defined(TARGET_OS_WINDOWS)
 // Clears the state of the current thread and frees the allocation.
 void WindowsThreadCleanUp();
 #endif


 // Disable thread interrupts.
 class DisableThreadInterruptsScope : public StackResource {
  public:
   explicit DisableThreadInterruptsScope(Thread* thread);
   ~DisableThreadInterruptsScope();
 };

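DisableThreadInterruptsScope is used in the usual RAII style: interrupts stay off for exactly the lifetime of the stack-allocated object, including on early exits. A usage sketch:

// Interrupts are disabled in the constructor and re-enabled in the
// destructor, so every exit path out of the block restores them.
void DoUninterruptibleWork(Thread* thread) {
  DisableThreadInterruptsScope scope(thread);
  // ... work that must not be interrupted (e.g. by a profiler sample) ...
}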
 } // namespace dart

 #endif // VM_THREAD_H_