| OLD | NEW |
| 1 // Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/thread.h" | 5 #include "vm/thread.h" |
| 6 | 6 |
| 7 #include "vm/compiler_stats.h" | 7 #include "vm/compiler_stats.h" |
| 8 #include "vm/dart_api_state.h" | 8 #include "vm/dart_api_state.h" |
| 9 #include "vm/growable_array.h" | 9 #include "vm/growable_array.h" |
| 10 #include "vm/isolate.h" | 10 #include "vm/isolate.h" |
| 11 #include "vm/json_stream.h" | 11 #include "vm/json_stream.h" |
| 12 #include "vm/lockers.h" | 12 #include "vm/lockers.h" |
| 13 #include "vm/log.h" | 13 #include "vm/log.h" |
| 14 #include "vm/message_handler.h" | 14 #include "vm/message_handler.h" |
| 15 #include "vm/native_entry.h" | 15 #include "vm/native_entry.h" |
| 16 #include "vm/object.h" | 16 #include "vm/object.h" |
| 17 #include "vm/os_thread.h" | 17 #include "vm/os_thread.h" |
| 18 #include "vm/profiler.h" | 18 #include "vm/profiler.h" |
| 19 #include "vm/runtime_entry.h" | 19 #include "vm/runtime_entry.h" |
| 20 #include "vm/stub_code.h" | 20 #include "vm/stub_code.h" |
| 21 #include "vm/symbols.h" | 21 #include "vm/symbols.h" |
| 22 #include "vm/thread_interrupter.h" | 22 #include "vm/thread_interrupter.h" |
| 23 #include "vm/thread_registry.h" | 23 #include "vm/thread_registry.h" |
| 24 #include "vm/timeline.h" | 24 #include "vm/timeline.h" |
| 25 #include "vm/zone.h" | 25 #include "vm/zone.h" |
| 26 | 26 |
| 27 namespace dart { | 27 namespace dart { |
| 28 | 28 |
| 29 | |
| 30 DECLARE_FLAG(bool, trace_service); | 29 DECLARE_FLAG(bool, trace_service); |
| 31 DECLARE_FLAG(bool, trace_service_verbose); | 30 DECLARE_FLAG(bool, trace_service_verbose); |
| 32 | 31 |
| 33 | |
| 34 Thread::~Thread() { | 32 Thread::~Thread() { |
| 35 // We should cleanly exit any isolate before destruction. | 33 // We should cleanly exit any isolate before destruction. |
| 36 ASSERT(isolate_ == NULL); | 34 ASSERT(isolate_ == NULL); |
| 37 if (compiler_stats_ != NULL) { | 35 if (compiler_stats_ != NULL) { |
| 38 delete compiler_stats_; | 36 delete compiler_stats_; |
| 39 compiler_stats_ = NULL; | 37 compiler_stats_ = NULL; |
| 40 } | 38 } |
| 41 // There should be no top api scopes at this point. | 39 // There should be no top api scopes at this point. |
| 42 ASSERT(api_top_scope() == NULL); | 40 ASSERT(api_top_scope() == NULL); |
| 43 // Delete the reusable api scope if there is one. | 41 // Delete the reusable api scope if there is one. |
| 44 if (api_reusable_scope_) { | 42 if (api_reusable_scope_) { |
| 45 delete api_reusable_scope_; | 43 delete api_reusable_scope_; |
| 46 api_reusable_scope_ = NULL; | 44 api_reusable_scope_ = NULL; |
| 47 } | 45 } |
| 48 delete thread_lock_; | 46 delete thread_lock_; |
| 49 thread_lock_ = NULL; | 47 thread_lock_ = NULL; |
| 50 } | 48 } |
| 51 | 49 |
| 52 #if defined(DEBUG) | 50 #if defined(DEBUG) |
| 53 #define REUSABLE_HANDLE_SCOPE_INIT(object) \ | 51 #define REUSABLE_HANDLE_SCOPE_INIT(object) \ |
| 54 reusable_##object##_handle_scope_active_(false), | 52 reusable_##object##_handle_scope_active_(false), |
| 55 #else | 53 #else |
| 56 #define REUSABLE_HANDLE_SCOPE_INIT(object) | 54 #define REUSABLE_HANDLE_SCOPE_INIT(object) |
| 57 #endif // defined(DEBUG) | 55 #endif // defined(DEBUG) |
| 58 | 56 |
| 59 #define REUSABLE_HANDLE_INITIALIZERS(object) object##_handle_(NULL), | 57 #define REUSABLE_HANDLE_INITIALIZERS(object) object##_handle_(NULL), |
| 60 | 58 |
| 61 | |
| 62 Thread::Thread(Isolate* isolate) | 59 Thread::Thread(Isolate* isolate) |
| 63 : BaseThread(false), | 60 : BaseThread(false), |
| 64 stack_limit_(0), | 61 stack_limit_(0), |
| 65 stack_overflow_flags_(0), | 62 stack_overflow_flags_(0), |
| 66 isolate_(NULL), | 63 isolate_(NULL), |
| 67 heap_(NULL), | 64 heap_(NULL), |
| 68 top_exit_frame_info_(0), | 65 top_exit_frame_info_(0), |
| 69 store_buffer_block_(NULL), | 66 store_buffer_block_(NULL), |
| 70 vm_tag_(0), | 67 vm_tag_(0), |
| 71 task_kind_(kUnknownTask), | 68 task_kind_(kUnknownTask), |
| (...skipping 69 matching lines...) | |
| 141 Zone* current = zone_; | 138 Zone* current = zone_; |
| 142 uintptr_t total_zone_capacity = 0; | 139 uintptr_t total_zone_capacity = 0; |
| 143 while (current != NULL) { | 140 while (current != NULL) { |
| 144 total_zone_capacity += current->CapacityInBytes(); | 141 total_zone_capacity += current->CapacityInBytes(); |
| 145 current = current->previous(); | 142 current = current->previous(); |
| 146 } | 143 } |
| 147 ASSERT(current_zone_capacity_ == total_zone_capacity); | 144 ASSERT(current_zone_capacity_ == total_zone_capacity); |
| 148 } | 145 } |
| 149 } | 146 } |
| 150 | 147 |
| 151 | |
| 152 static const struct ALIGN16 { | 148 static const struct ALIGN16 { |
| 153 uint64_t a; | 149 uint64_t a; |
| 154 uint64_t b; | 150 uint64_t b; |
| 155 } double_negate_constant = {0x8000000000000000LL, 0x8000000000000000LL}; | 151 } double_negate_constant = {0x8000000000000000LL, 0x8000000000000000LL}; |
| 156 | 152 |
| 157 static const struct ALIGN16 { | 153 static const struct ALIGN16 { |
| 158 uint64_t a; | 154 uint64_t a; |
| 159 uint64_t b; | 155 uint64_t b; |
| 160 } double_abs_constant = {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL}; | 156 } double_abs_constant = {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL}; |
| 161 | 157 |
| (...skipping 18 matching lines...) | |
| 180 uint32_t d; | 176 uint32_t d; |
| 181 } float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF}; | 177 } float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF}; |
| 182 | 178 |
| 183 static const struct ALIGN16 { | 179 static const struct ALIGN16 { |
| 184 uint32_t a; | 180 uint32_t a; |
| 185 uint32_t b; | 181 uint32_t b; |
| 186 uint32_t c; | 182 uint32_t c; |
| 187 uint32_t d; | 183 uint32_t d; |
| 188 } float_zerow_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000}; | 184 } float_zerow_constant = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000}; |
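These 16-byte-aligned constants are bit masks that generated SIMD code loads to negate or take the absolute value of floating-point lanes: XOR with the sign-bit mask flips the sign, AND with its complement clears it. A minimal scalar sketch of the same bit trick (illustrative only; the VM applies the masks with 128-bit loads against the `ALIGN16` constants):

```cpp
// Scalar sketch of what the masks above achieve: negation flips the sign
// bit, absolute value clears it.
#include <cstdint>
#include <cstdio>
#include <cstring>

static double BitwiseNegate(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits ^= 0x8000000000000000ULL;  // XOR with the sign-bit mask.
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

static double BitwiseAbs(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= 0x7FFFFFFFFFFFFFFFULL;  // AND away the sign bit.
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

int main() {
  std::printf("%f %f\n", BitwiseNegate(1.5), BitwiseAbs(-2.25));  // -1.5 2.25
  return 0;
}
```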
| 189 | 185 |
| 190 | |
| 191 void Thread::InitVMConstants() { | 186 void Thread::InitVMConstants() { |
| 192 #define ASSERT_VM_HEAP(type_name, member_name, init_expr, default_init_value) \ | 187 #define ASSERT_VM_HEAP(type_name, member_name, init_expr, default_init_value) \ |
| 193 ASSERT((init_expr)->IsOldObject()); | 188 ASSERT((init_expr)->IsOldObject()); |
| 194 CACHED_VM_OBJECTS_LIST(ASSERT_VM_HEAP) | 189 CACHED_VM_OBJECTS_LIST(ASSERT_VM_HEAP) |
| 195 #undef ASSERT_VM_HEAP | 190 #undef ASSERT_VM_HEAP |
| 196 | 191 |
| 197 #define INIT_VALUE(type_name, member_name, init_expr, default_init_value) \ | 192 #define INIT_VALUE(type_name, member_name, init_expr, default_init_value) \ |
| 198 ASSERT(member_name == default_init_value); \ | 193 ASSERT(member_name == default_init_value); \ |
| 199 member_name = (init_expr); | 194 member_name = (init_expr); |
| 200 CACHED_CONSTANTS_LIST(INIT_VALUE) | 195 CACHED_CONSTANTS_LIST(INIT_VALUE) |
| (...skipping 11 matching lines...) | |
| 212 LEAF_RUNTIME_ENTRY_LIST(INIT_VALUE) | 207 LEAF_RUNTIME_ENTRY_LIST(INIT_VALUE) |
| 213 #undef INIT_VALUE | 208 #undef INIT_VALUE |
| 214 | 209 |
| 215 // Set up the thread-specific reusable handles. | 210 // Set up the thread-specific reusable handles. |
| 216 #define REUSABLE_HANDLE_ALLOCATION(object) \ | 211 #define REUSABLE_HANDLE_ALLOCATION(object) \ |
| 217 this->object##_handle_ = this->AllocateReusableHandle<object>(); | 212 this->object##_handle_ = this->AllocateReusableHandle<object>(); |
| 218 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_ALLOCATION) | 213 REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_ALLOCATION) |
| 219 #undef REUSABLE_HANDLE_ALLOCATION | 214 #undef REUSABLE_HANDLE_ALLOCATION |
| 220 } | 215 } |
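`InitVMConstants` is driven by X-macro lists: one `CACHED_..._LIST` definition is expanded several times with different per-entry macros to produce asserts, initializers, and handle allocations. A self-contained sketch of the pattern, with a hypothetical list and field names (not the VM's actual lists):

```cpp
// Minimal illustration of the X-macro pattern used by the CACHED_*_LIST
// macros above: one list definition expands into both asserts and
// initializers.
#include <cassert>

#define HYPOTHETICAL_CACHED_LIST(V) \
  V(int, cached_a_, 42, 0)          \
  V(int, cached_b_, 7, 0)

struct FakeThread {
  int cached_a_ = 0;
  int cached_b_ = 0;

  void InitConstants() {
#define INIT_VALUE(type, name, init_expr, default_value) \
  assert(name == default_value);                         \
  name = (init_expr);
    HYPOTHETICAL_CACHED_LIST(INIT_VALUE)
#undef INIT_VALUE
  }
};

int main() {
  FakeThread t;
  t.InitConstants();
  assert(t.cached_a_ == 42 && t.cached_b_ == 7);
  return 0;
}
```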
| 221 | 216 |
| 222 | |
| 223 #ifndef PRODUCT | 217 #ifndef PRODUCT |
| 224 // Collect information about each individual zone associated with this thread. | 218 // Collect information about each individual zone associated with this thread. |
| 225 void Thread::PrintJSON(JSONStream* stream) const { | 219 void Thread::PrintJSON(JSONStream* stream) const { |
| 226 JSONObject jsobj(stream); | 220 JSONObject jsobj(stream); |
| 227 jsobj.AddProperty("type", "_Thread"); | 221 jsobj.AddProperty("type", "_Thread"); |
| 228 jsobj.AddPropertyF("id", "threads/%" Pd "", | 222 jsobj.AddPropertyF("id", "threads/%" Pd "", |
| 229 OSThread::ThreadIdToIntPtr(os_thread()->trace_id())); | 223 OSThread::ThreadIdToIntPtr(os_thread()->trace_id())); |
| 230 jsobj.AddProperty("kind", TaskKindToCString(task_kind())); | 224 jsobj.AddProperty("kind", TaskKindToCString(task_kind())); |
| 231 jsobj.AddPropertyF("_zoneHighWatermark", "%" Pu "", zone_high_watermark_); | 225 jsobj.AddPropertyF("_zoneHighWatermark", "%" Pu "", zone_high_watermark_); |
| 232 jsobj.AddPropertyF("_zoneCapacity", "%" Pu "", current_zone_capacity_); | 226 jsobj.AddPropertyF("_zoneCapacity", "%" Pu "", current_zone_capacity_); |
| 233 } | 227 } |
| 234 #endif | 228 #endif |
| 235 | 229 |
| 236 | |
| 237 RawGrowableObjectArray* Thread::pending_functions() { | 230 RawGrowableObjectArray* Thread::pending_functions() { |
| 238 if (pending_functions_ == GrowableObjectArray::null()) { | 231 if (pending_functions_ == GrowableObjectArray::null()) { |
| 239 pending_functions_ = GrowableObjectArray::New(Heap::kOld); | 232 pending_functions_ = GrowableObjectArray::New(Heap::kOld); |
| 240 } | 233 } |
| 241 return pending_functions_; | 234 return pending_functions_; |
| 242 } | 235 } |
| 243 | 236 |
| 244 | |
| 245 void Thread::clear_pending_functions() { | 237 void Thread::clear_pending_functions() { |
| 246 pending_functions_ = GrowableObjectArray::null(); | 238 pending_functions_ = GrowableObjectArray::null(); |
| 247 } | 239 } |
| 248 | 240 |
| 249 | |
| 250 void Thread::set_active_exception(const Object& value) { | 241 void Thread::set_active_exception(const Object& value) { |
| 251 ASSERT(!value.IsNull()); | 242 ASSERT(!value.IsNull()); |
| 252 active_exception_ = value.raw(); | 243 active_exception_ = value.raw(); |
| 253 } | 244 } |
| 254 | 245 |
| 255 | |
| 256 void Thread::set_active_stacktrace(const Object& value) { | 246 void Thread::set_active_stacktrace(const Object& value) { |
| 257 active_stacktrace_ = value.raw(); | 247 active_stacktrace_ = value.raw(); |
| 258 } | 248 } |
| 259 | 249 |
| 260 | |
| 261 RawError* Thread::sticky_error() const { | 250 RawError* Thread::sticky_error() const { |
| 262 return sticky_error_; | 251 return sticky_error_; |
| 263 } | 252 } |
| 264 | 253 |
| 265 | |
| 266 void Thread::set_sticky_error(const Error& value) { | 254 void Thread::set_sticky_error(const Error& value) { |
| 267 ASSERT(!value.IsNull()); | 255 ASSERT(!value.IsNull()); |
| 268 sticky_error_ = value.raw(); | 256 sticky_error_ = value.raw(); |
| 269 } | 257 } |
| 270 | 258 |
| 271 | |
| 272 void Thread::clear_sticky_error() { | 259 void Thread::clear_sticky_error() { |
| 273 sticky_error_ = Error::null(); | 260 sticky_error_ = Error::null(); |
| 274 } | 261 } |
| 275 | 262 |
| 276 | |
| 277 RawError* Thread::get_and_clear_sticky_error() { | 263 RawError* Thread::get_and_clear_sticky_error() { |
| 278 NoSafepointScope nss; | 264 NoSafepointScope nss; |
| 279 RawError* return_value = sticky_error_; | 265 RawError* return_value = sticky_error_; |
| 280 sticky_error_ = Error::null(); | 266 sticky_error_ = Error::null(); |
| 281 return return_value; | 267 return return_value; |
| 282 } | 268 } |
| 283 | 269 |
| 284 | |
| 285 const char* Thread::TaskKindToCString(TaskKind kind) { | 270 const char* Thread::TaskKindToCString(TaskKind kind) { |
| 286 switch (kind) { | 271 switch (kind) { |
| 287 case kUnknownTask: | 272 case kUnknownTask: |
| 288 return "kUnknownTask"; | 273 return "kUnknownTask"; |
| 289 case kMutatorTask: | 274 case kMutatorTask: |
| 290 return "kMutatorTask"; | 275 return "kMutatorTask"; |
| 291 case kCompilerTask: | 276 case kCompilerTask: |
| 292 return "kCompilerTask"; | 277 return "kCompilerTask"; |
| 293 case kSweeperTask: | 278 case kSweeperTask: |
| 294 return "kSweeperTask"; | 279 return "kSweeperTask"; |
| 295 case kMarkerTask: | 280 case kMarkerTask: |
| 296 return "kMarkerTask"; | 281 return "kMarkerTask"; |
| 297 default: | 282 default: |
| 298 UNREACHABLE(); | 283 UNREACHABLE(); |
| 299 return ""; | 284 return ""; |
| 300 } | 285 } |
| 301 } | 286 } |
| 302 | 287 |
| 303 | |
| 304 RawStackTrace* Thread::async_stack_trace() const { | 288 RawStackTrace* Thread::async_stack_trace() const { |
| 305 return async_stack_trace_; | 289 return async_stack_trace_; |
| 306 } | 290 } |
| 307 | 291 |
| 308 | |
| 309 void Thread::set_async_stack_trace(const StackTrace& stack_trace) { | 292 void Thread::set_async_stack_trace(const StackTrace& stack_trace) { |
| 310 ASSERT(!stack_trace.IsNull()); | 293 ASSERT(!stack_trace.IsNull()); |
| 311 async_stack_trace_ = stack_trace.raw(); | 294 async_stack_trace_ = stack_trace.raw(); |
| 312 } | 295 } |
| 313 | 296 |
| 314 | |
| 315 void Thread::set_raw_async_stack_trace(RawStackTrace* raw_stack_trace) { | 297 void Thread::set_raw_async_stack_trace(RawStackTrace* raw_stack_trace) { |
| 316 async_stack_trace_ = raw_stack_trace; | 298 async_stack_trace_ = raw_stack_trace; |
| 317 } | 299 } |
| 318 | 300 |
| 319 | |
| 320 void Thread::clear_async_stack_trace() { | 301 void Thread::clear_async_stack_trace() { |
| 321 async_stack_trace_ = StackTrace::null(); | 302 async_stack_trace_ = StackTrace::null(); |
| 322 } | 303 } |
| 323 | 304 |
| 324 | |
| 325 bool Thread::EnterIsolate(Isolate* isolate) { | 305 bool Thread::EnterIsolate(Isolate* isolate) { |
| 326 const bool kIsMutatorThread = true; | 306 const bool kIsMutatorThread = true; |
| 327 Thread* thread = isolate->ScheduleThread(kIsMutatorThread); | 307 Thread* thread = isolate->ScheduleThread(kIsMutatorThread); |
| 328 if (thread != NULL) { | 308 if (thread != NULL) { |
| 329 ASSERT(thread->store_buffer_block_ == NULL); | 309 ASSERT(thread->store_buffer_block_ == NULL); |
| 330 thread->task_kind_ = kMutatorTask; | 310 thread->task_kind_ = kMutatorTask; |
| 331 thread->StoreBufferAcquire(); | 311 thread->StoreBufferAcquire(); |
| 332 return true; | 312 return true; |
| 333 } | 313 } |
| 334 return false; | 314 return false; |
| 335 } | 315 } |
| 336 | 316 |
| 337 | |
| 338 void Thread::ExitIsolate() { | 317 void Thread::ExitIsolate() { |
| 339 Thread* thread = Thread::Current(); | 318 Thread* thread = Thread::Current(); |
| 340 ASSERT(thread != NULL && thread->IsMutatorThread()); | 319 ASSERT(thread != NULL && thread->IsMutatorThread()); |
| 341 DEBUG_ASSERT(!thread->IsAnyReusableHandleScopeActive()); | 320 DEBUG_ASSERT(!thread->IsAnyReusableHandleScopeActive()); |
| 342 thread->task_kind_ = kUnknownTask; | 321 thread->task_kind_ = kUnknownTask; |
| 343 Isolate* isolate = thread->isolate(); | 322 Isolate* isolate = thread->isolate(); |
| 344 ASSERT(isolate != NULL); | 323 ASSERT(isolate != NULL); |
| 345 ASSERT(thread->execution_state() == Thread::kThreadInVM); | 324 ASSERT(thread->execution_state() == Thread::kThreadInVM); |
| 346 // Clear since GC will not visit the thread once it is unscheduled. | 325 // Clear since GC will not visit the thread once it is unscheduled. |
| 347 thread->ClearReusableHandles(); | 326 thread->ClearReusableHandles(); |
| 348 thread->StoreBufferRelease(); | 327 thread->StoreBufferRelease(); |
| 349 if (isolate->is_runnable()) { | 328 if (isolate->is_runnable()) { |
| 350 thread->set_vm_tag(VMTag::kIdleTagId); | 329 thread->set_vm_tag(VMTag::kIdleTagId); |
| 351 } else { | 330 } else { |
| 352 thread->set_vm_tag(VMTag::kLoadWaitTagId); | 331 thread->set_vm_tag(VMTag::kLoadWaitTagId); |
| 353 } | 332 } |
| 354 const bool kIsMutatorThread = true; | 333 const bool kIsMutatorThread = true; |
| 355 isolate->UnscheduleThread(thread, kIsMutatorThread); | 334 isolate->UnscheduleThread(thread, kIsMutatorThread); |
| 356 } | 335 } |
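`EnterIsolate` and `ExitIsolate` must pair up: an isolate schedules at most one mutator thread, and exiting releases the store buffer and unschedules it. A toy model of that pairing invariant, built from stand-in stubs rather than the VM's types:

```cpp
// Hypothetical sketch of the enter/exit pairing that EnterIsolate and
// ExitIsolate above must preserve.
#include <cassert>
#include <cstdio>

struct StubIsolate {
  bool scheduled = false;
};

static bool EnterIsolate(StubIsolate* isolate) {
  if (isolate->scheduled) return false;  // Only one mutator at a time.
  isolate->scheduled = true;             // Stands in for ScheduleThread().
  return true;
}

static void ExitIsolate(StubIsolate* isolate) {
  assert(isolate->scheduled);
  isolate->scheduled = false;            // Stands in for UnscheduleThread().
}

int main() {
  StubIsolate isolate;
  assert(EnterIsolate(&isolate));
  assert(!EnterIsolate(&isolate));  // A second mutator is refused.
  ExitIsolate(&isolate);
  std::printf("enter/exit pairing ok\n");
  return 0;
}
```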
| 357 | 336 |
| 358 | |
| 359 bool Thread::EnterIsolateAsHelper(Isolate* isolate, | 337 bool Thread::EnterIsolateAsHelper(Isolate* isolate, |
| 360 TaskKind kind, | 338 TaskKind kind, |
| 361 bool bypass_safepoint) { | 339 bool bypass_safepoint) { |
| 362 ASSERT(kind != kMutatorTask); | 340 ASSERT(kind != kMutatorTask); |
| 363 const bool kIsNotMutatorThread = false; | 341 const bool kIsNotMutatorThread = false; |
| 364 Thread* thread = | 342 Thread* thread = |
| 365 isolate->ScheduleThread(kIsNotMutatorThread, bypass_safepoint); | 343 isolate->ScheduleThread(kIsNotMutatorThread, bypass_safepoint); |
| 366 if (thread != NULL) { | 344 if (thread != NULL) { |
| 367 ASSERT(thread->store_buffer_block_ == NULL); | 345 ASSERT(thread->store_buffer_block_ == NULL); |
| 368 // TODO(koda): Use StoreBufferAcquire once we properly flush | 346 // TODO(koda): Use StoreBufferAcquire once we properly flush |
| 369 // before Scavenge. | 347 // before Scavenge. |
| 370 thread->store_buffer_block_ = | 348 thread->store_buffer_block_ = |
| 371 thread->isolate()->store_buffer()->PopEmptyBlock(); | 349 thread->isolate()->store_buffer()->PopEmptyBlock(); |
| 372 // This thread should not be the main mutator. | 350 // This thread should not be the main mutator. |
| 373 thread->task_kind_ = kind; | 351 thread->task_kind_ = kind; |
| 374 ASSERT(!thread->IsMutatorThread()); | 352 ASSERT(!thread->IsMutatorThread()); |
| 375 return true; | 353 return true; |
| 376 } | 354 } |
| 377 return false; | 355 return false; |
| 378 } | 356 } |
| 379 | 357 |
| 380 | |
| 381 void Thread::ExitIsolateAsHelper(bool bypass_safepoint) { | 358 void Thread::ExitIsolateAsHelper(bool bypass_safepoint) { |
| 382 Thread* thread = Thread::Current(); | 359 Thread* thread = Thread::Current(); |
| 383 ASSERT(thread != NULL); | 360 ASSERT(thread != NULL); |
| 384 ASSERT(!thread->IsMutatorThread()); | 361 ASSERT(!thread->IsMutatorThread()); |
| 385 ASSERT(thread->execution_state() == Thread::kThreadInVM); | 362 ASSERT(thread->execution_state() == Thread::kThreadInVM); |
| 386 thread->task_kind_ = kUnknownTask; | 363 thread->task_kind_ = kUnknownTask; |
| 387 // Clear since GC will not visit the thread once it is unscheduled. | 364 // Clear since GC will not visit the thread once it is unscheduled. |
| 388 thread->ClearReusableHandles(); | 365 thread->ClearReusableHandles(); |
| 389 thread->StoreBufferRelease(); | 366 thread->StoreBufferRelease(); |
| 390 Isolate* isolate = thread->isolate(); | 367 Isolate* isolate = thread->isolate(); |
| 391 ASSERT(isolate != NULL); | 368 ASSERT(isolate != NULL); |
| 392 const bool kIsNotMutatorThread = false; | 369 const bool kIsNotMutatorThread = false; |
| 393 isolate->UnscheduleThread(thread, kIsNotMutatorThread, bypass_safepoint); | 370 isolate->UnscheduleThread(thread, kIsNotMutatorThread, bypass_safepoint); |
| 394 } | 371 } |
| 395 | 372 |
| 396 | |
| 397 void Thread::PrepareForGC() { | 373 void Thread::PrepareForGC() { |
| 398 ASSERT(IsAtSafepoint()); | 374 ASSERT(IsAtSafepoint()); |
| 399 // Prevent scheduling another GC by ignoring the threshold. | 375 // Prevent scheduling another GC by ignoring the threshold. |
| 400 ASSERT(store_buffer_block_ != NULL); | 376 ASSERT(store_buffer_block_ != NULL); |
| 401 StoreBufferRelease(StoreBuffer::kIgnoreThreshold); | 377 StoreBufferRelease(StoreBuffer::kIgnoreThreshold); |
| 402 // Make sure to get an *empty* block; the isolate needs all entries | 378 // Make sure to get an *empty* block; the isolate needs all entries |
| 403 // at GC time. | 379 // at GC time. |
| 404 // TODO(koda): Replace with an epilogue (PrepareAfterGC) that acquires. | 380 // TODO(koda): Replace with an epilogue (PrepareAfterGC) that acquires. |
| 405 store_buffer_block_ = isolate()->store_buffer()->PopEmptyBlock(); | 381 store_buffer_block_ = isolate()->store_buffer()->PopEmptyBlock(); |
| 406 } | 382 } |
| 407 | 383 |
| 408 | |
| 409 void Thread::SetStackLimitFromStackBase(uword stack_base) { | 384 void Thread::SetStackLimitFromStackBase(uword stack_base) { |
| 410 // Set stack limit. | 385 // Set stack limit. |
| 411 #if !defined(TARGET_ARCH_DBC) | 386 #if !defined(TARGET_ARCH_DBC) |
| 412 #if defined(USING_SIMULATOR) | 387 #if defined(USING_SIMULATOR) |
| 413 // Ignore passed-in native stack top and use Simulator stack top. | 388 // Ignore passed-in native stack top and use Simulator stack top. |
| 414 Simulator* sim = Simulator::Current(); // May allocate a simulator. | 389 Simulator* sim = Simulator::Current(); // May allocate a simulator. |
| 415 ASSERT(isolate()->simulator() == sim); // Isolate's simulator is current one. | 390 ASSERT(isolate()->simulator() == sim); // Isolate's simulator is current one. |
| 416 stack_base = sim->StackTop(); | 391 stack_base = sim->StackTop(); |
| 417 // The overflow area is accounted for by the simulator. | 392 // The overflow area is accounted for by the simulator. |
| 418 #endif | 393 #endif |
| 419 SetStackLimit(stack_base - OSThread::GetSpecifiedStackSize()); | 394 SetStackLimit(stack_base - OSThread::GetSpecifiedStackSize()); |
| 420 #else | 395 #else |
| 421 SetStackLimit(Simulator::Current()->StackTop()); | 396 SetStackLimit(Simulator::Current()->StackTop()); |
| 422 #endif // !defined(TARGET_ARCH_DBC) | 397 #endif // !defined(TARGET_ARCH_DBC) |
| 423 } | 398 } |
| 424 | 399 |
| 425 | |
| 426 void Thread::SetStackLimit(uword limit) { | 400 void Thread::SetStackLimit(uword limit) { |
| 427 // The thread setting the stack limit is not necessarily the thread which | 401 // The thread setting the stack limit is not necessarily the thread which |
| 428 // the stack limit is being set on. | 402 // the stack limit is being set on. |
| 429 MonitorLocker ml(thread_lock_); | 403 MonitorLocker ml(thread_lock_); |
| 430 if (stack_limit_ == saved_stack_limit_) { | 404 if (stack_limit_ == saved_stack_limit_) { |
| 431 // No interrupt pending, set stack_limit_ too. | 405 // No interrupt pending, set stack_limit_ too. |
| 432 stack_limit_ = limit; | 406 stack_limit_ = limit; |
| 433 } | 407 } |
| 434 saved_stack_limit_ = limit; | 408 saved_stack_limit_ = limit; |
| 435 } | 409 } |
| 436 | 410 |
| 437 | |
| 438 void Thread::ClearStackLimit() { | 411 void Thread::ClearStackLimit() { |
| 439 SetStackLimit(~static_cast<uword>(0)); | 412 SetStackLimit(~static_cast<uword>(0)); |
| 440 } | 413 } |
| 441 | 414 |
| 442 | |
| 443 /* static */ | 415 /* static */ |
| 444 uword Thread::GetCurrentStackPointer() { | 416 uword Thread::GetCurrentStackPointer() { |
| 445 #if !defined(TARGET_ARCH_DBC) | 417 #if !defined(TARGET_ARCH_DBC) |
| 446 // Since AddressSanitizer's detect_stack_use_after_return instruments the | 418 // Since AddressSanitizer's detect_stack_use_after_return instruments the |
| 447 // C++ code to give out fake stack addresses, we call a stub in that case. | 419 // C++ code to give out fake stack addresses, we call a stub in that case. |
| 448 ASSERT(StubCode::GetCStackPointer_entry() != NULL); | 420 ASSERT(StubCode::GetCStackPointer_entry() != NULL); |
| 449 uword (*func)() = reinterpret_cast<uword (*)()>( | 421 uword (*func)() = reinterpret_cast<uword (*)()>( |
| 450 StubCode::GetCStackPointer_entry()->EntryPoint()); | 422 StubCode::GetCStackPointer_entry()->EntryPoint()); |
| 451 #else | 423 #else |
| 452 uword (*func)() = NULL; | 424 uword (*func)() = NULL; |
| 453 #endif | 425 #endif |
| 454 // But for performance (and to support simulators), we normally use a local. | 426 // But for performance (and to support simulators), we normally use a local. |
| 455 #if defined(__has_feature) | 427 #if defined(__has_feature) |
| 456 #if __has_feature(address_sanitizer) || __has_feature(safe_stack) | 428 #if __has_feature(address_sanitizer) || __has_feature(safe_stack) |
| 457 uword current_sp = func(); | 429 uword current_sp = func(); |
| 458 return current_sp; | 430 return current_sp; |
| 459 #else | 431 #else |
| 460 uword stack_allocated_local_address = reinterpret_cast<uword>(&func); | 432 uword stack_allocated_local_address = reinterpret_cast<uword>(&func); |
| 461 return stack_allocated_local_address; | 433 return stack_allocated_local_address; |
| 462 #endif | 434 #endif |
| 463 #else | 435 #else |
| 464 uword stack_allocated_local_address = reinterpret_cast<uword>(&func); | 436 uword stack_allocated_local_address = reinterpret_cast<uword>(&func); |
| 465 return stack_allocated_local_address; | 437 return stack_allocated_local_address; |
| 466 #endif | 438 #endif |
| 467 } | 439 } |
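On the non-sanitizer path, the current stack pointer is approximated by taking the address of a stack-allocated local; ASan's fake stacks make that address meaningless, which is why the stub call is used there instead. A minimal stand-alone illustration of the fallback:

```cpp
// The address of a stack-allocated local approximates the current stack
// pointer, as in the fallback branch above.
#include <cstdint>
#include <cstdio>

static uintptr_t ApproximateStackPointer() {
  int local = 0;
  return reinterpret_cast<uintptr_t>(&local);
}

int main() {
  uintptr_t sp = ApproximateStackPointer();
  std::printf("approximate sp: %#zx\n", static_cast<size_t>(sp));
  return 0;
}
```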
| 468 | 440 |
| 469 | |
| 470 void Thread::ScheduleInterrupts(uword interrupt_bits) { | 441 void Thread::ScheduleInterrupts(uword interrupt_bits) { |
| 471 MonitorLocker ml(thread_lock_); | 442 MonitorLocker ml(thread_lock_); |
| 472 ScheduleInterruptsLocked(interrupt_bits); | 443 ScheduleInterruptsLocked(interrupt_bits); |
| 473 } | 444 } |
| 474 | 445 |
| 475 | |
| 476 void Thread::ScheduleInterruptsLocked(uword interrupt_bits) { | 446 void Thread::ScheduleInterruptsLocked(uword interrupt_bits) { |
| 477 ASSERT(thread_lock_->IsOwnedByCurrentThread()); | 447 ASSERT(thread_lock_->IsOwnedByCurrentThread()); |
| 478 ASSERT((interrupt_bits & ~kInterruptsMask) == 0); // Must fit in mask. | 448 ASSERT((interrupt_bits & ~kInterruptsMask) == 0); // Must fit in mask. |
| 479 | 449 |
| 480 // Check to see if any of the requested interrupts should be deferred. | 450 // Check to see if any of the requested interrupts should be deferred. |
| 481 uword defer_bits = interrupt_bits & deferred_interrupts_mask_; | 451 uword defer_bits = interrupt_bits & deferred_interrupts_mask_; |
| 482 if (defer_bits != 0) { | 452 if (defer_bits != 0) { |
| 483 deferred_interrupts_ |= defer_bits; | 453 deferred_interrupts_ |= defer_bits; |
| 484 interrupt_bits &= ~deferred_interrupts_mask_; | 454 interrupt_bits &= ~deferred_interrupts_mask_; |
| 485 if (interrupt_bits == 0) { | 455 if (interrupt_bits == 0) { |
| 486 return; | 456 return; |
| 487 } | 457 } |
| 488 } | 458 } |
| 489 | 459 |
| 490 if (stack_limit_ == saved_stack_limit_) { | 460 if (stack_limit_ == saved_stack_limit_) { |
| 491 stack_limit_ = kInterruptStackLimit & ~kInterruptsMask; | 461 stack_limit_ = kInterruptStackLimit & ~kInterruptsMask; |
| 492 } | 462 } |
| 493 stack_limit_ |= interrupt_bits; | 463 stack_limit_ |= interrupt_bits; |
| 494 } | 464 } |
| 495 | 465 |
| 496 | |
| 497 uword Thread::GetAndClearInterrupts() { | 466 uword Thread::GetAndClearInterrupts() { |
| 498 MonitorLocker ml(thread_lock_); | 467 MonitorLocker ml(thread_lock_); |
| 499 if (stack_limit_ == saved_stack_limit_) { | 468 if (stack_limit_ == saved_stack_limit_) { |
| 500 return 0; // No interrupt was requested. | 469 return 0; // No interrupt was requested. |
| 501 } | 470 } |
| 502 uword interrupt_bits = stack_limit_ & kInterruptsMask; | 471 uword interrupt_bits = stack_limit_ & kInterruptsMask; |
| 503 stack_limit_ = saved_stack_limit_; | 472 stack_limit_ = saved_stack_limit_; |
| 504 return interrupt_bits; | 473 return interrupt_bits; |
| 505 } | 474 } |
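Interrupt requests are encoded in the low bits of `stack_limit_`: with no interrupt pending, `stack_limit_` equals `saved_stack_limit_`; scheduling an interrupt swaps in an always-failing limit ORed with the request bits, so the next stack-overflow check in generated code traps and the interrupt is serviced. A self-contained model of that encoding, with illustrative constants (not the VM's actual values):

```cpp
// Model of the interrupt encoding above: request bits live in the low bits
// of an always-failing stack limit.
#include <cassert>
#include <cstdint>

using uword = uintptr_t;

static const uword kInterruptsMask = 0x3;  // Illustrative: two low bits.
static const uword kInterruptStackLimit = ~static_cast<uword>(0);

struct StackLimits {
  uword stack_limit = 0;
  uword saved_stack_limit = 0;

  void ScheduleInterrupts(uword bits) {
    assert((bits & ~kInterruptsMask) == 0);
    if (stack_limit == saved_stack_limit) {
      stack_limit = kInterruptStackLimit & ~kInterruptsMask;
    }
    stack_limit |= bits;
  }

  uword GetAndClearInterrupts() {
    if (stack_limit == saved_stack_limit) return 0;  // Nothing pending.
    uword bits = stack_limit & kInterruptsMask;
    stack_limit = saved_stack_limit;
    return bits;
  }
};

int main() {
  StackLimits limits;
  limits.ScheduleInterrupts(0x2);
  assert(limits.stack_limit != limits.saved_stack_limit);  // Check now fails.
  assert(limits.GetAndClearInterrupts() == 0x2);
  assert(limits.stack_limit == limits.saved_stack_limit);  // Back to normal.
  return 0;
}
```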
| 506 | 475 |
| 507 | |
| 508 bool Thread::ZoneIsOwnedByThread(Zone* zone) const { | 476 bool Thread::ZoneIsOwnedByThread(Zone* zone) const { |
| 509 ASSERT(zone != NULL); | 477 ASSERT(zone != NULL); |
| 510 Zone* current = zone_; | 478 Zone* current = zone_; |
| 511 while (current != NULL) { | 479 while (current != NULL) { |
| 512 if (current == zone) { | 480 if (current == zone) { |
| 513 return true; | 481 return true; |
| 514 } | 482 } |
| 515 current = current->previous(); | 483 current = current->previous(); |
| 516 } | 484 } |
| 517 return false; | 485 return false; |
| 518 } | 486 } |
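Zones form a singly linked chain through `previous()`, and ownership is just a walk of that chain; the same walk sums capacities and counts handles elsewhere in this file. A minimal stand-in:

```cpp
// Stand-in for the zone-chain walk in ZoneIsOwnedByThread above.
#include <cassert>

struct Zone {
  Zone* previous = nullptr;
};

static bool ZoneIsOwned(Zone* head, Zone* target) {
  for (Zone* current = head; current != nullptr; current = current->previous) {
    if (current == target) return true;
  }
  return false;
}

int main() {
  Zone outer, inner;
  inner.previous = &outer;  // inner was opened while outer was active.
  assert(ZoneIsOwned(&inner, &outer));
  Zone unrelated;
  assert(!ZoneIsOwned(&inner, &unrelated));
  return 0;
}
```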
| 519 | 487 |
| 520 | |
| 521 void Thread::DeferOOBMessageInterrupts() { | 488 void Thread::DeferOOBMessageInterrupts() { |
| 522 MonitorLocker ml(thread_lock_); | 489 MonitorLocker ml(thread_lock_); |
| 523 defer_oob_messages_count_++; | 490 defer_oob_messages_count_++; |
| 524 if (defer_oob_messages_count_ > 1) { | 491 if (defer_oob_messages_count_ > 1) { |
| 525 // OOB message interrupts are already deferred. | 492 // OOB message interrupts are already deferred. |
| 526 return; | 493 return; |
| 527 } | 494 } |
| 528 ASSERT(deferred_interrupts_mask_ == 0); | 495 ASSERT(deferred_interrupts_mask_ == 0); |
| 529 deferred_interrupts_mask_ = kMessageInterrupt; | 496 deferred_interrupts_mask_ = kMessageInterrupt; |
| 530 | 497 |
| 531 if (stack_limit_ != saved_stack_limit_) { | 498 if (stack_limit_ != saved_stack_limit_) { |
| 532 // Defer any interrupts which are currently pending. | 499 // Defer any interrupts which are currently pending. |
| 533 deferred_interrupts_ = stack_limit_ & deferred_interrupts_mask_; | 500 deferred_interrupts_ = stack_limit_ & deferred_interrupts_mask_; |
| 534 | 501 |
| 535 // Clear deferrable interrupts, if present. | 502 // Clear deferrable interrupts, if present. |
| 536 stack_limit_ &= ~deferred_interrupts_mask_; | 503 stack_limit_ &= ~deferred_interrupts_mask_; |
| 537 | 504 |
| 538 if ((stack_limit_ & kInterruptsMask) == 0) { | 505 if ((stack_limit_ & kInterruptsMask) == 0) { |
| 539 // No other pending interrupts. Restore normal stack limit. | 506 // No other pending interrupts. Restore normal stack limit. |
| 540 stack_limit_ = saved_stack_limit_; | 507 stack_limit_ = saved_stack_limit_; |
| 541 } | 508 } |
| 542 } | 509 } |
| 543 if (FLAG_trace_service && FLAG_trace_service_verbose) { | 510 if (FLAG_trace_service && FLAG_trace_service_verbose) { |
| 544 OS::Print("[+%" Pd64 "ms] Isolate %s deferring OOB interrupts\n", | 511 OS::Print("[+%" Pd64 "ms] Isolate %s deferring OOB interrupts\n", |
| 545 Dart::UptimeMillis(), isolate()->name()); | 512 Dart::UptimeMillis(), isolate()->name()); |
| 546 } | 513 } |
| 547 } | 514 } |
| 548 | 515 |
| 549 | |
| 550 void Thread::RestoreOOBMessageInterrupts() { | 516 void Thread::RestoreOOBMessageInterrupts() { |
| 551 MonitorLocker ml(thread_lock_); | 517 MonitorLocker ml(thread_lock_); |
| 552 defer_oob_messages_count_--; | 518 defer_oob_messages_count_--; |
| 553 if (defer_oob_messages_count_ > 0) { | 519 if (defer_oob_messages_count_ > 0) { |
| 554 return; | 520 return; |
| 555 } | 521 } |
| 556 ASSERT(defer_oob_messages_count_ == 0); | 522 ASSERT(defer_oob_messages_count_ == 0); |
| 557 ASSERT(deferred_interrupts_mask_ == kMessageInterrupt); | 523 ASSERT(deferred_interrupts_mask_ == kMessageInterrupt); |
| 558 deferred_interrupts_mask_ = 0; | 524 deferred_interrupts_mask_ = 0; |
| 559 if (deferred_interrupts_ != 0) { | 525 if (deferred_interrupts_ != 0) { |
| 560 if (stack_limit_ == saved_stack_limit_) { | 526 if (stack_limit_ == saved_stack_limit_) { |
| 561 stack_limit_ = kInterruptStackLimit & ~kInterruptsMask; | 527 stack_limit_ = kInterruptStackLimit & ~kInterruptsMask; |
| 562 } | 528 } |
| 563 stack_limit_ |= deferred_interrupts_; | 529 stack_limit_ |= deferred_interrupts_; |
| 564 deferred_interrupts_ = 0; | 530 deferred_interrupts_ = 0; |
| 565 } | 531 } |
| 566 if (FLAG_trace_service && FLAG_trace_service_verbose) { | 532 if (FLAG_trace_service && FLAG_trace_service_verbose) { |
| 567 OS::Print("[+%" Pd64 "ms] Isolate %s restoring OOB interrupts\n", | 533 OS::Print("[+%" Pd64 "ms] Isolate %s restoring OOB interrupts\n", |
| 568 Dart::UptimeMillis(), isolate()->name()); | 534 Dart::UptimeMillis(), isolate()->name()); |
| 569 } | 535 } |
| 570 } | 536 } |
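OOB-message deferral is reference-counted: only the outermost `Defer` installs the mask and captures pending bits, and only the outermost `Restore` re-raises them. A sketch of that counting discipline with stand-in types (the VM's locking and stack-limit plumbing omitted):

```cpp
// Sketch of the nested defer/restore counting above.
#include <cassert>

struct DeferState {
  int defer_count = 0;
  unsigned deferred_mask = 0;
  unsigned deferred_bits = 0;

  void Defer(unsigned mask) {
    if (++defer_count > 1) return;  // Already deferring; just nest.
    deferred_mask = mask;
  }

  unsigned Restore() {
    if (--defer_count > 0) return 0;  // Still nested.
    deferred_mask = 0;
    unsigned bits = deferred_bits;    // Re-raise anything deferred meanwhile.
    deferred_bits = 0;
    return bits;
  }
};

int main() {
  DeferState s;
  s.Defer(0x1);
  s.Defer(0x1);                 // Nested defer.
  s.deferred_bits = 0x1;        // An OOB interrupt arrived while deferred.
  assert(s.Restore() == 0);     // Inner restore: still deferred.
  assert(s.Restore() == 0x1);   // Outer restore re-raises the bit.
  return 0;
}
```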
| 571 | 537 |
| 572 | |
| 573 RawError* Thread::HandleInterrupts() { | 538 RawError* Thread::HandleInterrupts() { |
| 574 uword interrupt_bits = GetAndClearInterrupts(); | 539 uword interrupt_bits = GetAndClearInterrupts(); |
| 575 if ((interrupt_bits & kVMInterrupt) != 0) { | 540 if ((interrupt_bits & kVMInterrupt) != 0) { |
| 576 if (isolate()->store_buffer()->Overflowed()) { | 541 if (isolate()->store_buffer()->Overflowed()) { |
| 577 if (FLAG_verbose_gc) { | 542 if (FLAG_verbose_gc) { |
| 578 OS::PrintErr("Scavenge scheduled by store buffer overflow.\n"); | 543 OS::PrintErr("Scavenge scheduled by store buffer overflow.\n"); |
| 579 } | 544 } |
| 580 heap()->CollectGarbage(Heap::kNew); | 545 heap()->CollectGarbage(Heap::kNew); |
| 581 } | 546 } |
| 582 } | 547 } |
| (...skipping 12 matching lines...) | |
| 595 Thread* thread = Thread::Current(); | 560 Thread* thread = Thread::Current(); |
| 596 const Error& error = Error::Handle(thread->sticky_error()); | 561 const Error& error = Error::Handle(thread->sticky_error()); |
| 597 ASSERT(!error.IsNull() && error.IsUnwindError()); | 562 ASSERT(!error.IsNull() && error.IsUnwindError()); |
| 598 thread->clear_sticky_error(); | 563 thread->clear_sticky_error(); |
| 599 return error.raw(); | 564 return error.raw(); |
| 600 } | 565 } |
| 601 } | 566 } |
| 602 return Error::null(); | 567 return Error::null(); |
| 603 } | 568 } |
| 604 | 569 |
| 605 | |
| 606 uword Thread::GetAndClearStackOverflowFlags() { | 570 uword Thread::GetAndClearStackOverflowFlags() { |
| 607 uword stack_overflow_flags = stack_overflow_flags_; | 571 uword stack_overflow_flags = stack_overflow_flags_; |
| 608 stack_overflow_flags_ = 0; | 572 stack_overflow_flags_ = 0; |
| 609 return stack_overflow_flags; | 573 return stack_overflow_flags; |
| 610 } | 574 } |
| 611 | 575 |
| 612 | |
| 613 void Thread::StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy) { | 576 void Thread::StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy) { |
| 614 StoreBufferRelease(policy); | 577 StoreBufferRelease(policy); |
| 615 StoreBufferAcquire(); | 578 StoreBufferAcquire(); |
| 616 } | 579 } |
| 617 | 580 |
| 618 | |
| 619 void Thread::StoreBufferAddObject(RawObject* obj) { | 581 void Thread::StoreBufferAddObject(RawObject* obj) { |
| 620 store_buffer_block_->Push(obj); | 582 store_buffer_block_->Push(obj); |
| 621 if (store_buffer_block_->IsFull()) { | 583 if (store_buffer_block_->IsFull()) { |
| 622 StoreBufferBlockProcess(StoreBuffer::kCheckThreshold); | 584 StoreBufferBlockProcess(StoreBuffer::kCheckThreshold); |
| 623 } | 585 } |
| 624 } | 586 } |
| 625 | 587 |
| 626 | |
| 627 void Thread::StoreBufferAddObjectGC(RawObject* obj) { | 588 void Thread::StoreBufferAddObjectGC(RawObject* obj) { |
| 628 store_buffer_block_->Push(obj); | 589 store_buffer_block_->Push(obj); |
| 629 if (store_buffer_block_->IsFull()) { | 590 if (store_buffer_block_->IsFull()) { |
| 630 StoreBufferBlockProcess(StoreBuffer::kIgnoreThreshold); | 591 StoreBufferBlockProcess(StoreBuffer::kIgnoreThreshold); |
| 631 } | 592 } |
| 632 } | 593 } |
| 633 | 594 |
| 634 | |
| 635 void Thread::StoreBufferRelease(StoreBuffer::ThresholdPolicy policy) { | 595 void Thread::StoreBufferRelease(StoreBuffer::ThresholdPolicy policy) { |
| 636 StoreBufferBlock* block = store_buffer_block_; | 596 StoreBufferBlock* block = store_buffer_block_; |
| 637 store_buffer_block_ = NULL; | 597 store_buffer_block_ = NULL; |
| 638 isolate()->store_buffer()->PushBlock(block, policy); | 598 isolate()->store_buffer()->PushBlock(block, policy); |
| 639 } | 599 } |
| 640 | 600 |
| 641 | |
| 642 void Thread::StoreBufferAcquire() { | 601 void Thread::StoreBufferAcquire() { |
| 643 store_buffer_block_ = isolate()->store_buffer()->PopNonFullBlock(); | 602 store_buffer_block_ = isolate()->store_buffer()->PopNonFullBlock(); |
| 644 } | 603 } |
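The store-buffer protocol above is: push into a thread-local block, and when it fills, trade it to the isolate's shared buffer for a fresh one. A toy model with illustrative capacities and stand-in types:

```cpp
// Toy model of the store-buffer flow: a full thread-local block is handed
// to a shared buffer in exchange for a fresh one.
#include <cassert>
#include <cstddef>
#include <vector>

struct Block {
  static const size_t kCapacity = 4;  // Illustrative capacity.
  std::vector<void*> slots;
  bool IsFull() const { return slots.size() == kCapacity; }
  void Push(void* obj) { slots.push_back(obj); }
};

struct SharedStoreBuffer {
  std::vector<Block*> full_blocks;
  void PushBlock(Block* block) { full_blocks.push_back(block); }
  Block* PopNonFullBlock() { return new Block(); }
};

int main() {
  SharedStoreBuffer shared;
  Block* current = shared.PopNonFullBlock();
  int dummy[5];
  for (int i = 0; i < 5; i++) {
    current->Push(&dummy[i]);
    if (current->IsFull()) {  // Mirrors StoreBufferBlockProcess above.
      shared.PushBlock(current);
      current = shared.PopNonFullBlock();
    }
  }
  assert(shared.full_blocks.size() == 1);
  delete current;
  for (Block* b : shared.full_blocks) delete b;
  return 0;
}
```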
| 645 | 604 |
| 646 | |
| 647 bool Thread::IsMutatorThread() const { | 605 bool Thread::IsMutatorThread() const { |
| 648 return ((isolate_ != NULL) && (isolate_->mutator_thread() == this)); | 606 return ((isolate_ != NULL) && (isolate_->mutator_thread() == this)); |
| 649 } | 607 } |
| 650 | 608 |
| 651 | |
| 652 bool Thread::CanCollectGarbage() const { | 609 bool Thread::CanCollectGarbage() const { |
| 653 // We grow the heap instead of triggering a garbage collection when a | 610 // We grow the heap instead of triggering a garbage collection when a |
| 654 // thread is at a safepoint in the following situations: | 611 // thread is at a safepoint in the following situations: |
| 655 // - background compiler thread finalizing and installing code | 612 // - background compiler thread finalizing and installing code |
| 656 // - disassembly of the generated code is done after compilation | 613 // - disassembly of the generated code is done after compilation |
| 657 // So essentially we state that garbage collection is possible only | 614 // So essentially we state that garbage collection is possible only |
| 658 // when we are not at a safepoint. | 615 // when we are not at a safepoint. |
| 659 return !IsAtSafepoint(); | 616 return !IsAtSafepoint(); |
| 660 } | 617 } |
| 661 | 618 |
| 662 | |
| 663 bool Thread::IsExecutingDartCode() const { | 619 bool Thread::IsExecutingDartCode() const { |
| 664 return (top_exit_frame_info() == 0) && (vm_tag() == VMTag::kDartTagId); | 620 return (top_exit_frame_info() == 0) && (vm_tag() == VMTag::kDartTagId); |
| 665 } | 621 } |
| 666 | 622 |
| 667 | |
| 668 bool Thread::HasExitedDartCode() const { | 623 bool Thread::HasExitedDartCode() const { |
| 669 return (top_exit_frame_info() != 0) && (vm_tag() != VMTag::kDartTagId); | 624 return (top_exit_frame_info() != 0) && (vm_tag() != VMTag::kDartTagId); |
| 670 } | 625 } |
| 671 | 626 |
| 672 | |
| 673 template <class C> | 627 template <class C> |
| 674 C* Thread::AllocateReusableHandle() { | 628 C* Thread::AllocateReusableHandle() { |
| 675 C* handle = reinterpret_cast<C*>(reusable_handles_.AllocateScopedHandle()); | 629 C* handle = reinterpret_cast<C*>(reusable_handles_.AllocateScopedHandle()); |
| 676 C::initializeHandle(handle, C::null()); | 630 C::initializeHandle(handle, C::null()); |
| 677 return handle; | 631 return handle; |
| 678 } | 632 } |
| 679 | 633 |
| 680 | |
| 681 void Thread::ClearReusableHandles() { | 634 void Thread::ClearReusableHandles() { |
| 682 #define CLEAR_REUSABLE_HANDLE(object) *object##_handle_ = object::null(); | 635 #define CLEAR_REUSABLE_HANDLE(object) *object##_handle_ = object::null(); |
| 683 REUSABLE_HANDLE_LIST(CLEAR_REUSABLE_HANDLE) | 636 REUSABLE_HANDLE_LIST(CLEAR_REUSABLE_HANDLE) |
| 684 #undef CLEAR_REUSABLE_HANDLE | 637 #undef CLEAR_REUSABLE_HANDLE |
| 685 } | 638 } |
| 686 | 639 |
| 687 | |
| 688 void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor, | 640 void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor, |
| 689 bool validate_frames) { | 641 bool validate_frames) { |
| 690 ASSERT(visitor != NULL); | 642 ASSERT(visitor != NULL); |
| 691 | 643 |
| 692 if (zone_ != NULL) { | 644 if (zone_ != NULL) { |
| 693 zone_->VisitObjectPointers(visitor); | 645 zone_->VisitObjectPointers(visitor); |
| 694 } | 646 } |
| 695 | 647 |
| 696 // Visit objects in thread specific handles area. | 648 // Visit objects in thread specific handles area. |
| 697 reusable_handles_.VisitObjectPointers(visitor); | 649 reusable_handles_.VisitObjectPointers(visitor); |
| (...skipping 31 matching lines...) | |
| 729 // Iterate over all the stack frames and visit objects on the stack. | 681 // Iterate over all the stack frames and visit objects on the stack. |
| 730 StackFrameIterator frames_iterator(top_exit_frame_info(), validation_policy, | 682 StackFrameIterator frames_iterator(top_exit_frame_info(), validation_policy, |
| 731 this, cross_thread_policy); | 683 this, cross_thread_policy); |
| 732 StackFrame* frame = frames_iterator.NextFrame(); | 684 StackFrame* frame = frames_iterator.NextFrame(); |
| 733 while (frame != NULL) { | 685 while (frame != NULL) { |
| 734 frame->VisitObjectPointers(visitor); | 686 frame->VisitObjectPointers(visitor); |
| 735 frame = frames_iterator.NextFrame(); | 687 frame = frames_iterator.NextFrame(); |
| 736 } | 688 } |
| 737 } | 689 } |
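Root visiting walks the thread's zones, its reusable handles, and then every stack frame via the frame iterator, with each frame reporting its embedded object pointers to the visitor. A skeleton of the frame-iteration half with stand-in types:

```cpp
// Skeleton of the frame-walking loop above: each frame reports its object
// slots to a visitor.
#include <cstdio>
#include <vector>

struct Visitor {
  int visited = 0;
  void VisitPointer(void* p) { (void)p; visited++; }
};

struct Frame {
  std::vector<void*> object_slots;
  void VisitObjectPointers(Visitor* v) {
    for (void* slot : object_slots) v->VisitPointer(slot);
  }
};

int main() {
  int a = 1, b = 2, c = 3;
  std::vector<Frame> frames = {{{&a}}, {{&b, &c}}};
  Visitor visitor;
  for (Frame& frame : frames) {  // Mirrors the frame-iterator loop.
    frame.VisitObjectPointers(&visitor);
  }
  std::printf("visited %d pointers\n", visitor.visited);
  return 0;
}
```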
| 738 | 690 |
| 739 | |
| 740 bool Thread::CanLoadFromThread(const Object& object) { | 691 bool Thread::CanLoadFromThread(const Object& object) { |
| 741 #define CHECK_OBJECT(type_name, member_name, expr, default_init_value) \ | 692 #define CHECK_OBJECT(type_name, member_name, expr, default_init_value) \ |
| 742 if (object.raw() == expr) return true; | 693 if (object.raw() == expr) return true; |
| 743 CACHED_VM_OBJECTS_LIST(CHECK_OBJECT) | 694 CACHED_VM_OBJECTS_LIST(CHECK_OBJECT) |
| 744 #undef CHECK_OBJECT | 695 #undef CHECK_OBJECT |
| 745 return false; | 696 return false; |
| 746 } | 697 } |
| 747 | 698 |
| 748 | |
| 749 intptr_t Thread::OffsetFromThread(const Object& object) { | 699 intptr_t Thread::OffsetFromThread(const Object& object) { |
| 750 #define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \ | 700 #define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \ |
| 751 ASSERT((expr)->IsVMHeapObject()); \ | 701 ASSERT((expr)->IsVMHeapObject()); \ |
| 752 if (object.raw() == expr) return Thread::member_name##offset(); | 702 if (object.raw() == expr) return Thread::member_name##offset(); |
| 753 CACHED_VM_OBJECTS_LIST(COMPUTE_OFFSET) | 703 CACHED_VM_OBJECTS_LIST(COMPUTE_OFFSET) |
| 754 #undef COMPUTE_OFFSET | 704 #undef COMPUTE_OFFSET |
| 755 UNREACHABLE(); | 705 UNREACHABLE(); |
| 756 return -1; | 706 return -1; |
| 757 } | 707 } |
| 758 | 708 |
| 759 | |
| 760 bool Thread::ObjectAtOffset(intptr_t offset, Object* object) { | 709 bool Thread::ObjectAtOffset(intptr_t offset, Object* object) { |
| 761 if (Isolate::Current() == Dart::vm_isolate()) { | 710 if (Isolate::Current() == Dart::vm_isolate()) { |
| 762 // --disassemble-stubs runs before all the references through | 711 // --disassemble-stubs runs before all the references through |
| 763 // thread have targets. | 712 // thread have targets. |
| 764 return false; | 713 return false; |
| 765 } | 714 } |
| 766 | 715 |
| 767 #define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \ | 716 #define COMPUTE_OFFSET(type_name, member_name, expr, default_init_value) \ |
| 768 if (Thread::member_name##offset() == offset) { \ | 717 if (Thread::member_name##offset() == offset) { \ |
| 769 *object = expr; \ | 718 *object = expr; \ |
| 770 return true; \ | 719 return true; \ |
| 771 } | 720 } |
| 772 CACHED_VM_OBJECTS_LIST(COMPUTE_OFFSET) | 721 CACHED_VM_OBJECTS_LIST(COMPUTE_OFFSET) |
| 773 #undef COMPUTE_OFFSET | 722 #undef COMPUTE_OFFSET |
| 774 return false; | 723 return false; |
| 775 } | 724 } |
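`OffsetFromThread` and `ObjectAtOffset` form a bidirectional map between cached VM objects and their field offsets in `Thread`, so generated code can load `[THR + offset]` instead of embedding the object pointer. An illustrative offset round-trip on a stand-in struct:

```cpp
// Illustrative offset round-trip like OffsetFromThread/ObjectAtOffset,
// using offsetof on a stand-in struct.
#include <cassert>
#include <cstddef>

struct FakeThread {
  void* cached_object_a;
  void* cached_object_b;
};

static size_t OffsetFor(int which) {
  return which == 0 ? offsetof(FakeThread, cached_object_a)
                    : offsetof(FakeThread, cached_object_b);
}

int main() {
  FakeThread t{&t, nullptr};
  // Generated code would load [thread_reg + offset] rather than the object.
  void** slot = reinterpret_cast<void**>(
      reinterpret_cast<char*>(&t) + OffsetFor(0));
  assert(*slot == &t);
  return 0;
}
```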
| 776 | 725 |
| 777 | |
| 778 intptr_t Thread::OffsetFromThread(const RuntimeEntry* runtime_entry) { | 726 intptr_t Thread::OffsetFromThread(const RuntimeEntry* runtime_entry) { |
| 779 #define COMPUTE_OFFSET(name) \ | 727 #define COMPUTE_OFFSET(name) \ |
| 780 if (runtime_entry->function() == k##name##RuntimeEntry.function()) { \ | 728 if (runtime_entry->function() == k##name##RuntimeEntry.function()) { \ |
| 781 return Thread::name##_entry_point_offset(); \ | 729 return Thread::name##_entry_point_offset(); \ |
| 782 } | 730 } |
| 783 RUNTIME_ENTRY_LIST(COMPUTE_OFFSET) | 731 RUNTIME_ENTRY_LIST(COMPUTE_OFFSET) |
| 784 #undef COMPUTE_OFFSET | 732 #undef COMPUTE_OFFSET |
| 785 | 733 |
| 786 #define COMPUTE_OFFSET(returntype, name, ...) \ | 734 #define COMPUTE_OFFSET(returntype, name, ...) \ |
| 787 if (runtime_entry->function() == k##name##RuntimeEntry.function()) { \ | 735 if (runtime_entry->function() == k##name##RuntimeEntry.function()) { \ |
| 788 return Thread::name##_entry_point_offset(); \ | 736 return Thread::name##_entry_point_offset(); \ |
| 789 } | 737 } |
| 790 LEAF_RUNTIME_ENTRY_LIST(COMPUTE_OFFSET) | 738 LEAF_RUNTIME_ENTRY_LIST(COMPUTE_OFFSET) |
| 791 #undef COMPUTE_OFFSET | 739 #undef COMPUTE_OFFSET |
| 792 | 740 |
| 793 UNREACHABLE(); | 741 UNREACHABLE(); |
| 794 return -1; | 742 return -1; |
| 795 } | 743 } |
| 796 | 744 |
| 797 | |
| 798 bool Thread::IsValidHandle(Dart_Handle object) const { | 745 bool Thread::IsValidHandle(Dart_Handle object) const { |
| 799 return IsValidLocalHandle(object) || IsValidZoneHandle(object) || | 746 return IsValidLocalHandle(object) || IsValidZoneHandle(object) || |
| 800 IsValidScopedHandle(object); | 747 IsValidScopedHandle(object); |
| 801 } | 748 } |
| 802 | 749 |
| 803 | |
| 804 bool Thread::IsValidLocalHandle(Dart_Handle object) const { | 750 bool Thread::IsValidLocalHandle(Dart_Handle object) const { |
| 805 ApiLocalScope* scope = api_top_scope_; | 751 ApiLocalScope* scope = api_top_scope_; |
| 806 while (scope != NULL) { | 752 while (scope != NULL) { |
| 807 if (scope->local_handles()->IsValidHandle(object)) { | 753 if (scope->local_handles()->IsValidHandle(object)) { |
| 808 return true; | 754 return true; |
| 809 } | 755 } |
| 810 scope = scope->previous(); | 756 scope = scope->previous(); |
| 811 } | 757 } |
| 812 return false; | 758 return false; |
| 813 } | 759 } |
| 814 | 760 |
| 815 | |
| 816 intptr_t Thread::CountLocalHandles() const { | 761 intptr_t Thread::CountLocalHandles() const { |
| 817 intptr_t total = 0; | 762 intptr_t total = 0; |
| 818 ApiLocalScope* scope = api_top_scope_; | 763 ApiLocalScope* scope = api_top_scope_; |
| 819 while (scope != NULL) { | 764 while (scope != NULL) { |
| 820 total += scope->local_handles()->CountHandles(); | 765 total += scope->local_handles()->CountHandles(); |
| 821 scope = scope->previous(); | 766 scope = scope->previous(); |
| 822 } | 767 } |
| 823 return total; | 768 return total; |
| 824 } | 769 } |
| 825 | 770 |
| 826 | |
| 827 bool Thread::IsValidZoneHandle(Dart_Handle object) const { | 771 bool Thread::IsValidZoneHandle(Dart_Handle object) const { |
| 828 Zone* zone = zone_; | 772 Zone* zone = zone_; |
| 829 while (zone != NULL) { | 773 while (zone != NULL) { |
| 830 if (zone->handles()->IsValidZoneHandle(reinterpret_cast<uword>(object))) { | 774 if (zone->handles()->IsValidZoneHandle(reinterpret_cast<uword>(object))) { |
| 831 return true; | 775 return true; |
| 832 } | 776 } |
| 833 zone = zone->previous(); | 777 zone = zone->previous(); |
| 834 } | 778 } |
| 835 return false; | 779 return false; |
| 836 } | 780 } |
| 837 | 781 |
| 838 | |
| 839 intptr_t Thread::CountZoneHandles() const { | 782 intptr_t Thread::CountZoneHandles() const { |
| 840 intptr_t count = 0; | 783 intptr_t count = 0; |
| 841 Zone* zone = zone_; | 784 Zone* zone = zone_; |
| 842 while (zone != NULL) { | 785 while (zone != NULL) { |
| 843 count += zone->handles()->CountZoneHandles(); | 786 count += zone->handles()->CountZoneHandles(); |
| 844 zone = zone->previous(); | 787 zone = zone->previous(); |
| 845 } | 788 } |
| 846 ASSERT(count >= 0); | 789 ASSERT(count >= 0); |
| 847 return count; | 790 return count; |
| 848 } | 791 } |
| 849 | 792 |
| 850 | |
| 851 bool Thread::IsValidScopedHandle(Dart_Handle object) const { | 793 bool Thread::IsValidScopedHandle(Dart_Handle object) const { |
| 852 Zone* zone = zone_; | 794 Zone* zone = zone_; |
| 853 while (zone != NULL) { | 795 while (zone != NULL) { |
| 854 if (zone->handles()->IsValidScopedHandle(reinterpret_cast<uword>(object))) { | 796 if (zone->handles()->IsValidScopedHandle(reinterpret_cast<uword>(object))) { |
| 855 return true; | 797 return true; |
| 856 } | 798 } |
| 857 zone = zone->previous(); | 799 zone = zone->previous(); |
| 858 } | 800 } |
| 859 return false; | 801 return false; |
| 860 } | 802 } |
| 861 | 803 |
| 862 | |
| 863 intptr_t Thread::CountScopedHandles() const { | 804 intptr_t Thread::CountScopedHandles() const { |
| 864 intptr_t count = 0; | 805 intptr_t count = 0; |
| 865 Zone* zone = zone_; | 806 Zone* zone = zone_; |
| 866 while (zone != NULL) { | 807 while (zone != NULL) { |
| 867 count += zone->handles()->CountScopedHandles(); | 808 count += zone->handles()->CountScopedHandles(); |
| 868 zone = zone->previous(); | 809 zone = zone->previous(); |
| 869 } | 810 } |
| 870 ASSERT(count >= 0); | 811 ASSERT(count >= 0); |
| 871 return count; | 812 return count; |
| 872 } | 813 } |
| 873 | 814 |
| 874 | |
| 875 int Thread::ZoneSizeInBytes() const { | 815 int Thread::ZoneSizeInBytes() const { |
| 876 int total = 0; | 816 int total = 0; |
| 877 ApiLocalScope* scope = api_top_scope_; | 817 ApiLocalScope* scope = api_top_scope_; |
| 878 while (scope != NULL) { | 818 while (scope != NULL) { |
| 879 total += scope->zone()->SizeInBytes(); | 819 total += scope->zone()->SizeInBytes(); |
| 880 scope = scope->previous(); | 820 scope = scope->previous(); |
| 881 } | 821 } |
| 882 return total; | 822 return total; |
| 883 } | 823 } |
| 884 | 824 |
| 885 | |
| 886 void Thread::UnwindScopes(uword stack_marker) { | 825 void Thread::UnwindScopes(uword stack_marker) { |
| 887 // Unwind all scopes using the same stack_marker, i.e. all scopes allocated | 826 // Unwind all scopes using the same stack_marker, i.e. all scopes allocated |
| 888 // under the same top_exit_frame_info. | 827 // under the same top_exit_frame_info. |
| 889 ApiLocalScope* scope = api_top_scope_; | 828 ApiLocalScope* scope = api_top_scope_; |
| 890 while (scope != NULL && scope->stack_marker() != 0 && | 829 while (scope != NULL && scope->stack_marker() != 0 && |
| 891 scope->stack_marker() == stack_marker) { | 830 scope->stack_marker() == stack_marker) { |
| 892 api_top_scope_ = scope->previous(); | 831 api_top_scope_ = scope->previous(); |
| 893 delete scope; | 832 delete scope; |
| 894 scope = api_top_scope_; | 833 scope = api_top_scope_; |
| 895 } | 834 } |
| 896 } | 835 } |
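`UnwindScopes` pops every API scope allocated under the same exit frame, identified by a shared stack marker, and stops at the first scope belonging to an older frame. A stand-in for that loop:

```cpp
// Stand-in for the unwind loop above: scopes sharing one stack marker are
// popped together, stopping at the first scope from an older exit frame.
#include <cassert>

struct ApiScope {
  ApiScope* previous;
  unsigned long stack_marker;
};

static ApiScope* Unwind(ApiScope* top, unsigned long marker) {
  while (top != nullptr && top->stack_marker != 0 &&
         top->stack_marker == marker) {
    ApiScope* dead = top;
    top = top->previous;
    delete dead;
  }
  return top;
}

int main() {
  ApiScope* older = new ApiScope{nullptr, 100};
  ApiScope* a = new ApiScope{older, 200};
  ApiScope* b = new ApiScope{a, 200};
  ApiScope* top = Unwind(b, 200);  // Pops b and a, keeps older.
  assert(top == older && top->stack_marker == 100);
  delete older;
  return 0;
}
```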
| 897 | 836 |
| 898 | |
| 899 void Thread::EnterSafepointUsingLock() { | 837 void Thread::EnterSafepointUsingLock() { |
| 900 isolate()->safepoint_handler()->EnterSafepointUsingLock(this); | 838 isolate()->safepoint_handler()->EnterSafepointUsingLock(this); |
| 901 } | 839 } |
| 902 | 840 |
| 903 | |
| 904 void Thread::ExitSafepointUsingLock() { | 841 void Thread::ExitSafepointUsingLock() { |
| 905 isolate()->safepoint_handler()->ExitSafepointUsingLock(this); | 842 isolate()->safepoint_handler()->ExitSafepointUsingLock(this); |
| 906 } | 843 } |
| 907 | 844 |
| 908 | |
| 909 void Thread::BlockForSafepoint() { | 845 void Thread::BlockForSafepoint() { |
| 910 isolate()->safepoint_handler()->BlockForSafepoint(this); | 846 isolate()->safepoint_handler()->BlockForSafepoint(this); |
| 911 } | 847 } |
| 912 | 848 |
| 913 | |
| 914 DisableThreadInterruptsScope::DisableThreadInterruptsScope(Thread* thread) | 849 DisableThreadInterruptsScope::DisableThreadInterruptsScope(Thread* thread) |
| 915 : StackResource(thread) { | 850 : StackResource(thread) { |
| 916 if (thread != NULL) { | 851 if (thread != NULL) { |
| 917 OSThread* os_thread = thread->os_thread(); | 852 OSThread* os_thread = thread->os_thread(); |
| 918 ASSERT(os_thread != NULL); | 853 ASSERT(os_thread != NULL); |
| 919 os_thread->DisableThreadInterrupts(); | 854 os_thread->DisableThreadInterrupts(); |
| 920 } | 855 } |
| 921 } | 856 } |
| 922 | 857 |
| 923 | |
| 924 DisableThreadInterruptsScope::~DisableThreadInterruptsScope() { | 858 DisableThreadInterruptsScope::~DisableThreadInterruptsScope() { |
| 925 if (thread() != NULL) { | 859 if (thread() != NULL) { |
| 926 OSThread* os_thread = thread()->os_thread(); | 860 OSThread* os_thread = thread()->os_thread(); |
| 927 ASSERT(os_thread != NULL); | 861 ASSERT(os_thread != NULL); |
| 928 os_thread->EnableThreadInterrupts(); | 862 os_thread->EnableThreadInterrupts(); |
| 929 } | 863 } |
| 930 } | 864 } |
| 931 | 865 |
| 932 } // namespace dart | 866 } // namespace dart |