| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 43 matching lines...) |
| 54 static const int kSamplerThresholdDelta = 1; | 54 static const int kSamplerThresholdDelta = 1; |
| 55 | 55 |
| 56 static const int kSamplerThresholdSizeFactorInit = 3; | 56 static const int kSamplerThresholdSizeFactorInit = 3; |
| 57 | 57 |
| 58 static const int kSizeLimit = 1500; | 58 static const int kSizeLimit = 1500; |
| 59 | 59 |
| 60 | 60 |
| 61 Atomic32 RuntimeProfiler::state_ = 0; | 61 Atomic32 RuntimeProfiler::state_ = 0; |
| 62 // TODO(isolates): Create the semaphore lazily and clean it up when no | 62 // TODO(isolates): Create the semaphore lazily and clean it up when no |
| 63 // longer required. | 63 // longer required. |
| 64 #ifdef ENABLE_LOGGING_AND_PROFILING | |
| 65 Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0); | 64 Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0); |
| 66 #endif | |
| 67 | 65 |
| 68 #ifdef DEBUG | 66 #ifdef DEBUG |
| 69 bool RuntimeProfiler::has_been_globally_setup_ = false; | 67 bool RuntimeProfiler::has_been_globally_setup_ = false; |
| 70 #endif | 68 #endif |
| 71 bool RuntimeProfiler::enabled_ = false; | 69 bool RuntimeProfiler::enabled_ = false; |
| 72 | 70 |
| 73 | 71 |
| 74 RuntimeProfiler::RuntimeProfiler(Isolate* isolate) | 72 RuntimeProfiler::RuntimeProfiler(Isolate* isolate) |
| 75 : isolate_(isolate), | 73 : isolate_(isolate), |
| 76 sampler_threshold_(kSamplerThresholdInit), | 74 sampler_threshold_(kSamplerThresholdInit), |
| (...skipping 161 matching lines...) |
| 238 // Add the collected functions as samples. It's important not to do | 236 // Add the collected functions as samples. It's important not to do |
| 239 // this as part of collecting them because this will interfere with | 237 // this as part of collecting them because this will interfere with |
| 240 // the sample lookup in case of recursive functions. | 238 // the sample lookup in case of recursive functions. |
| 241 for (int i = 0; i < sample_count; i++) { | 239 for (int i = 0; i < sample_count; i++) { |
| 242 AddSample(samples[i], kSamplerFrameWeight[i]); | 240 AddSample(samples[i], kSamplerFrameWeight[i]); |
| 243 } | 241 } |
| 244 } | 242 } |
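The comment above carries the key reasoning: samples must not be added to the window while the stack is still being walked. A hedged sketch of that two-phase structure follows; it is illustrative only, not V8's actual sampler. `LookupSample()`, `SampleStack()`, the window layout, and the weights are stand-ins. The point is that mutating the window mid-walk would let a recursive function's outer frames inflate the lookup result seen by its deeper frames.

```cpp
// Illustrative two-phase sampler (stand-in names, not V8 API).
#include <cstddef>

static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = {2, 1};
static const int kWindowSize = 8;
static const void* window_[kWindowSize];
static int next_slot_ = 0;

// Count how often `function` already appears in the window.
static int LookupSample(const void* function) {
  int weight = 0;
  for (int i = 0; i < kWindowSize; i++) {
    if (window_[i] == function) weight++;
  }
  return weight;
}

// Occupy `weight` window slots round-robin.
static void AddSample(const void* function, int weight) {
  for (int i = 0; i < weight; i++) {
    window_[next_slot_] = function;
    next_slot_ = (next_slot_ + 1) % kWindowSize;
  }
}

void SampleStack(const void* const* frames, int frame_count) {
  const void* samples[kSamplerFrameCount];
  int sample_count = 0;
  // Phase 1: walk the stack, consulting the window as it was before
  // this tick. Calling AddSample() here instead would make a recursive
  // function's deeper frames see their own just-added outer frames.
  for (int i = 0; i < frame_count && sample_count < kSamplerFrameCount; i++) {
    int hotness = LookupSample(frames[i]);
    (void)hotness;  // a real profiler would gate optimization on this
    samples[sample_count++] = frames[i];
  }
  // Phase 2: only now mutate the window.
  for (int i = 0; i < sample_count; i++) {
    AddSample(samples[i], kSamplerFrameWeight[i]);
  }
}
```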
| 245 | 243 |
| 246 | 244 |
| 247 void RuntimeProfiler::NotifyTick() { | 245 void RuntimeProfiler::NotifyTick() { |
| 248 #ifdef ENABLE_LOGGING_AND_PROFILING | |
| 249 isolate_->stack_guard()->RequestRuntimeProfilerTick(); | 246 isolate_->stack_guard()->RequestRuntimeProfilerTick(); |
| 250 #endif | |
| 251 } | 247 } |
| 252 | 248 |
| 253 | 249 |
| 254 void RuntimeProfiler::Setup() { | 250 void RuntimeProfiler::Setup() { |
| 255 ASSERT(has_been_globally_setup_); | 251 ASSERT(has_been_globally_setup_); |
| 256 ClearSampleBuffer(); | 252 ClearSampleBuffer(); |
| 257 // If the ticker hasn't already started, make sure to do so to get | 253 // If the ticker hasn't already started, make sure to do so to get |
| 258 // the ticks for the runtime profiler. | 254 // the ticks for the runtime profiler. |
| 259 if (IsEnabled()) isolate_->logger()->EnsureTickerStarted(); | 255 if (IsEnabled()) isolate_->logger()->EnsureTickerStarted(); |
| 260 } | 256 } |
| (...skipping 27 matching lines...) |
| 288 sampler_window_[i] = map_word.ToForwardingAddress(); | 284 sampler_window_[i] = map_word.ToForwardingAddress(); |
| 289 } else { | 285 } else { |
| 290 sampler_window_[i] = NULL; | 286 sampler_window_[i] = NULL; |
| 291 } | 287 } |
| 292 } | 288 } |
| 293 } | 289 } |
| 294 } | 290 } |
| 295 | 291 |
| 296 | 292 |
| 297 void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { | 293 void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { |
| 298 #ifdef ENABLE_LOGGING_AND_PROFILING | |
| 299 // The profiler thread must still be waiting. | 294 // The profiler thread must still be waiting. |
| 300 ASSERT(NoBarrier_Load(&state_) >= 0); | 295 ASSERT(NoBarrier_Load(&state_) >= 0); |
| 301 // In IsolateEnteredJS we have already incremented the counter and | 296 // In IsolateEnteredJS we have already incremented the counter and |
| 302 // undone the decrement done by the profiler thread. Increment again | 297 // undone the decrement done by the profiler thread. Increment again |
| 303 // to get the right count of active isolates. | 298 // to get the right count of active isolates. |
| 304 NoBarrier_AtomicIncrement(&state_, 1); | 299 NoBarrier_AtomicIncrement(&state_, 1); |
| 305 semaphore_->Signal(); | 300 semaphore_->Signal(); |
| 306 #endif | |
| 307 } | 301 } |
| 308 | 302 |
| 309 | 303 |
| 310 bool RuntimeProfiler::IsSomeIsolateInJS() { | 304 bool RuntimeProfiler::IsSomeIsolateInJS() { |
| 311 return NoBarrier_Load(&state_) > 0; | 305 return NoBarrier_Load(&state_) > 0; |
| 312 } | 306 } |
| 313 | 307 |
| 314 | 308 |
| 315 bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() { | 309 bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() { |
| 316 #ifdef ENABLE_LOGGING_AND_PROFILING | |
| 317 Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1); | 310 Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1); |
| 318 ASSERT(old_state >= -1); | 311 ASSERT(old_state >= -1); |
| 319 if (old_state != 0) return false; | 312 if (old_state != 0) return false; |
| 320 semaphore_->Wait(); | 313 semaphore_->Wait(); |
| 321 #endif | |
| 322 return true; | 314 return true; |
| 323 } | 315 } |
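Taken together, `state_` and the semaphore implement a small handshake between isolates and the profiler thread: a positive count is the number of isolates currently in JS, and -1 marks a parked profiler. A hedged sketch of the wait/wake pair, with V8's `Atomic32` and `Semaphore` swapped for standard C++ primitives (illustrative names, not V8's API):

```cpp
// Sketch of the parked-profiler handshake (C++20; illustrative names).
#include <atomic>
#include <semaphore>

std::atomic<int> state{0};          // >0: isolates in JS; -1: profiler parked
std::binary_semaphore wakeup{0};

// Profiler thread: park only if no isolate is currently in JS.
bool WaitForSomeIsolateToEnterJS() {
  int expected = 0;
  // CAS 0 -> -1 claims the "waiting" state; any other value means some
  // isolate is (or just became) active, so do not block.
  if (!state.compare_exchange_strong(expected, -1)) return false;
  wakeup.acquire();                 // parked until HandleWakeUp()
  return true;
}

// First isolate entering JS while the profiler is parked. The caller
// has already done one increment (taking state from -1 back to 0), so
// a second one yields the true count of active isolates.
void HandleWakeUp() {
  state.fetch_add(1);
  wakeup.release();
}
```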
| 324 | 316 |
| 325 | 317 |
| 326 void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { | 318 void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { |
| 327 #ifdef ENABLE_LOGGING_AND_PROFILING | |
| 328 // Do a fake increment. If the profiler is waiting on the semaphore, | 319 // Do a fake increment. If the profiler is waiting on the semaphore, |
| 329 // the returned state is 0, which can be left as an initial state in | 320 // the returned state is 0, which can be left as an initial state in |
| 330 // case profiling is restarted later. If the profiler is not | 321 // case profiling is restarted later. If the profiler is not |
| 331 // waiting, the increment will prevent it from waiting, but has to | 322 // waiting, the increment will prevent it from waiting, but has to |
| 332 // be undone after the profiler is stopped. | 323 // be undone after the profiler is stopped. |
| 333 Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1); | 324 Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1); |
| 334 ASSERT(new_state >= 0); | 325 ASSERT(new_state >= 0); |
| 335 if (new_state == 0) { | 326 if (new_state == 0) { |
| 336 // The profiler thread is waiting. Wake it up. It must check for | 327 // The profiler thread is waiting. Wake it up. It must check for |
| 337 // stop conditions before attempting to wait again. | 328 // stop conditions before attempting to wait again. |
| 338 semaphore_->Signal(); | 329 semaphore_->Signal(); |
| 339 } | 330 } |
| 340 thread->Join(); | 331 thread->Join(); |
| 341 // The profiler thread is now stopped. Undo the increment in case it | 332 // The profiler thread is now stopped. Undo the increment in case it |
| 342 // was not waiting. | 333 // was not waiting. |
| 343 if (new_state != 0) { | 334 if (new_state != 0) { |
| 344 NoBarrier_AtomicIncrement(&state_, -1); | 335 NoBarrier_AtomicIncrement(&state_, -1); |
| 345 } | 336 } |
| 346 #endif | |
| 347 } | 337 } |
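The shutdown path can be expressed against the same sketch. The fake increment either lands the counter back at 0 (the profiler was parked at -1, and 0 is a valid initial state for a later restart) or keeps the profiler from parking; either way the thread can be joined, and the increment is undone only in the second case. Again a sketch under the same assumptions, with `std::thread` standing in for V8's `Thread`:

```cpp
// Sketch of the shutdown handshake (reuses `state` and `wakeup` above).
#include <thread>

void StopProfilerThreadBeforeShutdown(std::thread* profiler) {
  // Fake increment: fetch_add returns the old value, so new_state is 0
  // exactly when the profiler was parked at -1.
  int new_state = state.fetch_add(1) + 1;
  if (new_state == 0) {
    wakeup.release();    // wake the parked profiler; it must re-check
                         // stop conditions before waiting again
  }
  profiler->join();      // the profiler thread has now exited
  if (new_state != 0) {
    state.fetch_sub(1);  // undo the fake increment
  }
}
```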
| 348 | 338 |
| 349 | 339 |
| 350 void RuntimeProfiler::RemoveDeadSamples() { | 340 void RuntimeProfiler::RemoveDeadSamples() { |
| 351 for (int i = 0; i < kSamplerWindowSize; i++) { | 341 for (int i = 0; i < kSamplerWindowSize; i++) { |
| 352 Object* function = sampler_window_[i]; | 342 Object* function = sampler_window_[i]; |
| 353 if (function != NULL && !HeapObject::cast(function)->IsMarked()) { | 343 if (function != NULL && !HeapObject::cast(function)->IsMarked()) { |
| 354 sampler_window_[i] = NULL; | 344 sampler_window_[i] = NULL; |
| 355 } | 345 } |
| 356 } | 346 } |
| 357 } | 347 } |
| 358 | 348 |
| 359 | 349 |
| 360 void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) { | 350 void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) { |
| 361 for (int i = 0; i < kSamplerWindowSize; i++) { | 351 for (int i = 0; i < kSamplerWindowSize; i++) { |
| 362 visitor->VisitPointer(&sampler_window_[i]); | 352 visitor->VisitPointer(&sampler_window_[i]); |
| 363 } | 353 } |
| 364 } | 354 } |
| 365 | 355 |
| 366 | 356 |
| 367 bool RuntimeProfilerRateLimiter::SuspendIfNecessary() { | 357 bool RuntimeProfilerRateLimiter::SuspendIfNecessary() { |
| 368 #ifdef ENABLE_LOGGING_AND_PROFILING | |
| 369 if (!RuntimeProfiler::IsSomeIsolateInJS()) { | 358 if (!RuntimeProfiler::IsSomeIsolateInJS()) { |
| 370 return RuntimeProfiler::WaitForSomeIsolateToEnterJS(); | 359 return RuntimeProfiler::WaitForSomeIsolateToEnterJS(); |
| 371 } | 360 } |
| 372 #endif | |
| 373 return false; | 361 return false; |
| 374 } | 362 } |
| 375 | 363 |
| 376 | 364 |
| 377 } } // namespace v8::internal | 365 } } // namespace v8::internal |