OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 54 matching lines...)
65 // Number of times a function has to be seen on the stack before it is | 65 // Number of times a function has to be seen on the stack before it is |
66 // optimized. | 66 // optimized. |
67 static const int kProfilerTicksBeforeOptimization = 2; | 67 static const int kProfilerTicksBeforeOptimization = 2; |
68 | 68 |
69 // Maximum size in bytes of generated code for a function to be optimized | 69 // Maximum size in bytes of generated code for a function to be optimized |
70 // the very first time it is seen on the stack. | 70 // the very first time it is seen on the stack. |
71 static const int kMaxSizeEarlyOpt = 500; | 71 static const int kMaxSizeEarlyOpt = 500; |
72 | 72 |
73 | 73 |
74 Atomic32 RuntimeProfiler::state_ = 0; | 74 Atomic32 RuntimeProfiler::state_ = 0; |
75 // TODO(isolates): Create the semaphore lazily and clean it up when no | 75 |
76 // longer required. | 76 // TODO(isolates): Clean up the semaphore when it is no longer required. |
77 Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0); | 77 static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER; |
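
The switch from a static Semaphore* initialized by OS::CreateSemaphore(0) to a LazySemaphore means no code runs during static initialization; the semaphore is created on first use instead. Below is a minimal standalone sketch of that lazy-initialization pattern. It is not V8's actual implementation (the real LazySemaphore comes from V8's lazy-instance machinery); the semaphore built on a condition variable and the use of std::call_once are purely illustrative stand-ins.

    #include <condition_variable>
    #include <mutex>

    // Sketch of a counting semaphore; stands in for V8's OS-level Semaphore.
    class SemaphoreSketch {
     public:
      explicit SemaphoreSketch(int count) : count_(count) {}
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
      void Signal() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          ++count_;
        }
        cv_.notify_one();
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_;
    };

    // Lazily constructed wrapper: the first Pointer() call creates the
    // semaphore; every later call returns the same instance. This mirrors
    // the role of LazySemaphore<0>::type / LAZY_SEMAPHORE_INITIALIZER.
    template <int kInitialCount>
    class LazySemaphoreSketch {
     public:
      SemaphoreSketch* Pointer() {
        std::call_once(once_, [this] {
          instance_ = new SemaphoreSketch(kInitialCount);
        });
        return instance_;
      }
     private:
      std::once_flag once_;
      SemaphoreSketch* instance_ = nullptr;
    };
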
78 | 78 |
79 #ifdef DEBUG | 79 #ifdef DEBUG |
80 bool RuntimeProfiler::has_been_globally_set_up_ = false; | 80 bool RuntimeProfiler::has_been_globally_set_up_ = false; |
81 #endif | 81 #endif |
82 bool RuntimeProfiler::enabled_ = false; | 82 bool RuntimeProfiler::enabled_ = false; |
83 | 83 |
84 | 84 |
85 RuntimeProfiler::RuntimeProfiler(Isolate* isolate) | 85 RuntimeProfiler::RuntimeProfiler(Isolate* isolate) |
86 : isolate_(isolate), | 86 : isolate_(isolate), |
87 sampler_threshold_(kSamplerThresholdInit), | 87 sampler_threshold_(kSamplerThresholdInit), |
(...skipping 310 matching lines...)
398 } | 398 } |
399 | 399 |
400 | 400 |
401 void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { | 401 void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { |
402 // The profiler thread must still be waiting. | 402 // The profiler thread must still be waiting. |
403 ASSERT(NoBarrier_Load(&state_) >= 0); | 403 ASSERT(NoBarrier_Load(&state_) >= 0); |
404 // In IsolateEnteredJS we have already incremented the counter and | 404 // In IsolateEnteredJS we have already incremented the counter and |
405 // undone the decrement done by the profiler thread. Increment again | 405 // undone the decrement done by the profiler thread. Increment again |
406 // to get the right count of active isolates. | 406 // to get the right count of active isolates. |
407 NoBarrier_AtomicIncrement(&state_, 1); | 407 NoBarrier_AtomicIncrement(&state_, 1); |
408 semaphore_->Signal(); | 408 semaphore.Pointer()->Signal(); |
409 } | 409 } |
410 | 410 |
411 | 411 |
412 bool RuntimeProfiler::IsSomeIsolateInJS() { | 412 bool RuntimeProfiler::IsSomeIsolateInJS() { |
413 return NoBarrier_Load(&state_) > 0; | 413 return NoBarrier_Load(&state_) > 0; |
414 } | 414 } |
415 | 415 |
416 | 416 |
417 bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() { | 417 bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() { |
418 Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1); | 418 Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1); |
419 ASSERT(old_state >= -1); | 419 ASSERT(old_state >= -1); |
420 if (old_state != 0) return false; | 420 if (old_state != 0) return false; |
421 semaphore_->Wait(); | 421 semaphore.Pointer()->Wait(); |
422 return true; | 422 return true; |
423 } | 423 } |
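
Taken together, HandleWakeUp and WaitForSomeIsolateToEnterJS form a small handshake around state_: values >= 0 count isolates currently executing JS, and -1 marks the profiler thread as parked on the semaphore. Here is a hedged sketch of both sides of that handshake, using std::atomic and the SemaphoreSketch from above; g_state and g_sem are illustrative names, not V8's.

    #include <atomic>

    std::atomic<int> g_state{0};  // mirrors RuntimeProfiler::state_
    SemaphoreSketch g_sem(0);     // mirrors the lazily created semaphore

    // Profiler thread: park only when no isolate is in JS. The CAS moves
    // the state 0 -> -1, so a concurrent JS entry sees the parked marker.
    bool WaitForSomeIsolateToEnterJSSketch() {
      int expected = 0;
      if (!g_state.compare_exchange_strong(expected, -1)) {
        return false;  // an isolate is (or just became) active; keep running
      }
      g_sem.Wait();    // sleep until an isolate enters JS and signals
      return true;
    }

    // Isolate side: the JS-entry path has already incremented the counter,
    // turning the parked -1 into 0, and then calls this. A second increment
    // restores the true count of active isolates before waking the profiler.
    void HandleWakeUpSketch() {
      g_state.fetch_add(1);
      g_sem.Signal();
    }
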
424 | 424 |
425 | 425 |
426 void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { | 426 void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { |
427 // Do a fake increment. If the profiler is waiting on the semaphore, | 427 // Do a fake increment. If the profiler is waiting on the semaphore, |
428 // the returned state is 0, which can be left as an initial state in | 428 // the returned state is 0, which can be left as an initial state in |
429 // case profiling is restarted later. If the profiler is not | 429 // case profiling is restarted later. If the profiler is not |
430 // waiting, the increment will prevent it from waiting, but has to | 430 // waiting, the increment will prevent it from waiting, but has to |
431 // be undone after the profiler is stopped. | 431 // be undone after the profiler is stopped. |
432 Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1); | 432 Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1); |
433 ASSERT(new_state >= 0); | 433 ASSERT(new_state >= 0); |
434 if (new_state == 0) { | 434 if (new_state == 0) { |
435 // The profiler thread is waiting. Wake it up. It must check for | 435 // The profiler thread is waiting. Wake it up. It must check for |
436 // stop conditions before attempting to wait again. | 436 // stop conditions before attempting to wait again. |
437 semaphore_->Signal(); | 437 semaphore.Pointer()->Signal(); |
438 } | 438 } |
439 thread->Join(); | 439 thread->Join(); |
440 // The profiler thread is now stopped. Undo the increment in case it | 440 // The profiler thread is now stopped. Undo the increment in case it |
441 // was not waiting. | 441 // was not waiting. |
442 if (new_state != 0) { | 442 if (new_state != 0) { |
443 NoBarrier_AtomicIncrement(&state_, -1); | 443 NoBarrier_AtomicIncrement(&state_, -1); |
444 } | 444 } |
445 } | 445 } |
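
The shutdown path uses a deliberate extra increment so it works whether or not the profiler thread is parked. A hedged sketch of the same handshake, reusing g_state and g_sem from the sketch above and std::thread in place of V8's Thread (illustrative only):

    #include <thread>

    void StopProfilerThreadSketch(std::thread* profiler) {
      // Fake increment: if the profiler was parked at -1 this restores 0,
      // a valid initial state in case profiling restarts later; otherwise
      // it keeps the profiler from parking while we shut it down.
      int new_state = g_state.fetch_add(1) + 1;
      if (new_state == 0) {
        g_sem.Signal();  // wake the parked profiler so it can see the stop
      }
      profiler->join();  // wait for the profiler thread to exit
      if (new_state != 0) {
        g_state.fetch_add(-1);  // the fake increment was not consumed; undo it
      }
    }
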
446 | 446 |
447 | 447 |
(...skipping 17 matching lines...)
465 | 465 |
466 bool RuntimeProfilerRateLimiter::SuspendIfNecessary() { | 466 bool RuntimeProfilerRateLimiter::SuspendIfNecessary() { |
467 if (!RuntimeProfiler::IsSomeIsolateInJS()) { | 467 if (!RuntimeProfiler::IsSomeIsolateInJS()) { |
468 return RuntimeProfiler::WaitForSomeIsolateToEnterJS(); | 468 return RuntimeProfiler::WaitForSomeIsolateToEnterJS(); |
469 } | 469 } |
470 return false; | 470 return false; |
471 } | 471 } |
472 | 472 |
473 | 473 |
474 } } // namespace v8::internal | 474 } } // namespace v8::internal |