Index: src/runtime-profiler.cc
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index be29d5d19ec36a19e5f584af86936b8faa6835ca..752d79c982a781655d1984e773b0245424e19085 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -86,17 +86,6 @@ static const int kMaxSizeEarlyOpt =
 5 * FullCodeGenerator::kBackEdgeDistanceUnit;
-Atomic32 RuntimeProfiler::state_ = 0;
-
-// TODO(isolates): Clean up the semaphore when it is no longer required.
-static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
-
-#ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_set_up_ = false;
-#endif
-bool RuntimeProfiler::enabled_ = false;
-
-
 RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
 : isolate_(isolate),
 sampler_threshold_(kSamplerThresholdInit),
@@ -110,15 +99,6 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
 }
-void RuntimeProfiler::GlobalSetUp() {
-  ASSERT(!has_been_globally_set_up_);
-  enabled_ = V8::UseCrankshaft() && FLAG_opt;
-#ifdef DEBUG
-  has_been_globally_set_up_ = true;
-#endif
-}
-
-
 static void GetICCounts(JSFunction* function,
 int* ic_with_type_info_count,
 int* ic_total_count,
@@ -386,7 +366,6 @@ void RuntimeProfiler::OptimizeNow() {
 void RuntimeProfiler::SetUp() {
-  ASSERT(has_been_globally_set_up_);
 if (!FLAG_watch_ic_patching) {
 ClearSampleBuffer();
 }
@@ -429,48 +408,6 @@ void RuntimeProfiler::UpdateSamplesAfterScavenge() {
 }
-void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
-  // The profiler thread must still be waiting.
-  ASSERT(NoBarrier_Load(&state_) >= 0);
-  // In IsolateEnteredJS we have already incremented the counter and
-  // undid the decrement done by the profiler thread. Increment again
-  // to get the right count of active isolates.
-  NoBarrier_AtomicIncrement(&state_, 1);
-  semaphore.Pointer()->Signal();
-}
-
-
-bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
-  Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
-  ASSERT(old_state >= -1);
-  if (old_state != 0) return false;
-  semaphore.Pointer()->Wait();
-  return true;
-}
-
-
-void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
-  // Do a fake increment. If the profiler is waiting on the semaphore,
-  // the returned state is 0, which can be left as an initial state in
-  // case profiling is restarted later. If the profiler is not
-  // waiting, the increment will prevent it from waiting, but has to
-  // be undone after the profiler is stopped.
-  Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
-  ASSERT(new_state >= 0);
-  if (new_state == 0) {
-    // The profiler thread is waiting. Wake it up. It must check for
-    // stop conditions before attempting to wait again.
-    semaphore.Pointer()->Signal();
-  }
-  thread->Join();
-  // The profiler thread is now stopped. Undo the increment in case it
-  // was not waiting.
-  if (new_state != 0) {
-    NoBarrier_AtomicIncrement(&state_, -1);
-  }
-}
-
-
 void RuntimeProfiler::RemoveDeadSamples() {
 for (int i = 0; i < kSamplerWindowSize; i++) {
 Object* function = sampler_window_[i];