Index: src/optimizing-compiler-thread.cc
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 39fc191baf585fb5d526bf93439a21f97d0f37f4..21ef237107194d06a897110444c01710eae31bd0 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -39,7 +39,7 @@ namespace internal {
 
 void OptimizingCompilerThread::Run() {
 #ifdef DEBUG
-  { ScopedLock lock(&thread_id_mutex_);
+  { ScopedLock lock(thread_id_mutex_);
     thread_id_ = ThreadId::Current().ToInteger();
   }
 #endif
@@ -93,7 +93,7 @@ void OptimizingCompilerThread::CompileNext() {
   // The function may have already been optimized by OSR. Simply continue.
   // Use a mutex to make sure that functions marked for install
   // are always also queued.
-  ScopedLock mark_and_queue(&install_mutex_);
+  ScopedLock mark_and_queue(install_mutex_);
   { Heap::RelocationLock relocation_lock(isolate_->heap());
     AllowHandleDereference ahd;
     optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
@@ -141,7 +141,7 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
   OptimizingCompiler* compiler;
   while (true) {
     { // Memory barrier to ensure marked functions are queued.
-      ScopedLock marked_and_queued(&install_mutex_);
+      ScopedLock marked_and_queued(install_mutex_);
       if (!output_queue_.Dequeue(&compiler)) return;
     }
     Compiler::InstallOptimizedCode(compiler);
@@ -163,7 +163,7 @@ void OptimizingCompilerThread::QueueForOptimization(
 #ifdef DEBUG
 bool OptimizingCompilerThread::IsOptimizerThread() {
   if (!FLAG_parallel_recompilation) return false;
-  ScopedLock lock(&thread_id_mutex_);
+  ScopedLock lock(thread_id_mutex_);
   return ThreadId::Current().ToInteger() == thread_id_;
 }
 #endif
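
Note: the patch changes every ScopedLock call site from passing a mutex pointer (&thread_id_mutex_, &install_mutex_) to passing it by reference. For reference only, the sketch below shows the general shape of an RAII scope guard whose constructor takes the mutex by reference; it is not V8's ScopedLock, and the pthread-based Mutex wrapper and all names in it are assumptions made for illustration.

// Hypothetical sketch of an RAII lock guard taking its mutex by reference,
// matching the new call-site shape ScopedLock lock(mutex_). This is NOT
// V8's implementation; Mutex here is a pthread-based stand-in.
#include <pthread.h>

class Mutex {
 public:
  Mutex() { pthread_mutex_init(&handle_, NULL); }
  ~Mutex() { pthread_mutex_destroy(&handle_); }
  void Lock() { pthread_mutex_lock(&handle_); }
  void Unlock() { pthread_mutex_unlock(&handle_); }

 private:
  pthread_mutex_t handle_;
};

class ScopedLock {
 public:
  // A reference parameter cannot be NULL, so the guard always has a mutex
  // to release in its destructor.
  explicit ScopedLock(Mutex& mutex) : mutex_(mutex) { mutex_.Lock(); }
  ~ScopedLock() { mutex_.Unlock(); }

 private:
  Mutex& mutex_;

  // Copying a scope guard would unlock the mutex twice, so forbid it
  // (C++03 style: declared private, never defined).
  ScopedLock(const ScopedLock&);
  void operator=(const ScopedLock&);
};

// Usage mirroring the call sites in the diff:
//   Mutex install_mutex_;
//   { ScopedLock mark_and_queue(install_mutex_);   // locks here
//     // ... critical section ...
//   }                                              // unlocks when the guard goes out of scope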