OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 21 matching lines...) |
32 #include "hydrogen.h" | 32 #include "hydrogen.h" |
33 #include "isolate.h" | 33 #include "isolate.h" |
34 #include "v8threads.h" | 34 #include "v8threads.h" |
35 | 35 |
36 namespace v8 { | 36 namespace v8 { |
37 namespace internal { | 37 namespace internal { |
38 | 38 |
39 | 39 |
40 void OptimizingCompilerThread::Run() { | 40 void OptimizingCompilerThread::Run() { |
41 #ifdef DEBUG | 41 #ifdef DEBUG |
42 { ScopedLock lock(thread_id_mutex_); | 42 { LockGuard<Mutex> lock_guard(&thread_id_mutex_); |
43 thread_id_ = ThreadId::Current().ToInteger(); | 43 thread_id_ = ThreadId::Current().ToInteger(); |
44 } | 44 } |
45 #endif | 45 #endif |
46 Isolate::SetIsolateThreadLocals(isolate_, NULL); | 46 Isolate::SetIsolateThreadLocals(isolate_, NULL); |
47 DisallowHeapAllocation no_allocation; | 47 DisallowHeapAllocation no_allocation; |
48 DisallowHandleAllocation no_handles; | 48 DisallowHandleAllocation no_handles; |
49 DisallowHandleDereference no_deref; | 49 DisallowHandleDereference no_deref; |
50 | 50 |
51 ElapsedTimer total_timer; | 51 ElapsedTimer total_timer; |
52 if (FLAG_trace_concurrent_recompilation) total_timer.Start(); | 52 if (FLAG_trace_concurrent_recompilation) total_timer.Start(); |
(...skipping 48 matching lines...) |
101 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); | 101 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); |
102 | 102 |
103 // The function may have already been optimized by OSR. Simply continue. | 103 // The function may have already been optimized by OSR. Simply continue. |
104 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); | 104 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); |
105 USE(status); // Prevent an unused-variable error in release mode. | 105 USE(status); // Prevent an unused-variable error in release mode. |
106 ASSERT(status != OptimizingCompiler::FAILED); | 106 ASSERT(status != OptimizingCompiler::FAILED); |
107 | 107 |
108 // The function may have already been optimized by OSR. Simply continue. | 108 // The function may have already been optimized by OSR. Simply continue. |
109 // Use a mutex to make sure that functions marked for install | 109 // Use a mutex to make sure that functions marked for install |
110 // are always also queued. | 110 // are always also queued. |
111 ScopedLock mark_and_queue(install_mutex_); | 111 LockGuard<Mutex> mark_and_queue(&install_mutex_); |
112 { Heap::RelocationLock relocation_lock(isolate_->heap()); | 112 { Heap::RelocationLock relocation_lock(isolate_->heap()); |
113 AllowHandleDereference ahd; | 113 AllowHandleDereference ahd; |
114 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); | 114 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); |
115 } | 115 } |
116 output_queue_.Enqueue(optimizing_compiler); | 116 output_queue_.Enqueue(optimizing_compiler); |
117 } | 117 } |
118 | 118 |
119 | 119 |
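Note: the ScopedLock → LockGuard&lt;Mutex&gt; changes in this file swap one RAII guard for another; the old code passed the Mutex* member directly, while the new API takes the address of a by-value Mutex member and unlocks in the guard's destructor. A minimal sketch of the same pattern, using std::mutex and std::lock_guard as stand-ins for V8's platform classes (which this diff does not show):

    #include <mutex>

    std::mutex install_mutex;  // stand-in for the install_mutex_ member

    void MarkAndQueue() {
      // Locks on construction, unlocks on destruction, so every exit
      // path from the scope releases the mutex.
      std::lock_guard<std::mutex> mark_and_queue(install_mutex);
      // ... mark the closure for install, then enqueue it ...
    }

The locking discipline is unchanged; only the guard type and the explicit &mutex at the call sites differ.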
120 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 120 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
121 OptimizingCompiler* optimizing_compiler; | 121 OptimizingCompiler* optimizing_compiler; |
(...skipping 60 matching lines...) |
182 Join(); | 182 Join(); |
183 } | 183 } |
184 | 184 |
185 | 185 |
186 void OptimizingCompilerThread::InstallOptimizedFunctions() { | 186 void OptimizingCompilerThread::InstallOptimizedFunctions() { |
187 ASSERT(!IsOptimizerThread()); | 187 ASSERT(!IsOptimizerThread()); |
188 HandleScope handle_scope(isolate_); | 188 HandleScope handle_scope(isolate_); |
189 OptimizingCompiler* compiler; | 189 OptimizingCompiler* compiler; |
190 while (true) { | 190 while (true) { |
191 { // Memory barrier to ensure marked functions are queued. | 191 { // Memory barrier to ensure marked functions are queued. |
192 ScopedLock marked_and_queued(install_mutex_); | 192 LockGuard<Mutex> marked_and_queued(&install_mutex_); |
193 if (!output_queue_.Dequeue(&compiler)) return; | 193 if (!output_queue_.Dequeue(&compiler)) return; |
194 } | 194 } |
195 Compiler::InstallOptimizedCode(compiler); | 195 Compiler::InstallOptimizedCode(compiler); |
196 } | 196 } |
197 } | 197 } |
198 | 198 |
199 | 199 |
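Note: the two install_mutex_ sections above (in CompileNext and InstallOptimizedFunctions) encode an ordering invariant: the compiler thread marks a closure and enqueues it inside one critical section, and the main thread dequeues inside the same mutex, so a dequeued entry is always observed as marked. A self-contained sketch of that invariant; Task, Publish, and TakeNext are illustrative names, not V8 API:

    #include <mutex>
    #include <queue>

    struct Task { bool marked_for_install = false; };

    std::mutex install_mutex;
    std::queue<Task*> output_queue;

    // Compiler thread: mark, then enqueue, atomically w.r.t. readers.
    void Publish(Task* task) {
      std::lock_guard<std::mutex> guard(install_mutex);
      task->marked_for_install = true;
      output_queue.push(task);
    }

    // Main thread: dequeuing under the same mutex guarantees it never
    // sees a queued-but-unmarked task.
    Task* TakeNext() {
      std::lock_guard<std::mutex> guard(install_mutex);
      if (output_queue.empty()) return nullptr;
      Task* task = output_queue.front();
      output_queue.pop();
      return task;
    }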
200 void OptimizingCompilerThread::QueueForOptimization( | 200 void OptimizingCompilerThread::QueueForOptimization( |
201 OptimizingCompiler* optimizing_compiler) { | 201 OptimizingCompiler* optimizing_compiler) { |
202 ASSERT(IsQueueAvailable()); | 202 ASSERT(IsQueueAvailable()); |
203 ASSERT(!IsOptimizerThread()); | 203 ASSERT(!IsOptimizerThread()); |
204 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); | 204 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); |
205 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); | 205 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); |
206 input_queue_.Enqueue(optimizing_compiler); | 206 input_queue_.Enqueue(optimizing_compiler); |
207 input_queue_semaphore_->Signal(); | 207 input_queue_semaphore_->Signal(); |
208 } | 208 } |
209 | 209 |
210 | 210 |
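Note: QueueForOptimization is the producer half of the input queue: bump the atomic queue length, mark the closure, enqueue, then signal the semaphore the compiler thread blocks on (the matching wait is in the lines elided above; the decrement at line 101 is the consumer side). A sketch of that handoff, with C++20 standard primitives standing in for V8's Semaphore, Barrier_AtomicIncrement, and internal queue:

    #include <atomic>
    #include <mutex>
    #include <queue>
    #include <semaphore>

    struct Job {};

    std::queue<Job*> input_queue;
    std::mutex input_mutex;
    std::counting_semaphore<> input_sem{0};  // starts with no pending jobs
    std::atomic<int> queue_length{0};

    // Producer (main thread): enqueue, then wake the worker.
    void QueueForOptimization(Job* job) {
      queue_length.fetch_add(1);   // plays the role of Barrier_AtomicIncrement
      {
        std::lock_guard<std::mutex> guard(input_mutex);
        input_queue.push(job);
      }
      input_sem.release();         // plays the role of Signal()
    }

    // Consumer (compiler thread): block until a job has been signaled.
    Job* WaitForJob() {
      input_sem.acquire();
      std::lock_guard<std::mutex> guard(input_mutex);
      Job* job = input_queue.front();
      input_queue.pop();
      queue_length.fetch_add(-1);
      return job;
    }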
211 #ifdef DEBUG | 211 #ifdef DEBUG |
212 bool OptimizingCompilerThread::IsOptimizerThread() { | 212 bool OptimizingCompilerThread::IsOptimizerThread() { |
213 if (!FLAG_concurrent_recompilation) return false; | 213 if (!FLAG_concurrent_recompilation) return false; |
214 ScopedLock lock(thread_id_mutex_); | 214 LockGuard<Mutex> lock_guard(&thread_id_mutex_); |
215 return ThreadId::Current().ToInteger() == thread_id_; | 215 return ThreadId::Current().ToInteger() == thread_id_; |
216 } | 216 } |
217 #endif | 217 #endif |
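Note: the debug-only IsOptimizerThread compares the current thread id against the one Run() records on startup; both sides take thread_id_mutex_, which orders the check against that one-time write. Roughly, with standard types in place of ThreadId and V8's Mutex:

    #include <mutex>
    #include <thread>

    std::mutex thread_id_mutex;
    std::thread::id optimizer_thread_id;  // written once by the worker

    // Called at the top of the worker's Run().
    void RecordOptimizerThread() {
      std::lock_guard<std::mutex> guard(thread_id_mutex);
      optimizer_thread_id = std::this_thread::get_id();
    }

    // Debug assertion helper: true only on the compiler thread.
    bool IsOptimizerThread() {
      std::lock_guard<std::mutex> guard(thread_id_mutex);
      return std::this_thread::get_id() == optimizer_thread_id;
    }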
218 | 218 |
219 | 219 |
220 } } // namespace v8::internal | 220 } } // namespace v8::internal |