OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/optimizing-compiler-thread.h" | 5 #include "src/optimizing-compiler-thread.h" |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/full-codegen.h" | 10 #include "src/full-codegen.h" |
11 #include "src/hydrogen.h" | 11 #include "src/hydrogen.h" |
12 #include "src/isolate.h" | 12 #include "src/isolate.h" |
13 #include "src/v8threads.h" | 13 #include "src/v8threads.h" |
14 | 14 |
15 namespace v8 { | 15 namespace v8 { |
16 namespace internal { | 16 namespace internal { |
17 | 17 |
 | 18 class OptimizingCompilerThread::CompileTask : public v8::Task { |
 | 19 public: |
 | 20 CompileTask(Isolate* isolate, OptimizedCompileJob* job) |
 | 21 : isolate_(isolate), job_(job) {} |
 | 22 |
 | 23 virtual ~CompileTask() {} |
 | 24 |
 | 25 private: |
 | 26 // v8::Task overrides. |
 | 27 virtual void Run() OVERRIDE { |
 | 28 // The function may have already been optimized by OSR. Simply continue. |
 | 29 OptimizedCompileJob::Status status = job_->OptimizeGraph(); |
 | 30 USE(status); // Prevent an unused-variable error in release mode. |
 | 31 DCHECK(status != OptimizedCompileJob::FAILED); |
 | 32 |
 | 33 // The function may have already been optimized by OSR. Simply continue. |
 | 34 // Use a mutex to make sure that functions marked for install |
 | 35 // are always also queued. |
 | 36 { |
 | 37 base::LockGuard<base::Mutex> lock_guard( |
 | 38 &isolate_->optimizing_compiler_thread()->output_queue_mutex_); |
 | 39 isolate_->optimizing_compiler_thread()->output_queue_.Enqueue(job_); |
 | 40 } |
 | 41 isolate_->stack_guard()->RequestInstallCode(); |
 | 42 { |
 | 43 base::LockGuard<base::Mutex> lock_guard( |
 | 44 &isolate_->optimizing_compiler_thread()->input_queue_mutex_); |
 | 45 isolate_->optimizing_compiler_thread()->input_queue_length_--; |
 | 46 } |
 | 47 isolate_->optimizing_compiler_thread()->input_queue_semaphore_.Signal(); |
 | 48 } |
 | 49 |
 | 50 Isolate* isolate_; |
 | 51 OptimizedCompileJob* job_; |
 | 52 |
 | 53 DISALLOW_COPY_AND_ASSIGN(CompileTask); |
 | 54 }; |
 | 55 |
 | 56 |
18 OptimizingCompilerThread::~OptimizingCompilerThread() { | 57 OptimizingCompilerThread::~OptimizingCompilerThread() { |
19 DCHECK_EQ(0, input_queue_length_); | 58 DCHECK_EQ(0, input_queue_length_); |
20 DeleteArray(input_queue_); | 59 DeleteArray(input_queue_); |
21 if (FLAG_concurrent_osr) { | 60 if (FLAG_concurrent_osr) { |
22 #ifdef DEBUG | 61 #ifdef DEBUG |
23 for (int i = 0; i < osr_buffer_capacity_; i++) { | 62 for (int i = 0; i < osr_buffer_capacity_; i++) { |
24 CHECK_EQ(NULL, osr_buffer_[i]); | 63 CHECK_EQ(NULL, osr_buffer_[i]); |
25 } | 64 } |
26 #endif | 65 #endif |
27 DeleteArray(osr_buffer_); | 66 DeleteArray(osr_buffer_); |
28 } | 67 } |
29 } | 68 } |
30 | 69 |
31 | 70 |
32 void OptimizingCompilerThread::Run() { | 71 void OptimizingCompilerThread::Run() { |
33 #ifdef DEBUG | 72 #ifdef DEBUG |
34 { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | 73 { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); |
35 thread_id_ = ThreadId::Current().ToInteger(); | 74 thread_id_ = ThreadId::Current().ToInteger(); |
36 } | 75 } |
37 #endif | 76 #endif |
38 Isolate::SetIsolateThreadLocals(isolate_, NULL); | 77 Isolate::SetIsolateThreadLocals(isolate_, NULL); |
39 DisallowHeapAllocation no_allocation; | 78 DisallowHeapAllocation no_allocation; |
40 DisallowHandleAllocation no_handles; | 79 DisallowHandleAllocation no_handles; |
41 DisallowHandleDereference no_deref; | 80 DisallowHandleDereference no_deref; |
Yang (2014/10/07 07:00:40): Please copy those Disallow* scopes over to Compile
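A minimal sketch of what this comment appears to ask for, assuming the scopes are mirrored verbatim from OptimizingCompilerThread::Run() (old lines 39-41 / new lines 78-80). This illustrates the suggestion only; it is not part of the uploaded patch, and the surrounding CompileTask members are as declared above.

```cpp
// Illustration only: CompileTask::Run() with the Disallow* scopes the
// reviewer asks to copy over. The rest of the body is unchanged from the
// patch above.
virtual void Run() OVERRIDE {
  DisallowHeapAllocation no_allocation;   // forbid GC heap allocation here
  DisallowHandleAllocation no_handles;    // forbid creating new handles
  DisallowHandleDereference no_deref;     // forbid dereferencing handles

  OptimizedCompileJob::Status status = job_->OptimizeGraph();
  USE(status);  // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);
  // ... enqueue the job into the output queue, request install, and update
  // the input queue bookkeeping exactly as in the patch above ...
}
```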
42 | 81 |
 | 82 if (FLAG_job_based_recompilation) { |
 | 83 return; |
 | 84 } |
 | 85 |
43 base::ElapsedTimer total_timer; | 86 base::ElapsedTimer total_timer; |
44 if (FLAG_trace_concurrent_recompilation) total_timer.Start(); | 87 if (FLAG_trace_concurrent_recompilation) total_timer.Start(); |
45 | 88 |
46 while (true) { | 89 while (true) { |
47 input_queue_semaphore_.Wait(); | 90 input_queue_semaphore_.Wait(); |
48 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); | 91 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); |
49 | 92 |
50 if (FLAG_concurrent_recompilation_delay != 0) { | 93 if (FLAG_concurrent_recompilation_delay != 0) { |
51 base::OS::Sleep(FLAG_concurrent_recompilation_delay); | 94 base::OS::Sleep(FLAG_concurrent_recompilation_delay); |
52 } | 95 } |
(...skipping 26 matching lines...) | |
79 | 122 |
80 if (FLAG_trace_concurrent_recompilation) { | 123 if (FLAG_trace_concurrent_recompilation) { |
81 time_spent_compiling_ += compiling_timer.Elapsed(); | 124 time_spent_compiling_ += compiling_timer.Elapsed(); |
82 } | 125 } |
83 } | 126 } |
84 } | 127 } |
85 | 128 |
86 | 129 |
87 OptimizedCompileJob* OptimizingCompilerThread::NextInput() { | 130 OptimizedCompileJob* OptimizingCompilerThread::NextInput() { |
88 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); | 131 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); |
 | 132 DCHECK(!FLAG_job_based_recompilation); |
89 if (input_queue_length_ == 0) return NULL; | 133 if (input_queue_length_ == 0) return NULL; |
90 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; | 134 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
91 DCHECK_NE(NULL, job); | 135 DCHECK_NE(NULL, job); |
92 input_queue_shift_ = InputQueueIndex(1); | 136 input_queue_shift_ = InputQueueIndex(1); |
93 input_queue_length_--; | 137 input_queue_length_--; |
94 return job; | 138 return job; |
95 } | 139 } |
96 | 140 |
97 | 141 |
98 void OptimizingCompilerThread::CompileNext() { | 142 void OptimizingCompilerThread::CompileNext() { |
(...skipping 28 matching lines...) | |
127 } else { | 171 } else { |
128 Handle<JSFunction> function = info->closure(); | 172 Handle<JSFunction> function = info->closure(); |
129 function->ReplaceCode(function->shared()->code()); | 173 function->ReplaceCode(function->shared()->code()); |
130 } | 174 } |
131 } | 175 } |
132 delete info; | 176 delete info; |
133 } | 177 } |
134 | 178 |
135 | 179 |
136 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 180 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
 | 181 DCHECK(!FLAG_job_based_recompilation); |
137 OptimizedCompileJob* job; | 182 OptimizedCompileJob* job; |
138 while ((job = NextInput())) { | 183 while ((job = NextInput())) { |
139 // This should not block, since we have one signal on the input queue | 184 // This should not block, since we have one signal on the input queue |
140 // semaphore corresponding to each element in the input queue. | 185 // semaphore corresponding to each element in the input queue. |
141 input_queue_semaphore_.Wait(); | 186 input_queue_semaphore_.Wait(); |
142 // OSR jobs are dealt with separately. | 187 // OSR jobs are dealt with separately. |
143 if (!job->info()->is_osr()) { | 188 if (!job->info()->is_osr()) { |
144 DisposeOptimizedCompileJob(job, restore_function_code); | 189 DisposeOptimizedCompileJob(job, restore_function_code); |
145 } | 190 } |
146 } | 191 } |
(...skipping 18 matching lines...) | |
165 osr_buffer_[i] = NULL; | 210 osr_buffer_[i] = NULL; |
166 } | 211 } |
167 } | 212 } |
168 } | 213 } |
169 | 214 |
170 | 215 |
171 void OptimizingCompilerThread::Flush() { | 216 void OptimizingCompilerThread::Flush() { |
172 DCHECK(!IsOptimizerThread()); | 217 DCHECK(!IsOptimizerThread()); |
173 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | 218 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); |
174 if (FLAG_block_concurrent_recompilation) Unblock(); | 219 if (FLAG_block_concurrent_recompilation) Unblock(); |
175 input_queue_semaphore_.Signal(); | 220 if (!FLAG_job_based_recompilation) { |
176 stop_semaphore_.Wait(); | 221 input_queue_semaphore_.Signal(); |
 | 222 stop_semaphore_.Wait(); |
 | 223 } |
177 FlushOutputQueue(true); | 224 FlushOutputQueue(true); |
178 if (FLAG_concurrent_osr) FlushOsrBuffer(true); | 225 if (FLAG_concurrent_osr) FlushOsrBuffer(true); |
179 if (FLAG_trace_concurrent_recompilation) { | 226 if (FLAG_trace_concurrent_recompilation) { |
180 PrintF(" ** Flushed concurrent recompilation queues.\n"); | 227 PrintF(" ** Flushed concurrent recompilation queues.\n"); |
181 } | 228 } |
182 } | 229 } |
183 | 230 |
184 | 231 |
185 void OptimizingCompilerThread::Stop() { | 232 void OptimizingCompilerThread::Stop() { |
186 DCHECK(!IsOptimizerThread()); | 233 DCHECK(!IsOptimizerThread()); |
187 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); | 234 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); |
188 if (FLAG_block_concurrent_recompilation) Unblock(); | 235 if (FLAG_block_concurrent_recompilation) Unblock(); |
189 input_queue_semaphore_.Signal(); | 236 if (!FLAG_job_based_recompilation) { |
190 stop_semaphore_.Wait(); | 237 input_queue_semaphore_.Signal(); |
 | 238 stop_semaphore_.Wait(); |
 | 239 } |
191 | 240 |
192 if (FLAG_concurrent_recompilation_delay != 0) { | 241 if (FLAG_job_based_recompilation) { |
 | 242 while (true) { |
 | 243 { |
 | 244 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); |
 | 245 if (!input_queue_length_) break; |
 | 246 } |
 | 247 input_queue_semaphore_.Wait(); |
 | 248 } |
 | 249 } else if (FLAG_concurrent_recompilation_delay != 0) { |
193 // At this point the optimizing compiler thread's event loop has stopped. | 250 // At this point the optimizing compiler thread's event loop has stopped. |
194 // There is no need for a mutex when reading input_queue_length_. | 251 // There is no need for a mutex when reading input_queue_length_. |
195 while (input_queue_length_ > 0) CompileNext(); | 252 while (input_queue_length_ > 0) CompileNext(); |
196 InstallOptimizedFunctions(); | 253 InstallOptimizedFunctions(); |
197 } else { | 254 } else { |
198 FlushInputQueue(false); | 255 FlushInputQueue(false); |
199 FlushOutputQueue(false); | 256 FlushOutputQueue(false); |
200 } | 257 } |
201 | 258 |
202 if (FLAG_concurrent_osr) FlushOsrBuffer(false); | 259 if (FLAG_concurrent_osr) FlushOsrBuffer(false); |
(...skipping 64 matching lines...) | |
267 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); | 324 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); |
268 input_queue_[InputQueueIndex(0)] = job; | 325 input_queue_[InputQueueIndex(0)] = job; |
269 input_queue_length_++; | 326 input_queue_length_++; |
270 } else { | 327 } else { |
271 // Add job to the back of the input queue. | 328 // Add job to the back of the input queue. |
272 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); | 329 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); |
273 DCHECK_LT(input_queue_length_, input_queue_capacity_); | 330 DCHECK_LT(input_queue_length_, input_queue_capacity_); |
274 input_queue_[InputQueueIndex(input_queue_length_)] = job; | 331 input_queue_[InputQueueIndex(input_queue_length_)] = job; |
275 input_queue_length_++; | 332 input_queue_length_++; |
276 } | 333 } |
277 if (FLAG_block_concurrent_recompilation) { | 334 if (FLAG_job_based_recompilation) { |
 | 335 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
 | 336 new CompileTask(isolate_, job), v8::Platform::kShortRunningTask); |
 | 337 } else if (FLAG_block_concurrent_recompilation) { |
278 blocked_jobs_++; | 338 blocked_jobs_++; |
279 } else { | 339 } else { |
280 input_queue_semaphore_.Signal(); | 340 input_queue_semaphore_.Signal(); |
281 } | 341 } |
282 } | 342 } |
283 | 343 |
284 | 344 |
285 void OptimizingCompilerThread::Unblock() { | 345 void OptimizingCompilerThread::Unblock() { |
286 DCHECK(!IsOptimizerThread()); | 346 DCHECK(!IsOptimizerThread()); |
 | 347 if (FLAG_job_based_recompilation) { |
 | 348 return; |
 | 349 } |
287 while (blocked_jobs_ > 0) { | 350 while (blocked_jobs_ > 0) { |
288 input_queue_semaphore_.Signal(); | 351 input_queue_semaphore_.Signal(); |
289 blocked_jobs_--; | 352 blocked_jobs_--; |
290 } | 353 } |
291 } | 354 } |
292 | 355 |
293 | 356 |
294 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( | 357 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( |
295 Handle<JSFunction> function, BailoutId osr_ast_id) { | 358 Handle<JSFunction> function, BailoutId osr_ast_id) { |
296 DCHECK(!IsOptimizerThread()); | 359 DCHECK(!IsOptimizerThread()); |
(...skipping 71 matching lines...) | |
368 | 431 |
369 | 432 |
370 bool OptimizingCompilerThread::IsOptimizerThread() { | 433 bool OptimizingCompilerThread::IsOptimizerThread() { |
371 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | 434 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); |
372 return ThreadId::Current().ToInteger() == thread_id_; | 435 return ThreadId::Current().ToInteger() == thread_id_; |
373 } | 436 } |
374 #endif | 437 #endif |
375 | 438 |
376 | 439 |
377 } } // namespace v8::internal | 440 } } // namespace v8::internal |
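For orientation: the new job-based path (new lines 334-337 above) hands each OptimizedCompileJob to the embedder-supplied v8::Platform instead of waking the dedicated compiler thread. A minimal sketch of that hand-off follows; the ownership and ordering notes reflect the v8::Platform contract of this V8 version and are an assumption about the embedder's platform implementation, not something stated in the patch.

```cpp
// Sketch of the hand-off used when FLAG_job_based_recompilation is set.
// CallOnBackgroundThread() takes ownership of the task and runs it on some
// worker thread, with no ordering guarantee between tasks. The task itself
// decrements input_queue_length_ and signals input_queue_semaphore_ when it
// finishes, which is what Stop() waits on in the job-based case above.
V8::GetCurrentPlatform()->CallOnBackgroundThread(
    new CompileTask(isolate_, job), v8::Platform::kShortRunningTask);
```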