OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/optimizing-compiler-thread.h" | 5 #include "src/optimizing-compiler-thread.h" |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/full-codegen.h" | 10 #include "src/full-codegen.h" |
11 #include "src/hydrogen.h" | 11 #include "src/hydrogen.h" |
12 #include "src/isolate.h" | 12 #include "src/isolate.h" |
13 #include "src/v8threads.h" | 13 #include "src/v8threads.h" |
14 | 14 |
15 namespace v8 { | 15 namespace v8 { |
16 namespace internal { | 16 namespace internal { |
17 | 17 |
18 class OptimizingCompilerThread::CompileTask : public v8::Task { | |
19 public: | |
20 CompileTask(Isolate* isolate, OptimizedCompileJob* job) | |
21 : isolate_(isolate), job_(job) {} | |
22 | |
23 virtual ~CompileTask() {} | |
24 | |
25 private: | |
26 // v8::Task overrides. | |
27 virtual void Run() OVERRIDE { | |
28 Isolate::SetIsolateThreadLocals(isolate_, NULL); | |
    Yang (2014/10/07 07:35:37): I guess we are assuming that the thread this tasks
    jochen (gone - plz use gerrit) (2014/10/07 07:58:32): we can't know which thread this will run on. Idea
29 DisallowHeapAllocation no_allocation; | |
30 DisallowHandleAllocation no_handles; | |
31 DisallowHandleDereference no_deref; | |
32 | |
33 // The function may have already been optimized by OSR. Simply continue. | |
34 OptimizedCompileJob::Status status = job_->OptimizeGraph(); | |
35 USE(status); // Prevent an unused-variable error in release mode. | |
36 DCHECK(status != OptimizedCompileJob::FAILED); | |
37 | |
38 // The function may have already been optimized by OSR. Simply continue. | |
39 // Use a mutex to make sure that functions marked for install | |
40 // are always also queued. | |
41 { | |
42 base::LockGuard<base::Mutex> lock_guard( | |
43 &isolate_->optimizing_compiler_thread()->output_queue_mutex_); | |
44 isolate_->optimizing_compiler_thread()->output_queue_.Enqueue(job_); | |
45 } | |
46 isolate_->stack_guard()->RequestInstallCode(); | |
47 { | |
48 base::LockGuard<base::Mutex> lock_guard( | |
49 &isolate_->optimizing_compiler_thread()->input_queue_mutex_); | |
50 isolate_->optimizing_compiler_thread()->input_queue_length_--; | |
51 } | |
52 isolate_->optimizing_compiler_thread()->input_queue_semaphore_.Signal(); | |
53 } | |
54 | |
55 Isolate* isolate_; | |
56 OptimizedCompileJob* job_; | |
57 | |
58 DISALLOW_COPY_AND_ASSIGN(CompileTask); | |
59 }; | |
60 | |
61 | |
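The inline exchange between Yang and jochen above concerns the first statement of Run(): the task is handed to the embedder's platform, so there is no way to know up front which worker thread will execute it, and the isolate's thread locals therefore have to be bound inside Run() itself. A minimal standalone sketch of that constraint, using plain std::thread and hypothetical Isolate/Task types rather than V8's, might look like this:

// Standalone sketch (not V8 code): tasks handed to a worker pool cannot know
// which thread will execute them, so any per-thread state -- modelled here by
// a thread_local pointer standing in for the isolate's thread locals -- must
// be bound inside Run() rather than at posting time.
#include <cstdio>
#include <thread>
#include <vector>

struct Isolate { const char* name; };

// Stand-in for the per-thread bookkeeping done by Isolate::SetIsolateThreadLocals.
thread_local Isolate* current_isolate = nullptr;

class Task {
 public:
  virtual ~Task() {}
  virtual void Run() = 0;
};

class CompileLikeTask : public Task {
 public:
  explicit CompileLikeTask(Isolate* isolate) : isolate_(isolate) {}
  void Run() override {
    // Bind the isolate to whichever worker thread happened to pick the task up.
    current_isolate = isolate_;
    std::printf("running for isolate %s on thread %zu\n", current_isolate->name,
                std::hash<std::thread::id>{}(std::this_thread::get_id()));
  }

 private:
  Isolate* isolate_;
};

int main() {
  Isolate isolate{"main"};
  // A toy "platform": each task may land on a different thread.
  std::vector<std::thread> workers;
  for (int i = 0; i < 3; i++) {
    workers.emplace_back([&] { CompileLikeTask(&isolate).Run(); });
  }
  for (auto& t : workers) t.join();
}

On the dedicated-thread path this binding happens once at the top of OptimizingCompilerThread::Run() (line 82 of the new file); on the job-based path it has to happen per task, which is what new line 28 above does.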
18 OptimizingCompilerThread::~OptimizingCompilerThread() { | 62 OptimizingCompilerThread::~OptimizingCompilerThread() { |
19 DCHECK_EQ(0, input_queue_length_); | 63 DCHECK_EQ(0, input_queue_length_); |
20 DeleteArray(input_queue_); | 64 DeleteArray(input_queue_); |
21 if (FLAG_concurrent_osr) { | 65 if (FLAG_concurrent_osr) { |
22 #ifdef DEBUG | 66 #ifdef DEBUG |
23 for (int i = 0; i < osr_buffer_capacity_; i++) { | 67 for (int i = 0; i < osr_buffer_capacity_; i++) { |
24 CHECK_EQ(NULL, osr_buffer_[i]); | 68 CHECK_EQ(NULL, osr_buffer_[i]); |
25 } | 69 } |
26 #endif | 70 #endif |
27 DeleteArray(osr_buffer_); | 71 DeleteArray(osr_buffer_); |
28 } | 72 } |
29 } | 73 } |
30 | 74 |
31 | 75 |
32 void OptimizingCompilerThread::Run() { | 76 void OptimizingCompilerThread::Run() { |
33 #ifdef DEBUG | 77 #ifdef DEBUG |
34 { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | 78 { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); |
35 thread_id_ = ThreadId::Current().ToInteger(); | 79 thread_id_ = ThreadId::Current().ToInteger(); |
36 } | 80 } |
37 #endif | 81 #endif |
38 Isolate::SetIsolateThreadLocals(isolate_, NULL); | 82 Isolate::SetIsolateThreadLocals(isolate_, NULL); |
39 DisallowHeapAllocation no_allocation; | 83 DisallowHeapAllocation no_allocation; |
40 DisallowHandleAllocation no_handles; | 84 DisallowHandleAllocation no_handles; |
41 DisallowHandleDereference no_deref; | 85 DisallowHandleDereference no_deref; |
42 | 86 |
87 if (FLAG_job_based_recompilation) { | |
88 return; | |
89 } | |
90 | |
43 base::ElapsedTimer total_timer; | 91 base::ElapsedTimer total_timer; |
44 if (FLAG_trace_concurrent_recompilation) total_timer.Start(); | 92 if (FLAG_trace_concurrent_recompilation) total_timer.Start(); |
45 | 93 |
46 while (true) { | 94 while (true) { |
47 input_queue_semaphore_.Wait(); | 95 input_queue_semaphore_.Wait(); |
48 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); | 96 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); |
49 | 97 |
50 if (FLAG_concurrent_recompilation_delay != 0) { | 98 if (FLAG_concurrent_recompilation_delay != 0) { |
51 base::OS::Sleep(FLAG_concurrent_recompilation_delay); | 99 base::OS::Sleep(FLAG_concurrent_recompilation_delay); |
52 } | 100 } |
(...skipping 26 matching lines...) | |
79 | 127 |
80 if (FLAG_trace_concurrent_recompilation) { | 128 if (FLAG_trace_concurrent_recompilation) { |
81 time_spent_compiling_ += compiling_timer.Elapsed(); | 129 time_spent_compiling_ += compiling_timer.Elapsed(); |
82 } | 130 } |
83 } | 131 } |
84 } | 132 } |
85 | 133 |
86 | 134 |
87 OptimizedCompileJob* OptimizingCompilerThread::NextInput() { | 135 OptimizedCompileJob* OptimizingCompilerThread::NextInput() { |
88 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); | 136 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); |
137 DCHECK(!FLAG_job_based_recompilation); | |
89 if (input_queue_length_ == 0) return NULL; | 138 if (input_queue_length_ == 0) return NULL; |
90 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; | 139 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
91 DCHECK_NE(NULL, job); | 140 DCHECK_NE(NULL, job); |
92 input_queue_shift_ = InputQueueIndex(1); | 141 input_queue_shift_ = InputQueueIndex(1); |
93 input_queue_length_--; | 142 input_queue_length_--; |
94 return job; | 143 return job; |
95 } | 144 } |
96 | 145 |
97 | 146 |
98 void OptimizingCompilerThread::CompileNext() { | 147 void OptimizingCompilerThread::CompileNext() { |
(...skipping 28 matching lines...) | |
127 } else { | 176 } else { |
128 Handle<JSFunction> function = info->closure(); | 177 Handle<JSFunction> function = info->closure(); |
129 function->ReplaceCode(function->shared()->code()); | 178 function->ReplaceCode(function->shared()->code()); |
130 } | 179 } |
131 } | 180 } |
132 delete info; | 181 delete info; |
133 } | 182 } |
134 | 183 |
135 | 184 |
136 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 185 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
186 DCHECK(!FLAG_job_based_recompilation); | |
137 OptimizedCompileJob* job; | 187 OptimizedCompileJob* job; |
138 while ((job = NextInput())) { | 188 while ((job = NextInput())) { |
139 // This should not block, since we have one signal on the input queue | 189 // This should not block, since we have one signal on the input queue |
140 // semaphore corresponding to each element in the input queue. | 190 // semaphore corresponding to each element in the input queue. |
141 input_queue_semaphore_.Wait(); | 191 input_queue_semaphore_.Wait(); |
142 // OSR jobs are dealt with separately. | 192 // OSR jobs are dealt with separately. |
143 if (!job->info()->is_osr()) { | 193 if (!job->info()->is_osr()) { |
144 DisposeOptimizedCompileJob(job, restore_function_code); | 194 DisposeOptimizedCompileJob(job, restore_function_code); |
145 } | 195 } |
146 } | 196 } |
(...skipping 18 matching lines...) | |
165 osr_buffer_[i] = NULL; | 215 osr_buffer_[i] = NULL; |
166 } | 216 } |
167 } | 217 } |
168 } | 218 } |
169 | 219 |
170 | 220 |
171 void OptimizingCompilerThread::Flush() { | 221 void OptimizingCompilerThread::Flush() { |
172 DCHECK(!IsOptimizerThread()); | 222 DCHECK(!IsOptimizerThread()); |
173 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | 223 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); |
174 if (FLAG_block_concurrent_recompilation) Unblock(); | 224 if (FLAG_block_concurrent_recompilation) Unblock(); |
175 input_queue_semaphore_.Signal(); | 225 if (!FLAG_job_based_recompilation) { |
176 stop_semaphore_.Wait(); | 226 input_queue_semaphore_.Signal(); |
227 stop_semaphore_.Wait(); | |
228 } | |
177 FlushOutputQueue(true); | 229 FlushOutputQueue(true); |
178 if (FLAG_concurrent_osr) FlushOsrBuffer(true); | 230 if (FLAG_concurrent_osr) FlushOsrBuffer(true); |
179 if (FLAG_trace_concurrent_recompilation) { | 231 if (FLAG_trace_concurrent_recompilation) { |
180 PrintF(" ** Flushed concurrent recompilation queues.\n"); | 232 PrintF(" ** Flushed concurrent recompilation queues.\n"); |
181 } | 233 } |
182 } | 234 } |
183 | 235 |
184 | 236 |
185 void OptimizingCompilerThread::Stop() { | 237 void OptimizingCompilerThread::Stop() { |
186 DCHECK(!IsOptimizerThread()); | 238 DCHECK(!IsOptimizerThread()); |
187 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); | 239 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); |
188 if (FLAG_block_concurrent_recompilation) Unblock(); | 240 if (FLAG_block_concurrent_recompilation) Unblock(); |
189 input_queue_semaphore_.Signal(); | 241 if (!FLAG_job_based_recompilation) { |
190 stop_semaphore_.Wait(); | 242 input_queue_semaphore_.Signal(); |
243 stop_semaphore_.Wait(); | |
244 } | |
191 | 245 |
192 if (FLAG_concurrent_recompilation_delay != 0) { | 246 if (FLAG_job_based_recompilation) { |
247 while (true) { | |
248 { | |
249 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); | |
250 if (!input_queue_length_) break; | |
251 } | |
252 input_queue_semaphore_.Wait(); | |
253 } | |
254 } else if (FLAG_concurrent_recompilation_delay != 0) { | |
193 // At this point the optimizing compiler thread's event loop has stopped. | 255 // At this point the optimizing compiler thread's event loop has stopped. |
194 // There is no need for a mutex when reading input_queue_length_. | 256 // There is no need for a mutex when reading input_queue_length_. |
195 while (input_queue_length_ > 0) CompileNext(); | 257 while (input_queue_length_ > 0) CompileNext(); |
196 InstallOptimizedFunctions(); | 258 InstallOptimizedFunctions(); |
197 } else { | 259 } else { |
198 FlushInputQueue(false); | 260 FlushInputQueue(false); |
199 FlushOutputQueue(false); | 261 FlushOutputQueue(false); |
200 } | 262 } |
201 | 263 |
202 if (FLAG_concurrent_osr) FlushOsrBuffer(false); | 264 if (FLAG_concurrent_osr) FlushOsrBuffer(false); |
(...skipping 64 matching lines...) | |
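With --job_based_recompilation there is no dedicated compiler thread to join, so Stop() above drains the input queue instead: each CompileTask decrements input_queue_length_ under input_queue_mutex_ and then signals input_queue_semaphore_, while Stop() keeps re-checking the counter and waiting on that semaphore until it reaches zero. A standalone model of this drain pattern (C++20, illustrative names only, not V8 code) could look like this:

#include <cstdio>
#include <mutex>
#include <semaphore>
#include <thread>
#include <vector>

// Shared state mirroring input_queue_length_ / input_queue_mutex_ /
// input_queue_semaphore_ from the patch, with illustrative names.
std::mutex queue_mutex;
int pending_jobs = 0;
std::counting_semaphore<> job_done{0};

void worker() {
  // Each background task does its work, then decrements the counter under the
  // mutex and signals the semaphore -- the same order CompileTask::Run() uses.
  {
    std::lock_guard<std::mutex> lock(queue_mutex);
    pending_jobs--;
  }
  job_done.release();
}

void stop() {
  // Drain: re-check the counter under the mutex; if jobs remain, block until
  // the next completion signal and check again.
  while (true) {
    {
      std::lock_guard<std::mutex> lock(queue_mutex);
      if (pending_jobs == 0) break;
    }
    job_done.acquire();
  }
  std::printf("all background jobs drained\n");
}

int main() {
  std::vector<std::thread> tasks;
  {
    std::lock_guard<std::mutex> lock(queue_mutex);
    pending_jobs = 4;
  }
  for (int i = 0; i < 4; i++) tasks.emplace_back(worker);
  stop();
  for (auto& t : tasks) t.join();
}

Checking the counter under the mutex before each wait avoids missing the case where the last job finished before Stop() started waiting.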
267 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); | 329 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); |
268 input_queue_[InputQueueIndex(0)] = job; | 330 input_queue_[InputQueueIndex(0)] = job; |
269 input_queue_length_++; | 331 input_queue_length_++; |
270 } else { | 332 } else { |
271 // Add job to the back of the input queue. | 333 // Add job to the back of the input queue. |
272 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); | 334 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); |
273 DCHECK_LT(input_queue_length_, input_queue_capacity_); | 335 DCHECK_LT(input_queue_length_, input_queue_capacity_); |
274 input_queue_[InputQueueIndex(input_queue_length_)] = job; | 336 input_queue_[InputQueueIndex(input_queue_length_)] = job; |
275 input_queue_length_++; | 337 input_queue_length_++; |
276 } | 338 } |
277 if (FLAG_block_concurrent_recompilation) { | 339 if (FLAG_job_based_recompilation) { |
340 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
341 new CompileTask(isolate_, job), v8::Platform::kShortRunningTask); | |
342 } else if (FLAG_block_concurrent_recompilation) { | |
278 blocked_jobs_++; | 343 blocked_jobs_++; |
279 } else { | 344 } else { |
280 input_queue_semaphore_.Signal(); | 345 input_queue_semaphore_.Signal(); |
281 } | 346 } |
282 } | 347 } |
283 | 348 |
284 | 349 |
285 void OptimizingCompilerThread::Unblock() { | 350 void OptimizingCompilerThread::Unblock() { |
286 DCHECK(!IsOptimizerThread()); | 351 DCHECK(!IsOptimizerThread()); |
352 if (FLAG_job_based_recompilation) { | |
353 return; | |
354 } | |
287 while (blocked_jobs_ > 0) { | 355 while (blocked_jobs_ > 0) { |
288 input_queue_semaphore_.Signal(); | 356 input_queue_semaphore_.Signal(); |
289 blocked_jobs_--; | 357 blocked_jobs_--; |
290 } | 358 } |
291 } | 359 } |
292 | 360 |
293 | 361 |
294 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( | 362 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( |
295 Handle<JSFunction> function, BailoutId osr_ast_id) { | 363 Handle<JSFunction> function, BailoutId osr_ast_id) { |
296 DCHECK(!IsOptimizerThread()); | 364 DCHECK(!IsOptimizerThread()); |
(...skipping 71 matching lines...) | |
368 | 436 |
369 | 437 |
370 bool OptimizingCompilerThread::IsOptimizerThread() { | 438 bool OptimizingCompilerThread::IsOptimizerThread() { |
371 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | 439 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); |
372 return ThreadId::Current().ToInteger() == thread_id_; | 440 return ThreadId::Current().ToInteger() == thread_id_; |
373 } | 441 } |
374 #endif | 442 #endif |
375 | 443 |
376 | 444 |
377 } } // namespace v8::internal | 445 } } // namespace v8::internal |