Chromium Code Reviews

Unified Diff: src/optimizing-compiler-thread.cc

Issue 639353002: Store local copies of flags needed on the background thread (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: updates (created 6 years, 2 months ago)
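The patch below replaces every background-thread read of FLAG_job_based_recompilation and FLAG_trace_concurrent_recompilation with the members job_based_recompilation_ and tracing_enabled_, so the worker only consults values that were copied while still on the main thread. The copies themselves are taken in src/optimizing-compiler-thread.h, which is changed separately in this CL and not shown here. A standalone sketch of the pattern, using made-up names and a made-up flag rather than the V8 code itself:

// Sketch of the flag-caching pattern this CL applies; all identifiers here
// are illustrative stand-ins, not V8 code.
#include <cstdio>
#include <thread>

bool FLAG_trace_work = true;  // stand-in for a command-line flag parsed at startup

class Worker {
 public:
  // The copy is taken on the thread that constructs the Worker (the main
  // thread), so the background thread never touches the global flag.
  Worker() : tracing_enabled_(FLAG_trace_work) {}

  void Run() {
    // Only the cached member is consulted on the background thread.
    if (tracing_enabled_) std::printf("worker: tracing enabled\n");
  }

 private:
  const bool tracing_enabled_;
};

int main() {
  Worker worker;
  std::thread background([&worker] { worker.Run(); });
  background.join();
  return 0;
}

Taking the copy on the main thread keeps the background thread from ever touching the flag storage once it is running.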
--- src/optimizing-compiler-thread.cc (base)
+++ src/optimizing-compiler-thread.cc (patch set)
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/optimizing-compiler-thread.h"

 #include "src/v8.h"

 #include "src/base/atomicops.h"
 #include "src/full-codegen.h"
(... 64 unchanged lines omitted ...)
@@ -75,71 +75,71 @@
 void OptimizingCompilerThread::Run() {
 #ifdef DEBUG
   { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
     thread_id_ = ThreadId::Current().ToInteger();
   }
 #endif
   DisallowHeapAllocation no_allocation;
   DisallowHandleAllocation no_handles;
   DisallowHandleDereference no_deref;

-  if (FLAG_job_based_recompilation) {
+  if (job_based_recompilation_) {
     return;
   }

   base::ElapsedTimer total_timer;
-  if (FLAG_trace_concurrent_recompilation) total_timer.Start();
+  if (tracing_enabled_) total_timer.Start();

   while (true) {
     input_queue_semaphore_.Wait();
     TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

     if (FLAG_concurrent_recompilation_delay != 0) {
       base::OS::Sleep(FLAG_concurrent_recompilation_delay);
     }

     switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
       case CONTINUE:
         break;
       case STOP:
-        if (FLAG_trace_concurrent_recompilation) {
+        if (tracing_enabled_) {
           time_spent_total_ = total_timer.Elapsed();
         }
         stop_semaphore_.Signal();
         return;
       case FLUSH:
         // The main thread is blocked, waiting for the stop semaphore.
         { AllowHandleDereference allow_handle_dereference;
           FlushInputQueue(true);
         }
         base::Release_Store(&stop_thread_,
                             static_cast<base::AtomicWord>(CONTINUE));
         stop_semaphore_.Signal();
         // Return to start of consumer loop.
         continue;
     }

     base::ElapsedTimer compiling_timer;
-    if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
+    if (tracing_enabled_) compiling_timer.Start();

     CompileNext();

-    if (FLAG_trace_concurrent_recompilation) {
+    if (tracing_enabled_) {
       time_spent_compiling_ += compiling_timer.Elapsed();
     }
   }
 }


 OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
   base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
-  DCHECK(!FLAG_job_based_recompilation);
+  DCHECK(!job_based_recompilation_);
   if (input_queue_length_ == 0) return NULL;
   OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
   DCHECK_NE(NULL, job);
   input_queue_shift_ = InputQueueIndex(1);
   input_queue_length_--;
   return job;
 }


 void OptimizingCompilerThread::CompileNext() {
(... 28 unchanged lines omitted ...)
@@ -174,21 +174,21 @@
     } else {
       Handle<JSFunction> function = info->closure();
       function->ReplaceCode(function->shared()->code());
     }
   }
   delete info;
 }


 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
-  DCHECK(!FLAG_job_based_recompilation);
+  DCHECK(!job_based_recompilation_);
   OptimizedCompileJob* job;
   while ((job = NextInput())) {
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_.Wait();
     // OSR jobs are dealt with separately.
     if (!job->info()->is_osr()) {
       DisposeOptimizedCompileJob(job, restore_function_code);
     }
   }
(... 18 unchanged lines omitted ...)
@@ -213,98 +213,97 @@
       osr_buffer_[i] = NULL;
     }
   }
 }


 void OptimizingCompilerThread::Flush() {
   DCHECK(!IsOptimizerThread());
   base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
   if (FLAG_block_concurrent_recompilation) Unblock();
-  if (!FLAG_job_based_recompilation) {
+  if (!job_based_recompilation_) {
     input_queue_semaphore_.Signal();
     stop_semaphore_.Wait();
   }
   FlushOutputQueue(true);
   if (FLAG_concurrent_osr) FlushOsrBuffer(true);
-  if (FLAG_trace_concurrent_recompilation) {
+  if (tracing_enabled_) {
     PrintF("  ** Flushed concurrent recompilation queues.\n");
   }
 }


 void OptimizingCompilerThread::Stop() {
   DCHECK(!IsOptimizerThread());
   base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
   if (FLAG_block_concurrent_recompilation) Unblock();
-  if (!FLAG_job_based_recompilation) {
+  if (!job_based_recompilation_) {
     input_queue_semaphore_.Signal();
     stop_semaphore_.Wait();
   }

-  if (FLAG_job_based_recompilation) {
+  if (job_based_recompilation_) {
     while (true) {
       {
         base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
         if (!input_queue_length_) break;
       }
       input_queue_semaphore_.Wait();
     }
   } else if (FLAG_concurrent_recompilation_delay != 0) {
     // At this point the optimizing compiler thread's event loop has stopped.
     // There is no need for a mutex when reading input_queue_length_.
     while (input_queue_length_ > 0) CompileNext();
     InstallOptimizedFunctions();
   } else {
     FlushInputQueue(false);
     FlushOutputQueue(false);
   }

   if (FLAG_concurrent_osr) FlushOsrBuffer(false);

-  if (FLAG_trace_concurrent_recompilation) {
+  if (tracing_enabled_) {
     double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
     PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
   }

-  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
-      FLAG_concurrent_osr) {
+  if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) {
     PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
   }

   Join();
 }


 void OptimizingCompilerThread::InstallOptimizedFunctions() {
   DCHECK(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);

   OptimizedCompileJob* job;
   while (output_queue_.Dequeue(&job)) {
     CompilationInfo* info = job->info();
     Handle<JSFunction> function(*info->closure());
     if (info->is_osr()) {
       if (FLAG_trace_osr) {
         PrintF("[COSR - ");
         function->ShortPrint();
         PrintF(" is ready for install and entry at AST id %d]\n",
                info->osr_ast_id().ToInt());
       }
       job->WaitForInstall();
       // Remove stack check that guards OSR entry on original code.
       Handle<Code> code = info->unoptimized_code();
       uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
       BackEdgeTable::RemoveStackCheck(code, offset);
     } else {
       if (function->IsOptimized()) {
-        if (FLAG_trace_concurrent_recompilation) {
+        if (tracing_enabled_) {
           PrintF("  ** Aborting compilation for ");
           function->ShortPrint();
           PrintF(" as it has already been optimized.\n");
         }
         DisposeOptimizedCompileJob(job, false);
       } else {
         Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
         function->ReplaceCode(
             code.is_null() ? function->shared()->code() : *code);
       }
(... 16 unchanged lines omitted ...)
@@ -327,34 +326,34 @@
     input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
     input_queue_[InputQueueIndex(0)] = job;
     input_queue_length_++;
   } else {
     // Add job to the back of the input queue.
     base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
     DCHECK_LT(input_queue_length_, input_queue_capacity_);
     input_queue_[InputQueueIndex(input_queue_length_)] = job;
     input_queue_length_++;
   }
-  if (FLAG_job_based_recompilation) {
+  if (job_based_recompilation_) {
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         new CompileTask(isolate_, job), v8::Platform::kShortRunningTask);
   } else if (FLAG_block_concurrent_recompilation) {
     blocked_jobs_++;
   } else {
     input_queue_semaphore_.Signal();
   }
 }


 void OptimizingCompilerThread::Unblock() {
   DCHECK(!IsOptimizerThread());
-  if (FLAG_job_based_recompilation) {
+  if (job_based_recompilation_) {
     return;
   }
   while (blocked_jobs_ > 0) {
     input_queue_semaphore_.Signal();
     blocked_jobs_--;
   }
 }


 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
(... 73 unchanged lines omitted ...)
@@ -434,10 +433,10 @@


 bool OptimizingCompilerThread::IsOptimizerThread() {
   base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
   return ThreadId::Current().ToInteger() == thread_id_;
 }
 #endif


 } }  // namespace v8::internal
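NextInput() and QueueForOptimization() above treat input_queue_ as a fixed-capacity ring buffer addressed through InputQueueIndex(), which is defined in the header rather than in this file. A standalone sketch of that indexing scheme, assuming the helper simply wraps (shift + i) around the capacity (illustrative names, not the V8 code):

#include <cassert>

// Illustrative ring-buffer index helper in the spirit of InputQueueIndex();
// the real implementation lives in src/optimizing-compiler-thread.h.
class RingIndex {
 public:
  explicit RingIndex(int capacity) : capacity_(capacity), shift_(0) {}

  // Maps logical position i (0 == current front) to a physical slot.
  int At(int i) const {
    int result = (i + shift_) % capacity_;
    assert(0 <= result && result < capacity_);
    return result;
  }

  // Popping the front advances the shift by one slot, mirroring
  // input_queue_shift_ = InputQueueIndex(1) in NextInput().
  void PopFront() { shift_ = At(1); }

  // Prepending (the OSR path in QueueForOptimization()) moves the shift back
  // by one slot, mirroring InputQueueIndex(input_queue_capacity_ - 1).
  void PushFront() { shift_ = At(capacity_ - 1); }

 private:
  int capacity_;
  int shift_;
};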