Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/optimizing-compiler-thread.h" | 5 #include "src/optimizing-compiler-thread.h" |
| 6 | 6 |
| 7 #include "src/v8.h" | 7 #include "src/v8.h" |
| 8 | 8 |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/full-codegen.h" | 10 #include "src/full-codegen.h" |
| 11 #include "src/hydrogen.h" | 11 #include "src/hydrogen.h" |
| 12 #include "src/isolate.h" | 12 #include "src/isolate.h" |
| 13 #include "src/v8threads.h" | 13 #include "src/v8threads.h" |
| 14 | 14 |
| 15 namespace v8 { | 15 namespace v8 { |
| 16 namespace internal { | 16 namespace internal { |
| 17 | 17 |
| 18 static void DisposeOptimizedCompileJob(OptimizedCompileJob* job, | |

> **Benedikt Meurer** (2014/12/22 13:10:48):
> Nit: use anonymous namespace instead of static here. (A sketch of this suggestion follows after the diff.)

| 19 bool restore_function_code) { | |
| 20 // The recompile job is allocated in the CompilationInfo's zone. | |
| 21 CompilationInfo* info = job->info(); | |
| 22 if (restore_function_code) { | |
| 23 if (info->is_osr()) { | |
| 24 if (!job->IsWaitingForInstall()) { | |
| 25 // Remove stack check that guards OSR entry on original code. | |
| 26 Handle<Code> code = info->unoptimized_code(); | |
| 27 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id()); | |
| 28 BackEdgeTable::RemoveStackCheck(code, offset); | |
| 29 } | |
| 30 } else { | |
| 31 Handle<JSFunction> function = info->closure(); | |
| 32 function->ReplaceCode(function->shared()->code()); | |
| 33 } | |
| 34 } | |
| 35 delete info; | |
| 36 } | |
| 37 | |
| 38 | |
| 18 class OptimizingCompilerThread::CompileTask : public v8::Task { | 39 class OptimizingCompilerThread::CompileTask : public v8::Task { |
| 19 public: | 40 public: |
| 20 CompileTask(Isolate* isolate, OptimizedCompileJob* job) | 41 explicit CompileTask(Isolate* isolate) : isolate_(isolate) {} |
| 21 : isolate_(isolate), job_(job) {} | |
| 22 | 42 |
| 23 virtual ~CompileTask() {} | 43 virtual ~CompileTask() {} |
| 24 | 44 |
| 25 private: | 45 private: |
| 26 // v8::Task overrides. | 46 // v8::Task overrides. |
| 27 void Run() OVERRIDE { | 47 void Run() OVERRIDE { |
| 28 DisallowHeapAllocation no_allocation; | 48 DisallowHeapAllocation no_allocation; |
| 29 DisallowHandleAllocation no_handles; | 49 DisallowHandleAllocation no_handles; |
| 30 DisallowHandleDereference no_deref; | 50 DisallowHandleDereference no_deref; |
| 31 | 51 |
| 32 // The function may have already been optimized by OSR. Simply continue. | 52 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); |
| 33 OptimizedCompileJob::Status status = job_->OptimizeGraph(); | |
| 34 USE(status); // Prevent an unused-variable error in release mode. | |
| 35 DCHECK(status != OptimizedCompileJob::FAILED); | |
| 36 | 53 |
| 37 // The function may have already been optimized by OSR. Simply continue. | 54 OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread(); |
| 38 // Use a mutex to make sure that functions marked for install | 55 |
| 39 // are always also queued. | 56 if (thread->recompilation_delay_ != 0) { |
| 57 base::OS::Sleep(thread->recompilation_delay_); | |
| 58 } | |
| 59 | |
| 60 StopFlag flag; | |
| 61 OptimizedCompileJob* job = thread->NextInput(&flag); | |
| 62 | |
| 63 if (flag == CONTINUE) { | |
| 64 thread->CompileNext(job); | |
| 65 } else { | |
| 66 AllowHandleDereference allow_handle_dereference; | |
| 67 if (!job->info()->is_osr()) { | |
| 68 DisposeOptimizedCompileJob(job, true); | |
| 69 } | |
| 70 } | |
| 71 bool signal = false; | |
| 40 { | 72 { |
| 41 base::LockGuard<base::Mutex> lock_guard( | 73 base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_); |
| 42 &isolate_->optimizing_compiler_thread()->output_queue_mutex_); | 74 if (--thread->task_count_ == 0) { |
| 43 isolate_->optimizing_compiler_thread()->output_queue_.Enqueue(job_); | 75 if (static_cast<StopFlag>(base::Acquire_Load(&thread->stop_thread_)) == |
| 76 FLUSH) { | |
| 77 base::Release_Store(&thread->stop_thread_, | |
| 78 static_cast<base::AtomicWord>(CONTINUE)); | |
| 79 signal = true; | |
| 80 } | |
| 81 } | |
| 44 } | 82 } |
| 45 isolate_->stack_guard()->RequestInstallCode(); | 83 if (signal) thread->stop_semaphore_.Signal(); |
| 46 { | |
| 47 base::LockGuard<base::Mutex> lock_guard( | |
| 48 &isolate_->optimizing_compiler_thread()->input_queue_mutex_); | |
| 49 isolate_->optimizing_compiler_thread()->input_queue_length_--; | |
| 50 } | |
| 51 isolate_->optimizing_compiler_thread()->input_queue_semaphore_.Signal(); | |
| 52 } | 84 } |
| 53 | 85 |
| 54 Isolate* isolate_; | 86 Isolate* isolate_; |
| 55 OptimizedCompileJob* job_; | |
| 56 | 87 |
| 57 DISALLOW_COPY_AND_ASSIGN(CompileTask); | 88 DISALLOW_COPY_AND_ASSIGN(CompileTask); |
| 58 }; | 89 }; |
| 59 | 90 |
| 60 | 91 |
| 61 OptimizingCompilerThread::~OptimizingCompilerThread() { | 92 OptimizingCompilerThread::~OptimizingCompilerThread() { |
| 62 DCHECK_EQ(0, input_queue_length_); | 93 DCHECK_EQ(0, input_queue_length_); |
| 63 DeleteArray(input_queue_); | 94 DeleteArray(input_queue_); |
| 64 if (FLAG_concurrent_osr) { | 95 if (FLAG_concurrent_osr) { |
| 65 #ifdef DEBUG | 96 #ifdef DEBUG |
| (...skipping 20 matching lines...) | |
| 86 return; | 117 return; |
| 87 } | 118 } |
| 88 | 119 |
| 89 base::ElapsedTimer total_timer; | 120 base::ElapsedTimer total_timer; |
| 90 if (tracing_enabled_) total_timer.Start(); | 121 if (tracing_enabled_) total_timer.Start(); |
| 91 | 122 |
| 92 while (true) { | 123 while (true) { |
| 93 input_queue_semaphore_.Wait(); | 124 input_queue_semaphore_.Wait(); |
| 94 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); | 125 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); |
| 95 | 126 |
| 96 if (FLAG_concurrent_recompilation_delay != 0) { | 127 if (recompilation_delay_ != 0) { |
| 97 base::OS::Sleep(FLAG_concurrent_recompilation_delay); | 128 base::OS::Sleep(recompilation_delay_); |
| 98 } | 129 } |
| 99 | 130 |
| 100 switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) { | 131 switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) { |
| 101 case CONTINUE: | 132 case CONTINUE: |
| 102 break; | 133 break; |
| 103 case STOP: | 134 case STOP: |
| 104 if (tracing_enabled_) { | 135 if (tracing_enabled_) { |
| 105 time_spent_total_ = total_timer.Elapsed(); | 136 time_spent_total_ = total_timer.Elapsed(); |
| 106 } | 137 } |
| 107 stop_semaphore_.Signal(); | 138 stop_semaphore_.Signal(); |
| 108 return; | 139 return; |
| 109 case FLUSH: | 140 case FLUSH: |
| 110 // The main thread is blocked, waiting for the stop semaphore. | 141 // The main thread is blocked, waiting for the stop semaphore. |
| 111 { AllowHandleDereference allow_handle_dereference; | 142 { AllowHandleDereference allow_handle_dereference; |
| 112 FlushInputQueue(true); | 143 FlushInputQueue(true); |
| 113 } | 144 } |
| 114 base::Release_Store(&stop_thread_, | 145 base::Release_Store(&stop_thread_, |
| 115 static_cast<base::AtomicWord>(CONTINUE)); | 146 static_cast<base::AtomicWord>(CONTINUE)); |
| 116 stop_semaphore_.Signal(); | 147 stop_semaphore_.Signal(); |
| 117 // Return to start of consumer loop. | 148 // Return to start of consumer loop. |
| 118 continue; | 149 continue; |
| 119 } | 150 } |
| 120 | 151 |
| 121 base::ElapsedTimer compiling_timer; | 152 base::ElapsedTimer compiling_timer; |
| 122 if (tracing_enabled_) compiling_timer.Start(); | 153 if (tracing_enabled_) compiling_timer.Start(); |
| 123 | 154 |
| 124 CompileNext(); | 155 CompileNext(NextInput()); |
| 125 | 156 |
| 126 if (tracing_enabled_) { | 157 if (tracing_enabled_) { |
| 127 time_spent_compiling_ += compiling_timer.Elapsed(); | 158 time_spent_compiling_ += compiling_timer.Elapsed(); |
| 128 } | 159 } |
| 129 } | 160 } |
| 130 } | 161 } |
| 131 | 162 |
| 132 | 163 |
| 133 OptimizedCompileJob* OptimizingCompilerThread::NextInput() { | 164 OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) { |
| 134 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); | 165 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); |
| 135 DCHECK(!job_based_recompilation_); | 166 if (input_queue_length_ == 0) { |
| 136 if (input_queue_length_ == 0) return NULL; | 167 if (flag) { |
| 168 UNREACHABLE(); | |
| 169 *flag = CONTINUE; | |
| 170 } | |
| 171 return NULL; | |
| 172 } | |
| 137 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; | 173 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
| 138 DCHECK_NE(NULL, job); | 174 DCHECK_NE(NULL, job); |
| 139 input_queue_shift_ = InputQueueIndex(1); | 175 input_queue_shift_ = InputQueueIndex(1); |
| 140 input_queue_length_--; | 176 input_queue_length_--; |
| 177 if (flag) { | |
| 178 *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)); | |
| 179 } | |
| 141 return job; | 180 return job; |
| 142 } | 181 } |
| 143 | 182 |
| 144 | 183 |
| 145 void OptimizingCompilerThread::CompileNext() { | 184 void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) { |
| 146 OptimizedCompileJob* job = NextInput(); | |
| 147 DCHECK_NE(NULL, job); | 185 DCHECK_NE(NULL, job); |
| 148 | 186 |
| 149 // The function may have already been optimized by OSR. Simply continue. | 187 // The function may have already been optimized by OSR. Simply continue. |
| 150 OptimizedCompileJob::Status status = job->OptimizeGraph(); | 188 OptimizedCompileJob::Status status = job->OptimizeGraph(); |
| 151 USE(status); // Prevent an unused-variable error in release mode. | 189 USE(status); // Prevent an unused-variable error in release mode. |
| 152 DCHECK(status != OptimizedCompileJob::FAILED); | 190 DCHECK(status != OptimizedCompileJob::FAILED); |
| 153 | 191 |
| 154 // The function may have already been optimized by OSR. Simply continue. | 192 // The function may have already been optimized by OSR. Simply continue. |
| 155 // Use a mutex to make sure that functions marked for install | 193 // Use a mutex to make sure that functions marked for install |
| 156 // are always also queued. | 194 // are always also queued. |
| 195 if (job_based_recompilation_) output_queue_mutex_.Lock(); | |
| 157 output_queue_.Enqueue(job); | 196 output_queue_.Enqueue(job); |
| 197 if (job_based_recompilation_) output_queue_mutex_.Unlock(); | |
| 158 isolate_->stack_guard()->RequestInstallCode(); | 198 isolate_->stack_guard()->RequestInstallCode(); |
| 159 } | 199 } |
| 160 | 200 |
| 161 | 201 |
| 162 static void DisposeOptimizedCompileJob(OptimizedCompileJob* job, | |
| 163 bool restore_function_code) { | |
| 164 // The recompile job is allocated in the CompilationInfo's zone. | |
| 165 CompilationInfo* info = job->info(); | |
| 166 if (restore_function_code) { | |
| 167 if (info->is_osr()) { | |
| 168 if (!job->IsWaitingForInstall()) { | |
| 169 // Remove stack check that guards OSR entry on original code. | |
| 170 Handle<Code> code = info->unoptimized_code(); | |
| 171 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id()); | |
| 172 BackEdgeTable::RemoveStackCheck(code, offset); | |
| 173 } | |
| 174 } else { | |
| 175 Handle<JSFunction> function = info->closure(); | |
| 176 function->ReplaceCode(function->shared()->code()); | |
| 177 } | |
| 178 } | |
| 179 delete info; | |
| 180 } | |
| 181 | |
| 182 | |
| 183 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 202 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
| 184 DCHECK(!job_based_recompilation_); | |
| 185 OptimizedCompileJob* job; | 203 OptimizedCompileJob* job; |
| 186 while ((job = NextInput())) { | 204 while ((job = NextInput())) { |
| 205 DCHECK(!job_based_recompilation_); | |
| 187 // This should not block, since we have one signal on the input queue | 206 // This should not block, since we have one signal on the input queue |
| 188 // semaphore corresponding to each element in the input queue. | 207 // semaphore corresponding to each element in the input queue. |
| 189 input_queue_semaphore_.Wait(); | 208 input_queue_semaphore_.Wait(); |
| 190 // OSR jobs are dealt with separately. | 209 // OSR jobs are dealt with separately. |
| 191 if (!job->info()->is_osr()) { | 210 if (!job->info()->is_osr()) { |
| 192 DisposeOptimizedCompileJob(job, restore_function_code); | 211 DisposeOptimizedCompileJob(job, restore_function_code); |
| 193 } | 212 } |
| 194 } | 213 } |
| 195 } | 214 } |
| 196 | 215 |
| 197 | 216 |
| 198 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { | 217 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { |
| 218 base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_); | |
| 199 OptimizedCompileJob* job; | 219 OptimizedCompileJob* job; |
| 200 while (output_queue_.Dequeue(&job)) { | 220 while (output_queue_.Dequeue(&job)) { |
| 201 // OSR jobs are dealt with separately. | 221 // OSR jobs are dealt with separately. |
| 202 if (!job->info()->is_osr()) { | 222 if (!job->info()->is_osr()) { |
| 203 DisposeOptimizedCompileJob(job, restore_function_code); | 223 DisposeOptimizedCompileJob(job, restore_function_code); |
| 204 } | 224 } |
| 205 } | 225 } |
| 206 } | 226 } |
| 207 | 227 |
| 208 | 228 |
| 209 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { | 229 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { |
| 210 for (int i = 0; i < osr_buffer_capacity_; i++) { | 230 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 211 if (osr_buffer_[i] != NULL) { | 231 if (osr_buffer_[i] != NULL) { |
| 212 DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code); | 232 DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code); |
| 213 osr_buffer_[i] = NULL; | 233 osr_buffer_[i] = NULL; |
| 214 } | 234 } |
| 215 } | 235 } |
| 216 } | 236 } |
| 217 | 237 |
| 218 | 238 |
| 219 void OptimizingCompilerThread::Flush() { | 239 void OptimizingCompilerThread::Flush() { |
| 220 DCHECK(!IsOptimizerThread()); | 240 DCHECK(!IsOptimizerThread()); |
| 221 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | 241 bool block = true; |
| 222 if (FLAG_block_concurrent_recompilation) Unblock(); | 242 if (job_based_recompilation_) { |
| 223 if (!job_based_recompilation_) { | 243 base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_); |
| 224 input_queue_semaphore_.Signal(); | 244 block = task_count_ > 0 || blocked_jobs_ > 0; |
| 225 stop_semaphore_.Wait(); | 245 if (block) { |
| 246 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | |
| 247 } | |
| 248 if (FLAG_block_concurrent_recompilation) Unblock(); | |
| 249 } else { | |
| 250 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | |
| 251 if (FLAG_block_concurrent_recompilation) Unblock(); | |
| 226 } | 252 } |
| 253 if (!job_based_recompilation_) input_queue_semaphore_.Signal(); | |
| 254 if (block) stop_semaphore_.Wait(); | |
| 227 FlushOutputQueue(true); | 255 FlushOutputQueue(true); |
| 228 if (FLAG_concurrent_osr) FlushOsrBuffer(true); | 256 if (FLAG_concurrent_osr) FlushOsrBuffer(true); |
| 229 if (tracing_enabled_) { | 257 if (tracing_enabled_) { |
| 230 PrintF(" ** Flushed concurrent recompilation queues.\n"); | 258 PrintF(" ** Flushed concurrent recompilation queues.\n"); |
| 231 } | 259 } |
| 232 } | 260 } |
| 233 | 261 |
| 234 | 262 |
| 235 void OptimizingCompilerThread::Stop() { | 263 void OptimizingCompilerThread::Stop() { |
| 236 DCHECK(!IsOptimizerThread()); | 264 DCHECK(!IsOptimizerThread()); |
| 237 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); | 265 bool block = true; |
| 238 if (FLAG_block_concurrent_recompilation) Unblock(); | 266 if (job_based_recompilation_) { |
| 239 if (!job_based_recompilation_) { | 267 base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_); |
| 240 input_queue_semaphore_.Signal(); | 268 block = task_count_ > 0 || blocked_jobs_ > 0; |
| 241 stop_semaphore_.Wait(); | 269 if (block) { |
| 270 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | |
| 271 } | |
| 272 if (FLAG_block_concurrent_recompilation) Unblock(); | |
| 273 } else { | |
| 274 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); | |
| 275 if (FLAG_block_concurrent_recompilation) Unblock(); | |
| 242 } | 276 } |
| 277 if (!job_based_recompilation_) input_queue_semaphore_.Signal(); | |
| 278 if (block) stop_semaphore_.Wait(); | |
| 243 | 279 |
| 244 if (job_based_recompilation_) { | 280 if (recompilation_delay_ != 0) { |
| 245 while (true) { | |
| 246 { | |
| 247 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); | |
| 248 if (!input_queue_length_) break; | |
| 249 } | |
| 250 input_queue_semaphore_.Wait(); | |
| 251 } | |
| 252 } else if (FLAG_concurrent_recompilation_delay != 0) { | |
| 253 // At this point the optimizing compiler thread's event loop has stopped. | 281 // At this point the optimizing compiler thread's event loop has stopped. |
| 254 // There is no need for a mutex when reading input_queue_length_. | 282 // There is no need for a mutex when reading input_queue_length_. |
| 255 while (input_queue_length_ > 0) CompileNext(); | 283 while (input_queue_length_ > 0) CompileNext(NextInput()); |
| 256 InstallOptimizedFunctions(); | 284 InstallOptimizedFunctions(); |
| 257 } else { | 285 } else { |
| 258 FlushInputQueue(false); | 286 FlushInputQueue(false); |
| 259 FlushOutputQueue(false); | 287 FlushOutputQueue(false); |
| 260 } | 288 } |
| 261 | 289 |
| 262 if (FLAG_concurrent_osr) FlushOsrBuffer(false); | 290 if (FLAG_concurrent_osr) FlushOsrBuffer(false); |
| 263 | 291 |
| 264 if (tracing_enabled_) { | 292 if (tracing_enabled_) { |
| 265 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); | 293 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); |
| 294 if (job_based_recompilation_) percentage = 100.0; | |
| 266 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); | 295 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); |
| 267 } | 296 } |
| 268 | 297 |
| 269 if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) { | 298 if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) { |
| 270 PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_); | 299 PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_); |
| 271 } | 300 } |
| 272 | 301 |
| 273 Join(); | 302 Join(); |
| 274 } | 303 } |
| 275 | 304 |
| (...skipping 50 matching lines...) | |
| 326 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); | 355 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); |
| 327 input_queue_[InputQueueIndex(0)] = job; | 356 input_queue_[InputQueueIndex(0)] = job; |
| 328 input_queue_length_++; | 357 input_queue_length_++; |
| 329 } else { | 358 } else { |
| 330 // Add job to the back of the input queue. | 359 // Add job to the back of the input queue. |
| 331 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); | 360 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); |
| 332 DCHECK_LT(input_queue_length_, input_queue_capacity_); | 361 DCHECK_LT(input_queue_length_, input_queue_capacity_); |
| 333 input_queue_[InputQueueIndex(input_queue_length_)] = job; | 362 input_queue_[InputQueueIndex(input_queue_length_)] = job; |
| 334 input_queue_length_++; | 363 input_queue_length_++; |
| 335 } | 364 } |
| 336 if (job_based_recompilation_) { | 365 if (FLAG_block_concurrent_recompilation) { |
| 366 blocked_jobs_++; | |
| 367 } else if (job_based_recompilation_) { | |
| 368 base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_); | |
| 369 ++task_count_; | |
| 337 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 370 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 338 new CompileTask(isolate_, job), v8::Platform::kShortRunningTask); | 371 new CompileTask(isolate_), v8::Platform::kShortRunningTask); |
| 339 } else if (FLAG_block_concurrent_recompilation) { | |
| 340 blocked_jobs_++; | |
| 341 } else { | 372 } else { |
| 342 input_queue_semaphore_.Signal(); | 373 input_queue_semaphore_.Signal(); |
| 343 } | 374 } |
| 344 } | 375 } |
| 345 | 376 |
| 346 | 377 |
| 347 void OptimizingCompilerThread::Unblock() { | 378 void OptimizingCompilerThread::Unblock() { |
| 348 DCHECK(!IsOptimizerThread()); | 379 DCHECK(!IsOptimizerThread()); |
| 349 if (job_based_recompilation_) { | 380 { |
| 350 return; | 381 base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_); |
| 382 task_count_ += blocked_jobs_; | |
| 351 } | 383 } |
| 352 while (blocked_jobs_ > 0) { | 384 while (blocked_jobs_ > 0) { |
| 353 input_queue_semaphore_.Signal(); | 385 if (job_based_recompilation_) { |
| 386 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
| 387 new CompileTask(isolate_), v8::Platform::kShortRunningTask); | |
| 388 } else { | |
| 389 input_queue_semaphore_.Signal(); | |
| 390 } | |
| 354 blocked_jobs_--; | 391 blocked_jobs_--; |
| 355 } | 392 } |
| 356 } | 393 } |
| 357 | 394 |
| 358 | 395 |
| 359 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( | 396 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( |
| 360 Handle<JSFunction> function, BailoutId osr_ast_id) { | 397 Handle<JSFunction> function, BailoutId osr_ast_id) { |
| 361 DCHECK(!IsOptimizerThread()); | 398 DCHECK(!IsOptimizerThread()); |
| 362 for (int i = 0; i < osr_buffer_capacity_; i++) { | 399 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 363 OptimizedCompileJob* current = osr_buffer_[i]; | 400 OptimizedCompileJob* current = osr_buffer_[i]; |
| (...skipping 69 matching lines...) | |
| 433 | 470 |
| 434 | 471 |
| 435 bool OptimizingCompilerThread::IsOptimizerThread() { | 472 bool OptimizingCompilerThread::IsOptimizerThread() { |
| 436 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | 473 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); |
| 437 return ThreadId::Current().ToInteger() == thread_id_; | 474 return ThreadId::Current().ToInteger() == thread_id_; |
| 438 } | 475 } |
| 439 #endif | 476 #endif |
| 440 | 477 |
| 441 | 478 |
| 442 } } // namespace v8::internal | 479 } } // namespace v8::internal |
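For context on the reviewer's nit above: the C++ style used in Chromium and V8 prefers giving file-local helpers internal linkage via an unnamed namespace rather than file-level `static`. A minimal sketch of the suggested shape, reusing the helper body exactly as it appears in the new side of the patch (it depends on the V8-internal headers already included at the top of `optimizing-compiler-thread.cc`, so it is not standalone):

```cpp
// Sketch of the reviewer's suggestion: wrap the file-local helper in an
// unnamed namespace instead of marking it `static`. The body is copied
// verbatim from the patch.
namespace {

void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove the stack check that guards OSR entry on the original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      // Not an OSR job: roll the function back to its unoptimized code.
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}

}  // namespace
```

A function in an unnamed namespace has internal linkage just as a `static` one does, but the form also covers types and groups of declarations, which is why the style guide prefers it.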