OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/optimizing-compiler-thread.h" | 5 #include "src/optimizing-compiler-thread.h" |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/full-codegen.h" | 10 #include "src/full-codegen.h" |
(...skipping 84 matching lines...)
95 DISALLOW_COPY_AND_ASSIGN(CompileTask); | 95 DISALLOW_COPY_AND_ASSIGN(CompileTask); |
96 }; | 96 }; |
97 | 97 |
98 | 98 |
99 OptimizingCompilerThread::~OptimizingCompilerThread() { | 99 OptimizingCompilerThread::~OptimizingCompilerThread() { |
100 DCHECK_EQ(0, input_queue_length_); | 100 DCHECK_EQ(0, input_queue_length_); |
101 DeleteArray(input_queue_); | 101 DeleteArray(input_queue_); |
102 if (FLAG_concurrent_osr) { | 102 if (FLAG_concurrent_osr) { |
103 #ifdef DEBUG | 103 #ifdef DEBUG |
104 for (int i = 0; i < osr_buffer_capacity_; i++) { | 104 for (int i = 0; i < osr_buffer_capacity_; i++) { |
105 CHECK_EQ(NULL, osr_buffer_[i]); | 105 CHECK_NULL(osr_buffer_[i]); |
106 } | 106 } |
107 #endif | 107 #endif |
108 DeleteArray(osr_buffer_); | 108 DeleteArray(osr_buffer_); |
109 } | 109 } |
110 } | 110 } |
111 | 111 |
112 | 112 |
113 void OptimizingCompilerThread::Run() { | 113 void OptimizingCompilerThread::Run() { |
114 #ifdef DEBUG | 114 #ifdef DEBUG |
115 { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | 115 { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); |
(...skipping 55 matching lines...)
171 OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) { | 171 OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) { |
172 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); | 172 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); |
173 if (input_queue_length_ == 0) { | 173 if (input_queue_length_ == 0) { |
174 if (flag) { | 174 if (flag) { |
175 UNREACHABLE(); | 175 UNREACHABLE(); |
176 *flag = CONTINUE; | 176 *flag = CONTINUE; |
177 } | 177 } |
178 return NULL; | 178 return NULL; |
179 } | 179 } |
180 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; | 180 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
181 DCHECK_NE(NULL, job); | 181 DCHECK_NOT_NULL(job); |
182 input_queue_shift_ = InputQueueIndex(1); | 182 input_queue_shift_ = InputQueueIndex(1); |
183 input_queue_length_--; | 183 input_queue_length_--; |
184 if (flag) { | 184 if (flag) { |
185 *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)); | 185 *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)); |
186 } | 186 } |
187 return job; | 187 return job; |
188 } | 188 } |
189 | 189 |
190 | 190 |
191 void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) { | 191 void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) { |
192 DCHECK_NE(NULL, job); | 192 DCHECK_NOT_NULL(job); |
193 | 193 |
194 // The function may have already been optimized by OSR. Simply continue. | 194 // The function may have already been optimized by OSR. Simply continue. |
195 OptimizedCompileJob::Status status = job->OptimizeGraph(); | 195 OptimizedCompileJob::Status status = job->OptimizeGraph(); |
196 USE(status); // Prevent an unused-variable error in release mode. | 196 USE(status); // Prevent an unused-variable error in release mode. |
197 DCHECK(status != OptimizedCompileJob::FAILED); | 197 DCHECK(status != OptimizedCompileJob::FAILED); |
198 | 198 |
199 // The function may have already been optimized by OSR. Simply continue. | 199 // The function may have already been optimized by OSR. Simply continue. |
200 // Use a mutex to make sure that functions marked for install | 200 // Use a mutex to make sure that functions marked for install |
201 // are always also queued. | 201 // are always also queued. |
202 if (job_based_recompilation_) output_queue_mutex_.Lock(); | 202 if (job_based_recompilation_) output_queue_mutex_.Lock(); |
(...skipping 274 matching lines...)
477 | 477 |
478 | 478 |
479 bool OptimizingCompilerThread::IsOptimizerThread() { | 479 bool OptimizingCompilerThread::IsOptimizerThread() { |
480 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | 480 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); |
481 return ThreadId::Current().ToInteger() == thread_id_; | 481 return ThreadId::Current().ToInteger() == thread_id_; |
482 } | 482 } |
483 #endif | 483 #endif |
484 | 484 |
485 | 485 |
486 } } // namespace v8::internal | 486 } } // namespace v8::internal |
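
For readers unfamiliar with the assertion macros this change switches to, below is a minimal standalone sketch of CHECK_NULL / CHECK_NOT_NULL style null checks in the same spirit as the diff. The definitions are illustrative assumptions only, not V8's actual macros (which may differ in detail); in V8 the DCHECK_* variants are additionally compiled away in release builds.

// Illustrative sketch only: simplified null-check assertion macros modelled
// on the ones used in this diff (not V8's real definitions).
#include <cstdio>
#include <cstdlib>

#define CHECK(condition)                                      \
  do {                                                        \
    if (!(condition)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #condition); \
      std::abort();                                           \
    }                                                         \
  } while (false)

#define CHECK_NULL(val) CHECK((val) == nullptr)      // stands in for CHECK_EQ(NULL, val)
#define CHECK_NOT_NULL(val) CHECK((val) != nullptr)  // stands in for CHECK_NE(NULL, val)

struct OptimizedCompileJob {};  // Hypothetical stand-in for the real job type.

int main() {
  OptimizedCompileJob* empty_slot = nullptr;
  CHECK_NULL(empty_slot);    // Passes: the buffer slot holds no job.

  OptimizedCompileJob job;
  OptimizedCompileJob* next = &job;
  CHECK_NOT_NULL(next);      // Passes: a job was dequeued.
  return 0;
}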