Index: src/optimizing-compiler-thread.h
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index d98a8b2b7e75ea25ee800ce5975c4da15b847a75..f27eeaeefb75095619d2110a7309c64583d8b35f 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -53,20 +53,11 @@ class OptimizingCompilerThread : public Thread {
       isolate_(isolate),
       stop_semaphore_(0),
       input_queue_semaphore_(0),

titzer  2013/10/10 09:41:57
The external synchronization on these buffers is s…
-      osr_cursor_(0),
+      input_queue_(FLAG_concurrent_recompilation_queue_length),
+      osr_buffer_(FLAG_concurrent_recompilation_queue_length + 4),
       osr_hits_(0),
       osr_attempts_(0) {
     NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
-    NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
-    if (FLAG_concurrent_osr) {
-      osr_buffer_size_ = FLAG_concurrent_recompilation_queue_length + 4;
-      osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_size_);
-      for (int i = 0; i < osr_buffer_size_; i++) osr_buffer_[i] = NULL;
-    }
-  }
-
-  ~OptimizingCompilerThread() {
-    if (FLAG_concurrent_osr) DeleteArray(osr_buffer_);
   }

   void Run();
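Passing the capacities straight into the member initializers is what lets the FLAG_concurrent_osr guard, the NewArray/DeleteArray pair, and the hand-written destructor disappear: the containers now own their backing store. circular-queue.h is not part of this hunk, so the following is only a sketch of the CircularBuffer<T> semantics the constructor appears to rely on; the accessor names are assumptions, and, per the comment above, any synchronization stays external.

// Illustrative sketch only; not the actual interface introduced by this CL.
// Meant for pointer element types such as RecompileJob*.
template <typename T>
class CircularBuffer {
 public:
  explicit CircularBuffer(int capacity)
      : buffer_(NewArray<T>(capacity)), capacity_(capacity), cursor_(0) {
    for (int i = 0; i < capacity_; i++) buffer_[i] = NULL;
  }
  // The buffer owns its storage, so ~OptimizingCompilerThread() no longer
  // needs an explicit DeleteArray call.
  ~CircularBuffer() { DeleteArray(buffer_); }

  // Assumed accessors, mirroring the raw osr_cursor_ arithmetic they replace.
  T get() const { return buffer_[cursor_]; }
  void set(T value) { buffer_[cursor_] = value; }
  void advance() { cursor_ = (cursor_ + 1) % capacity_; }

 private:
  T* buffer_;
  int capacity_;
  int cursor_;
};

One visible consequence of the initializer form is that osr_buffer_ now allocates its slots even when FLAG_concurrent_osr is off.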
@@ -81,17 +72,15 @@ class OptimizingCompilerThread : public Thread {
   bool IsQueuedForOSR(JSFunction* function);
   inline bool IsQueueAvailable() {
-    // We don't need a barrier since we have a data dependency right
-    // after.
-    Atomic32 current_length = NoBarrier_Load(&queue_length_);
-
-    // This can be queried only from the execution thread.
-    ASSERT(!IsOptimizerThread());
-    // Since only the execution thread increments queue_length_ and
-    // only one thread can run inside an Isolate at one time, a direct
-    // doesn't introduce a race -- queue_length_ may decreased in
-    // meantime, but not increased.
-    return (current_length < FLAG_concurrent_recompilation_queue_length);
+    LockGuard<Mutex> access_input(&input_mutex_);
+    return input_queue_.available();
+  }
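With the atomic queue_length_ counter gone, the capacity check is only meaningful if the compiler thread takes the same lock when it drains the input queue. That side lives in optimizing-compiler-thread.cc and is not in this hunk; a minimal sketch of the matching consumer, with a hypothetical Dequeue() on CircularQueue, might look like this:

// Sketch of the consumer side (the real CompileNext() may differ). Both
// sides hold input_mutex_, so CircularQueue itself needs no internal
// synchronization; the actual compilation runs outside the lock.
void OptimizingCompilerThread::CompileNext() {
  RecompileJob* job;
  {
    LockGuard<Mutex> access_input(&input_mutex_);
    job = input_queue_.Dequeue();  // Dequeue() is assumed, not shown in this CL.
  }
  ASSERT(job != NULL);
  job->OptimizeGraph();            // Status handling elided in this sketch.
  output_queue_.Enqueue(job);
}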
+
+  inline void DrainOsrBuffer() {

titzer  2013/10/10 09:41:57
I like the idea of dropping one OSR entry per GC,…

+    // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
+    // Dispose said OSR job in the latter case. Calling this on every GC
+    // should make sure that we do not hold onto stale jobs indefinitely.
+    AddToOsrBuffer(NULL);
   }
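The hunk only adds the helper; where it gets wired up is not visible in this header. Presumably the heap calls it once per collection, roughly along these lines (the hook shown is an assumption, not part of this CL):

// Hypothetical call site: a GC prologue hook on Heap.
void Heap::GarbageCollectionPrologue() {
  if (FLAG_concurrent_osr) {
    // Drop at most one stale OSR job per GC so the cyclic buffer cannot
    // pin compilation jobs (and their memory) indefinitely.
    isolate()->optimizing_compiler_thread()->DrainOsrBuffer();
  }
}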
 #ifdef DEBUG
@@ -109,9 +98,6 @@ class OptimizingCompilerThread : public Thread {
   // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
   // Tasks evicted from the cyclic buffer are discarded.
   void AddToOsrBuffer(RecompileJob* compiler);
-  void AdvanceOsrCursor() {
-    osr_cursor_ = (osr_cursor_ + 1) % osr_buffer_size_;
-  }
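With AdvanceOsrCursor() removed from the header, the cursor handling has to move behind the CircularBuffer interface or into AddToOsrBuffer() itself, whose body is in the .cc file and not part of this hunk. A rough sketch of the behaviour the new comments describe, using the assumed get()/set()/advance() accessors from earlier plus the pre-existing IsWaitingForInstall() and DisposeRecompileJob() helpers, could be:

// Sketch only; the CL's actual implementation may differ.
void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
  ASSERT(!IsOptimizerThread());
  // Advance to the next slot that is either empty or holds a job whose OSR
  // entry was never taken ("stale").
  RecompileJob* stale;
  while (true) {
    stale = osr_buffer_.get();
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_.advance();
  }
  // Dispose of the stale job, if any, then claim the slot.  With job == NULL
  // (the DrainOsrBuffer() case) this merely ages the buffer by one slot.
  if (stale != NULL) DisposeRecompileJob(stale, false);
  osr_buffer_.set(job);
  osr_buffer_.advance();
}

The "+ 4" headroom in the constructor is presumably what keeps the slot search bounded: only a bounded number of jobs can be in flight and not yet stale, so an empty or stale slot should always be found.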
 #ifdef DEBUG
   int thread_id_;
@@ -121,21 +107,16 @@ class OptimizingCompilerThread : public Thread {
   Isolate* isolate_;
   Semaphore stop_semaphore_;
   Semaphore input_queue_semaphore_;
+  Mutex input_mutex_;
   // Queue of incoming recompilation tasks (including OSR).
-  UnboundQueue<RecompileJob*> input_queue_;
+  CircularQueue<RecompileJob*> input_queue_;
   // Queue of recompilation tasks ready to be installed (excluding OSR).
   UnboundQueue<RecompileJob*> output_queue_;
   // Cyclic buffer of recompilation tasks for OSR.
-  // TODO(yangguo): This may keep zombie tasks indefinitely, holding on to
-  // a lot of memory. Fix this.
-  RecompileJob** osr_buffer_;
-  // Cursor for the cyclic buffer.
-  int osr_cursor_;
-  int osr_buffer_size_;
+  CircularBuffer<RecompileJob*> osr_buffer_;
   volatile AtomicWord stop_thread_;
-  volatile Atomic32 queue_length_;
   TimeDelta time_spent_compiling_;
   TimeDelta time_spent_total_;
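The only operation this header exercises on the new input queue is available(); nothing else about CircularQueue<T> is visible here. A minimal interface consistent with that usage, and with the point above that synchronization stays external in input_mutex_, could look like the following (assumed, not the CL's actual class):

// Assumed shape of the fixed-capacity FIFO; no locking inside, callers
// serialize access via input_mutex_.
template <typename T>
class CircularQueue {
 public:
  explicit CircularQueue(int capacity)
      : buffer_(NewArray<T>(capacity)), capacity_(capacity),
        head_(0), size_(0) {}
  ~CircularQueue() { DeleteArray(buffer_); }

  bool available() const { return size_ < capacity_; }

  void Enqueue(const T& value) {
    ASSERT(available());
    buffer_[(head_ + size_) % capacity_] = value;
    size_++;
  }

  T Dequeue() {
    ASSERT(size_ > 0);
    T value = buffer_[head_];
    head_ = (head_ + 1) % capacity_;
    size_--;
    return value;
  }

 private:
  T* buffer_;
  int capacity_;
  int head_;
  int size_;
};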