Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 34 matching lines...) | |
| 45 | 45 |
| 46 class OptimizingCompilerThread : public Thread { | 46 class OptimizingCompilerThread : public Thread { |
| 47 public: | 47 public: |
| 48 explicit OptimizingCompilerThread(Isolate *isolate) : | 48 explicit OptimizingCompilerThread(Isolate *isolate) : |
| 49 Thread("OptimizingCompilerThread"), | 49 Thread("OptimizingCompilerThread"), |
| 50 #ifdef DEBUG | 50 #ifdef DEBUG |
| 51 thread_id_(0), | 51 thread_id_(0), |
| 52 #endif | 52 #endif |
| 53 isolate_(isolate), | 53 isolate_(isolate), |
| 54 stop_semaphore_(0), | 54 stop_semaphore_(0), |
| 55 input_queue_semaphore_(0), | 55 input_queue_semaphore_(0), |
titzer, 2013/10/10 09:41:57:
The external synchronization on these buffers is s
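
For readers following the synchronization discussion: this CL drops the lock-free `queue_length_` counter and instead pairs a `CircularQueue` with `input_mutex_` (see `IsQueueAvailable()` further down in the diff). A minimal sketch of that pattern, in standard C++ rather than V8's own `Mutex`/`LockGuard` and `CircularQueue` types (the class name and `available()` method below are illustrative assumptions, not V8 API):

```cpp
// Illustrative sketch only, not V8's CircularQueue: a fixed-capacity ring
// buffer whose availability check is done under a caller-held mutex, the
// way input_queue_ is paired with input_mutex_ in this CL.
#include <cstddef>
#include <mutex>
#include <vector>

template <typename T>
class BoundedQueue {
 public:
  explicit BoundedQueue(size_t capacity)
      : slots_(capacity), head_(0), size_(0) {}

  // True if there is room for one more job.
  bool available() const { return size_ < slots_.size(); }

  bool Enqueue(const T& value) {
    if (!available()) return false;
    slots_[(head_ + size_) % slots_.size()] = value;
    ++size_;
    return true;
  }

  bool Dequeue(T* out) {
    if (size_ == 0) return false;
    *out = slots_[head_];
    head_ = (head_ + 1) % slots_.size();
    --size_;
    return true;
  }

 private:
  std::vector<T> slots_;
  size_t head_;
  size_t size_;
};

// Same shape as the new IsQueueAvailable(): a trivial check, but taken
// under the same mutex that guards every Enqueue/Dequeue elsewhere.
bool IsAvailable(std::mutex& mutex, const BoundedQueue<int>& queue) {
  std::lock_guard<std::mutex> guard(mutex);
  return queue.available();
}
```

Whether `CircularQueue` also synchronizes internally is not visible in this header; the sketch assumes the caller-held mutex is the only synchronization, which is what the `LockGuard` in the new `IsQueueAvailable()` suggests.
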
| 56 osr_cursor_(0), | 56 input_queue_(FLAG_concurrent_recompilation_queue_length), |
| 57 osr_buffer_(FLAG_concurrent_recompilation_queue_length + 4), | |
| 57 osr_hits_(0), | 58 osr_hits_(0), |
| 58 osr_attempts_(0) { | 59 osr_attempts_(0) { |
| 59 NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); | 60 NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); |
| 60 NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0)); | |
| 61 if (FLAG_concurrent_osr) { | |
| 62 osr_buffer_size_ = FLAG_concurrent_recompilation_queue_length + 4; | |
| 63 osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_size_); | |
| 64 for (int i = 0; i < osr_buffer_size_; i++) osr_buffer_[i] = NULL; | |
| 65 } | |
| 66 } | |
| 67 | |
| 68 ~OptimizingCompilerThread() { | |
| 69 if (FLAG_concurrent_osr) DeleteArray(osr_buffer_); | |
| 70 } | 61 } |
| 71 | 62 |
| 72 void Run(); | 63 void Run(); |
| 73 void Stop(); | 64 void Stop(); |
| 74 void Flush(); | 65 void Flush(); |
| 75 void QueueForOptimization(RecompileJob* optimizing_compiler); | 66 void QueueForOptimization(RecompileJob* optimizing_compiler); |
| 76 void InstallOptimizedFunctions(); | 67 void InstallOptimizedFunctions(); |
| 77 RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function, | 68 RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function, |
| 78 uint32_t osr_pc_offset); | 69 uint32_t osr_pc_offset); |
| 79 bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset); | 70 bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset); |
| 80 | 71 |
| 81 bool IsQueuedForOSR(JSFunction* function); | 72 bool IsQueuedForOSR(JSFunction* function); |
| 82 | 73 |
| 83 inline bool IsQueueAvailable() { | 74 inline bool IsQueueAvailable() { |
| 84 // We don't need a barrier since we have a data dependency right | 75 LockGuard<Mutex> access_input(&input_mutex_); |
| 85 // after. | 76 return input_queue_.available(); |
| 86 Atomic32 current_length = NoBarrier_Load(&queue_length_); | 77 } |
| 87 | 78 |
| 88 // This can be queried only from the execution thread. | 79 inline void DrainOsrBuffer() { |
titzer, 2013/10/10 09:41:57:
I like the idea of dropping one OSR entry per GC,
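
On the idea of dropping one OSR entry per GC: the new `DrainOsrBuffer()` below simply calls `AddToOsrBuffer(NULL)`, which advances the cyclic buffer's cursor and disposes the stale job it evicts, so a stale job can linger for at most one lap of the buffer. A hypothetical, simplified sketch of that eviction semantics (the `Job`/`Dispose()` names are illustrative, and per the new comments the real `AddToOsrBuffer` advances to the next empty or stale slot rather than blindly overwriting):

```cpp
// Hypothetical sketch of the eviction semantics, not V8's RecompileJob or
// AddToOsrBuffer. Inserting NULL advances the cursor by one slot and
// disposes any job evicted from that slot, so calling Drain() once per GC
// bounds how long a stale OSR job can linger to one full lap of the buffer.
#include <cstddef>
#include <vector>

struct Job {
  void Dispose() { /* release whatever the compilation job holds */ }
};

class OsrRing {
 public:
  explicit OsrRing(size_t capacity) : slots_(capacity, nullptr), cursor_(0) {}

  // Store a job (or nullptr) at the cursor; the previous occupant, if any,
  // is evicted and disposed of.
  void Add(Job* job) {
    if (slots_[cursor_] != nullptr) slots_[cursor_]->Dispose();
    slots_[cursor_] = job;
    cursor_ = (cursor_ + 1) % slots_.size();
  }

  // Equivalent in spirit to DrainOsrBuffer(): AddToOsrBuffer(NULL).
  void Drain() { Add(nullptr); }

 private:
  std::vector<Job*> slots_;
  size_t cursor_;
};
```

Calling `Drain()` on every GC trades a tiny amount of work per collection for a hard bound on how long a dead job can pin its memory, which is the concern the old TODO in this header pointed at.
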
| 89 ASSERT(!IsOptimizerThread()); | 80 // Advance cursor of the cyclic buffer to next empty slot or stale OSR job. |
| 90 // Since only the execution thread increments queue_length_ and | 81 // Dispose said OSR job in the latter case. Calling this on every GC |
| 91 // only one thread can run inside an Isolate at one time, a direct | 82 // should make sure that we do not hold onto stale jobs indefinitely. |
| 92 // doesn't introduce a race -- queue_length_ may decreased in | 83 AddToOsrBuffer(NULL); |
| 93 // meantime, but not increased. | |
| 94 return (current_length < FLAG_concurrent_recompilation_queue_length); | |
| 95 } | 84 } |
| 96 | 85 |
| 97 #ifdef DEBUG | 86 #ifdef DEBUG |
| 98 bool IsOptimizerThread(); | 87 bool IsOptimizerThread(); |
| 99 #endif | 88 #endif |
| 100 | 89 |
| 101 private: | 90 private: |
| 102 enum StopFlag { CONTINUE, STOP, FLUSH }; | 91 enum StopFlag { CONTINUE, STOP, FLUSH }; |
| 103 | 92 |
| 104 void FlushInputQueue(bool restore_function_code); | 93 void FlushInputQueue(bool restore_function_code); |
| 105 void FlushOutputQueue(bool restore_function_code); | 94 void FlushOutputQueue(bool restore_function_code); |
| 106 void FlushOsrBuffer(bool restore_function_code); | 95 void FlushOsrBuffer(bool restore_function_code); |
| 107 void CompileNext(); | 96 void CompileNext(); |
| 108 | 97 |
| 109 // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry. | 98 // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry. |
| 110 // Tasks evicted from the cyclic buffer are discarded. | 99 // Tasks evicted from the cyclic buffer are discarded. |
| 111 void AddToOsrBuffer(RecompileJob* compiler); | 100 void AddToOsrBuffer(RecompileJob* compiler); |
| 112 void AdvanceOsrCursor() { | |
| 113 osr_cursor_ = (osr_cursor_ + 1) % osr_buffer_size_; | |
| 114 } | |
| 115 | 101 |
| 116 #ifdef DEBUG | 102 #ifdef DEBUG |
| 117 int thread_id_; | 103 int thread_id_; |
| 118 Mutex thread_id_mutex_; | 104 Mutex thread_id_mutex_; |
| 119 #endif | 105 #endif |
| 120 | 106 |
| 121 Isolate* isolate_; | 107 Isolate* isolate_; |
| 122 Semaphore stop_semaphore_; | 108 Semaphore stop_semaphore_; |
| 123 Semaphore input_queue_semaphore_; | 109 Semaphore input_queue_semaphore_; |
| 110 Mutex input_mutex_; | |
| 124 | 111 |
| 125 // Queue of incoming recompilation tasks (including OSR). | 112 // Queue of incoming recompilation tasks (including OSR). |
| 126 UnboundQueue<RecompileJob*> input_queue_; | 113 CircularQueue<RecompileJob*> input_queue_; |
| 127 // Queue of recompilation tasks ready to be installed (excluding OSR). | 114 // Queue of recompilation tasks ready to be installed (excluding OSR). |
| 128 UnboundQueue<RecompileJob*> output_queue_; | 115 UnboundQueue<RecompileJob*> output_queue_; |
| 129 // Cyclic buffer of recompilation tasks for OSR. | 116 // Cyclic buffer of recompilation tasks for OSR. |
| 130 // TODO(yangguo): This may keep zombie tasks indefinitely, holding on to | 117 CircularBuffer<RecompileJob*> osr_buffer_; |
| 131 // a lot of memory. Fix this. | |
| 132 RecompileJob** osr_buffer_; | |
| 133 // Cursor for the cyclic buffer. | |
| 134 int osr_cursor_; | |
| 135 int osr_buffer_size_; | |
| 136 | 118 |
| 137 volatile AtomicWord stop_thread_; | 119 volatile AtomicWord stop_thread_; |
| 138 volatile Atomic32 queue_length_; | |
| 139 TimeDelta time_spent_compiling_; | 120 TimeDelta time_spent_compiling_; |
| 140 TimeDelta time_spent_total_; | 121 TimeDelta time_spent_total_; |
| 141 | 122 |
| 142 int osr_hits_; | 123 int osr_hits_; |
| 143 int osr_attempts_; | 124 int osr_attempts_; |
| 144 }; | 125 }; |
| 145 | 126 |
| 146 } } // namespace v8::internal | 127 } } // namespace v8::internal |
| 147 | 128 |
| 148 #endif // V8_OPTIMIZING_COMPILER_THREAD_H_ | 129 #endif // V8_OPTIMIZING_COMPILER_THREAD_H_ |
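
One more note on the comment being deleted from the old `IsQueueAvailable()` (old lines 84-95): its argument rested on `queue_length_` having exactly one incrementing thread. A hedged sketch of that single-writer invariant, with `std::atomic` standing in for V8's `Atomic32`/`NoBarrier_Load` (class and method names here are illustrative):

```cpp
// Sketch of the scheme this CL removes, with std::atomic standing in for
// V8's Atomic32/NoBarrier_Load. Only the execution thread increments the
// counter and only that thread performs the availability check, so by the
// time it enqueues, the value it read can only have decreased (the compiler
// thread just decrements). A relaxed load is therefore enough to keep the
// queue from being overfilled.
#include <atomic>

class LockFreeAvailability {
 public:
  explicit LockFreeAvailability(int capacity)
      : capacity_(capacity), queue_length_(0) {}

  // Execution thread only.
  bool IsQueueAvailable() const {
    return queue_length_.load(std::memory_order_relaxed) < capacity_;
  }

  // Execution thread only, after pushing a job.
  void OnEnqueue() { queue_length_.fetch_add(1, std::memory_order_relaxed); }

  // Compiler thread, after popping a job.
  void OnDequeue() { queue_length_.fetch_sub(1, std::memory_order_relaxed); }

 private:
  const int capacity_;
  std::atomic<int> queue_length_;
};
```

This is the property the removed comment was tersely describing; the CL replaces it with the plainer mutex-guarded check on the new `CircularQueue`.
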