| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 35 matching lines...) |
| 46 class OptimizingCompilerThread : public Thread { | 46 class OptimizingCompilerThread : public Thread { |
| 47 public: | 47 public: |
| 48 explicit OptimizingCompilerThread(Isolate *isolate) : | 48 explicit OptimizingCompilerThread(Isolate *isolate) : |
| 49 Thread("OptimizingCompilerThread"), | 49 Thread("OptimizingCompilerThread"), |
| 50 #ifdef DEBUG | 50 #ifdef DEBUG |
| 51 thread_id_(0), | 51 thread_id_(0), |
| 52 #endif | 52 #endif |
| 53 isolate_(isolate), | 53 isolate_(isolate), |
| 54 stop_semaphore_(0), | 54 stop_semaphore_(0), |
| 55 input_queue_semaphore_(0), | 55 input_queue_semaphore_(0), |
| 56 osr_cursor_(0), | 56 input_queue_capacity_(FLAG_concurrent_recompilation_queue_length), |
| | 57 input_queue_length_(0), |
| | 58 input_queue_shift_(0), |
| | 59 osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4), |
| | 60 osr_buffer_cursor_(0), |
| 57 osr_hits_(0), | 61 osr_hits_(0), |
| 58 osr_attempts_(0) { | 62 osr_attempts_(0), |
| | 63 blocked_jobs_(0) { |
| 59 NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); | 64 NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); |
| 60 NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0)); | 65 input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_); |
| 61 if (FLAG_concurrent_osr) { | 66 if (FLAG_concurrent_osr) { |
| 62 osr_buffer_size_ = FLAG_concurrent_recompilation_queue_length + 4; | 67 // Allocate and mark OSR buffer slots as empty. |
| 63 osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_size_); | 68 osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_); |
| 64 for (int i = 0; i < osr_buffer_size_; i++) osr_buffer_[i] = NULL; | 69 for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL; |
| 65 } | 70 } |
| 66 } | 71 } |
| 67 | 72 |
| 68 ~OptimizingCompilerThread() { | 73 ~OptimizingCompilerThread(); |
| 69 if (FLAG_concurrent_osr) DeleteArray(osr_buffer_); | |
| 70 } | |
| 71 | 74 |
| 72 void Run(); | 75 void Run(); |
| 73 void Stop(); | 76 void Stop(); |
| 74 void Flush(); | 77 void Flush(); |
| 75 void QueueForOptimization(RecompileJob* optimizing_compiler); | 78 void QueueForOptimization(RecompileJob* optimizing_compiler); |
| | 79 void Unblock(); |
| 76 void InstallOptimizedFunctions(); | 80 void InstallOptimizedFunctions(); |
| 77 RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function, | 81 RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function, |
| 78 uint32_t osr_pc_offset); | 82 uint32_t osr_pc_offset); |
| 79 bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset); | 83 bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset); |
| 80 | 84 |
| 81 bool IsQueuedForOSR(JSFunction* function); | 85 bool IsQueuedForOSR(JSFunction* function); |
| 82 | 86 |
| 83 inline bool IsQueueAvailable() { | 87 inline bool IsQueueAvailable() { |
| 84 // We don't need a barrier since we have a data dependency right | 88 LockGuard<Mutex> access_input_queue(&input_queue_mutex_); |
| 85 // after. | 89 return input_queue_length_ < input_queue_capacity_; |
| 86 Atomic32 current_length = NoBarrier_Load(&queue_length_); | 90 } |
| 87 | 91 |
| 88 // This can be queried only from the execution thread. | 92 inline void AgeBufferedOsrJobs() { |
| 89 ASSERT(!IsOptimizerThread()); | 93 // Advance cursor of the cyclic buffer to next empty slot or stale OSR job. |
| 90 // Since only the execution thread increments queue_length_ and | 94 // Dispose said OSR job in the latter case. Calling this on every GC |
| 91 // only one thread can run inside an Isolate at one time, a direct | 95 // should make sure that we do not hold onto stale jobs indefinitely. |
| 92 // doesn't introduce a race -- queue_length_ may decreased in | 96 AddToOsrBuffer(NULL); |
| 93 // meantime, but not increased. | |
| 94 return (current_length < FLAG_concurrent_recompilation_queue_length); | |
| 95 } | 97 } |
| 96 | 98 |
| 97 #ifdef DEBUG | 99 #ifdef DEBUG |
| 98 bool IsOptimizerThread(); | 100 bool IsOptimizerThread(); |
| 99 #endif | 101 #endif |
| 100 | 102 |
| 101 private: | 103 private: |
| 102 enum StopFlag { CONTINUE, STOP, FLUSH }; | 104 enum StopFlag { CONTINUE, STOP, FLUSH }; |
| 103 | 105 |
| 104 void FlushInputQueue(bool restore_function_code); | 106 void FlushInputQueue(bool restore_function_code); |
| 105 void FlushOutputQueue(bool restore_function_code); | 107 void FlushOutputQueue(bool restore_function_code); |
| 106 void FlushOsrBuffer(bool restore_function_code); | 108 void FlushOsrBuffer(bool restore_function_code); |
| 107 void CompileNext(); | 109 void CompileNext(); |
| | 110 RecompileJob* NextInput(); |
| 108 | 111 |
| 109 // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry. | 112 // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry. |
| 110 // Tasks evicted from the cyclic buffer are discarded. | 113 // Tasks evicted from the cyclic buffer are discarded. |
| 111 void AddToOsrBuffer(RecompileJob* compiler); | 114 void AddToOsrBuffer(RecompileJob* compiler); |
| 112 void AdvanceOsrCursor() { | 115 |
| 113 osr_cursor_ = (osr_cursor_ + 1) % osr_buffer_size_; | 116 inline int InputQueueIndex(int i) { |
| | 117 int result = (i + input_queue_shift_) % input_queue_capacity_; |
| | 118 ASSERT_LE(0, result); |
| | 119 ASSERT_LT(result, input_queue_capacity_); |
| | 120 return result; |
| 114 } | 121 } |
| 115 | 122 |
| 116 #ifdef DEBUG | 123 #ifdef DEBUG |
| 117 int thread_id_; | 124 int thread_id_; |
| 118 Mutex thread_id_mutex_; | 125 Mutex thread_id_mutex_; |
| 119 #endif | 126 #endif |
| 120 | 127 |
| 121 Isolate* isolate_; | 128 Isolate* isolate_; |
| 122 Semaphore stop_semaphore_; | 129 Semaphore stop_semaphore_; |
| 123 Semaphore input_queue_semaphore_; | 130 Semaphore input_queue_semaphore_; |
| 124 | 131 |
| 125 // Queue of incoming recompilation tasks (including OSR). | 132 // Circular queue of incoming recompilation tasks (including OSR). |
| 126 UnboundQueue<RecompileJob*> input_queue_; | 133 RecompileJob** input_queue_; |
| | 134 int input_queue_capacity_; |
| | 135 int input_queue_length_; |
| | 136 int input_queue_shift_; |
| | 137 Mutex input_queue_mutex_; |
| | 138 |
| 127 // Queue of recompilation tasks ready to be installed (excluding OSR). | 139 // Queue of recompilation tasks ready to be installed (excluding OSR). |
| 128 UnboundQueue<RecompileJob*> output_queue_; | 140 UnboundQueue<RecompileJob*> output_queue_; |
| | 141 |
| 129 // Cyclic buffer of recompilation tasks for OSR. | 142 // Cyclic buffer of recompilation tasks for OSR. |
| 130 // TODO(yangguo): This may keep zombie tasks indefinitely, holding on to | |
| 131 // a lot of memory. Fix this. | |
| 132 RecompileJob** osr_buffer_; | 143 RecompileJob** osr_buffer_; |
| 133 // Cursor for the cyclic buffer. | 144 int osr_buffer_capacity_; |
| 134 int osr_cursor_; | 145 int osr_buffer_cursor_; |
| 135 int osr_buffer_size_; | |
| 136 | 146 |
| 137 volatile AtomicWord stop_thread_; | 147 volatile AtomicWord stop_thread_; |
| 138 volatile Atomic32 queue_length_; | |
| 139 TimeDelta time_spent_compiling_; | 148 TimeDelta time_spent_compiling_; |
| 140 TimeDelta time_spent_total_; | 149 TimeDelta time_spent_total_; |
| 141 | 150 |
| 142 int osr_hits_; | 151 int osr_hits_; |
| 143 int osr_attempts_; | 152 int osr_attempts_; |
| | 153 |
| | 154 int blocked_jobs_; |
| 144 }; | 155 }; |
| 145 | 156 |
| 146 } } // namespace v8::internal | 157 } } // namespace v8::internal |
| 147 | 158 |
| 148 #endif // V8_OPTIMIZING_COMPILER_THREAD_H_ | 159 #endif // V8_OPTIMIZING_COMPILER_THREAD_H_ |
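The new version replaces the `UnboundQueue<RecompileJob*>` input queue with a fixed-capacity circular buffer addressed through `input_queue_shift_` and `input_queue_length_`, with `InputQueueIndex()` mapping a logical position to a physical slot. The producer/consumer side lives in the .cc file and is not part of this header, so the following is only a minimal standalone sketch of how such a shifted circular queue is typically driven; the `Job` and `CircularQueue` names are hypothetical, and the real thread additionally guards these operations with `input_queue_mutex_`:

```cpp
// Minimal sketch of a shifted circular queue, under assumed semantics.
// Job, CircularQueue, Enqueue and Dequeue are hypothetical stand-ins; the
// real class stores RecompileJob* and locks input_queue_mutex_ around this.
#include <cassert>
#include <vector>

struct Job { int id; };

class CircularQueue {
 public:
  explicit CircularQueue(int capacity)
      : slots_(capacity, nullptr), capacity_(capacity), length_(0), shift_(0) {}

  bool IsAvailable() const { return length_ < capacity_; }

  // Logical index i (0 == oldest job) -> physical slot, as in InputQueueIndex().
  int Index(int i) const {
    int result = (i + shift_) % capacity_;
    assert(0 <= result && result < capacity_);
    return result;
  }

  // Append at the logical end of the queue.
  void Enqueue(Job* job) {
    assert(IsAvailable());
    slots_[Index(length_)] = job;
    length_++;
  }

  // Remove the oldest job by advancing the shift instead of moving elements.
  Job* Dequeue() {
    if (length_ == 0) return nullptr;
    Job* job = slots_[Index(0)];
    slots_[Index(0)] = nullptr;
    shift_ = Index(1);  // the logical window now starts one slot later
    length_--;
    return job;
  }

 private:
  std::vector<Job*> slots_;
  int capacity_;
  int length_;
  int shift_;
};

int main() {
  CircularQueue queue(4);
  Job a{1}, b{2}, c{3};
  queue.Enqueue(&a);
  queue.Enqueue(&b);
  assert(queue.Dequeue()->id == 1);  // oldest job comes out first
  queue.Enqueue(&c);                 // slots are reused as the window wraps
  assert(queue.Dequeue()->id == 2);
  assert(queue.Dequeue()->id == 3);
  return 0;
}
```

Keeping a shift rather than moving elements makes dequeue O(1): the oldest slot is simply abandoned and the start of the logical window advances by one.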
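Similarly, the cyclic OSR buffer (`osr_buffer_`, `osr_buffer_cursor_`) now gets explicit aging: `AgeBufferedOsrJobs()` calls `AddToOsrBuffer(NULL)`, which, per its comment, advances the cursor to the next empty or stale slot and disposes a stale job if it finds one, addressing the removed TODO about zombie tasks. The real `AddToOsrBuffer()` is defined in the .cc file; the sketch below only illustrates that idea under assumed semantics, with hypothetical `OsrJob`/`OsrBuffer` types and a stand-in `IsStale()` test:

```cpp
// Minimal sketch of a cyclic OSR buffer with aging, under assumed semantics.
// OsrJob, OsrBuffer and IsStale() are hypothetical stand-ins; the real code
// stores RecompileJob* and disposes evicted jobs through the compiler.
#include <cstdio>
#include <vector>

struct OsrJob {
  int id;
  int age;                                    // stand-in staleness measure
  bool IsStale() const { return age > 2; }
};

class OsrBuffer {
 public:
  explicit OsrBuffer(int capacity) : slots_(capacity, nullptr), cursor_(0) {}

  // Advance the cursor to the next empty or stale slot, dispose a stale
  // occupant, store the new job there and move on. Passing nullptr stores
  // nothing, which is how a per-GC aging pass drops stale jobs.
  void Add(OsrJob* job) {
    int capacity = static_cast<int>(slots_.size());
    for (int scanned = 0; scanned < capacity; scanned++) {
      OsrJob* occupant = slots_[cursor_];
      if (occupant == nullptr || occupant->IsStale()) break;
      cursor_ = (cursor_ + 1) % capacity;
    }
    // Simplification: after a full unsuccessful scan, the (fresh) job under
    // the cursor is evicted anyway; evicted jobs are simply discarded.
    OsrJob* evicted = slots_[cursor_];
    if (evicted != nullptr) {
      std::printf("disposing OSR job %d\n", evicted->id);
    }
    slots_[cursor_] = job;                    // job == nullptr empties the slot
    cursor_ = (cursor_ + 1) % capacity;
  }

 private:
  std::vector<OsrJob*> slots_;
  int cursor_;
};

int main() {
  OsrBuffer buffer(3);
  OsrJob fresh{1, 0}, stale{2, 5};
  buffer.Add(&fresh);
  buffer.Add(&stale);
  // Aging passes walk the cursor; once it reaches the stale job,
  // that job is disposed, so it is not held onto indefinitely.
  buffer.Add(nullptr);
  buffer.Add(nullptr);  // prints "disposing OSR job 2"
  return 0;
}
```

Because the buffer is bounded at `FLAG_concurrent_recompilation_queue_length + 4` slots and aged on every GC, a never-entered OSR job can no longer pin its compilation artifacts indefinitely.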