OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 90 matching lines...)
101 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); | 101 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); |
102 | 102 |
103 // The function may have already been optimized by OSR. Simply continue. | 103 // The function may have already been optimized by OSR. Simply continue. |
104 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); | 104 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); |
105 USE(status); // Prevent an unused-variable error in release mode. | 105 USE(status); // Prevent an unused-variable error in release mode. |
106 ASSERT(status != OptimizingCompiler::FAILED); | 106 ASSERT(status != OptimizingCompiler::FAILED); |
107 | 107 |
108 // The compilation result is handed back to the main thread. | 108 // The compilation result is handed back to the main thread. |
109 // Use a mutex to make sure that functions marked for install | 109 // Use a mutex to make sure that functions marked for install |
110 // are always also queued. | 110 // are always also queued. |
111 LockGuard<Mutex> mark_and_queue(&install_mutex_); | 111 if (!optimizing_compiler->info()->osr_ast_id().IsNone()) { |
112 { Heap::RelocationLock relocation_lock(isolate_->heap()); | 112 ASSERT(FLAG_speculative_concurrent_osr); |
| 113 LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); |
| 114 osr_candidates_.RemoveElement(optimizing_compiler); |
| 115 ready_for_osr_.Add(optimizing_compiler); |
| 116 } else { |
| 117 LockGuard<Mutex> mark_and_queue(&install_mutex_); |
| 118 Heap::RelocationLock relocation_lock(isolate_->heap()); |
113 AllowHandleDereference ahd; | 119 AllowHandleDereference ahd; |
114 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); | 120 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); |
| 121 output_queue_.Enqueue(optimizing_compiler); |
115 } | 122 } |
116 output_queue_.Enqueue(optimizing_compiler); | |
117 } | 123 } |
118 | 124 |
119 | 125 |
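Reviewer note: the tail of CompileNext() now splits results by whether the job is an OSR compilation. OSR results are parked on ready_for_osr_ under osr_list_mutex_ until execution actually reaches the OSR point, while regular results are marked and enqueued for install under install_mutex_. A minimal stand-alone sketch of that split, using std::mutex and std::vector as stand-ins for V8's LockGuard and List (all names below are illustrative, not V8 API):

    #include <algorithm>
    #include <mutex>
    #include <vector>

    struct Job { bool is_osr; };

    std::mutex osr_list_mutex;         // guards both OSR lists
    std::mutex install_mutex;          // pairs with the install side
    std::vector<Job*> osr_candidates;  // requested, compile in flight
    std::vector<Job*> ready_for_osr;   // compiled, awaiting the OSR point
    std::vector<Job*> output_queue;    // regular results, installed later

    void OnCompileDone(Job* job) {
      if (job->is_osr) {
        // Park the result; the main thread claims it when the running
        // unoptimized code reaches the on-stack-replacement point.
        std::lock_guard<std::mutex> lock(osr_list_mutex);
        osr_candidates.erase(
            std::remove(osr_candidates.begin(), osr_candidates.end(), job),
            osr_candidates.end());
        ready_for_osr.push_back(job);
      } else {
        // Regular path: mark and enqueue inside one critical section so
        // the consumer never dequeues an unmarked function.
        std::lock_guard<std::mutex> lock(install_mutex);
        output_queue.push_back(job);
      }
    }
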
120 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 126 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
121 OptimizingCompiler* optimizing_compiler; | 127 OptimizingCompiler* optimizing_compiler; |
122 // The optimizing compiler is allocated in the CompilationInfo's zone. | 128 // The optimizing compiler is allocated in the CompilationInfo's zone. |
123 while (input_queue_.Dequeue(&optimizing_compiler)) { | 129 while (input_queue_.Dequeue(&optimizing_compiler)) { |
124 // This should not block, since we have one signal on the input queue | 130 // This should not block, since we have one signal on the input queue |
125 // semaphore corresponding to each element in the input queue. | 131 // semaphore corresponding to each element in the input queue. |
126 input_queue_semaphore_->Wait(); | 132 input_queue_semaphore_->Wait(); |
(...skipping 11 matching lines...)
138 OptimizingCompiler* optimizing_compiler; | 144 OptimizingCompiler* optimizing_compiler; |
139 // The optimizing compiler is allocated in the CompilationInfo's zone. | 145 // The optimizing compiler is allocated in the CompilationInfo's zone. |
140 while (output_queue_.Dequeue(&optimizing_compiler)) { | 146 while (output_queue_.Dequeue(&optimizing_compiler)) { |
141 CompilationInfo* info = optimizing_compiler->info(); | 147 CompilationInfo* info = optimizing_compiler->info(); |
142 if (restore_function_code) { | 148 if (restore_function_code) { |
143 Handle<JSFunction> function = info->closure(); | 149 Handle<JSFunction> function = info->closure(); |
144 function->ReplaceCode(function->shared()->code()); | 150 function->ReplaceCode(function->shared()->code()); |
145 } | 151 } |
146 delete info; | 152 delete info; |
147 } | 153 } |
| 154 |
| 155 osr_candidates_.Clear(); |
| 156 while (ready_for_osr_.length() > 0) { |
| 157 ASSERT(FLAG_speculative_concurrent_osr); |
| 158 optimizing_compiler = ready_for_osr_.RemoveLast(); |
| 159 CompilationInfo* info = optimizing_compiler->info(); |
| 160 delete info; |
| 161 } |
148 } | 162 } |
149 | 163 |
150 | 164 |
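Reviewer note: in both flush paths, deleting the CompilationInfo is enough to reclaim the whole job, because the OptimizingCompiler itself is allocated in the info's zone, as the comments above say. A simplified ownership sketch; the types here are stand-ins, not the V8 classes:

    #include <memory>

    struct CompilationInfoStub;

    struct OptimizingCompilerStub {
      CompilationInfoStub* info;  // back pointer, not owning
    };

    struct CompilationInfoStub {
      // Zone-style ownership: the compiler object lives inside its info,
      // so destroying the info releases the compiler as well.
      std::unique_ptr<OptimizingCompilerStub> compiler;
    };

    void DiscardJob(OptimizingCompilerStub* compiler) {
      delete compiler->info;  // frees 'compiler' too; don't touch it after
    }
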
151 void OptimizingCompilerThread::Flush() { | 165 void OptimizingCompilerThread::Flush() { |
152 ASSERT(!IsOptimizerThread()); | 166 ASSERT(!IsOptimizerThread()); |
153 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); | 167 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); |
154 input_queue_semaphore_->Signal(); | 168 input_queue_semaphore_->Signal(); |
155 stop_semaphore_->Wait(); | 169 stop_semaphore_->Wait(); |
156 FlushOutputQueue(true); | 170 FlushOutputQueue(true); |
157 } | 171 } |
(...skipping 14 matching lines...)
172 } else { | 186 } else { |
173 FlushInputQueue(false); | 187 FlushInputQueue(false); |
174 FlushOutputQueue(false); | 188 FlushOutputQueue(false); |
175 } | 189 } |
176 | 190 |
177 if (FLAG_trace_concurrent_recompilation) { | 191 if (FLAG_trace_concurrent_recompilation) { |
178 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); | 192 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); |
179 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); | 193 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); |
180 } | 194 } |
181 | 195 |
| 196 if (FLAG_trace_osr && FLAG_speculative_concurrent_osr) { |
| 197 PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_); |
| 198 } |
| 199 |
182 Join(); | 200 Join(); |
183 } | 201 } |
184 | 202 |
185 | 203 |
186 void OptimizingCompilerThread::InstallOptimizedFunctions() { | 204 void OptimizingCompilerThread::InstallOptimizedFunctions() { |
187 ASSERT(!IsOptimizerThread()); | 205 ASSERT(!IsOptimizerThread()); |
188 HandleScope handle_scope(isolate_); | 206 HandleScope handle_scope(isolate_); |
189 OptimizingCompiler* compiler; | 207 OptimizingCompiler* compiler; |
190 while (true) { | 208 while (true) { |
191 { // Memory barrier to ensure marked functions are queued. | 209 { // Memory barrier to ensure marked functions are queued. |
192 LockGuard<Mutex> marked_and_queued(&install_mutex_); | 210 LockGuard<Mutex> marked_and_queued(&install_mutex_); |
193 if (!output_queue_.Dequeue(&compiler)) return; | 211 if (!output_queue_.Dequeue(&compiler)) break; |
194 } | 212 } |
195 Compiler::InstallOptimizedCode(compiler); | 213 Compiler::InstallOptimizedCode(compiler); |
196 } | 214 } |
| 215 |
| 216 // Remove the oldest OSR candidates that are ready so that we |
| 217 // only have a limited number of them waiting. |
| 218 if (FLAG_speculative_concurrent_osr) RemoveStaleOSRCandidates(); |
197 } | 219 } |
198 | 220 |
199 | 221 |
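Reviewer note: correctness here hinges on install_mutex_ being held both when CompileNext() marks and enqueues a result and when it is dequeued above, so this thread can never observe a queued job whose closure has not been marked yet. The handshake in isolation, with std:: stand-ins and illustrative names:

    #include <mutex>
    #include <queue>

    std::mutex install_mutex;
    std::queue<int> output;  // int stands in for a compile job

    void MarkAndEnqueue(int job) {
      std::lock_guard<std::mutex> lock(install_mutex);
      // mark(job) would go here: same critical section as the enqueue.
      output.push(job);
    }

    bool DequeueMarked(int* job) {
      // Taking the same lock orders this dequeue after the mark+enqueue.
      std::lock_guard<std::mutex> lock(install_mutex);
      if (output.empty()) return false;
      *job = output.front();
      output.pop();
      return true;
    }
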
200 void OptimizingCompilerThread::QueueForOptimization( | 222 void OptimizingCompilerThread::QueueForOptimization( |
201 OptimizingCompiler* optimizing_compiler) { | 223 OptimizingCompiler* optimizing_compiler) { |
202 ASSERT(IsQueueAvailable()); | 224 ASSERT(IsQueueAvailable()); |
203 ASSERT(!IsOptimizerThread()); | 225 ASSERT(!IsOptimizerThread()); |
204 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); | 226 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); |
205 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); | 227 if (optimizing_compiler->info()->osr_ast_id().IsNone()) { |
| 228 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); |
| 229 } else { |
| 230 LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); |
| 231 ASSERT(FLAG_speculative_concurrent_osr); |
| 232 osr_candidates_.Add(optimizing_compiler); |
| 233 osr_attempts_++; |
| 234 } |
206 input_queue_.Enqueue(optimizing_compiler); | 235 input_queue_.Enqueue(optimizing_compiler); |
207 input_queue_semaphore_->Signal(); | 236 input_queue_semaphore_->Signal(); |
208 } | 237 } |
209 | 238 |
210 | 239 |
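Reviewer note: the notable part of the split above is what does not happen for OSR requests: the closure is not marked as in-queue, so the function keeps running its unoptimized code while the compile proceeds speculatively, and the candidate list plus the osr_attempts_ counter do the bookkeeping instead. Producer-side sketch (stand-in types, illustrative names):

    #include <mutex>
    #include <queue>
    #include <vector>

    struct Job { bool is_osr; };

    std::mutex osr_list_mutex;
    std::vector<Job*> osr_candidates;
    std::queue<Job*> input_queue;
    int osr_attempts = 0;

    void QueueJob(Job* job) {
      if (!job->is_osr) {
        // Normal path: the closure would be marked as queued here.
      } else {
        // Speculative OSR: record the request, leave the closure unmarked.
        std::lock_guard<std::mutex> lock(osr_list_mutex);
        osr_candidates.push_back(job);
        ++osr_attempts;
      }
      input_queue.push(job);
      // ...and signal the input-queue semaphore.
    }
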
| 240 OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate( |
| 241 Handle<JSFunction> function, uint32_t osr_pc_offset) { |
| 242 ASSERT(!IsOptimizerThread()); |
| 243 ASSERT(FLAG_speculative_concurrent_osr); |
| 244 LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); |
| 245 for (int i = 0; i < ready_for_osr_.length(); i++) { |
| 246 if (ready_for_osr_[i]->osr_pc_offset() == osr_pc_offset && |
| 247 ready_for_osr_[i]->info()->closure().is_identical_to(function)) { |
| 248 osr_hits_++; |
| 249 return ready_for_osr_.Remove(i); |
| 250 } |
| 251 } |
| 252 return NULL; |
| 253 } |
| 254 |
| 255 |
| 256 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, |
| 257 uint32_t osr_pc_offset) { |
| 258 ASSERT(!IsOptimizerThread()); |
| 259 ASSERT(FLAG_speculative_concurrent_osr); |
| 260 LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); |
| 261 for (int i = 0; i < osr_candidates_.length(); i++) { |
| 262 if (osr_candidates_[i]->osr_pc_offset() == osr_pc_offset && |
| 263 osr_candidates_[i]->info()->closure().is_identical_to(function)) { |
| 264 return true; |
| 265 } |
| 266 } |
| 267 return false; |
| 268 } |
| 269 |
| 270 |
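Reviewer note: a hypothetical main-thread caller, e.g. at a hot loop back edge, would combine the two lookups above roughly as follows. The surrounding runtime code is not part of this patch, and QueueOSRCompile is an assumed helper, shown only for illustration:

    // Hypothetical use at an OSR point -- not part of this patch.
    void OnBackEdge(Handle<JSFunction> function, uint32_t pc_offset,
                    OptimizingCompilerThread* thread) {
      OptimizingCompiler* job =
          thread->FindReadyOSRCandidate(function, pc_offset);
      if (job != NULL) {
        // A speculative compile already finished: install and enter it.
        Compiler::InstallOptimizedCode(job);
      } else if (!thread->IsQueuedForOSR(function, pc_offset)) {
        // Not requested yet: queue a speculative compile and keep running
        // the unoptimized code in the meantime.
        QueueOSRCompile(function, pc_offset);  // assumed helper
      }
    }
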
| 271 void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) { |
| 272 ASSERT(!IsOptimizerThread()); |
| 273 ASSERT(FLAG_speculative_concurrent_osr); |
| 274 LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); |
| 275 while (ready_for_osr_.length() > limit) { |
| 276 OptimizingCompiler* compiler = ready_for_osr_.Remove(0); |
| 277 CompilationInfo* throw_away = compiler->info(); |
| 278 if (FLAG_trace_osr) { |
| 279 PrintF(" ** Discarded "); |
| 280 throw_away->closure()->PrintName(); |
| 281 PrintF(" that has been concurrently compiled for OSR at %d.\n", |
| 282 throw_away->osr_ast_id().ToInt()); |
| 283 } |
| 284 delete throw_away; |
| 285 } |
| 286 } |
| 287 |
| 288 |
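Reviewer note: RemoveStaleOSRCandidates() trims from index 0, so it relies on ready_for_osr_ being ordered oldest-first, and the zero-argument call in InstallOptimizedFunctions() presumably picks up a default limit declared in the header (not shown in this diff). The eviction policy in isolation, with a std::deque stand-in:

    #include <deque>

    // Keep only the newest 'limit' ready candidates; the list is ordered
    // oldest-first, so evict from the front.
    void TrimOldest(std::deque<int>* ready, int limit) {
      while (static_cast<int>(ready->size()) > limit) {
        // The real code also deletes the candidate's CompilationInfo here.
        ready->pop_front();
      }
    }
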
211 #ifdef DEBUG | 289 #ifdef DEBUG |
212 bool OptimizingCompilerThread::IsOptimizerThread() { | 290 bool OptimizingCompilerThread::IsOptimizerThread() { |
213 if (!FLAG_concurrent_recompilation) return false; | 291 if (!FLAG_concurrent_recompilation) return false; |
214 LockGuard<Mutex> lock_guard(&thread_id_mutex_); | 292 LockGuard<Mutex> lock_guard(&thread_id_mutex_); |
215 return ThreadId::Current().ToInteger() == thread_id_; | 293 return ThreadId::Current().ToInteger() == thread_id_; |
216 } | 294 } |
217 #endif | 295 #endif |
218 | 296 |
219 | 297 |
220 } } // namespace v8::internal | 298 } } // namespace v8::internal |