OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 75 matching lines...)
86 | 86 |
87 CompileNext(); | 87 CompileNext(); |
88 | 88 |
89 if (FLAG_trace_concurrent_recompilation) { | 89 if (FLAG_trace_concurrent_recompilation) { |
90 time_spent_compiling_ += compiling_timer.Elapsed(); | 90 time_spent_compiling_ += compiling_timer.Elapsed(); |
91 } | 91 } |
92 } | 92 } |
93 } | 93 } |
94 | 94 |
95 | 95 |
| 96 RecompileJob* OptimizingCompilerThread::NextInput() { |
| 97 LockGuard<Mutex> access_input_queue_(&input_queue_mutex_); |
| 98 if (input_queue_length_ == 0) return NULL; |
| 99 RecompileJob* job = input_queue_[InputQueueIndex(0)]; |
| 100 ASSERT_NE(NULL, job); |
| 101 input_queue_shift_ = InputQueueIndex(1); |
| 102 input_queue_length_--; |
| 103 return job; |
| 104 } |
| 105 |
| 106 |
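The new NextInput() treats input_queue_ as a ring buffer: input_queue_shift_ marks the physical slot of the logical head, and InputQueueIndex() (defined outside this hunk) evidently maps a logical position i to a physical slot by wrapping around the capacity. Below is a minimal standalone sketch of that indexing, under the assumption that InputQueueIndex(i) computes (input_queue_shift_ + i) % input_queue_capacity_; all names are illustrative, not V8's.

    #include <cstdio>

    // Illustrative ring queue; mirrors the dequeue step in NextInput().
    struct RingQueue {
      static const int kCapacity = 8;
      int slots[kCapacity];
      int shift;    // physical index of the logical head
      int length;   // number of live elements
      RingQueue() : shift(0), length(0) {}

      // Assumed shape of InputQueueIndex(): logical -> physical slot.
      int Index(int i) const { return (shift + i) % kCapacity; }

      // NextInput() equivalent: detach the head, advance the shift.
      bool Dequeue(int* out) {
        if (length == 0) return false;  // NextInput() returns NULL here
        *out = slots[Index(0)];
        shift = Index(1);               // head moves forward one slot
        length--;
        return true;
      }
    };

    int main() {
      RingQueue q;
      q.slots[0] = 42;
      q.length = 1;
      int job;
      if (q.Dequeue(&job)) std::printf("dequeued %d\n", job);  // prints 42
      return 0;
    }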
96 void OptimizingCompilerThread::CompileNext() { | 107 void OptimizingCompilerThread::CompileNext() { |
97 RecompileJob* job = NULL; | 108 RecompileJob* job = NextInput(); |
98 bool result = input_queue_.Dequeue(&job); | 109 ASSERT_NE(NULL, job); |
99 USE(result); | |
100 ASSERT(result); | |
101 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); | |
102 | 110 |
103 // The function may have already been optimized by OSR. Simply continue. | 111 // The function may have already been optimized by OSR. Simply continue. |
104 RecompileJob::Status status = job->OptimizeGraph(); | 112 RecompileJob::Status status = job->OptimizeGraph(); |
105 USE(status); // Prevent an unused-variable error in release mode. | 113 USE(status); // Prevent an unused-variable error in release mode. |
106 ASSERT(status != RecompileJob::FAILED); | 114 ASSERT(status != RecompileJob::FAILED); |
107 | 115 |
108 // The function may have already been optimized by OSR. Simply continue. | 116 // The function may have already been optimized by OSR. Simply continue. |
109 // Use a mutex to make sure that functions marked for install | 117 // Use a mutex to make sure that functions marked for install |
110 // are always also queued. | 118 // are always also queued. |
111 output_queue_.Enqueue(job); | 119 output_queue_.Enqueue(job); |
(...skipping 12 matching lines...)
124 Handle<JSFunction> function = info->closure(); | 132 Handle<JSFunction> function = info->closure(); |
125 function->ReplaceCode(function->shared()->code()); | 133 function->ReplaceCode(function->shared()->code()); |
126 } | 134 } |
127 } | 135 } |
128 delete info; | 136 delete info; |
129 } | 137 } |
130 | 138 |
131 | 139 |
132 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 140 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
133 RecompileJob* job; | 141 RecompileJob* job; |
134 while (input_queue_.Dequeue(&job)) { | 142 while ((job = NextInput())) { |
135 // This should not block, since we have one signal on the input queue | 143 // This should not block, since we have one signal on the input queue |
136 // semaphore corresponding to each element in the input queue. | 144 // semaphore corresponding to each element in the input queue. |
137 input_queue_semaphore_.Wait(); | 145 input_queue_semaphore_.Wait(); |
138 // OSR jobs are dealt with separately. | 146 // OSR jobs are dealt with separately. |
139 if (!job->info()->is_osr()) { | 147 if (!job->info()->is_osr()) { |
140 DisposeRecompileJob(job, restore_function_code); | 148 DisposeRecompileJob(job, restore_function_code); |
141 } | 149 } |
142 } | 150 } |
143 Release_Store(&queue_length_, static_cast<AtomicWord>(0)); | |
144 } | 151 } |
145 | 152 |
146 | 153 |
147 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { | 154 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { |
148 RecompileJob* job; | 155 RecompileJob* job; |
149 while (output_queue_.Dequeue(&job)) { | 156 while (output_queue_.Dequeue(&job)) { |
150 // OSR jobs are dealt with separately. | 157 // OSR jobs are dealt with separately. |
151 if (!job->info()->is_osr()) { | 158 if (!job->info()->is_osr()) { |
152 DisposeRecompileJob(job, restore_function_code); | 159 DisposeRecompileJob(job, restore_function_code); |
153 } | 160 } |
154 } | 161 } |
155 } | 162 } |
156 | 163 |
157 | 164 |
158 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { | 165 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { |
159 RecompileJob* job; | 166 for (int i = 0; i < osr_buffer_capacity_; i++) { |
160 for (int i = 0; i < osr_buffer_size_; i++) { | 167 if (osr_buffer_[i] != NULL) { |
161 job = osr_buffer_[i]; | 168 DisposeRecompileJob(osr_buffer_[i], restore_function_code); |
162 if (job != NULL) DisposeRecompileJob(job, restore_function_code); | 169 osr_buffer_[i] = NULL; |
| 170 } |
163 } | 171 } |
164 osr_cursor_ = 0; | |
165 } | 172 } |
166 | 173 |
167 | 174 |
168 void OptimizingCompilerThread::Flush() { | 175 void OptimizingCompilerThread::Flush() { |
169 ASSERT(!IsOptimizerThread()); | 176 ASSERT(!IsOptimizerThread()); |
170 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); | 177 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); |
171 if (FLAG_block_concurrent_recompilation) Unblock(); | 178 if (FLAG_block_concurrent_recompilation) Unblock(); |
172 input_queue_semaphore_.Signal(); | 179 input_queue_semaphore_.Signal(); |
173 stop_semaphore_.Wait(); | 180 stop_semaphore_.Wait(); |
174 FlushOutputQueue(true); | 181 FlushOutputQueue(true); |
175 if (FLAG_concurrent_osr) FlushOsrBuffer(true); | 182 if (FLAG_concurrent_osr) FlushOsrBuffer(true); |
176 if (FLAG_trace_concurrent_recompilation) { | 183 if (FLAG_trace_concurrent_recompilation) { |
177 PrintF(" ** Flushed concurrent recompilation queues.\n"); | 184 PrintF(" ** Flushed concurrent recompilation queues.\n"); |
178 } | 185 } |
179 } | 186 } |
180 | 187 |
181 | 188 |
182 void OptimizingCompilerThread::Stop() { | 189 void OptimizingCompilerThread::Stop() { |
183 ASSERT(!IsOptimizerThread()); | 190 ASSERT(!IsOptimizerThread()); |
184 Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); | 191 Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); |
185 if (FLAG_block_concurrent_recompilation) Unblock(); | 192 if (FLAG_block_concurrent_recompilation) Unblock(); |
186 input_queue_semaphore_.Signal(); | 193 input_queue_semaphore_.Signal(); |
187 stop_semaphore_.Wait(); | 194 stop_semaphore_.Wait(); |
188 | 195 |
189 if (FLAG_concurrent_recompilation_delay != 0) { | 196 if (FLAG_concurrent_recompilation_delay != 0) { |
190 // Barrier when loading queue length is not necessary since the write | 197 // At this point the optimizing compiler thread's event loop has stopped. |
191 // happens in CompileNext on the same thread. | 198 // There is no need for a mutex when reading input_queue_length_. |
192 // This is used only for testing. | 199 while (input_queue_length_ > 0) CompileNext(); |
193 while (NoBarrier_Load(&queue_length_) > 0) CompileNext(); | |
194 InstallOptimizedFunctions(); | 200 InstallOptimizedFunctions(); |
195 } else { | 201 } else { |
196 FlushInputQueue(false); | 202 FlushInputQueue(false); |
197 FlushOutputQueue(false); | 203 FlushOutputQueue(false); |
198 } | 204 } |
199 | 205 |
200 if (FLAG_concurrent_osr) FlushOsrBuffer(false); | 206 if (FLAG_concurrent_osr) FlushOsrBuffer(false); |
201 | 207 |
202 if (FLAG_trace_concurrent_recompilation) { | 208 if (FLAG_trace_concurrent_recompilation) { |
203 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); | 209 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); |
(...skipping 28 matching lines...)
232 } else { | 238 } else { |
233 Compiler::InstallOptimizedCode(job); | 239 Compiler::InstallOptimizedCode(job); |
234 } | 240 } |
235 } | 241 } |
236 } | 242 } |
237 | 243 |
238 | 244 |
239 void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) { | 245 void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) { |
240 ASSERT(IsQueueAvailable()); | 246 ASSERT(IsQueueAvailable()); |
241 ASSERT(!IsOptimizerThread()); | 247 ASSERT(!IsOptimizerThread()); |
242 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); | |
243 CompilationInfo* info = job->info(); | 248 CompilationInfo* info = job->info(); |
244 if (info->is_osr()) { | 249 if (info->is_osr()) { |
245 if (FLAG_trace_concurrent_recompilation) { | 250 if (FLAG_trace_concurrent_recompilation) { |
246 PrintF(" ** Queueing "); | 251 PrintF(" ** Queueing "); |
247 info->closure()->PrintName(); | 252 info->closure()->PrintName(); |
248 PrintF(" for concurrent on-stack replacement.\n"); | 253 PrintF(" for concurrent on-stack replacement.\n"); |
249 } | 254 } |
250 AddToOsrBuffer(job); | |
251 osr_attempts_++; | 255 osr_attempts_++; |
252 BackEdgeTable::AddStackCheck(info); | 256 BackEdgeTable::AddStackCheck(info); |
| 257 AddToOsrBuffer(job); |
| 258 // Add job to the front of the input queue. |
| 259 LockGuard<Mutex> access_input_queue(&input_queue_mutex_); |
| 260 ASSERT_LT(input_queue_length_, input_queue_capacity_); |
| 261 // Move shift_ back by one. |
| 262 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); |
| 263 input_queue_[InputQueueIndex(0)] = job; |
| 264 input_queue_length_++; |
253 } else { | 265 } else { |
254 info->closure()->MarkInRecompileQueue(); | 266 info->closure()->MarkInRecompileQueue(); |
| 267 // Add job to the back of the input queue. |
| 268 LockGuard<Mutex> access_input_queue(&input_queue_mutex_); |
| 269 ASSERT_LT(input_queue_length_, input_queue_capacity_); |
| 270 input_queue_[InputQueueIndex(input_queue_length_)] = job; |
| 271 input_queue_length_++; |
255 } | 272 } |
256 input_queue_.Enqueue(job); | |
257 if (FLAG_block_concurrent_recompilation) { | 273 if (FLAG_block_concurrent_recompilation) { |
258 blocked_jobs_++; | 274 blocked_jobs_++; |
259 } else { | 275 } else { |
260 input_queue_semaphore_.Signal(); | 276 input_queue_semaphore_.Signal(); |
261 } | 277 } |
262 } | 278 } |
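The two insertion paths above share the same wrap-around arithmetic. For OSR jobs, InputQueueIndex(input_queue_capacity_ - 1) evaluates to (input_queue_shift_ - 1) mod capacity without a negative intermediate, so assigning it back to input_queue_shift_ steps the head one slot backwards and makes InputQueueIndex(0) a fresh front position. A hedged sketch of both paths, continuing the illustrative (non-V8) names:

    #include <cassert>

    static const int kCapacity = 8;
    static int slots[kCapacity];
    static int shift = 0;   // physical index of the logical head
    static int length = 0;  // number of live elements

    // OSR path: step the head back one slot, wrapping without going
    // negative -- the value InputQueueIndex(kCapacity - 1) yields.
    void PushFront(int job) {
      assert(length < kCapacity);
      shift = (shift + kCapacity - 1) % kCapacity;
      slots[shift] = job;   // this slot is now Index(0), the head
      length++;
    }

    // Non-OSR path: append at the logical tail, Index(length).
    void PushBack(int job) {
      assert(length < kCapacity);
      slots[(shift + length) % kCapacity] = job;
      length++;
    }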
263 | 279 |
264 | 280 |
265 void OptimizingCompilerThread::Unblock() { | 281 void OptimizingCompilerThread::Unblock() { |
266 ASSERT(!IsOptimizerThread()); | 282 ASSERT(!IsOptimizerThread()); |
267 while (blocked_jobs_ > 0) { | 283 while (blocked_jobs_ > 0) { |
268 input_queue_semaphore_.Signal(); | 284 input_queue_semaphore_.Signal(); |
269 blocked_jobs_--; | 285 blocked_jobs_--; |
270 } | 286 } |
271 } | 287 } |
272 | 288 |
273 | 289 |
274 RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( | 290 RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( |
275 Handle<JSFunction> function, uint32_t osr_pc_offset) { | 291 Handle<JSFunction> function, uint32_t osr_pc_offset) { |
276 ASSERT(!IsOptimizerThread()); | 292 ASSERT(!IsOptimizerThread()); |
277 RecompileJob* result = NULL; | 293 for (int i = 0; i < osr_buffer_capacity_; i++) { |
278 for (int i = 0; i < osr_buffer_size_; i++) { | 294 RecompileJob* current = osr_buffer_[i]; |
279 result = osr_buffer_[i]; | 295 if (current != NULL && |
280 if (result == NULL) continue; | 296 current->IsWaitingForInstall() && |
281 if (result->IsWaitingForInstall() && | 297 current->info()->HasSameOsrEntry(function, osr_pc_offset)) { |
282 result->info()->HasSameOsrEntry(function, osr_pc_offset)) { | |
283 osr_hits_++; | 298 osr_hits_++; |
284 osr_buffer_[i] = NULL; | 299 osr_buffer_[i] = NULL; |
285 return result; | 300 return current; |
286 } | 301 } |
287 } | 302 } |
288 return NULL; | 303 return NULL; |
289 } | 304 } |
290 | 305 |
291 | 306 |
292 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, | 307 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, |
293 uint32_t osr_pc_offset) { | 308 uint32_t osr_pc_offset) { |
294 ASSERT(!IsOptimizerThread()); | 309 ASSERT(!IsOptimizerThread()); |
295 for (int i = 0; i < osr_buffer_size_; i++) { | 310 for (int i = 0; i < osr_buffer_capacity_; i++) { |
296 if (osr_buffer_[i] != NULL && | 311 RecompileJob* current = osr_buffer_[i]; |
297 osr_buffer_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) { | 312 if (current != NULL && |
298 return !osr_buffer_[i]->IsWaitingForInstall(); | 313 current->info()->HasSameOsrEntry(function, osr_pc_offset)) { |
| 314 return !current->IsWaitingForInstall(); |
299 } | 315 } |
300 } | 316 } |
301 return false; | 317 return false; |
302 } | 318 } |
303 | 319 |
304 | 320 |
305 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) { | 321 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) { |
306 ASSERT(!IsOptimizerThread()); | 322 ASSERT(!IsOptimizerThread()); |
307 for (int i = 0; i < osr_buffer_size_; i++) { | 323 for (int i = 0; i < osr_buffer_capacity_; i++) { |
308 if (osr_buffer_[i] != NULL && | 324 RecompileJob* current = osr_buffer_[i]; |
309 *osr_buffer_[i]->info()->closure() == function) { | 325 if (current != NULL && *current->info()->closure() == function) { |
310 return !osr_buffer_[i]->IsWaitingForInstall(); | 326 return !current->IsWaitingForInstall(); |
311 } | 327 } |
312 } | 328 } |
313 return false; | 329 return false; |
314 } | 330 } |
315 | 331 |
316 | 332 |
317 void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) { | 333 void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) { |
318 ASSERT(!IsOptimizerThread()); | 334 ASSERT(!IsOptimizerThread()); |
319 // Store into next empty slot or replace next stale OSR job that's waiting | 335 // Find the next slot that is empty or has a stale job. |
320 // in vain. Dispose in the latter case. | |
321 RecompileJob* stale; | |
322 while (true) { | 336 while (true) { |
323 stale = osr_buffer_[osr_cursor_]; | 337 RecompileJob* stale = osr_buffer_[osr_buffer_cursor_]; |
324 if (stale == NULL) break; | 338 if (stale == NULL || stale->IsWaitingForInstall()) break; |
325 if (stale->IsWaitingForInstall()) { | 339 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; |
326 CompilationInfo* info = stale->info(); | |
327 if (FLAG_trace_osr) { | |
328 PrintF("[COSR - Discarded "); | |
329 info->closure()->PrintName(); | |
330 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); | |
331 } | |
332 DisposeRecompileJob(stale, false); | |
333 break; | |
334 } | |
335 AdvanceOsrCursor(); | |
336 } | 340 } |
337 | 341 |
338 osr_buffer_[osr_cursor_] = job; | 342 // Add to found slot and dispose the evicted job. |
339 AdvanceOsrCursor(); | 343 RecompileJob* evicted = osr_buffer_[osr_buffer_cursor_]; |
| 344 if (evicted != NULL) { |
| 345 ASSERT(evicted->IsWaitingForInstall()); |
| 346 CompilationInfo* info = evicted->info(); |
| 347 if (FLAG_trace_osr) { |
| 348 PrintF("[COSR - Discarded "); |
| 349 info->closure()->PrintName(); |
| 350 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); |
| 351 } |
| 352 DisposeRecompileJob(evicted, false); |
| 353 } |
| 354 osr_buffer_[osr_buffer_cursor_] = job; |
| 355 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; |
340 } | 356 } |
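The rewritten AddToOsrBuffer() splits the work into two steps: first scan for a slot it may overwrite (empty, or holding a stale job that has finished compiling and is only waiting for install), then dispose the evicted occupant and store. A condensed sketch of the victim search with illustrative types; it assumes the buffer always contains at least one empty or stale slot, which the capacity sizing is presumably chosen to guarantee:

    #include <cstddef>

    // Illustrative stand-in for RecompileJob's install state.
    struct Job { bool waiting_for_install; };

    // Walk the ring from `cursor` until a slot is empty or stale.
    int FindVictimSlot(Job** buffer, int capacity, int cursor) {
      while (buffer[cursor] != NULL &&
             !buffer[cursor]->waiting_for_install) {
        cursor = (cursor + 1) % capacity;  // skip in-flight jobs
      }
      return cursor;  // caller disposes any stale occupant, then stores
    }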
341 | 357 |
342 | 358 |
343 #ifdef DEBUG | 359 #ifdef DEBUG |
344 bool OptimizingCompilerThread::IsOptimizerThread() { | 360 bool OptimizingCompilerThread::IsOptimizerThread() { |
345 if (!FLAG_concurrent_recompilation) return false; | 361 if (!FLAG_concurrent_recompilation) return false; |
346 LockGuard<Mutex> lock_guard(&thread_id_mutex_); | 362 LockGuard<Mutex> lock_guard(&thread_id_mutex_); |
347 return ThreadId::Current().ToInteger() == thread_id_; | 363 return ThreadId::Current().ToInteger() == thread_id_; |
348 } | 364 } |
349 #endif | 365 #endif |
350 | 366 |
351 | 367 |
352 } } // namespace v8::internal | 368 } } // namespace v8::internal |