Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(144)

Side by Side Diff: src/optimizing-compiler-thread.cc

Issue 25505002: Improve queuing for concurrent OSR. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: inlined datastructures at call sites. Created 7 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
86 86
87 CompileNext(); 87 CompileNext();
88 88
89 if (FLAG_trace_concurrent_recompilation) { 89 if (FLAG_trace_concurrent_recompilation) {
90 time_spent_compiling_ += compiling_timer.Elapsed(); 90 time_spent_compiling_ += compiling_timer.Elapsed();
91 } 91 }
92 } 92 }
93 } 93 }
94 94
95 95
96 RecompileJob* OptimizingCompilerThread::NextInput() {
97 LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
98 if (input_queue_length_ == 0) return NULL;
99 RecompileJob* job = input_queue_[InputQueueIndex(0)];
100 ASSERT_NE(NULL, job);
101 input_queue_shift_ = InputQueueIndex(1);
102 input_queue_length_--;
103 return job;
104 }
105
106
96 void OptimizingCompilerThread::CompileNext() { 107 void OptimizingCompilerThread::CompileNext() {
97 RecompileJob* job = NULL; 108 RecompileJob* job = NextInput();
98 bool result = input_queue_.Dequeue(&job); 109 ASSERT_NE(NULL, job);
99 USE(result);
100 ASSERT(result);
101 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
102 110
103 // The function may have already been optimized by OSR. Simply continue. 111 // The function may have already been optimized by OSR. Simply continue.
104 RecompileJob::Status status = job->OptimizeGraph(); 112 RecompileJob::Status status = job->OptimizeGraph();
105 USE(status); // Prevent an unused-variable error in release mode. 113 USE(status); // Prevent an unused-variable error in release mode.
106 ASSERT(status != RecompileJob::FAILED); 114 ASSERT(status != RecompileJob::FAILED);
107 115
108 // The function may have already been optimized by OSR. Simply continue. 116 // The function may have already been optimized by OSR. Simply continue.
109 // Use a mutex to make sure that functions marked for install 117 // Use a mutex to make sure that functions marked for install
110 // are always also queued. 118 // are always also queued.
111 output_queue_.Enqueue(job); 119 output_queue_.Enqueue(job);
(...skipping 12 matching lines...) Expand all
124 Handle<JSFunction> function = info->closure(); 132 Handle<JSFunction> function = info->closure();
125 function->ReplaceCode(function->shared()->code()); 133 function->ReplaceCode(function->shared()->code());
126 } 134 }
127 } 135 }
128 delete info; 136 delete info;
129 } 137 }
130 138
131 139
132 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { 140 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
133 RecompileJob* job; 141 RecompileJob* job;
134 while (input_queue_.Dequeue(&job)) { 142 while ((job = NextInput())) {
135 // This should not block, since we have one signal on the input queue 143 // This should not block, since we have one signal on the input queue
136 // semaphore corresponding to each element in the input queue. 144 // semaphore corresponding to each element in the input queue.
137 input_queue_semaphore_.Wait(); 145 input_queue_semaphore_.Wait();
138 // OSR jobs are dealt with separately. 146 // OSR jobs are dealt with separately.
139 if (!job->info()->is_osr()) { 147 if (!job->info()->is_osr()) {
140 DisposeRecompileJob(job, restore_function_code); 148 DisposeRecompileJob(job, restore_function_code);
141 } 149 }
142 } 150 }
143 Release_Store(&queue_length_, static_cast<AtomicWord>(0));
144 } 151 }
145 152
146 153
147 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { 154 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
148 RecompileJob* job; 155 RecompileJob* job;
149 while (output_queue_.Dequeue(&job)) { 156 while (output_queue_.Dequeue(&job)) {
150 // OSR jobs are dealt with separately. 157 // OSR jobs are dealt with separately.
151 if (!job->info()->is_osr()) { 158 if (!job->info()->is_osr()) {
152 DisposeRecompileJob(job, restore_function_code); 159 DisposeRecompileJob(job, restore_function_code);
153 } 160 }
154 } 161 }
155 } 162 }
156 163
157 164
158 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { 165 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
159 RecompileJob* job; 166 for (int i = 0; i < osr_buffer_capacity_; i++) {
160 for (int i = 0; i < osr_buffer_size_; i++) { 167 if (osr_buffer_[i] != NULL) {
161 job = osr_buffer_[i]; 168 DisposeRecompileJob(osr_buffer_[i], restore_function_code);
162 if (job != NULL) DisposeRecompileJob(job, restore_function_code); 169 osr_buffer_[i] = NULL;
170 }
163 } 171 }
164 osr_cursor_ = 0;
165 } 172 }
166 173
167 174
168 void OptimizingCompilerThread::Flush() { 175 void OptimizingCompilerThread::Flush() {
169 ASSERT(!IsOptimizerThread()); 176 ASSERT(!IsOptimizerThread());
170 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); 177 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
171 input_queue_semaphore_.Signal(); 178 input_queue_semaphore_.Signal();
172 stop_semaphore_.Wait(); 179 stop_semaphore_.Wait();
173 FlushOutputQueue(true); 180 FlushOutputQueue(true);
174 if (FLAG_concurrent_osr) FlushOsrBuffer(true); 181 if (FLAG_concurrent_osr) FlushOsrBuffer(true);
175 if (FLAG_trace_concurrent_recompilation) { 182 if (FLAG_trace_concurrent_recompilation) {
176 PrintF(" ** Flushed concurrent recompilation queues.\n"); 183 PrintF(" ** Flushed concurrent recompilation queues.\n");
177 } 184 }
178 } 185 }
179 186
180 187
181 void OptimizingCompilerThread::Stop() { 188 void OptimizingCompilerThread::Stop() {
182 ASSERT(!IsOptimizerThread()); 189 ASSERT(!IsOptimizerThread());
183 Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); 190 Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
184 input_queue_semaphore_.Signal(); 191 input_queue_semaphore_.Signal();
185 stop_semaphore_.Wait(); 192 stop_semaphore_.Wait();
186 193
187 if (FLAG_concurrent_recompilation_delay != 0) { 194 if (FLAG_concurrent_recompilation_delay != 0) {
188 // Barrier when loading queue length is not necessary since the write 195 // At this point the optimizing compiler thread's event loop has stopped.
189 // happens in CompileNext on the same thread. 196 // There is no need for a mutex when reading input_queue_length_.
190 // This is used only for testing. 197 while (input_queue_length_ > 0) CompileNext();
191 while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
192 InstallOptimizedFunctions(); 198 InstallOptimizedFunctions();
193 } else { 199 } else {
194 FlushInputQueue(false); 200 FlushInputQueue(false);
195 FlushOutputQueue(false); 201 FlushOutputQueue(false);
196 } 202 }
197 203
198 if (FLAG_concurrent_osr) FlushOsrBuffer(false); 204 if (FLAG_concurrent_osr) FlushOsrBuffer(false);
199 205
200 if (FLAG_trace_concurrent_recompilation) { 206 if (FLAG_trace_concurrent_recompilation) {
201 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); 207 double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
(...skipping 28 matching lines...) Expand all
230 } else { 236 } else {
231 Compiler::InstallOptimizedCode(job); 237 Compiler::InstallOptimizedCode(job);
232 } 238 }
233 } 239 }
234 } 240 }
235 241
236 242
237 void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) { 243 void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
238 ASSERT(IsQueueAvailable()); 244 ASSERT(IsQueueAvailable());
239 ASSERT(!IsOptimizerThread()); 245 ASSERT(!IsOptimizerThread());
240 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
241 CompilationInfo* info = job->info(); 246 CompilationInfo* info = job->info();
242 if (info->is_osr()) { 247 if (info->is_osr()) {
243 if (FLAG_trace_concurrent_recompilation) { 248 if (FLAG_trace_concurrent_recompilation) {
244 PrintF(" ** Queueing "); 249 PrintF(" ** Queueing ");
245 info->closure()->PrintName(); 250 info->closure()->PrintName();
246 PrintF(" for concurrent on-stack replacement.\n"); 251 PrintF(" for concurrent on-stack replacement.\n");
247 } 252 }
248 AddToOsrBuffer(job);
249 osr_attempts_++; 253 osr_attempts_++;
250 BackEdgeTable::AddStackCheck(info); 254 BackEdgeTable::AddStackCheck(info);
255 AddToOsrBuffer(job);
256 // Add job to the front of the input queue.
257 LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
258 ASSERT_LT(input_queue_length_, input_queue_capacity_);
259 // Move shift_ back by one.
260 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
261 input_queue_[InputQueueIndex(0)] = job;
262 input_queue_length_++;
251 } else { 263 } else {
252 info->closure()->MarkInRecompileQueue(); 264 info->closure()->MarkInRecompileQueue();
265 // Add job to the back of the input queue.
266 LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
267 ASSERT_LT(input_queue_length_, input_queue_capacity_);
268 input_queue_[InputQueueIndex(input_queue_length_)] = job;
269 input_queue_length_++;
253 } 270 }
254 input_queue_.Enqueue(job);
255 input_queue_semaphore_.Signal(); 271 input_queue_semaphore_.Signal();
256 } 272 }
257 273
258 274
259 RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( 275 RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
260 Handle<JSFunction> function, uint32_t osr_pc_offset) { 276 Handle<JSFunction> function, uint32_t osr_pc_offset) {
261 ASSERT(!IsOptimizerThread()); 277 ASSERT(!IsOptimizerThread());
262 RecompileJob* result = NULL; 278 for (int i = 0; i < osr_buffer_capacity_; i++) {
263 for (int i = 0; i < osr_buffer_size_; i++) { 279 RecompileJob* current = osr_buffer_[i];
264 result = osr_buffer_[i]; 280 if (current != NULL &&
265 if (result == NULL) continue; 281 current->IsWaitingForInstall() &&
266 if (result->IsWaitingForInstall() && 282 current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
267 result->info()->HasSameOsrEntry(function, osr_pc_offset)) {
268 osr_hits_++; 283 osr_hits_++;
269 osr_buffer_[i] = NULL; 284 osr_buffer_[i] = NULL;
270 return result; 285 return current;
271 } 286 }
272 } 287 }
273 return NULL; 288 return NULL;
274 } 289 }
275 290
276 291
277 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, 292 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
278 uint32_t osr_pc_offset) { 293 uint32_t osr_pc_offset) {
279 ASSERT(!IsOptimizerThread()); 294 ASSERT(!IsOptimizerThread());
280 for (int i = 0; i < osr_buffer_size_; i++) { 295 for (int i = 0; i < osr_buffer_capacity_; i++) {
281 if (osr_buffer_[i] != NULL && 296 RecompileJob* current = osr_buffer_[i];
282 osr_buffer_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) { 297 if (current != NULL &&
283 return !osr_buffer_[i]->IsWaitingForInstall(); 298 current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
299 return !current->IsWaitingForInstall();
284 } 300 }
285 } 301 }
286 return false; 302 return false;
287 } 303 }
288 304
289 305
290 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) { 306 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
291 ASSERT(!IsOptimizerThread()); 307 ASSERT(!IsOptimizerThread());
292 for (int i = 0; i < osr_buffer_size_; i++) { 308 for (int i = 0; i < osr_buffer_capacity_; i++) {
293 if (osr_buffer_[i] != NULL && 309 RecompileJob* current = osr_buffer_[i];
294 *osr_buffer_[i]->info()->closure() == function) { 310 if (current != NULL && *current->info()->closure() == function) {
295 return !osr_buffer_[i]->IsWaitingForInstall(); 311 return !current->IsWaitingForInstall();
296 } 312 }
297 } 313 }
298 return false; 314 return false;
299 } 315 }
300 316
301 317
302 void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) { 318 void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
303 ASSERT(!IsOptimizerThread()); 319 ASSERT(!IsOptimizerThread());
304 // Store into next empty slot or replace next stale OSR job that's waiting 320 // Find the next slot that is empty or has a stale job.
305 // in vain. Dispose in the latter case.
306 RecompileJob* stale;
307 while (true) { 321 while (true) {
308 stale = osr_buffer_[osr_cursor_]; 322 RecompileJob* stale = osr_buffer_[osr_buffer_cursor_];
309 if (stale == NULL) break; 323 if (stale == NULL || stale->IsWaitingForInstall()) break;
310 if (stale->IsWaitingForInstall()) { 324 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
311 CompilationInfo* info = stale->info();
312 if (FLAG_trace_osr) {
313 PrintF("[COSR - Discarded ");
314 info->closure()->PrintName();
315 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
316 }
317 DisposeRecompileJob(stale, false);
318 break;
319 }
320 AdvanceOsrCursor();
321 } 325 }
322 326
323 osr_buffer_[osr_cursor_] = job; 327 // Add to found slot and dispose the evicted job.
324 AdvanceOsrCursor(); 328 RecompileJob* evicted = osr_buffer_[osr_buffer_cursor_];
329 if (evicted != NULL) {
330 ASSERT(evicted->IsWaitingForInstall());
331 CompilationInfo* info = evicted->info();
332 if (FLAG_trace_osr) {
333 PrintF("[COSR - Discarded ");
334 info->closure()->PrintName();
335 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
336 }
337 DisposeRecompileJob(evicted, false);
338 }
339 osr_buffer_[osr_buffer_cursor_] = job;
340 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
325 } 341 }
326 342
327 343
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
  // Debug-only check: are we currently running on the compiler thread?
  if (!FLAG_concurrent_recompilation) return false;
  LockGuard<Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
335 351
336 352
337 } } // namespace v8::internal 353 } } // namespace v8::internal
OLDNEW
« src/optimizing-compiler-thread.h ('K') | « src/optimizing-compiler-thread.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698