Chromium Code Reviews

Unified Diff: src/optimizing-compiler-thread.cc

Issue 23710014: Introduce concurrent on-stack replacement. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: addressed comments | Created 7 years, 3 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided

(...skipping 90 matching lines...)

   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));

   // The function may have already been optimized by OSR. Simply continue.
   OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
   USE(status);   // Prevent an unused-variable error in release mode.
   ASSERT(status != OptimizingCompiler::FAILED);

   // The function may have already been optimized by OSR. Simply continue.
   // Use a mutex to make sure that functions marked for install
   // are always also queued.
-  LockGuard<Mutex> mark_and_queue(&install_mutex_);
-  { Heap::RelocationLock relocation_lock(isolate_->heap());
+  if (!optimizing_compiler->info()->osr_ast_id().IsNone()) {
+    ASSERT(FLAG_concurrent_osr);
+    LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+    osr_candidates_.RemoveElement(optimizing_compiler);
+    ready_for_osr_.Add(optimizing_compiler);
+  } else {
+    LockGuard<Mutex> mark_and_queue(&install_mutex_);
+    Heap::RelocationLock relocation_lock(isolate_->heap());
     AllowHandleDereference ahd;
     optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+    output_queue_.Enqueue(optimizing_compiler);
   }
-  output_queue_.Enqueue(optimizing_compiler);
 }

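The new branch above is the core of the change: when the finished job is an OSR compile (its osr_ast_id() is set), the compiler thread does not mark the closure or push the job onto output_queue_; instead it moves the job from osr_candidates_ to ready_for_osr_ under osr_list_mutex_, where it waits until the still-running function asks for it at a loop back edge. A minimal standalone sketch of that pending-to-ready hand-off, written in plain C++11 with invented names (OsrJob, OsrList) rather than V8 types, might look like this:

// Illustrative model only, not V8 code: OsrJob/OsrList stand in for the
// OptimizingCompiler entries and the osr_candidates_/ready_for_osr_ lists.
#include <algorithm>
#include <cstdint>
#include <mutex>
#include <vector>

struct OsrJob {
  uintptr_t function;      // stands in for the JSFunction being optimized
  uint32_t osr_pc_offset;  // the loop back-edge entry point requested
};

class OsrList {
 public:
  // Main thread: an OSR compile was requested and queued.
  void AddCandidate(const OsrJob& job) {
    std::lock_guard<std::mutex> lock(mutex_);
    pending_.push_back(job);
  }

  // Compiler thread: the compile finished; move it from pending to ready.
  void MarkReady(const OsrJob& job) {
    std::lock_guard<std::mutex> lock(mutex_);
    pending_.erase(
        std::remove_if(pending_.begin(), pending_.end(),
                       [&](const OsrJob& j) {
                         return j.function == job.function &&
                                j.osr_pc_offset == job.osr_pc_offset;
                       }),
        pending_.end());
    ready_.push_back(job);
  }

  // Main thread: is a compile for this entry still in flight?
  bool IsPending(uintptr_t function, uint32_t osr_pc_offset) {
    std::lock_guard<std::mutex> lock(mutex_);
    for (const OsrJob& j : pending_) {
      if (j.function == function && j.osr_pc_offset == osr_pc_offset)
        return true;
    }
    return false;
  }

  // Main thread: at a back edge, try to claim a finished compile.
  bool TakeReady(uintptr_t function, uint32_t osr_pc_offset, OsrJob* out) {
    std::lock_guard<std::mutex> lock(mutex_);
    for (size_t i = 0; i < ready_.size(); ++i) {
      if (ready_[i].function == function &&
          ready_[i].osr_pc_offset == osr_pc_offset) {
        *out = ready_[i];
        ready_.erase(ready_.begin() + i);
        return true;
      }
    }
    return false;
  }

 private:
  std::mutex mutex_;             // plays the role of osr_list_mutex_
  std::vector<OsrJob> pending_;  // plays the role of osr_candidates_
  std::vector<OsrJob> ready_;    // plays the role of ready_for_osr_
};

Both lists are touched only under the single mutex, which mirrors how the patch guards osr_candidates_ and ready_for_osr_ with osr_list_mutex_ from both threads.
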
 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
   OptimizingCompiler* optimizing_compiler;
   // The optimizing compiler is allocated in the CompilationInfo's zone.
   while (input_queue_.Dequeue(&optimizing_compiler)) {
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_->Wait();

(...skipping 11 matching lines...)

   OptimizingCompiler* optimizing_compiler;
   // The optimizing compiler is allocated in the CompilationInfo's zone.
   while (output_queue_.Dequeue(&optimizing_compiler)) {
     CompilationInfo* info = optimizing_compiler->info();
     if (restore_function_code) {
       Handle<JSFunction> function = info->closure();
       function->ReplaceCode(function->shared()->code());
     }
     delete info;
   }
+
+  osr_candidates_.Clear();
+  RemoveStaleOSRCandidates(0);
 }


 void OptimizingCompilerThread::Flush() {
   ASSERT(!IsOptimizerThread());
   Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
   input_queue_semaphore_->Signal();
   stop_semaphore_->Wait();
   FlushOutputQueue(true);
 }

(...skipping 14 matching lines...)

   } else {
     FlushInputQueue(false);
     FlushOutputQueue(false);
   }

   if (FLAG_trace_concurrent_recompilation) {
     double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
     PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
   }

+  if (FLAG_trace_osr && FLAG_concurrent_osr) {
+    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
+  }
+
   Join();
 }
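For example, if ten OSR compile jobs were queued over the lifetime of the thread (osr_attempts_ == 10) and seven of them were eventually claimed by the running code (osr_hits_ == 7), this trace prints [COSR hit rate 7 / 10]; the remaining three candidates were flushed or discarded as stale.
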


 void OptimizingCompilerThread::InstallOptimizedFunctions() {
   ASSERT(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);
   OptimizingCompiler* compiler;
   while (true) {
     { // Memory barrier to ensure marked functions are queued.
       LockGuard<Mutex> marked_and_queued(&install_mutex_);
       if (!output_queue_.Dequeue(&compiler)) return;
     }
     Compiler::InstallOptimizedCode(compiler);
   }
+
+  // Remove the oldest OSR candidates that are ready so that we
+  // only have a limited number of them waiting.
+  if (FLAG_concurrent_osr) RemoveStaleOSRCandidates();
 }


 void OptimizingCompilerThread::QueueForOptimization(
     OptimizingCompiler* optimizing_compiler) {
   ASSERT(IsQueueAvailable());
   ASSERT(!IsOptimizerThread());
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
-  optimizing_compiler->info()->closure()->MarkInRecompileQueue();
+  if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
+    optimizing_compiler->info()->closure()->MarkInRecompileQueue();
+  } else {
+    LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+    osr_candidates_.Add(optimizing_compiler);
+    osr_attempts_++;
+  }
   input_queue_.Enqueue(optimizing_compiler);
   input_queue_semaphore_->Signal();
 }


+OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
+    Handle<JSFunction> function, uint32_t osr_pc_offset) {
+  ASSERT(!IsOptimizerThread());
+  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+  for (int i = 0; i < ready_for_osr_.length(); i++) {
+    if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+      osr_hits_++;
+      return ready_for_osr_.Remove(i);
+    }
+  }
+  return NULL;
+}
+
+
+bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
+                                              uint32_t osr_pc_offset) {
+  ASSERT(!IsOptimizerThread());
+  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+  for (int i = 0; i < osr_candidates_.length(); i++) {
+    if (osr_candidates_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
+  ASSERT(!IsOptimizerThread());
+  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+  while (ready_for_osr_.length() > limit) {
+    OptimizingCompiler* compiler = ready_for_osr_.Remove(0);
+    CompilationInfo* throw_away = compiler->info();
+    if (FLAG_trace_osr) {
+      PrintF("[COSR - Discarded ");
+      throw_away->closure()->PrintName();
+      PrintF(", AST id %d]\n",
+             throw_away->osr_ast_id().ToInt());
+    }
+    delete throw_away;
+  }
+}
+
+
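Taken together, the three new methods give the main thread a simple protocol when unoptimized code reaches a hot loop back edge: claim a finished candidate if one is ready, keep running unoptimized code while a compile is still in flight, and otherwise queue a new OSR compile. Continuing the standalone sketch above (OsrList, OsrJob and OnBackEdge are invented names, not part of this patch), the caller side would look roughly like this:

// Hypothetical consumer of the model above; it mirrors the roles of
// FindReadyOSRCandidate, IsQueuedForOSR and QueueForOptimization.
enum class OsrAction { kEnterOptimized, kKeepRunning, kQueueCompile };

OsrAction OnBackEdge(OsrList& list, uintptr_t function, uint32_t pc_offset) {
  OsrJob job;
  if (list.TakeReady(function, pc_offset, &job)) {
    // A finished compile is waiting: install it and enter optimized code.
    return OsrAction::kEnterOptimized;
  }
  if (list.IsPending(function, pc_offset)) {
    // Still compiling in the background: keep running unoptimized code.
    return OsrAction::kKeepRunning;
  }
  // Not requested yet: record the candidate and hand it to the compiler thread.
  list.AddCandidate(OsrJob{function, pc_offset});
  return OsrAction::kQueueCompile;
}

In the patch itself the claimed OptimizingCompiler is then installed on the main thread, and RemoveStaleOSRCandidates() bounds how many unclaimed results are kept around.
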
 #ifdef DEBUG
 bool OptimizingCompilerThread::IsOptimizerThread() {
   if (!FLAG_concurrent_recompilation) return false;
   LockGuard<Mutex> lock_guard(&thread_id_mutex_);
   return ThreadId::Current().ToInteger() == thread_id_;
 }
 #endif


 } }  // namespace v8::internal