// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compile-dispatcher.h"

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"

namespace v8 {
namespace internal {

namespace {

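// Frees a job that is not going to be installed. If restore_function_code
// is set, the function is rolled back to its unoptimized code; for an OSR
// job that is still in progress, the stack check guarding the OSR entry is
// removed instead.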
void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}

}  // namespace

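// Background task that takes one job off the input queue, compiles it, and
// queues the result for installation. Each live task holds a reference on
// the dispatcher's ref count so that Flush() and Stop() can wait for all
// tasks to complete.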
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
    ++dispatcher->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() OVERRIDE {
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

      if (dispatcher->recompilation_delay_ != 0) {
        base::OS::Sleep(dispatcher->recompilation_delay_);
      }

      dispatcher->CompileNext(dispatcher->NextInput(true));
    }
    {
      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
      if (--dispatcher->ref_count_ == 0) {
        dispatcher->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

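// By the time the dispatcher is destroyed, all background tasks must have
// finished and both queues must have been drained.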
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_NULL(osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}

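// Takes the next job off the input queue. If check_if_flushing is set and
// the dispatcher is flushing, the job is not returned: non-OSR jobs are
// disposed right away, while OSR jobs are left to FlushOsrBuffer().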
OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
    bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
      if (!job->info()->is_osr()) {
        AllowHandleDereference allow_handle_dereference;
        DisposeOptimizedCompileJob(job, true);
      }
      return NULL;
    }
  }
  return job;
}

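// Runs the graph optimization phase of the job on the background thread and
// hands the result over to the main thread for installation.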
void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
  if (!job) return;

  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);  // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);

  // The function may have already been optimized by OSR. Simply continue.
  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}

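// Disposes every non-OSR job waiting in the output queue.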
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  while (!output_queue_.empty()) {
    OptimizedCompileJob* job = output_queue_.front();
    output_queue_.pop();

    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}

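// Disposes every job held in the OSR buffer.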
void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
      osr_buffer_[i] = NULL;
    }
  }
}

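// Discards all pending jobs and restores the unoptimized code of the
// affected functions. Blocks until all running background tasks have
// finished.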
void OptimizingCompileDispatcher::Flush() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}

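// Like Flush(), but does not restore the functions' code. If an artificial
// recompilation delay is configured, the remaining jobs are compiled and
// installed instead of being discarded.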
void OptimizingCompileDispatcher::Stop() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
      FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }
}

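// Runs on the main thread and drains the output queue: regular jobs have
// their code installed, OSR jobs are marked as ready for install and entry.
// CompileNext() requests this call through the stack guard.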
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  while (!output_queue_.empty()) {
    OptimizedCompileJob* job = output_queue_.front();
    output_queue_.pop();
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->ShortPrint();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        if (FLAG_trace_concurrent_recompilation) {
          PrintF("  ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
        }
        DisposeOptimizedCompileJob(job, false);
      } else {
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(code.is_null() ? function->shared()->code()
                                             : *code);
      }
    }
  }
}

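// Adds a job to the input queue: OSR jobs go to the front of the queue and
// into the OSR buffer, all other jobs to the back. A background task is
// spawned unless FLAG_block_concurrent_recompilation is set.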
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompileJob* job) {
  DCHECK(IsQueueAvailable());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  }
}

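// Spawns background tasks for jobs that were held back by
// FLAG_block_concurrent_recompilation.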
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    blocked_jobs_--;
  }
}

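// Returns a finished OSR job matching the given function and OSR entry, if
// there is one, and removes it from the OSR buffer.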
OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
    Handle<JSFunction> function, BailoutId osr_ast_id) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && current->IsWaitingForInstall() &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      osr_hits_++;
      osr_buffer_[i] = NULL;
      return current;
    }
  }
  return NULL;
}

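// Returns whether an OSR job for the given function and entry is queued but
// not yet ready for install.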
bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
                                                 BailoutId osr_ast_id) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}

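// Stores the job in the circular OSR buffer, evicting and disposing the
// next stale (already installable) job if no empty slot is found.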
void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
  // Find the next slot that is empty or has a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add to found slot and dispose the evicted job.
  if (stale != NULL) {
    DCHECK(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
}
}  // namespace v8::internal