| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 71 matching lines...) | |
| 82 OptimizingCompiler* optimizing_compiler = NULL; | 82 OptimizingCompiler* optimizing_compiler = NULL; |
| 83 input_queue_.Dequeue(&optimizing_compiler); | 83 input_queue_.Dequeue(&optimizing_compiler); |
| 84 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); | 84 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); |
| 85 | 85 |
| 86 // The function may have already been optimized by OSR. Simply continue. | 86 // The function may have already been optimized by OSR. Simply continue. |
| 87 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); | 87 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); |
| 88 USE(status); // Prevent an unused-variable error in release mode. | 88 USE(status); // Prevent an unused-variable error in release mode. |
| 89 ASSERT(status != OptimizingCompiler::FAILED); | 89 ASSERT(status != OptimizingCompiler::FAILED); |
| 90 | 90 |
| 91 // The function may have already been optimized by OSR. Simply continue. | 91 // The function may have already been optimized by OSR. Simply continue. |
| 92 // Mark it for installing before queuing so that we can be sure of the write | 92 // Use a mutex to make sure that functions marked for install |
| 93 // order: marking first and (after being queued) installing code second. | 93 // are always also queued. |
| | 94 ScopedLock mark_and_queue(install_mutex_); |
| 94 { Heap::RelocationLock relocation_lock(isolate_->heap()); | 95 { Heap::RelocationLock relocation_lock(isolate_->heap()); |
| 95 AllowHandleDereference ahd; | 96 AllowHandleDereference ahd; |
| 96 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); | 97 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); |
| 97 } | 98 } |
| 98 output_queue_.Enqueue(optimizing_compiler); | 99 output_queue_.Enqueue(optimizing_compiler); |
| 99 } | 100 } |
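
The hunk above makes the compiler thread mark a closure for install and enqueue it on the output queue as a single unit under install_mutex_. Below is a minimal standalone sketch of that producer side, using std::mutex and std::queue in place of V8's ScopedLock and UnboundQueue; CompileJob, FinishCompile, install_mutex and output_queue are hypothetical stand-ins, not V8 API.

```cpp
#include <mutex>
#include <queue>

struct CompileJob {
  bool marked_for_install = false;
  void MarkForInstall() { marked_for_install = true; }
};

std::mutex install_mutex;              // plays the role of install_mutex_
std::queue<CompileJob*> output_queue;  // plays the role of output_queue_

// Compiler-thread side: marking and enqueueing happen under one lock, so the
// main thread can never dequeue a job that has not yet been marked.
void FinishCompile(CompileJob* job) {
  std::lock_guard<std::mutex> lock(install_mutex);
  job->MarkForInstall();
  output_queue.push(job);
}
```
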
| 100 | 101 |
| 101 | 102 |
| 102 void OptimizingCompilerThread::Stop() { | 103 void OptimizingCompilerThread::Stop() { |
| 103 ASSERT(!IsOptimizerThread()); | 104 ASSERT(!IsOptimizerThread()); |
| (...skipping 16 matching lines...) | |
| 120 double total_time = static_cast<double>(time_spent_total_); | 121 double total_time = static_cast<double>(time_spent_total_); |
| 121 double percentage = (compile_time * 100) / total_time; | 122 double percentage = (compile_time * 100) / total_time; |
| 122 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); | 123 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); |
| 123 } | 124 } |
| 124 } | 125 } |
| 125 | 126 |
| 126 | 127 |
| 127 void OptimizingCompilerThread::InstallOptimizedFunctions() { | 128 void OptimizingCompilerThread::InstallOptimizedFunctions() { |
| 128 ASSERT(!IsOptimizerThread()); | 129 ASSERT(!IsOptimizerThread()); |
| 129 HandleScope handle_scope(isolate_); | 130 HandleScope handle_scope(isolate_); |
| 130 int functions_installed = 0; | |
| 131 OptimizingCompiler* compiler; | 131 OptimizingCompiler* compiler; |
| 132 while (output_queue_.Dequeue(&compiler)) { | 132 while (true) { |
| | 133 { // Memory barrier to ensure marked functions are queued. |
| | 134 ScopedLock marked_and_queued(install_mutex_); |
| | 135 if (!output_queue_.Dequeue(&compiler)) return; |
| | 136 } |
| 133 Compiler::InstallOptimizedCode(compiler); | 137 Compiler::InstallOptimizedCode(compiler); |
| 134 functions_installed++; | |
| 135 } | 138 } |
| 136 } | 139 } |
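
Continuing the sketch above, this is the matching consumer side of the InstallOptimizedFunctions hunk: the main thread dequeues under the same lock and then installs outside it, so the potentially slow install step does not stall the compiler thread. InstallOptimizedCode and InstallAll are hypothetical stand-ins for Compiler::InstallOptimizedCode and the loop above.

```cpp
// Hypothetical stand-in for Compiler::InstallOptimizedCode().
void InstallOptimizedCode(CompileJob* /* job */) { /* swap in the optimized code */ }

// Main-thread side: dequeue under install_mutex, install outside the lock.
void InstallAll() {
  while (true) {
    CompileJob* job = nullptr;
    {
      std::lock_guard<std::mutex> lock(install_mutex);
      if (output_queue.empty()) return;  // mirrors the early return above
      job = output_queue.front();
      output_queue.pop();
    }
    // Safe: the producer marked the job before it became visible here.
    InstallOptimizedCode(job);
  }
}
```
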
| 137 | 140 |
| 138 | 141 |
| 139 void OptimizingCompilerThread::QueueForOptimization( | 142 void OptimizingCompilerThread::QueueForOptimization( |
| 140 OptimizingCompiler* optimizing_compiler) { | 143 OptimizingCompiler* optimizing_compiler) { |
| 141 ASSERT(IsQueueAvailable()); | 144 ASSERT(IsQueueAvailable()); |
| 142 ASSERT(!IsOptimizerThread()); | 145 ASSERT(!IsOptimizerThread()); |
| 143 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); | 146 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); |
| 144 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); | 147 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); |
| 145 input_queue_.Enqueue(optimizing_compiler); | 148 input_queue_.Enqueue(optimizing_compiler); |
| 146 input_queue_semaphore_->Signal(); | 149 input_queue_semaphore_->Signal(); |
| 147 } | 150 } |
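
QueueForOptimization pairs an atomic queue-length counter with a semaphore signal that wakes the compiler thread. The rough sketch below shows that bookkeeping with C++20 std::atomic and std::counting_semaphore in place of Barrier_AtomicIncrement and V8's Semaphore; kMaxQueueLength, QueueJob and CompileNextSketch are made-up names, and the marking of the closure and the actual input-queue traffic are elided.

```cpp
#include <atomic>
#include <semaphore>  // C++20

constexpr int kMaxQueueLength = 8;         // assumed capacity, not from the patch
std::atomic<int> queue_length{0};          // stands in for queue_length_
std::counting_semaphore<> input_ready{0};  // stands in for input_queue_semaphore_

bool IsQueueAvailable() { return queue_length.load() < kMaxQueueLength; }

// Main-thread side: bump the counter, publish the job, wake the compiler thread.
void QueueJob() {
  queue_length.fetch_add(1);  // Barrier_AtomicIncrement(&queue_length_, +1)
  // ... MarkInRecompileQueue() and push onto the input queue ...
  input_ready.release();      // input_queue_semaphore_->Signal()
}

// Compiler-thread side: wait for work, pop it, and drop the counter.
void CompileNextSketch() {
  input_ready.acquire();      // input_queue_semaphore_->Wait()
  // ... dequeue the job ...
  queue_length.fetch_sub(1);  // Barrier_AtomicIncrement(&queue_length_, -1)
  // ... OptimizeGraph(), mark for install, hand off to the output queue ...
}
```
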
| 148 | 151 |
| 149 | 152 |
| 150 #ifdef DEBUG | 153 #ifdef DEBUG |
| 151 bool OptimizingCompilerThread::IsOptimizerThread() { | 154 bool OptimizingCompilerThread::IsOptimizerThread() { |
| 152 if (!FLAG_parallel_recompilation) return false; | 155 if (!FLAG_parallel_recompilation) return false; |
| 153 return ThreadId::Current().ToInteger() == thread_id_; | 156 return ThreadId::Current().ToInteger() == thread_id_; |
| 154 } | 157 } |
| 155 #endif | 158 #endif |
| 156 | 159 |
| 157 | 160 |
| 158 } } // namespace v8::internal | 161 } } // namespace v8::internal |