OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 21 matching lines...) |
32 #include "hydrogen.h" | 32 #include "hydrogen.h" |
33 #include "isolate.h" | 33 #include "isolate.h" |
34 #include "v8threads.h" | 34 #include "v8threads.h" |
35 | 35 |
36 namespace v8 { | 36 namespace v8 { |
37 namespace internal { | 37 namespace internal { |
38 | 38 |
39 | 39 |
40 void OptimizingCompilerThread::Run() { | 40 void OptimizingCompilerThread::Run() { |
41 #ifdef DEBUG | 41 #ifdef DEBUG |
42 { ScopedLock lock(&thread_id_mutex_); | 42 { ScopedLock lock(thread_id_mutex_); |
43 thread_id_ = ThreadId::Current().ToInteger(); | 43 thread_id_ = ThreadId::Current().ToInteger(); |
44 } | 44 } |
45 #endif | 45 #endif |
46 Isolate::SetIsolateThreadLocals(isolate_, NULL); | 46 Isolate::SetIsolateThreadLocals(isolate_, NULL); |
47 DisallowHeapAllocation no_allocation; | 47 DisallowHeapAllocation no_allocation; |
48 DisallowHandleAllocation no_handles; | 48 DisallowHandleAllocation no_handles; |
49 DisallowHandleDereference no_deref; | 49 DisallowHandleDereference no_deref; |
50 | 50 |
51 int64_t epoch = 0; | 51 int64_t epoch = 0; |
52 if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks(); | 52 if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks(); |
(...skipping 33 matching lines...) |
86 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); | 86 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); |
87 | 87 |
88 // The function may have already been optimized by OSR. Simply continue. | 88 // The function may have already been optimized by OSR. Simply continue. |
89 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); | 89 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); |
90 USE(status); // Prevent an unused-variable error in release mode. | 90 USE(status); // Prevent an unused-variable error in release mode. |
91 ASSERT(status != OptimizingCompiler::FAILED); | 91 ASSERT(status != OptimizingCompiler::FAILED); |
92 | 92 |
94 // Use a mutex to make sure that functions marked for install | 94 // Use a mutex to make sure that functions marked for install |
95 // are always also queued. | 95 // are always also queued. |
96 ScopedLock mark_and_queue(&install_mutex_); | 96 ScopedLock mark_and_queue(install_mutex_); |
97 { Heap::RelocationLock relocation_lock(isolate_->heap()); | 97 { Heap::RelocationLock relocation_lock(isolate_->heap()); |
98 AllowHandleDereference ahd; | 98 AllowHandleDereference ahd; |
99 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); | 99 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); |
100 } | 100 } |
101 output_queue_.Enqueue(optimizing_compiler); | 101 output_queue_.Enqueue(optimizing_compiler); |
102 } | 102 } |
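Note on the locking protocol in CompileNext() above: the closure is marked for install and enqueued on output_queue_ while the same install_mutex_ is held, and InstallOptimizedFunctions() below reacquires that mutex before dequeuing. That pairing is what makes the comment's invariant hold: any compiler visible in the output queue has already been marked. A minimal sketch of the idea, using std::mutex and std::queue as stand-ins for V8's Mutex and UnboundQueue (both stand-ins are assumptions; the real types are not shown in this diff):

#include <mutex>
#include <queue>

struct Job { bool marked_for_install; Job() : marked_for_install(false) {} };

static std::mutex install_mutex;       // stand-in for install_mutex_
static std::queue<Job*> output_queue;  // stand-in for output_queue_

// Compiler thread (cf. CompileNext): mark and enqueue under one lock, so
// any job that is visible in the queue is already marked.
void MarkAndEnqueue(Job* job) {
  std::lock_guard<std::mutex> lock(install_mutex);
  job->marked_for_install = true;
  output_queue.push(job);
}

// Main thread (cf. InstallOptimizedFunctions): taking the same mutex before
// dequeuing doubles as the memory barrier the comment below refers to.
Job* TryDequeue() {
  std::lock_guard<std::mutex> lock(install_mutex);
  if (output_queue.empty()) return NULL;
  Job* job = output_queue.front();
  output_queue.pop();
  return job;
}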
103 | 103 |
104 | 104 |
105 void OptimizingCompilerThread::Stop() { | 105 void OptimizingCompilerThread::Stop() { |
106 ASSERT(!IsOptimizerThread()); | 106 ASSERT(!IsOptimizerThread()); |
(...skipping 27 matching lines...) |
134 Join(); | 134 Join(); |
135 } | 135 } |
136 | 136 |
137 | 137 |
138 void OptimizingCompilerThread::InstallOptimizedFunctions() { | 138 void OptimizingCompilerThread::InstallOptimizedFunctions() { |
139 ASSERT(!IsOptimizerThread()); | 139 ASSERT(!IsOptimizerThread()); |
140 HandleScope handle_scope(isolate_); | 140 HandleScope handle_scope(isolate_); |
141 OptimizingCompiler* compiler; | 141 OptimizingCompiler* compiler; |
142 while (true) { | 142 while (true) { |
143 { // Memory barrier to ensure marked functions are queued. | 143 { // Memory barrier to ensure marked functions are queued. |
144 ScopedLock marked_and_queued(&install_mutex_); | 144 ScopedLock marked_and_queued(install_mutex_); |
145 if (!output_queue_.Dequeue(&compiler)) return; | 145 if (!output_queue_.Dequeue(&compiler)) return; |
146 } | 146 } |
147 Compiler::InstallOptimizedCode(compiler); | 147 Compiler::InstallOptimizedCode(compiler); |
148 } | 148 } |
149 } | 149 } |
150 | 150 |
151 | 151 |
152 void OptimizingCompilerThread::QueueForOptimization( | 152 void OptimizingCompilerThread::QueueForOptimization( |
153 OptimizingCompiler* optimizing_compiler) { | 153 OptimizingCompiler* optimizing_compiler) { |
154 ASSERT(IsQueueAvailable()); | 154 ASSERT(IsQueueAvailable()); |
155 ASSERT(!IsOptimizerThread()); | 155 ASSERT(!IsOptimizerThread()); |
156 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); | 156 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); |
157 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); | 157 optimizing_compiler->info()->closure()->MarkInRecompileQueue(); |
158 input_queue_.Enqueue(optimizing_compiler); | 158 input_queue_.Enqueue(optimizing_compiler); |
159 input_queue_semaphore_->Signal(); | 159 input_queue_semaphore_->Signal(); |
160 } | 160 } |
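The accounting in QueueForOptimization() is the producer half of a handshake with CompileNext(): queue_length_ is bumped atomically before the job is enqueued, the semaphore then wakes the compiler thread, and CompileNext() decrements the counter once it picks the job up. A minimal sketch of both halves, assuming std::atomic and C++20's std::counting_semaphore in place of V8's Atomic32/Barrier_AtomicIncrement and Semaphore, and a mutex-guarded std::queue in place of the input queue (all stand-ins; none of these V8 types are declared in this diff):

#include <atomic>
#include <mutex>
#include <queue>
#include <semaphore>

static std::atomic<int> queue_length(0);          // stand-in for queue_length_
static std::counting_semaphore<> input_ready(0);  // stand-in for input_queue_semaphore_
static std::mutex input_mutex;
static std::queue<int> input_queue;               // stand-in for input_queue_; int as a dummy job

// Main thread (cf. QueueForOptimization): bump the length, enqueue, signal.
void Enqueue(int job) {
  queue_length.fetch_add(1);
  { std::lock_guard<std::mutex> lock(input_mutex); input_queue.push(job); }
  input_ready.release();  // counterpart of input_queue_semaphore_->Signal()
}

// Compiler thread (cf. the Run() loop and CompileNext): block until work
// arrives, then dequeue and decrement, mirroring the
// Barrier_AtomicIncrement(&queue_length_, -1) above.
int WaitAndDequeue() {
  input_ready.acquire();
  std::lock_guard<std::mutex> lock(input_mutex);
  int job = input_queue.front();
  input_queue.pop();
  queue_length.fetch_sub(1);
  return job;
}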
161 | 161 |
162 | 162 |
163 #ifdef DEBUG | 163 #ifdef DEBUG |
164 bool OptimizingCompilerThread::IsOptimizerThread() { | 164 bool OptimizingCompilerThread::IsOptimizerThread() { |
165 if (!FLAG_parallel_recompilation) return false; | 165 if (!FLAG_parallel_recompilation) return false; |
166 ScopedLock lock(&thread_id_mutex_); | 166 ScopedLock lock(thread_id_mutex_); |
167 return ThreadId::Current().ToInteger() == thread_id_; | 167 return ThreadId::Current().ToInteger() == thread_id_; |
168 } | 168 } |
169 #endif | 169 #endif |
170 | 170 |
171 | 171 |
172 } } // namespace v8::internal | 172 } } // namespace v8::internal |
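The substantive change in this CL is mechanical: each ScopedLock is now constructed from the mutex member directly (e.g. ScopedLock lock(thread_id_mutex_)) rather than from its address (ScopedLock lock(&thread_id_mutex_)). The matching header change is not part of this file, so whether the members changed type or ScopedLock gained a reference-taking constructor is an assumption here; the sketch below shows the reference-taking RAII variant, with hypothetical stand-ins for the real Mutex and ScopedLock:

#include <mutex>

// Hypothetical stand-in; V8's real Mutex is not declared in this diff.
class Mutex {
 public:
  void Lock() { impl_.lock(); }
  void Unlock() { impl_.unlock(); }
 private:
  std::mutex impl_;
};

class ScopedLock {
 public:
  // Takes the mutex by reference, so call sites drop the '&'.
  explicit ScopedLock(Mutex& mutex) : mutex_(mutex) { mutex_.Lock(); }
  ~ScopedLock() { mutex_.Unlock(); }

 private:
  Mutex& mutex_;

  // Non-copyable: copying a guard would unlock the mutex twice.
  ScopedLock(const ScopedLock&);
  void operator=(const ScopedLock&);
};

Either way, behavior at the call sites is unchanged: the lock is acquired in the constructor and released when the guard goes out of scope.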