OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
43 class SharedFunctionInfo; | 43 class SharedFunctionInfo; |
44 | 44 |
45 class OptimizingCompilerThread : public Thread { | 45 class OptimizingCompilerThread : public Thread { |
46 public: | 46 public: |
47 explicit OptimizingCompilerThread(Isolate *isolate) : | 47 explicit OptimizingCompilerThread(Isolate *isolate) : |
48 Thread("OptimizingCompilerThread"), | 48 Thread("OptimizingCompilerThread"), |
49 #ifdef DEBUG | 49 #ifdef DEBUG |
50 thread_id_(0), | 50 thread_id_(0), |
51 #endif | 51 #endif |
52 isolate_(isolate), | 52 isolate_(isolate), |
53 stop_semaphore_(OS::CreateSemaphore(0)), | 53 stop_semaphore_(0), |
54 input_queue_semaphore_(OS::CreateSemaphore(0)) { | 54 input_queue_semaphore_(0) { |
55 NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); | 55 NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); |
56 NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0)); | 56 NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0)); |
57 } | 57 } |
| 58 ~OptimizingCompilerThread() {} |
58 | 59 |
59 void Run(); | 60 void Run(); |
60 void Stop(); | 61 void Stop(); |
61 void Flush(); | 62 void Flush(); |
62 void QueueForOptimization(OptimizingCompiler* optimizing_compiler); | 63 void QueueForOptimization(OptimizingCompiler* optimizing_compiler); |
63 void InstallOptimizedFunctions(); | 64 void InstallOptimizedFunctions(); |
64 | 65 |
65 inline bool IsQueueAvailable() { | 66 inline bool IsQueueAvailable() { |
66 // We don't need a barrier since we have a data dependency right | 67 // We don't need a barrier since we have a data dependency right |
67 // after. | 68 // after. |
68 Atomic32 current_length = NoBarrier_Load(&queue_length_); | 69 Atomic32 current_length = NoBarrier_Load(&queue_length_); |
69 | 70 |
70 // This can be queried only from the execution thread. | 71 // This can be queried only from the execution thread. |
71 ASSERT(!IsOptimizerThread()); | 72 ASSERT(!IsOptimizerThread()); |
72 // Since only the execution thread increments queue_length_ and | 73 // Since only the execution thread increments queue_length_ and |
73 // only one thread can run inside an Isolate at one time, a direct | 74 // only one thread can run inside an Isolate at one time, a direct |
74 // read doesn't introduce a race -- queue_length_ may be decreased | 75 // read doesn't introduce a race -- queue_length_ may be decreased |
75 // in the meantime, but not increased. | 76 // in the meantime, but not increased. |
76 return (current_length < FLAG_concurrent_recompilation_queue_length); | 77 return (current_length < FLAG_concurrent_recompilation_queue_length); |
77 } | 78 } |
78 | 79 |
79 #ifdef DEBUG | 80 #ifdef DEBUG |
80 bool IsOptimizerThread(); | 81 bool IsOptimizerThread(); |
81 #endif | 82 #endif |
82 | 83 |
83 ~OptimizingCompilerThread() { | |
84 delete input_queue_semaphore_; | |
85 delete stop_semaphore_; | |
86 #ifdef DEBUG | |
87 #endif | |
88 } | |
89 | |
90 private: | 84 private: |
91 enum StopFlag { CONTINUE, STOP, FLUSH }; | 85 enum StopFlag { CONTINUE, STOP, FLUSH }; |
92 | 86 |
93 void FlushInputQueue(bool restore_function_code); | 87 void FlushInputQueue(bool restore_function_code); |
94 void FlushOutputQueue(bool restore_function_code); | 88 void FlushOutputQueue(bool restore_function_code); |
95 | 89 |
96 void CompileNext(); | 90 void CompileNext(); |
97 | 91 |
98 #ifdef DEBUG | 92 #ifdef DEBUG |
99 int thread_id_; | 93 int thread_id_; |
100 Mutex thread_id_mutex_; | 94 Mutex thread_id_mutex_; |
101 #endif | 95 #endif |
102 | 96 |
103 Isolate* isolate_; | 97 Isolate* isolate_; |
104 Semaphore* stop_semaphore_; | 98 Semaphore stop_semaphore_; |
105 Semaphore* input_queue_semaphore_; | 99 Semaphore input_queue_semaphore_; |
106 UnboundQueue<OptimizingCompiler*> input_queue_; | 100 UnboundQueue<OptimizingCompiler*> input_queue_; |
107 UnboundQueue<OptimizingCompiler*> output_queue_; | 101 UnboundQueue<OptimizingCompiler*> output_queue_; |
108 Mutex install_mutex_; | 102 Mutex install_mutex_; |
109 volatile AtomicWord stop_thread_; | 103 volatile AtomicWord stop_thread_; |
110 volatile Atomic32 queue_length_; | 104 volatile Atomic32 queue_length_; |
111 TimeDelta time_spent_compiling_; | 105 TimeDelta time_spent_compiling_; |
112 TimeDelta time_spent_total_; | 106 TimeDelta time_spent_total_; |
113 }; | 107 }; |
114 | 108 |
115 } } // namespace v8::internal | 109 } } // namespace v8::internal |
116 | 110 |
117 #endif // V8_OPTIMIZING_COMPILER_THREAD_H_ | 111 #endif // V8_OPTIMIZING_COMPILER_THREAD_H_ |
OLD | NEW |