OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 17 matching lines...) |
28 // Tests of the circular queue. | 28 // Tests of the circular queue. |
29 | 29 |
30 #include "v8.h" | 30 #include "v8.h" |
31 #include "circular-queue-inl.h" | 31 #include "circular-queue-inl.h" |
32 #include "cctest.h" | 32 #include "cctest.h" |
33 | 33 |
34 using i::SamplingCircularQueue; | 34 using i::SamplingCircularQueue; |
35 | 35 |
36 | 36 |
37 TEST(SamplingCircularQueue) { | 37 TEST(SamplingCircularQueue) { |
38 typedef SamplingCircularQueue::Cell Record; | 38 typedef i::AtomicWord Record; |
39 const int kRecordsPerChunk = 4; | 39 const int kMaxRecordsInQueue = 4; |
40 SamplingCircularQueue scq(sizeof(Record), | 40 SamplingCircularQueue<Record, kMaxRecordsInQueue> scq; |
41 kRecordsPerChunk * sizeof(Record), | |
42 3); | |
43 | 41 |
44 // Check that we are using non-reserved values. | 42 // Check that we are using non-reserved values. |
45 // Fill up the first chunk. | 43 // Fill up the queue. |
46 CHECK_EQ(NULL, scq.StartDequeue()); | 44 CHECK_EQ(NULL, scq.StartDequeue()); |
47 for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) { | 45 for (Record i = 1; i < 1 + kMaxRecordsInQueue; ++i) { |
48 Record* rec = reinterpret_cast<Record*>(scq.Enqueue()); | 46 Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue()); |
49 CHECK_NE(NULL, rec); | 47 CHECK_NE(NULL, rec); |
50 *rec = i; | 48 *rec = i; |
51 CHECK_EQ(NULL, scq.StartDequeue()); | 49 scq.FinishEnqueue(); |
52 } | 50 } |
53 | 51 |
54 // Fill up the second chunk. Consumption must still be unavailable. | 52 // The queue is full; enqueue is not allowed. |
55 CHECK_EQ(NULL, scq.StartDequeue()); | 53 CHECK_EQ(NULL, scq.StartEnqueue()); |
56 for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) { | 54 |
57 Record* rec = reinterpret_cast<Record*>(scq.Enqueue()); | 55 // Try to enqueue when the queue is full. Consumption must be available. |
58 CHECK_NE(NULL, rec); | 56 CHECK_NE(NULL, scq.StartDequeue()); |
59 *rec = i; | 57 for (int i = 0; i < 10; ++i) { |
60 CHECK_EQ(NULL, scq.StartDequeue()); | 58 Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue()); |
| 59 CHECK_EQ(NULL, rec); |
| 60 CHECK_NE(NULL, scq.StartDequeue()); |
61 } | 61 } |
62 | 62 |
63 Record* rec = reinterpret_cast<Record*>(scq.Enqueue()); | 63 // Consume all records. |
64 CHECK_NE(NULL, rec); | 64 for (Record i = 1; i < 1 + kMaxRecordsInQueue; ++i) { |
65 *rec = 20; | |
66 // Now as we started filling up the third chunk, consumption | |
67 // must become possible. | |
68 CHECK_NE(NULL, scq.StartDequeue()); | |
69 | |
70 // Consume the first chunk. | |
71 for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) { | |
72 Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); | 65 Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); |
73 CHECK_NE(NULL, rec); | 66 CHECK_NE(NULL, rec); |
74 CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); | 67 CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); |
75 CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); | 68 CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); |
76 scq.FinishDequeue(); | 69 scq.FinishDequeue(); |
77 CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); | 70 CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); |
78 } | 71 } |
79 // Now consumption must not be possible, as consumer now polls | 72 // The queue is empty. |
80 // the first chunk for emptiness. |
81 CHECK_EQ(NULL, scq.StartDequeue()); | 73 CHECK_EQ(NULL, scq.StartDequeue()); |
82 | 74 |
83 scq.FlushResidualRecords(); | 75 |
84 // From now, consumer no more polls ahead of the current chunk, | 76 CHECK_EQ(NULL, scq.StartDequeue()); |
85 // so it's possible to consume the second chunk. | 77 for (Record i = 0; i < kMaxRecordsInQueue / 2; ++i) { |
| 78 Record* rec = reinterpret_cast<Record*>(scq.StartEnqueue()); |
| 79 CHECK_NE(NULL, rec); |
| 80 *rec = i; |
| 81 scq.FinishEnqueue(); |
| 82 } |
| 83 |
| 84 // Consume all available kMaxRecordsInQueue / 2 records. |
86 CHECK_NE(NULL, scq.StartDequeue()); | 85 CHECK_NE(NULL, scq.StartDequeue()); |
87 // Consume the second chunk | 86 for (Record i = 0; i < kMaxRecordsInQueue / 2; ++i) { |
88 for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) { | |
89 Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); | 87 Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); |
90 CHECK_NE(NULL, rec); | 88 CHECK_NE(NULL, rec); |
91 CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); | 89 CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); |
92 CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); | 90 CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); |
93 scq.FinishDequeue(); | 91 scq.FinishDequeue(); |
94 CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); | 92 CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); |
95 } | 93 } |
96 // Consumption must still be possible as the first cell of the | 94 |
97 // last chunk is not clean. | 95 // The queue is empty. |
98 CHECK_NE(NULL, scq.StartDequeue()); | 96 CHECK_EQ(NULL, scq.StartDequeue()); |
99 } | 97 } |
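
The new API replaces the chunk-based Enqueue()/FlushResidualRecords() interface with a symmetric two-phase protocol: StartEnqueue() returns a slot to fill (or NULL when the queue is full), FinishEnqueue() publishes it, and StartDequeue()/FinishDequeue() do the same on the consumer side. As a reading aid, here is a minimal single-producer/single-consumer sketch of that protocol; it illustrates the semantics the test checks, not V8's implementation, and the name SimpleSamplingQueue plus the use of std::atomic (instead of i::AtomicWord) are assumptions of this sketch.

// Illustrative sketch only: a bounded SPSC ring buffer with the same
// Start/Finish call pairs the updated test exercises. Not V8 code.
#include <atomic>
#include <cstddef>

template <typename T, int Length>
class SimpleSamplingQueue {
 public:
  SimpleSamplingQueue() : head_(0), tail_(0) {}

  // Returns a slot to write into, or NULL when the queue is full.
  T* StartEnqueue() {
    int tail = tail_.load(std::memory_order_relaxed);
    if (Next(tail) == head_.load(std::memory_order_acquire)) return NULL;
    return &buffer_[tail];
  }

  // Publishes the record written after the last successful StartEnqueue().
  void FinishEnqueue() {
    tail_.store(Next(tail_.load(std::memory_order_relaxed)),
                std::memory_order_release);
  }

  // Returns the oldest record, or NULL when the queue is empty. The same
  // slot is returned again until FinishDequeue() releases it.
  T* StartDequeue() {
    int head = head_.load(std::memory_order_relaxed);
    if (head == tail_.load(std::memory_order_acquire)) return NULL;
    return &buffer_[head];
  }

  // Releases the slot returned by the last StartDequeue().
  void FinishDequeue() {
    head_.store(Next(head_.load(std::memory_order_relaxed)),
                std::memory_order_release);
  }

 private:
  static int Next(int pos) { return (pos + 1) % (Length + 1); }

  T buffer_[Length + 1];   // one spare slot distinguishes full from empty
  std::atomic<int> head_;  // next slot to read, advanced by the consumer
  std::atomic<int> tail_;  // next slot to write, advanced by the producer
};

With Length equal to kMaxRecordsInQueue, the fill loop at the top of the test occupies every slot, the next StartEnqueue() returns NULL, and StartDequeue() keeps returning the same oldest record until FinishDequeue() releases it, which is exactly what the CHECK_EQ/CHECK_NE pairs above verify.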
100 | 98 |
101 | 99 |
102 namespace { | 100 namespace { |
103 | 101 |
| 102 typedef i::AtomicWord Record; |
| 103 typedef SamplingCircularQueue<Record, 12> TestSampleQueue; |
| 104 |
104 class ProducerThread: public i::Thread { | 105 class ProducerThread: public i::Thread { |
105 public: | 106 public: |
106 typedef SamplingCircularQueue::Cell Record; | 107 ProducerThread(TestSampleQueue* scq, |
107 | |
108 ProducerThread(SamplingCircularQueue* scq, | |
109 int records_per_chunk, | 108 int records_per_chunk, |
110 Record value, | 109 Record value, |
111 i::Semaphore* finished) | 110 i::Semaphore* finished) |
112 : Thread("producer"), | 111 : Thread("producer"), |
113 scq_(scq), | 112 scq_(scq), |
114 records_per_chunk_(records_per_chunk), | 113 records_per_chunk_(records_per_chunk), |
115 value_(value), | 114 value_(value), |
116 finished_(finished) { } | 115 finished_(finished) { } |
117 | 116 |
118 virtual void Run() { | 117 virtual void Run() { |
119 for (Record i = value_; i < value_ + records_per_chunk_; ++i) { | 118 for (Record i = value_; i < value_ + records_per_chunk_; ++i) { |
120 Record* rec = reinterpret_cast<Record*>(scq_->Enqueue()); | 119 Record* rec = reinterpret_cast<Record*>(scq_->StartEnqueue()); |
121 CHECK_NE(NULL, rec); | 120 CHECK_NE(NULL, rec); |
122 *rec = i; | 121 *rec = i; |
| 122 scq_->FinishEnqueue(); |
123 } | 123 } |
124 | 124 |
125 finished_->Signal(); | 125 finished_->Signal(); |
126 } | 126 } |
127 | 127 |
128 private: | 128 private: |
129 SamplingCircularQueue* scq_; | 129 TestSampleQueue* scq_; |
130 const int records_per_chunk_; | 130 const int records_per_chunk_; |
131 Record value_; | 131 Record value_; |
132 i::Semaphore* finished_; | 132 i::Semaphore* finished_; |
133 }; | 133 }; |
134 | 134 |
135 } // namespace | 135 } // namespace |
136 | 136 |
137 TEST(SamplingCircularQueueMultithreading) { | 137 TEST(SamplingCircularQueueMultithreading) { |
138 // Emulate multiple VM threads working 'one thread at a time.' | 138 // Emulate multiple VM threads working 'one thread at a time.' |
139 // This test enqueues data from different threads. This corresponds | 139 // This test enqueues data from different threads. This corresponds |
140 // to the case of profiling under Linux, where the signal handler | 140 // to the case of profiling under Linux, where the signal handler |
141 // that does sampling is called in the context of different VM threads. | 141 // that does sampling is called in the context of different VM threads. |
142 | 142 |
143 typedef ProducerThread::Record Record; | |
144 const int kRecordsPerChunk = 4; | 143 const int kRecordsPerChunk = 4; |
145 SamplingCircularQueue scq(sizeof(Record), | 144 TestSampleQueue scq; |
146 kRecordsPerChunk * sizeof(Record), | |
147 3); | |
148 i::Semaphore* semaphore = i::OS::CreateSemaphore(0); | 145 i::Semaphore* semaphore = i::OS::CreateSemaphore(0); |
149 // Don't poll ahead, making possible to check data in the buffer | |
150 // immediately after enqueuing. | |
151 scq.FlushResidualRecords(); | |
152 | 146 |
153 // Check that we are using non-reserved values. | |
154 ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore); | 147 ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore); |
155 ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore); | 148 ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore); |
156 ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore); | 149 ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore); |
157 | 150 |
158 CHECK_EQ(NULL, scq.StartDequeue()); | 151 CHECK_EQ(NULL, scq.StartDequeue()); |
159 producer1.Start(); | 152 producer1.Start(); |
160 semaphore->Wait(); | 153 semaphore->Wait(); |
161 for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) { | 154 for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) { |
162 Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); | 155 Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); |
163 CHECK_NE(NULL, rec); | 156 CHECK_NE(NULL, rec); |
(...skipping 24 matching lines...) |
188 CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); | 181 CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); |
189 CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); | 182 CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); |
190 scq.FinishDequeue(); | 183 scq.FinishDequeue(); |
191 CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); | 184 CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); |
192 } | 185 } |
193 | 186 |
194 CHECK_EQ(NULL, scq.StartDequeue()); | 187 CHECK_EQ(NULL, scq.StartDequeue()); |
195 | 188 |
196 delete semaphore; | 189 delete semaphore; |
197 } | 190 } |
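
The multithreaded test follows a produce-then-drain pattern: each ProducerThread fills kRecordsPerChunk slots and signals a semaphore, and only then does the main thread dequeue and verify ordering. Below is a hedged driver sketch of that pattern, reusing the hypothetical SimpleSamplingQueue from the earlier sketch and std::thread/join in place of i::Thread and i::Semaphore.

// Sketch only; mirrors the produce-then-drain ordering of the test,
// not V8's threading primitives.
#include <cassert>
#include <cstddef>
#include <thread>

void ProduceThenDrain() {
  SimpleSamplingQueue<int, 12> queue;

  // Producer thread: enqueue four records, as ProducerThread::Run() does.
  std::thread producer([&queue]() {
    for (int i = 1; i <= 4; ++i) {
      int* slot = queue.StartEnqueue();
      assert(slot != NULL);  // the queue has room for all four records
      *slot = i;
      queue.FinishEnqueue();
    }
  });
  producer.join();  // stands in for semaphore->Wait() in the test

  // Consumer: records come out in the order they were enqueued.
  for (int i = 1; i <= 4; ++i) {
    int* record = queue.StartDequeue();
    assert(record != NULL && *record == i);
    queue.FinishDequeue();
  }
  assert(queue.StartDequeue() == NULL);  // drained completely
}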