| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_SLOT_SET_H | 5 #ifndef V8_SLOT_SET_H |
| 6 #define V8_SLOT_SET_H | 6 #define V8_SLOT_SET_H |
| 7 | 7 |
| 8 #include <stack> | 8 #include <stack> |
| 9 | 9 |
| 10 #include "src/allocation.h" | 10 #include "src/allocation.h" |
| (...skipping 142 matching lines...) |
| 153 for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { | 153 for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) { |
| 154 if (bucket[bucket_index].Value() != nullptr) { | 154 if (bucket[bucket_index].Value() != nullptr) { |
| 155 int in_bucket_count = 0; | 155 int in_bucket_count = 0; |
| 156 base::AtomicValue<uint32_t>* current_bucket = | 156 base::AtomicValue<uint32_t>* current_bucket = |
| 157 bucket[bucket_index].Value(); | 157 bucket[bucket_index].Value(); |
| 158 int cell_offset = bucket_index * kBitsPerBucket; | 158 int cell_offset = bucket_index * kBitsPerBucket; |
| 159 for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) { | 159 for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) { |
| 160 if (current_bucket[i].Value()) { | 160 if (current_bucket[i].Value()) { |
| 161 uint32_t cell = current_bucket[i].Value(); | 161 uint32_t cell = current_bucket[i].Value(); |
| 162 uint32_t old_cell = cell; | 162 uint32_t old_cell = cell; |
| 163 uint32_t mask = 0; | 163 uint32_t new_cell = cell; |
| 164 while (cell) { | 164 while (cell) { |
| 165 int bit_offset = base::bits::CountTrailingZeros32(cell); | 165 int bit_offset = base::bits::CountTrailingZeros32(cell); |
| 166 uint32_t bit_mask = 1u << bit_offset; | 166 uint32_t bit_mask = 1u << bit_offset; |
| 167 uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2; | 167 uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2; |
| 168 if (callback(page_start_ + slot) == KEEP_SLOT) { | 168 if (callback(page_start_ + slot) == KEEP_SLOT) { |
| 169 ++in_bucket_count; | 169 ++in_bucket_count; |
| 170 } else { | 170 } else { |
| 171 mask |= bit_mask; | 171 new_cell ^= bit_mask; |
| 172 } | 172 } |
| 173 cell ^= bit_mask; | 173 cell ^= bit_mask; |
| 174 } | 174 } |
| 175 uint32_t new_cell = old_cell & ~mask; | |
| 176 if (old_cell != new_cell) { | 175 if (old_cell != new_cell) { |
| 177 while (!current_bucket[i].TrySetValue(old_cell, new_cell)) { | 176 while (!current_bucket[i].TrySetValue(old_cell, new_cell)) { |
| 178 // If TrySetValue fails, the cell must have changed. We just | 177 // If TrySetValue fails, the cell must have changed. We just |
| 179 // have to read the current value of the cell, & it with the | 178 // have to read the current value of the cell, & it with the |
| 180 // computed value, and retry. We can do this, because this | 179 // computed value, and retry. We can do this, because this |
| 181 // method will only be called on the main thread and filtering | 180 // method will only be called on the main thread and filtering |
| 182 // threads will only remove slots. | 181 // threads will only remove slots. |
| 183 old_cell = current_bucket[i].Value(); | 182 old_cell = current_bucket[i].Value(); |
| 184 new_cell = old_cell & ~mask; | 183 new_cell &= old_cell; |
| 185 } | 184 } |
| 186 } | 185 } |
| 187 } | 186 } |
| 188 } | 187 } |
| 189 if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) { | 188 if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) { |
| 190 base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_); | 189 base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_); |
| 191 base::AtomicValue<uint32_t>* bucket_ptr = | 190 base::AtomicValue<uint32_t>* bucket_ptr = |
| 192 bucket[bucket_index].Value(); | 191 bucket[bucket_index].Value(); |
| 193 to_be_freed_buckets_.push(bucket_ptr); | 192 to_be_freed_buckets_.push(bucket_ptr); |
| 194 bucket[bucket_index].SetValue(nullptr); | 193 bucket[bucket_index].SetValue(nullptr); |
| (...skipping 273 matching lines...) |
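Note on the hunk above: the NEW side drops the separate mask accumulator and instead starts new_cell as a copy of the cell, clears bits directly as slots are rejected, and, when TrySetValue fails, re-intersects the result with the freshly read cell. Below is a minimal standalone sketch of that pattern. It uses std::atomic<uint32_t> and a hypothetical should_keep callback in place of V8's base::AtomicValue and slot callback; the names FilterCell, should_keep, and the CountTrailingZeros32 wrapper are illustrative, not V8 API.

#include <atomic>
#include <cstdint>
#include <functional>

// GCC/Clang builtin; the argument is known to be non-zero at the call site.
inline int CountTrailingZeros32(uint32_t x) { return __builtin_ctz(x); }

// Walks the set bits of *cell_ptr, asks should_keep() about each one,
// clears the rejected bits in a local copy, and publishes the result with
// a compare-and-swap. On CAS failure the result is re-intersected with the
// freshly loaded value, which is safe as long as concurrent writers only
// ever clear bits (the invariant stated in the diff's comment).
// Returns the number of bits kept.
int FilterCell(std::atomic<uint32_t>* cell_ptr,
               const std::function<bool(int)>& should_keep) {
  uint32_t cell = cell_ptr->load(std::memory_order_relaxed);
  uint32_t old_cell = cell;
  uint32_t new_cell = cell;
  int kept = 0;
  while (cell) {
    int bit_offset = CountTrailingZeros32(cell);
    uint32_t bit_mask = 1u << bit_offset;
    if (should_keep(bit_offset)) {
      ++kept;
    } else {
      new_cell ^= bit_mask;  // Drop the rejected bit from the value to publish.
    }
    cell ^= bit_mask;  // Consume the bit just examined.
  }
  if (old_cell != new_cell) {
    // compare_exchange_weak reloads old_cell on failure, so re-ANDing keeps
    // only the bits that both this thread and the others decided to keep.
    while (!cell_ptr->compare_exchange_weak(old_cell, new_cell)) {
      new_cell &= old_cell;
    }
  }
  return kept;
}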
| 468 Address page_start_; | 467 Address page_start_; |
| 469 base::AtomicValue<Chunk*> chunk_; | 468 base::AtomicValue<Chunk*> chunk_; |
| 470 base::Mutex to_be_freed_chunks_mutex_; | 469 base::Mutex to_be_freed_chunks_mutex_; |
| 471 std::stack<Chunk*> to_be_freed_chunks_; | 470 std::stack<Chunk*> to_be_freed_chunks_; |
| 472 }; | 471 }; |
| 473 | 472 |
| 474 } // namespace internal | 473 } // namespace internal |
| 475 } // namespace v8 | 474 } // namespace v8 |
| 476 | 475 |
| 477 #endif // V8_SLOT_SET_H | 476 #endif // V8_SLOT_SET_H |
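Note on the PREFREE_EMPTY_BUCKETS branch and the to_be_freed_chunks_ members: both defer freeing, pushing the detached pointer onto a mutex-guarded stack instead of releasing it in place; the actual release happens in code elided from this view. A rough sketch of that deferral pattern follows, with a hypothetical DeferredFree helper and std::mutex / std::stack standing in for base::Mutex and the V8 members; the motivation (concurrent filtering threads may still be reading the memory) is an assumption drawn from the comment in the first hunk, not something this diff states explicitly.

#include <cstdint>
#include <mutex>
#include <stack>

// Owns retired arrays allocated with new[]. Callers unpublish a pointer
// (e.g. SetValue(nullptr)) before retiring it; the delete happens later,
// once no other thread can still be using the memory.
class DeferredFree {
 public:
  void Retire(uint32_t* block) {
    std::lock_guard<std::mutex> guard(mutex_);
    retired_.push(block);
  }

  // Intended to run at a safe point, e.g. on the main thread.
  void FreeRetired() {
    std::lock_guard<std::mutex> guard(mutex_);
    while (!retired_.empty()) {
      delete[] retired_.top();
      retired_.pop();
    }
  }

 private:
  std::mutex mutex_;
  std::stack<uint32_t*> retired_;
};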