OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/zone/accounting-allocator.h" | 5 #include "src/zone/accounting-allocator.h" |
6 | 6 |
7 #include <cstdlib> | 7 #include <cstdlib> |
8 | 8 |
9 #if V8_LIBC_BIONIC | 9 #if V8_LIBC_BIONIC |
10 #include <malloc.h> // NOLINT | 10 #include <malloc.h> // NOLINT |
(...skipping 67 matching lines...)
78 } | 78 } |
79 } | 79 } |
80 | 80 |
81 return result; | 81 return result; |
82 } | 82 } |
83 | 83 |
84 Segment* AccountingAllocator::AllocateSegment(size_t bytes) { | 84 Segment* AccountingAllocator::AllocateSegment(size_t bytes) { |
85 void* memory = malloc(bytes); | 85 void* memory = malloc(bytes); |
86 if (memory) { | 86 if (memory) { |
87 base::AtomicWord current = | 87 base::AtomicWord current = |
88 base::NoBarrier_AtomicIncrement(&current_memory_usage_, bytes); | 88 base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes); |
89 base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_); | 89 base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_); |
90 while (current > max) { | 90 while (current > max) { |
91 max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current); | 91 max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current); |
92 } | 92 } |
93 } | 93 } |
94 return reinterpret_cast<Segment*>(memory); | 94 return reinterpret_cast<Segment*>(memory); |
95 } | 95 } |
96 | 96 |
97 void AccountingAllocator::ReturnSegment(Segment* segment) { | 97 void AccountingAllocator::ReturnSegment(Segment* segment) { |
98 segment->ZapContents(); | 98 segment->ZapContents(); |
99 | 99 |
100 if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) { | 100 if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) { |
101 FreeSegment(segment); | 101 FreeSegment(segment); |
102 } else if (!AddSegmentToPool(segment)) { | 102 } else if (!AddSegmentToPool(segment)) { |
103 FreeSegment(segment); | 103 FreeSegment(segment); |
104 } | 104 } |
105 } | 105 } |
106 | 106 |
107 void AccountingAllocator::FreeSegment(Segment* memory) { | 107 void AccountingAllocator::FreeSegment(Segment* memory) { |
108 base::NoBarrier_AtomicIncrement( | 108 base::Relaxed_AtomicIncrement(&current_memory_usage_, |
109 &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size())); | 109 -static_cast<base::AtomicWord>(memory->size())); |
110 memory->ZapHeader(); | 110 memory->ZapHeader(); |
111 free(memory); | 111 free(memory); |
112 } | 112 } |
113 | 113 |
114 size_t AccountingAllocator::GetCurrentMemoryUsage() const { | 114 size_t AccountingAllocator::GetCurrentMemoryUsage() const { |
115 return base::NoBarrier_Load(&current_memory_usage_); | 115 return base::Relaxed_Load(&current_memory_usage_); |
116 } | 116 } |
117 | 117 |
118 size_t AccountingAllocator::GetMaxMemoryUsage() const { | 118 size_t AccountingAllocator::GetMaxMemoryUsage() const { |
119 return base::NoBarrier_Load(&max_memory_usage_); | 119 return base::Relaxed_Load(&max_memory_usage_); |
120 } | 120 } |
121 | 121 |
122 size_t AccountingAllocator::GetCurrentPoolSize() const { | 122 size_t AccountingAllocator::GetCurrentPoolSize() const { |
123 return base::NoBarrier_Load(&current_pool_size_); | 123 return base::Relaxed_Load(&current_pool_size_); |
124 } | 124 } |
125 | 125 |
126 Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) { | 126 Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) { |
127 if (requested_size > (1 << kMaxSegmentSizePower)) { | 127 if (requested_size > (1 << kMaxSegmentSizePower)) { |
128 return nullptr; | 128 return nullptr; |
129 } | 129 } |
130 | 130 |
131 size_t power = kMinSegmentSizePower; | 131 size_t power = kMinSegmentSizePower; |
132 while (requested_size > (static_cast<size_t>(1) << power)) power++; | 132 while (requested_size > (static_cast<size_t>(1) << power)) power++; |
133 | 133 |
134 DCHECK_GE(power, kMinSegmentSizePower + 0); | 134 DCHECK_GE(power, kMinSegmentSizePower + 0); |
135 power -= kMinSegmentSizePower; | 135 power -= kMinSegmentSizePower; |
136 | 136 |
137 Segment* segment; | 137 Segment* segment; |
138 { | 138 { |
139 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); | 139 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); |
140 | 140 |
141 segment = unused_segments_heads_[power]; | 141 segment = unused_segments_heads_[power]; |
142 | 142 |
143 if (segment != nullptr) { | 143 if (segment != nullptr) { |
144 unused_segments_heads_[power] = segment->next(); | 144 unused_segments_heads_[power] = segment->next(); |
145 segment->set_next(nullptr); | 145 segment->set_next(nullptr); |
146 | 146 |
147 unused_segments_sizes_[power]--; | 147 unused_segments_sizes_[power]--; |
148 base::NoBarrier_AtomicIncrement( | 148 base::Relaxed_AtomicIncrement( |
149 &current_pool_size_, -static_cast<base::AtomicWord>(segment->size())); | 149 &current_pool_size_, -static_cast<base::AtomicWord>(segment->size())); |
150 } | 150 } |
151 } | 151 } |
152 | 152 |
153 if (segment) { | 153 if (segment) { |
154 DCHECK_GE(segment->size(), requested_size); | 154 DCHECK_GE(segment->size(), requested_size); |
155 } | 155 } |
156 return segment; | 156 return segment; |
157 } | 157 } |
158 | 158 |
(...skipping 13 matching lines...)
172 | 172 |
173 { | 173 { |
174 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); | 174 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); |
175 | 175 |
176 if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) { | 176 if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) { |
177 return false; | 177 return false; |
178 } | 178 } |
179 | 179 |
180 segment->set_next(unused_segments_heads_[power]); | 180 segment->set_next(unused_segments_heads_[power]); |
181 unused_segments_heads_[power] = segment; | 181 unused_segments_heads_[power] = segment; |
182 base::NoBarrier_AtomicIncrement(&current_pool_size_, size); | 182 base::Relaxed_AtomicIncrement(&current_pool_size_, size); |
183 unused_segments_sizes_[power]++; | 183 unused_segments_sizes_[power]++; |
184 } | 184 } |
185 | 185 |
186 return true; | 186 return true; |
187 } | 187 } |
188 | 188 |
189 void AccountingAllocator::ClearPool() { | 189 void AccountingAllocator::ClearPool() { |
190 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); | 190 base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_); |
191 | 191 |
192 for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower; | 192 for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower; |
193 power++) { | 193 power++) { |
194 Segment* current = unused_segments_heads_[power]; | 194 Segment* current = unused_segments_heads_[power]; |
195 while (current) { | 195 while (current) { |
196 Segment* next = current->next(); | 196 Segment* next = current->next(); |
197 FreeSegment(current); | 197 FreeSegment(current); |
198 current = next; | 198 current = next; |
199 } | 199 } |
200 unused_segments_heads_[power] = nullptr; | 200 unused_segments_heads_[power] = nullptr; |
201 } | 201 } |
202 } | 202 } |
203 | 203 |
204 } // namespace internal | 204 } // namespace internal |
205 } // namespace v8 | 205 } // namespace v8 |
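
Context note, not part of the CL: the Relaxed_* names correspond to relaxed memory ordering, which is sufficient here because current_memory_usage_ and max_memory_usage_ are bookkeeping counters and do not guard access to the segment memory itself. A minimal standalone sketch of the same peak-tracking pattern from AllocateSegment, written against std::atomic rather than V8's base atomics, purely for illustration:

#include <atomic>
#include <cstddef>

std::atomic<size_t> current_usage{0};
std::atomic<size_t> max_usage{0};

// Mirrors AllocateSegment: bump the running total, then raise the recorded
// peak with a relaxed compare-and-swap loop.
void RecordAllocation(size_t bytes) {
  size_t current =
      current_usage.fetch_add(bytes, std::memory_order_relaxed) + bytes;
  size_t max = max_usage.load(std::memory_order_relaxed);
  while (current > max &&
         !max_usage.compare_exchange_weak(max, current,
                                          std::memory_order_relaxed)) {
    // compare_exchange_weak reloads `max` on failure; retry until the stored
    // peak is at least `current`.
  }
}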
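
For the pool paths above, GetSegmentFromPool and AddSegmentToPool index their free lists by the smallest power of two that fits the request, offset by kMinSegmentSizePower. A standalone sketch of that bucket computation, with placeholder constants rather than the values defined in V8's headers:

#include <cstddef>
#include <cstdint>

constexpr size_t kMinSegmentSizePower = 13;  // placeholder, not V8's value
constexpr size_t kMaxSegmentSizePower = 18;  // placeholder, not V8's value

// Returns the free-list bucket for a request, or SIZE_MAX when the request is
// too large to be pooled (the caller then falls back to plain malloc/free).
size_t PoolBucket(size_t requested_size) {
  if (requested_size > (static_cast<size_t>(1) << kMaxSegmentSizePower)) {
    return SIZE_MAX;
  }
  size_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;
  return power - kMinSegmentSizePower;  // bucket 0 holds the smallest segments
}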