OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/zone/accounting-allocator.h" | 5 #include "src/zone/accounting-allocator.h" |
6 | 6 |
7 #include <cstdlib> | 7 #include <cstdlib> |
8 | 8 |
9 #if V8_LIBC_BIONIC | 9 #if V8_LIBC_BIONIC |
10 #include <malloc.h> // NOLINT | 10 #include <malloc.h> // NOLINT |
11 #endif | 11 #endif |
12 | 12 |
13 namespace v8 { | 13 namespace v8 { |
14 namespace internal { | 14 namespace internal { |
15 | 15 |
| 16 AccountingAllocator::AccountingAllocator() { |
| 17 memory_pressure_level_.SetValue(MemoryPressureLevel::kNone); |
| 18 std::fill(unused_segments_heads_, |
| 19 unused_segments_heads_ + |
| 20 (1 + kMaxSegmentSizePower - kMinSegmentSizePower), |
| 21 nullptr); |
| 22 std::fill(unused_segments_sizes_, |
| 23 unused_segments_sizes_ + |
| 24 (1 + kMaxSegmentSizePower - kMinSegmentSizePower), |
| 25 0); |
| 26 } |
| 27 |
| 28 AccountingAllocator::~AccountingAllocator() { |
| 29 ClearPool(); |
| 30 delete[] unused_segments_heads_; |
| 31 delete[] unused_segments_sizes_; |
| 32 delete unused_segments_mutex_; |
| 33 } |
| 34 |
| 35 void AccountingAllocator::MemoryPressureNotification( |
| 36 MemoryPressureLevel level) { |
| 37 memory_pressure_level_.SetValue(level); |
| 38 |
| 39 if (level != MemoryPressureLevel::kNone) { |
| 40 ClearPool(); |
| 41 } |
| 42 } |
| 43 |
| 44 Segment* AccountingAllocator::GetSegment(size_t bytes) { |
| 45 Segment* result = GetSegmentFromPool(bytes); |
| 46 if (result == nullptr) { |
| 47 result = AllocateSegment(bytes); |
| 48 } |
| 49 |
| 50 return result; |
| 51 } |
| 52 |
16 Segment* AccountingAllocator::AllocateSegment(size_t bytes) { | 53 Segment* AccountingAllocator::AllocateSegment(size_t bytes) { |
17 void* memory = malloc(bytes); | 54 void* memory = malloc(bytes); |
18 if (memory) { | 55 if (memory) { |
19 base::AtomicWord current = | 56 base::AtomicWord current = |
20 base::NoBarrier_AtomicIncrement(¤t_memory_usage_, bytes); | 57 base::NoBarrier_AtomicIncrement(¤t_memory_usage_, bytes); |
21 base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_); | 58 base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_); |
22 while (current > max) { | 59 while (current > max) { |
23 max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current); | 60 max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current); |
24 } | 61 } |
25 } | 62 } |
26 return reinterpret_cast<Segment*>(memory); | 63 return reinterpret_cast<Segment*>(memory); |
27 } | 64 } |
28 | 65 |
| 66 void AccountingAllocator::ReturnSegment(Segment* segment) { |
| 67 segment->ZapContents(); |
| 68 if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) { |
| 69 FreeSegment(segment); |
| 70 } else if (!AddSegmentToPool(segment)) { |
| 71 FreeSegment(segment); |
| 72 } |
| 73 } |
| 74 |
29 void AccountingAllocator::FreeSegment(Segment* memory) { | 75 void AccountingAllocator::FreeSegment(Segment* memory) { |
30 base::NoBarrier_AtomicIncrement( | 76 base::NoBarrier_AtomicIncrement( |
31 ¤t_memory_usage_, -static_cast<base::AtomicWord>(memory->size())); | 77 ¤t_memory_usage_, -static_cast<base::AtomicWord>(memory->size())); |
| 78 memory->ZapHeader(); |
32 free(memory); | 79 free(memory); |
33 } | 80 } |
34 | 81 |
35 size_t AccountingAllocator::GetCurrentMemoryUsage() const { | 82 size_t AccountingAllocator::GetCurrentMemoryUsage() const { |
36 return base::NoBarrier_Load(¤t_memory_usage_); | 83 return base::NoBarrier_Load(¤t_memory_usage_); |
37 } | 84 } |
38 | 85 |
39 size_t AccountingAllocator::GetMaxMemoryUsage() const { | 86 size_t AccountingAllocator::GetMaxMemoryUsage() const { |
40 return base::NoBarrier_Load(&max_memory_usage_); | 87 return base::NoBarrier_Load(&max_memory_usage_); |
41 } | 88 } |
42 | 89 |
| 90 Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) { |
| 91 if (requested_size > static_cast<size_t>(1 << kMaxSegmentSizePower)) { |
| 92 return nullptr; |
| 93 } |
| 94 |
| 95 uint8_t power = kMinSegmentSizePower; |
| 96 while (requested_size > static_cast<size_t>(1 << power)) power++; |
| 97 |
| 98 power -= kMinSegmentSizePower; |
| 99 |
| 100 DCHECK_GE(power, 0); |
| 101 |
| 102 Segment* segment; |
| 103 { |
| 104 base::LockGuard<base::Mutex> lock_guard(unused_segments_mutex_); |
| 105 |
| 106 segment = unused_segments_heads_[power]; |
| 107 |
| 108 if (segment) { |
| 109 unused_segments_heads_[power] = segment->next(); |
| 110 segment->set_next(nullptr); |
| 111 |
| 112 unused_segments_sizes_[power]--; |
| 113 unused_segments_size_ -= segment->size(); |
| 114 } |
| 115 } |
| 116 |
| 117 if (segment) { |
| 118 DCHECK_GE(segment->size(), requested_size); |
| 119 // Pool hit: the selected bucket only holds segments that are at |
| 120 // least as large as the requested size. |
| 121 } |
| 122 return segment; |
| 123 } |
| 124 |
| 125 bool AccountingAllocator::AddSegmentToPool(Segment* segment) { |
| 126 size_t size = segment->size(); |
| 127 |
| 128 if (size >= (1 << (kMaxSegmentSizePower + 1))) { |
| 129 return false; |
| 130 } |
| 131 |
| 132 if (size < (1 << kMinSegmentSizePower)) { |
| 133 return false; |
| 134 } |
| 135 |
| 136 uint8_t power = kMaxSegmentSizePower; |
| 137 |
| 138 while (size < static_cast<size_t>(1 << power)) power--; |
| 139 |
| 140 power -= kMinSegmentSizePower; |
| 141 |
| 142 DCHECK_GE(power, 0); |
| 143 |
| 144 { |
| 145 base::LockGuard<base::Mutex> lock_guard(unused_segments_mutex_); |
| 146 |
| 147 if (unused_segments_sizes_[power] >= kMaxSegmentsPerBucket) { |
| 148 return false; |
| 149 } |
| 150 |
| 151 segment->set_next(unused_segments_heads_[power]); |
| 152 unused_segments_heads_[power] = segment; |
| 153 unused_segments_size_ += size; |
| 154 unused_segments_sizes_[power]++; |
| 155 } |
| 156 |
| 157 // The segment now belongs to the pool; it will either be handed out |
| 158 // again by GetSegmentFromPool() or released by ClearPool(). |
| 159 |
| 160 return true; |
| 161 } |
| 162 |
| 163 void AccountingAllocator::ClearPool() { |
| 164 base::LockGuard<base::Mutex> lock_guard(unused_segments_mutex_); |
| 165 |
| 166 for (uint8_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower; |
| 167 power++) { |
| 168 Segment* current = unused_segments_heads_[power]; |
| 169 while (current) { |
| 170 Segment* next = current->next(); |
| 171 unused_segments_sizes_[power]--; |
| 172 unused_segments_size_ -= current->size(); |
| 173 FreeSegment(current); |
| 174 current = next; |
| 175 } |
| 176 unused_segments_heads_[power] = nullptr; |
| 177 } |
| 178 } |
| 179 |
43 } // namespace internal | 180 } // namespace internal |
44 } // namespace v8 | 181 } // namespace v8 |
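
The pool keyed by powers of two above works because GetSegmentFromPool() rounds the requested size up to the next power of two while AddSegmentToPool() rounds the segment size down, so every segment stored in a bucket is at least as large as any request served from that bucket (which is what DCHECK_GE(segment->size(), requested_size) relies on). Below is a minimal standalone sketch of that bucket arithmetic; the concrete values chosen for kMinSegmentSizePower and kMaxSegmentSizePower (13 and 18) are illustrative assumptions, since the real constants are declared in accounting-allocator.h and are not part of this diff.

// bucket_math_sketch.cc -- standalone illustration, not V8 code.
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed values; the actual constants live in accounting-allocator.h.
constexpr uint8_t kMinSegmentSizePower = 13;  // 8 KB
constexpr uint8_t kMaxSegmentSizePower = 18;  // 256 KB

// Round the requested size up: the smallest bucket whose segments are
// guaranteed to be large enough (mirrors GetSegmentFromPool()).
int LookupBucket(size_t requested_size) {
  if (requested_size > (static_cast<size_t>(1) << kMaxSegmentSizePower)) return -1;
  uint8_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;
  return power - kMinSegmentSizePower;
}

// Round the segment size down: the largest bucket whose nominal size the
// segment still covers (mirrors AddSegmentToPool()).
int InsertBucket(size_t segment_size) {
  if (segment_size >= (static_cast<size_t>(1) << (kMaxSegmentSizePower + 1))) return -1;
  if (segment_size < (static_cast<size_t>(1) << kMinSegmentSizePower)) return -1;
  uint8_t power = kMaxSegmentSizePower;
  while (segment_size < (static_cast<size_t>(1) << power)) power--;
  return power - kMinSegmentSizePower;
}

int main() {
  // A 12000-byte segment is inserted into the 8 KB bucket (index 0), while a
  // 9000-byte request is served from the 16 KB bucket (index 1), so any
  // pooled segment that is handed out covers the request.
  std::printf("insert(12000) -> %d, lookup(9000) -> %d\n",
              InsertBucket(12000), LookupBucket(9000));
  return 0;
}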
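
The max_memory_usage_ bookkeeping in AllocateSegment() is the usual lock-free pattern for advancing a running maximum: load the current maximum, and retry the compare-and-swap until either the stored maximum already covers the new value or the swap succeeds. A minimal sketch of the same idea follows, written against std::atomic rather than V8's base::NoBarrier_* wrappers, so it is an analogue of the code above rather than a drop-in replacement.

// max_watermark_sketch.cc -- standalone illustration using std::atomic.
#include <atomic>
#include <cstddef>
#include <cstdio>

std::atomic<size_t> current_usage{0};
std::atomic<size_t> max_usage{0};

// Account for an allocation of `bytes` and advance the high-water mark.
void RecordAllocation(size_t bytes) {
  size_t current =
      current_usage.fetch_add(bytes, std::memory_order_relaxed) + bytes;
  size_t observed_max = max_usage.load(std::memory_order_relaxed);
  // Retry until the stored maximum is >= current or our CAS wins; on failure,
  // compare_exchange_weak reloads observed_max with the freshest value.
  while (observed_max < current &&
         !max_usage.compare_exchange_weak(observed_max, current,
                                          std::memory_order_relaxed)) {
  }
}

int main() {
  RecordAllocation(4096);
  RecordAllocation(8192);
  std::printf("current=%zu max=%zu\n", current_usage.load(), max_usage.load());
  return 0;
}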