OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/zone.h" | 5 #include "src/zone/zone.h" |
6 | 6 |
7 #include <cstring> | 7 #include <cstring> |
8 | 8 |
9 #include "src/v8.h" | 9 #include "src/v8.h" |
10 | 10 |
11 #ifdef V8_USE_ADDRESS_SANITIZER | 11 #ifdef V8_USE_ADDRESS_SANITIZER |
12 #include <sanitizer/asan_interface.h> | 12 #include <sanitizer/asan_interface.h> |
13 #endif // V8_USE_ADDRESS_SANITIZER | 13 #endif // V8_USE_ADDRESS_SANITIZER |
14 | 14 |
15 namespace v8 { | 15 namespace v8 { |
(...skipping 18 matching lines...) |
34 USE(start); \ | 34 USE(start); \ |
35 USE(size); \ | 35 USE(size); \ |
36 } while (false) | 36 } while (false) |
37 | 37 |
38 const size_t kASanRedzoneBytes = 0; | 38 const size_t kASanRedzoneBytes = 0; |
39 | 39 |
40 #endif // V8_USE_ADDRESS_SANITIZER | 40 #endif // V8_USE_ADDRESS_SANITIZER |
41 | 41 |
42 } // namespace | 42 } // namespace |
43 | 43 |
44 | 44 Zone::Zone(AccountingAllocator* allocator) |
45 // Segments represent chunks of memory: They have a starting address | |
46 // (encoded in the this pointer) and a size in bytes. Segments are | |
47 // chained together forming a LIFO structure with the newest segment | |
48 // available as segment_head_. Segments are allocated using malloc() | |
49 // and de-allocated using free(). | |
50 | |
51 class Segment { | |
52 public: | |
53 void Initialize(Segment* next, size_t size) { | |
54 next_ = next; | |
55 size_ = size; | |
56 } | |
57 | |
58 Segment* next() const { return next_; } | |
59 void clear_next() { next_ = nullptr; } | |
60 | |
61 size_t size() const { return size_; } | |
62 size_t capacity() const { return size_ - sizeof(Segment); } | |
63 | |
64 Address start() const { return address(sizeof(Segment)); } | |
65 Address end() const { return address(size_); } | |
66 | |
67 private: | |
68 // Computes the address of the nth byte in this segment. | |
69 Address address(size_t n) const { return Address(this) + n; } | |
70 | |
71 Segment* next_; | |
72 size_t size_; | |
73 }; | |
74 | |
75 Zone::Zone(base::AccountingAllocator* allocator) | |
76 : allocation_size_(0), | 45 : allocation_size_(0), |
77 segment_bytes_allocated_(0), | 46 segment_bytes_allocated_(0), |
78 position_(0), | 47 position_(0), |
79 limit_(0), | 48 limit_(0), |
80 allocator_(allocator), | 49 allocator_(allocator), |
81 segment_head_(nullptr) {} | 50 segment_head_(nullptr) {} |
82 | 51 |
83 Zone::~Zone() { | 52 Zone::~Zone() { |
84 DeleteAll(); | 53 DeleteAll(); |
85 DeleteKeptSegment(); | 54 DeleteKeptSegment(); |
86 | 55 |
87 DCHECK(segment_bytes_allocated_ == 0); | 56 DCHECK(segment_bytes_allocated_ == 0); |
88 } | 57 } |
89 | 58 |
90 | |
91 void* Zone::New(size_t size) { | 59 void* Zone::New(size_t size) { |
92 // Round up the requested size to fit the alignment. | 60 // Round up the requested size to fit the alignment. |
93 size = RoundUp(size, kAlignment); | 61 size = RoundUp(size, kAlignment); |
94 | 62 |
95 // If the allocation size is divisible by 8 then we return an 8-byte aligned | 63 // If the allocation size is divisible by 8 then we return an 8-byte aligned |
96 // address. | 64 // address. |
97 if (kPointerSize == 4 && kAlignment == 4) { | 65 if (kPointerSize == 4 && kAlignment == 4) { |
98 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); | 66 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); |
99 } else { | 67 } else { |
100 DCHECK(kAlignment >= kPointerSize); | 68 DCHECK(kAlignment >= kPointerSize); |
(...skipping 15 matching lines...) |
116 Address redzone_position = result + size; | 84 Address redzone_position = result + size; |
117 DCHECK(redzone_position + kASanRedzoneBytes == position_); | 85 DCHECK(redzone_position + kASanRedzoneBytes == position_); |
118 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); | 86 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); |
119 | 87 |
120 // Check that the result has the proper alignment and return it. | 88 // Check that the result has the proper alignment and return it. |
121 DCHECK(IsAddressAligned(result, kAlignment, 0)); | 89 DCHECK(IsAddressAligned(result, kAlignment, 0)); |
122 allocation_size_ += size; | 90 allocation_size_ += size; |
123 return reinterpret_cast<void*>(result); | 91 return reinterpret_cast<void*>(result); |
124 } | 92 } |
125 | 93 |
126 | |
127 void Zone::DeleteAll() { | 94 void Zone::DeleteAll() { |
128 #ifdef DEBUG | 95 #ifdef DEBUG |
129 // Constant byte value used for zapping dead memory in debug mode. | 96 // Constant byte value used for zapping dead memory in debug mode. |
130 static const unsigned char kZapDeadByte = 0xcd; | 97 static const unsigned char kZapDeadByte = 0xcd; |
131 #endif | 98 #endif |
132 | 99 |
133 // Find a segment with a suitable size to keep around. | 100 // Find a segment with a suitable size to keep around. |
134 Segment* keep = nullptr; | 101 Segment* keep = nullptr; |
135 // Traverse the chained list of segments, zapping (in debug mode) | 102 // Traverse the chained list of segments, zapping (in debug mode) |
136 // and freeing every segment except the one we wish to keep. | 103 // and freeing every segment except the one we wish to keep. |
137 for (Segment* current = segment_head_; current;) { | 104 for (Segment* current = segment_head_; current;) { |
138 Segment* next = current->next(); | 105 Segment* next = current->next(); |
139 if (!keep && current->size() <= kMaximumKeptSegmentSize) { | 106 if (!keep && current->size() <= kMaximumKeptSegmentSize) { |
140 // Unlink the segment we wish to keep from the list. | 107 // Unlink the segment we wish to keep from the list. |
141 keep = current; | 108 keep = current; |
142 keep->clear_next(); | 109 keep->set_next(nullptr); |
143 } else { | 110 } else { |
144 size_t size = current->size(); | 111 size_t size = current->size(); |
145 #ifdef DEBUG | 112 #ifdef DEBUG |
146 // Un-poison first so the zapping doesn't trigger ASan complaints. | 113 // Un-poison first so the zapping doesn't trigger ASan complaints. |
147 ASAN_UNPOISON_MEMORY_REGION(current, size); | 114 ASAN_UNPOISON_MEMORY_REGION(current, size); |
148 // Zap the entire current segment (including the header). | 115 // Zap the entire current segment (including the header). |
149 memset(current, kZapDeadByte, size); | 116 memset(current, kZapDeadByte, size); |
150 #endif | 117 #endif |
151 DeleteSegment(current, size); | 118 segment_bytes_allocated_ -= size; |
| 119 allocator_->FreeSegment(current); |
152 } | 120 } |
153 current = next; | 121 current = next; |
154 } | 122 } |
155 | 123 |
156 // If we have found a segment we want to keep, we must recompute the | 124 // If we have found a segment we want to keep, we must recompute the |
157 // variables 'position' and 'limit' to prepare for future allocate | 125 // variables 'position' and 'limit' to prepare for future allocate |
158 // attempts. Otherwise, we must clear the position and limit to | 126 // attempts. Otherwise, we must clear the position and limit to |
159 // force a new segment to be allocated on demand. | 127 // force a new segment to be allocated on demand. |
160 if (keep) { | 128 if (keep) { |
161 Address start = keep->start(); | 129 Address start = keep->start(); |
162 position_ = RoundUp(start, kAlignment); | 130 position_ = RoundUp(start, kAlignment); |
163 limit_ = keep->end(); | 131 limit_ = keep->end(); |
164 // Un-poison so we can re-use the segment later. | 132 // Un-poison so we can re-use the segment later. |
165 ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity()); | 133 ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity()); |
166 #ifdef DEBUG | 134 #ifdef DEBUG |
167 // Zap the contents of the kept segment (but not the header). | 135 // Zap the contents of the kept segment (but not the header). |
168 memset(start, kZapDeadByte, keep->capacity()); | 136 memset(start, kZapDeadByte, keep->capacity()); |
169 #endif | 137 #endif |
170 } else { | 138 } else { |
171 position_ = limit_ = 0; | 139 position_ = limit_ = 0; |
172 } | 140 } |
173 | 141 |
174 allocation_size_ = 0; | 142 allocation_size_ = 0; |
175 // Update the head segment to be the kept segment (if any). | 143 // Update the head segment to be the kept segment (if any). |
176 segment_head_ = keep; | 144 segment_head_ = keep; |
177 } | 145 } |
178 | 146 |
179 | |
180 void Zone::DeleteKeptSegment() { | 147 void Zone::DeleteKeptSegment() { |
181 #ifdef DEBUG | 148 #ifdef DEBUG |
182 // Constant byte value used for zapping dead memory in debug mode. | 149 // Constant byte value used for zapping dead memory in debug mode. |
183 static const unsigned char kZapDeadByte = 0xcd; | 150 static const unsigned char kZapDeadByte = 0xcd; |
184 #endif | 151 #endif |
185 | 152 |
186 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); | 153 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); |
187 if (segment_head_ != nullptr) { | 154 if (segment_head_ != nullptr) { |
188 size_t size = segment_head_->size(); | 155 size_t size = segment_head_->size(); |
189 #ifdef DEBUG | 156 #ifdef DEBUG |
190 // Un-poison first so the zapping doesn't trigger ASan complaints. | 157 // Un-poison first so the zapping doesn't trigger ASan complaints. |
191 ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); | 158 ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); |
192 // Zap the entire kept segment (including the header). | 159 // Zap the entire kept segment (including the header). |
193 memset(segment_head_, kZapDeadByte, size); | 160 memset(segment_head_, kZapDeadByte, size); |
194 #endif | 161 #endif |
195 DeleteSegment(segment_head_, size); | 162 segment_bytes_allocated_ -= size; |
| 163 allocator_->FreeSegment(segment_head_); |
196 segment_head_ = nullptr; | 164 segment_head_ = nullptr; |
197 } | 165 } |
198 | 166 |
199 DCHECK(segment_bytes_allocated_ == 0); | 167 DCHECK(segment_bytes_allocated_ == 0); |
200 } | 168 } |
201 | 169 |
202 | |
203 // Creates a new segment, sets its size, and pushes it to the front | 170 // Creates a new segment, sets its size, and pushes it to the front |
204 // of the segment chain. Returns the new segment. | 171 // of the segment chain. Returns the new segment. |
205 Segment* Zone::NewSegment(size_t size) { | 172 Segment* Zone::NewSegment(size_t size) { |
206 Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size)); | 173 Segment* result = allocator_->AllocateSegment(size); |
207 segment_bytes_allocated_ += size; | 174 segment_bytes_allocated_ += size; |
208 if (result != nullptr) { | 175 if (result != nullptr) { |
209 result->Initialize(segment_head_, size); | 176 result->Initialize(segment_head_, size, this); |
210 segment_head_ = result; | 177 segment_head_ = result; |
211 } | 178 } |
212 return result; | 179 return result; |
213 } | 180 } |
214 | 181 |
215 | |
216 // Deletes the given segment. Does not touch the segment chain. | |
217 void Zone::DeleteSegment(Segment* segment, size_t size) { | |
218 segment_bytes_allocated_ -= size; | |
219 allocator_->Free(segment, size); | |
220 } | |
221 | |
222 | |
223 Address Zone::NewExpand(size_t size) { | 182 Address Zone::NewExpand(size_t size) { |
224 // Make sure the requested size is already properly aligned and that | 183 // Make sure the requested size is already properly aligned and that |
225 // there isn't enough room in the Zone to satisfy the request. | 184 // there isn't enough room in the Zone to satisfy the request. |
226 DCHECK_EQ(size, RoundDown(size, kAlignment)); | 185 DCHECK_EQ(size, RoundDown(size, kAlignment)); |
227 DCHECK(limit_ < position_ || | 186 DCHECK(limit_ < position_ || |
228 reinterpret_cast<uintptr_t>(limit_) - | 187 reinterpret_cast<uintptr_t>(limit_) - |
229 reinterpret_cast<uintptr_t>(position_) < | 188 reinterpret_cast<uintptr_t>(position_) < |
230 size); | 189 size); |
231 | 190 |
232 // Compute the new segment size. We use a 'high water mark' | 191 // Compute the new segment size. We use a 'high water mark' |
(...skipping 38 matching lines...) |
271 // size bytes + header and alignment padding) | 230 // size bytes + header and alignment padding) |
272 DCHECK(reinterpret_cast<uintptr_t>(position_) >= | 231 DCHECK(reinterpret_cast<uintptr_t>(position_) >= |
273 reinterpret_cast<uintptr_t>(result)); | 232 reinterpret_cast<uintptr_t>(result)); |
274 limit_ = segment->end(); | 233 limit_ = segment->end(); |
275 DCHECK(position_ <= limit_); | 234 DCHECK(position_ <= limit_); |
276 return result; | 235 return result; |
277 } | 236 } |
278 | 237 |
279 } // namespace internal | 238 } // namespace internal |
280 } // namespace v8 | 239 } // namespace v8 |
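
Note: the diff above is a side-by-side review view of src/zone.cc; the left column is the old revision, the right column the new one, which switches the include to src/zone/zone.h, drops the file-local Segment class, and lets the AccountingAllocator allocate and free segments (AllocateSegment/FreeSegment) instead of the Zone wrapping raw Allocate/Free calls itself. The sketch below is a minimal, self-contained illustration of the bump-pointer-over-LIFO-segments idea that the removed Segment comment describes. It is not V8's implementation: the names SimpleZone, SimpleSegment, and kMinimumSegmentSize are made up for this sketch, and it omits V8's kept-segment reuse, high-water-mark growth heuristic, byte accounting, and ASan poisoning.

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Illustrative sketch only; not V8's API.
struct SimpleSegment {
  SimpleSegment* next;  // LIFO chain: the newest segment is the head.
  size_t size;          // Total bytes, including this header.
  char* start() { return reinterpret_cast<char*>(this) + sizeof(SimpleSegment); }
  char* end() { return reinterpret_cast<char*>(this) + size; }
};

class SimpleZone {
 public:
  static constexpr size_t kAlignment = 8;

  ~SimpleZone() {
    // Zone objects are never freed individually; the whole segment chain is
    // released when the zone dies.
    for (SimpleSegment* s = head_; s != nullptr;) {
      SimpleSegment* next = s->next;
      free(s);
      s = next;
    }
  }

  // Bump-pointer allocation: round the request up to the alignment, hand out
  // bytes from the current segment, and chain a new segment when it runs out.
  void* New(size_t size) {
    size = RoundUp(size, kAlignment);
    if (static_cast<size_t>(limit_ - position_) < size) {
      if (!Expand(size)) return nullptr;
    }
    char* result = position_;
    position_ += size;
    return result;
  }

 private:
  static size_t RoundUp(size_t n, size_t align) {
    return (n + align - 1) & ~(align - 1);
  }

  bool Expand(size_t size) {
    // Grow in fixed-size chunks; V8 instead sizes new segments from a
    // high-water-mark heuristic.
    static const size_t kMinimumSegmentSize = 8 * 1024;
    size_t segment_size = sizeof(SimpleSegment) + size + kAlignment;
    if (segment_size < kMinimumSegmentSize) segment_size = kMinimumSegmentSize;
    void* memory = malloc(segment_size);
    if (memory == nullptr) return false;
    SimpleSegment* segment = static_cast<SimpleSegment*>(memory);
    segment->next = head_;  // Push onto the front of the chain.
    segment->size = segment_size;
    head_ = segment;
    position_ = reinterpret_cast<char*>(
        RoundUp(reinterpret_cast<uintptr_t>(segment->start()), kAlignment));
    limit_ = segment->end();
    return true;
  }

  SimpleSegment* head_ = nullptr;
  char* position_ = nullptr;
  char* limit_ = nullptr;
};

A zone-allocated object is therefore reclaimed only when its whole zone is torn down, which is why Zone::New in the diff has no matching delete and why Zone::DeleteAll can simply walk the segment chain and release it wholesale (keeping at most one small segment for reuse).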