OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/zone.h" | 5 #include "src/zone.h" |
6 | 6 |
7 #include <cstring> | 7 #include <cstring> |
8 | 8 |
9 #include "src/v8.h" | 9 #include "src/v8.h" |
10 | 10 |
(...skipping 25 matching lines...) | |
36 } while (false) | 36 } while (false) |
37 | 37 |
38 const size_t kASanRedzoneBytes = 0; | 38 const size_t kASanRedzoneBytes = 0; |
39 | 39 |
40 #endif // V8_USE_ADDRESS_SANITIZER | 40 #endif // V8_USE_ADDRESS_SANITIZER |
41 | 41 |
42 } // namespace | 42 } // namespace |
43 | 43 |
44 | 44 |
45 // Segments represent chunks of memory: They have a starting address | 45 // Segments represent chunks of memory: They have a starting address |
46 // (encoded in the this pointer) and a size in bytes. Segments are | 46 // (encoded in the this pointer) and a VirtualMemory instance. Segments are |
47 // chained together forming a LIFO structure with the newest segment | 47 // chained together forming a LIFO structure with the newest segment |
48 // available as segment_head_. Segments are allocated using malloc() | 48 // available as segment_head_. Segments are allocated at aligned |
49 // and de-allocated using free(). | 49 // addresses via a VirtualMemory instance and released through it. |
50 | 50 |
51 class Segment { | 51 class Segment { |
52 public: | 52 public: |
53 void Initialize(Segment* next, size_t size) { | 53 void Initialize(Zone* zone, v8::base::VirtualMemory* virtual_memory) { |
54 next_ = next; | 54 DCHECK_EQ(reinterpret_cast<uintptr_t>(this) & Zone::kSegmentAlignmentMask, |
55 size_ = size; | 55 reinterpret_cast<uintptr_t>(this)); |
56 | |
57 next_ = nullptr; | |
58 zone_ = zone; | |
59 virtual_memory_.Reset(); | |
60 virtual_memory_.TakeControl(virtual_memory); | |
56 } | 61 } |
57 | 62 |
63 Zone* zone() const { return zone_; } | |
58 Segment* next() const { return next_; } | 64 Segment* next() const { return next_; } |
59 void clear_next() { next_ = nullptr; } | 65 void set_next(Segment* const value) { next_ = value; } |
60 | 66 |
61 size_t size() const { return size_; } | 67 size_t size() const { return virtual_memory_.size(); } |
62 size_t capacity() const { return size_ - sizeof(Segment); } | 68 |
69 size_t capacity() const { return size() - sizeof(Segment); } | |
63 | 70 |
64 Address start() const { return address(sizeof(Segment)); } | 71 Address start() const { return address(sizeof(Segment)); } |
65 Address end() const { return address(size_); } | 72 Address end() const { return address(size()); } |
73 | |
74 bool is_big_object_segment() const { | |
75 return size() > Zone::kMaximumSegmentSize; | |
76 } | |
77 | |
78 void Release() { | |
79 v8::base::VirtualMemory vm = v8::base::VirtualMemory(); | |
80 vm.TakeControl(&virtual_memory_); | |
81 | |
82 #ifdef DEBUG | |
83 // Un-poison first so the zapping doesn't trigger ASan complaints. | |
84 ASAN_UNPOISON_MEMORY_REGION(vm.address(), vm.size()); | |
jochen (gone - plz use gerrit)
2016/09/02 11:53:42
that should also be independent of DEBUG
heimbuef
2016/09/05 12:38:14
Done.
| |
85 // Zap the entire current segment (including the header). | |
86 memset(vm.address(), kZapDeadByte, vm.size()); | |
jochen (gone - plz use gerrit)
2016/09/02 11:53:42
nit. do this memsetting whenever ENABLE_HANDLE_ZAP
heimbuef
2016/09/05 12:38:14
Done.
| |
87 #endif | |
88 | |
89 vm.Release(); | |
90 } | |
91 | |
92 void Reset() { | |
93 // Un-poison so that neither the zapping nor the reuse triggers ASan | |
94 // complaints. | |
95 ASAN_UNPOISON_MEMORY_REGION(virtual_memory_.address(), | |
96 virtual_memory_.size()); | |
97 #ifdef DEBUG | |
98 // Zap the contents of the segment (but not the header). | |
99 memset(reinterpret_cast<void*>(start()), kZapDeadByte, capacity()); | |
100 #endif | |
101 next_ = nullptr; | |
102 } | |
66 | 103 |
67 private: | 104 private: |
105 #ifdef DEBUG | |
106 // Constant byte value used for zapping dead memory in debug mode. | |
107 static const unsigned char kZapDeadByte = 0xcd; | |
108 #endif | |
109 | |
68 // Computes the address of the nth byte in this segment. | 110 // Computes the address of the nth byte in this segment. |
69 Address address(size_t n) const { return Address(this) + n; } | 111 Address address(size_t n) const { return Address(this) + n; } |
70 | 112 |
113 Zone* zone_; | |
71 Segment* next_; | 114 Segment* next_; |
72 size_t size_; | 115 v8::base::VirtualMemory virtual_memory_; |
73 }; | 116 }; |
jochen (gone - plz use gerrit)
2016/09/02 11:53:42
DISALLOW_COPY_AND_ASSIGN(Segment)
heimbuef
2016/09/05 12:38:14
Done.
| |
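For reference, DISALLOW_COPY_AND_ASSIGN expands to deleted copy operations, roughly as sketched below (the authoritative definition is in src/base/macros.h). Copying a Segment would duplicate ownership of its VirtualMemory reservation, which is why the reviewer asks for it here.

```cpp
// Rough sketch of the macro being requested; see src/base/macros.h for the
// real definition used by V8.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&) = delete;      \
  void operator=(const TypeName&) = delete
```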
74 | 117 |
75 Zone::Zone(base::AccountingAllocator* allocator) | 118 Zone::Zone(base::AccountingAllocator* allocator) |
76 : allocation_size_(0), | 119 : allocation_size_(0), |
77 segment_bytes_allocated_(0), | 120 segment_bytes_allocated_(0), |
78 position_(0), | 121 position_(0), |
79 limit_(0), | 122 limit_(0), |
80 allocator_(allocator), | 123 allocator_(allocator), |
81 segment_head_(nullptr) {} | 124 segment_head_(nullptr) {} |
82 | 125 |
83 Zone::~Zone() { | 126 Zone::~Zone() { |
84 DeleteAll(); | 127 DeleteAll(); |
85 DeleteKeptSegment(); | 128 DeleteKeptSegment(); |
86 | 129 |
87 DCHECK(segment_bytes_allocated_ == 0); | 130 DCHECK(segment_bytes_allocated_ == 0); |
88 } | 131 } |
89 | 132 |
133 Segment* Zone::GetZoneSegmentFromPointer(const void* ptr) { | |
134 return reinterpret_cast<Segment*>(reinterpret_cast<uintptr_t>(ptr) & | |
135 kSegmentAlignmentMask); | |
136 } | |
137 | |
138 Zone* Zone::GetZoneFromPointer(const void* ptr) { | |
139 return GetZoneSegmentFromPointer(ptr)->zone(); | |
140 } | |
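GetZoneSegmentFromPointer() only works because every Segment starts at an address aligned to kSegmentAlignmentSize, so masking any interior pointer with kSegmentAlignmentMask recovers the segment header, which now carries a back pointer to its Zone. A minimal standalone sketch of the idea, using an assumed 4 KB alignment rather than the real constants from zone.h:

```cpp
#include <cstdint>
#include <cstdio>

// Toy constants for illustration only; the real kSegmentAlignmentSize and
// kSegmentAlignmentMask are declared in src/zone.h.
constexpr uintptr_t kToyAlignment = 4096;
constexpr uintptr_t kToyAlignmentMask = ~(kToyAlignment - 1);

struct ToySegmentHeader {
  void* zone;  // back pointer, like Segment::zone_
};

// Clearing the low bits of any interior pointer yields the segment header.
ToySegmentHeader* HeaderOf(const void* ptr) {
  return reinterpret_cast<ToySegmentHeader*>(
      reinterpret_cast<uintptr_t>(ptr) & kToyAlignmentMask);
}

int main() {
  alignas(4096) static unsigned char segment[4096];
  void* interior = segment + 128;  // some object allocated inside the segment
  std::printf("%d\n",
              HeaderOf(interior) ==
                  reinterpret_cast<ToySegmentHeader*>(segment));  // prints 1
  return 0;
}
```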
90 | 141 |
91 void* Zone::New(size_t size) { | 142 void* Zone::New(size_t size) { |
143 Address result = position_; | |
144 | |
145 // Corner case: zero-size allocation. | |
jochen (gone - plz use gerrit)
2016/09/02 11:53:42
does this ever happen? malloc(0) would just return
heimbuef
2016/09/05 12:38:14
Yes, quite frequently. I want to guarantee that th
| |
146 if (size == 0) { | |
147 // There has to be a normal segment to reference. | |
148 if (segment_head_ == nullptr || segment_head_->is_big_object_segment()) { | |
149 // Allocate a segment of minimal size. | |
150 result = NewNormalSegment(kAlignment); | |
151 } | |
152 | |
153 DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment()); | |
154 DCHECK_EQ(GetZoneFromPointer(result), this); | |
155 return reinterpret_cast<void*>(result); | |
156 } | |
157 | |
158 // Large objects are a special case and get their own segment to live in. | |
159 if (CalculateSegmentSize(size) > kMaximumSegmentSize) { | |
160 return reinterpret_cast<void*>(NewLargeObjectSegment(size)); | |
161 DCHECK(GetZoneSegmentFromPointer(result)->is_big_object_segment()); | |
jochen (gone - plz use gerrit)
2016/09/02 11:53:42
dead code ^^^
heimbuef
2016/09/05 12:38:14
Done.
| |
162 } | |
163 | |
92 // Round up the requested size to fit the alignment. | 164 // Round up the requested size to fit the alignment. |
93 size = RoundUp(size, kAlignment); | 165 size = RoundUp(size, kAlignment); |
94 | 166 |
95 // If the allocation size is divisible by 8 then we return an 8-byte aligned | 167 // If the allocation size is divisible by 8 then we return an 8-byte aligned |
96 // address. | 168 // address. |
97 if (kPointerSize == 4 && kAlignment == 4) { | 169 if (kPointerSize == 4 && kAlignment == 4) { |
98 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); | 170 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); |
99 } else { | 171 } else { |
100 DCHECK(kAlignment >= kPointerSize); | 172 DCHECK(kAlignment >= kPointerSize); |
101 } | 173 } |
102 | 174 |
103 // Check if the requested size is available without expanding. | 175 // Check if the requested size is available without expanding. |
104 Address result = position_; | |
105 | 176 |
106 const size_t size_with_redzone = size + kASanRedzoneBytes; | 177 const size_t size_with_redzone = size + kASanRedzoneBytes; |
107 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); | 178 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); |
108 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); | 179 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); |
109 // position_ > limit_ can be true after the alignment correction above. | 180 // position_ > limit_ can be true after the alignment correction above. |
110 if (limit < position || size_with_redzone > limit - position) { | 181 if (limit < position || size_with_redzone > limit - position) { |
111 result = NewExpand(size_with_redzone); | 182 result = NewNormalSegment(size_with_redzone); |
112 } else { | 183 } else { |
113 position_ += size_with_redzone; | 184 position_ += size_with_redzone; |
114 } | 185 } |
115 | 186 |
116 Address redzone_position = result + size; | 187 Address redzone_position = result + size; |
117 DCHECK(redzone_position + kASanRedzoneBytes == position_); | 188 DCHECK(redzone_position + kASanRedzoneBytes == position_); |
118 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); | 189 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); |
119 | 190 |
120 // Check that the result has the proper alignment and return it. | 191 // Check that the result has the proper alignment and return it. |
121 DCHECK(IsAddressAligned(result, kAlignment, 0)); | 192 DCHECK(IsAddressAligned(result, kAlignment, 0)); |
193 DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment()); | |
194 DCHECK_EQ(GetZoneFromPointer(result), this); | |
122 allocation_size_ += size; | 195 allocation_size_ += size; |
123 return reinterpret_cast<void*>(result); | 196 return reinterpret_cast<void*>(result); |
124 } | 197 } |
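For context, a hypothetical caller-side sketch (names are not from this CL) of how Zone::New() is typically used: callers bump-allocate raw memory and construct into it with placement new; nothing is freed individually, everything is released when the Zone is torn down.

```cpp
#include <new>  // placement new

// Hypothetical helper for illustration only.
struct ListNode {
  ListNode* next;
  int value;
};

ListNode* NewNode(Zone* zone, int value) {
  void* memory = zone->New(sizeof(ListNode));    // aligned bump allocation
  return new (memory) ListNode{nullptr, value};  // construct in place
}
```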
125 | 198 |
126 | 199 |
127 void Zone::DeleteAll() { | 200 void Zone::DeleteAll() { |
128 #ifdef DEBUG | |
129 // Constant byte value used for zapping dead memory in debug mode. | |
130 static const unsigned char kZapDeadByte = 0xcd; | |
131 #endif | |
132 | |
133 // Find a segment with a suitable size to keep around. | 201 // Find a segment with a suitable size to keep around. |
134 Segment* keep = nullptr; | 202 Segment* keep = nullptr; |
135 // Traverse the chained list of segments, zapping (in debug mode) | 203 // Traverse the chained list of segments, zapping (in debug mode) |
136 // and freeing every segment except the one we wish to keep. | 204 // and freeing every segment except the one we wish to keep. |
137 for (Segment* current = segment_head_; current;) { | 205 for (Segment* current = segment_head_; current;) { |
138 Segment* next = current->next(); | 206 Segment* next = current->next(); |
139 if (!keep && current->size() <= kMaximumKeptSegmentSize) { | 207 if (!keep && current->size() <= kMaximumKeptSegmentSize) { |
140 // Unlink the segment we wish to keep from the list. | 208 // Unlink the segment we wish to keep from the list. |
141 keep = current; | 209 keep = current; |
142 keep->clear_next(); | 210 keep->Reset(); |
143 } else { | 211 } else { |
144 size_t size = current->size(); | 212 segment_bytes_allocated_ -= current->size(); |
145 #ifdef DEBUG | 213 allocator_->ChangeCurrentMemoryUsage( |
146 // Un-poison first so the zapping doesn't trigger ASan complaints. | 214 -static_cast<int64_t>(current->size())); |
147 ASAN_UNPOISON_MEMORY_REGION(current, size); | 215 current->Release(); |
148 // Zap the entire current segment (including the header). | |
149 memset(current, kZapDeadByte, size); | |
150 #endif | |
151 DeleteSegment(current, size); | |
152 } | 216 } |
153 current = next; | 217 current = next; |
154 } | 218 } |
155 | 219 |
156 // If we have found a segment we want to keep, we must recompute the | 220 // If we have found a segment we want to keep, we must recompute the |
157 // variables 'position' and 'limit' to prepare for future allocate | 221 // variables 'position' and 'limit' to prepare for future allocate |
158 // attempts. Otherwise, we must clear the position and limit to | 222 // attempts. Otherwise, we must clear the position and limit to |
159 // force a new segment to be allocated on demand. | 223 // force a new segment to be allocated on demand. |
160 if (keep) { | 224 if (keep) { |
161 Address start = keep->start(); | 225 Address start = keep->start(); |
162 position_ = RoundUp(start, kAlignment); | 226 position_ = RoundUp(start, kAlignment); |
163 limit_ = keep->end(); | 227 limit_ = keep->end(); |
164 // Un-poison so we can re-use the segment later. | |
165 ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity()); | |
166 #ifdef DEBUG | |
167 // Zap the contents of the kept segment (but not the header). | |
168 memset(start, kZapDeadByte, keep->capacity()); | |
169 #endif | |
170 } else { | 228 } else { |
171 position_ = limit_ = 0; | 229 position_ = limit_ = 0; |
172 } | 230 } |
173 | 231 |
174 allocation_size_ = 0; | 232 allocation_size_ = 0; |
175 // Update the head segment to be the kept segment (if any). | 233 // Update the head segment to be the kept segment (if any). |
176 segment_head_ = keep; | 234 segment_head_ = keep; |
177 } | 235 } |
178 | 236 |
179 | 237 |
180 void Zone::DeleteKeptSegment() { | 238 void Zone::DeleteKeptSegment() { |
181 #ifdef DEBUG | |
182 // Constant byte value used for zapping dead memory in debug mode. | |
183 static const unsigned char kZapDeadByte = 0xcd; | |
184 #endif | |
185 | |
186 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); | 239 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); |
187 if (segment_head_ != nullptr) { | 240 if (segment_head_ != nullptr) { |
188 size_t size = segment_head_->size(); | 241 segment_bytes_allocated_ -= segment_head_->size(); |
189 #ifdef DEBUG | 242 allocator_->ChangeCurrentMemoryUsage( |
190 // Un-poison first so the zapping doesn't trigger ASan complaints. | 243 -static_cast<int64_t>(segment_head_->size())); |
191 ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); | 244 segment_head_->Release(); |
192 // Zap the entire kept segment (including the header). | |
193 memset(segment_head_, kZapDeadByte, size); | |
194 #endif | |
195 DeleteSegment(segment_head_, size); | |
196 segment_head_ = nullptr; | 245 segment_head_ = nullptr; |
197 } | 246 } |
198 | 247 |
199 DCHECK(segment_bytes_allocated_ == 0); | 248 DCHECK(segment_bytes_allocated_ == 0); |
200 } | 249 } |
201 | 250 |
202 | 251 |
203 // Creates a new segment, sets its size, and pushes it to the front | |
204 // of the segment chain. Returns the new segment. | |
205 Segment* Zone::NewSegment(size_t size) { | 252 Segment* Zone::NewSegment(size_t size) { |
206 Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size)); | 253 v8::base::VirtualMemory vm(size, kSegmentAlignmentSize); |
207 segment_bytes_allocated_ += size; | 254 |
208 if (result != nullptr) { | 255 if (vm.IsReserved()) { |
209 result->Initialize(segment_head_, size); | 256 DCHECK_EQ(reinterpret_cast<uintptr_t>(vm.address()) & kSegmentAlignmentMask, |
210 segment_head_ = result; | 257 reinterpret_cast<uintptr_t>(vm.address())); |
258 DCHECK_EQ(vm.size(), size); | |
259 v8::base::VirtualMemory::CommitRegion(vm.address(), vm.size(), false); | |
260 | |
261 Segment* result = reinterpret_cast<Segment*>(vm.address()); | |
262 | |
263 result->Initialize(this, &vm); | |
264 | |
265 segment_bytes_allocated_ += result->size(); | |
266 allocator_->ChangeCurrentMemoryUsage(result->size()); | |
267 | |
268 return result; | |
269 } else { | |
270 V8::FatalProcessOutOfMemory("Zone"); | |
271 return nullptr; | |
211 } | 272 } |
273 } | |
274 | |
275 Address Zone::NewLargeObjectSegment(size_t size) { | |
276 size_t new_size = CalculateSegmentSize(size); | |
277 Segment* segment = NewSegment(new_size); | |
278 | |
279 if (segment_head_ == nullptr) { | |
280 // Corner case in which a large object segment becomes the head | |
281 // of the segment list. | |
282 segment_head_ = segment; | |
283 } else { | |
284 // Large object segments are inserted second into the list. | |
285 segment->set_next(segment_head_->next()); | |
286 segment_head_->set_next(segment); | |
287 } | |
288 | |
289 Address result = RoundUp(segment->start(), kAlignment); | |
290 DCHECK_EQ(GetZoneFromPointer(segment), this); | |
291 DCHECK_EQ(GetZoneFromPointer(result), this); | |
212 return result; | 292 return result; |
213 } | 293 } |
214 | 294 |
215 | 295 Address Zone::NewNormalSegment(size_t size) { |
216 // Deletes the given segment. Does not touch the segment chain. | |
217 void Zone::DeleteSegment(Segment* segment, size_t size) { | |
218 segment_bytes_allocated_ -= size; | |
219 allocator_->Free(segment, size); | |
220 } | |
221 | |
222 | |
223 Address Zone::NewExpand(size_t size) { | |
224 // Make sure the requested size is already properly aligned and that | 296 // Make sure the requested size is already properly aligned and that |
225 // there isn't enough room in the Zone to satisfy the request. | 297 // there isn't enough room in the Zone to satisfy the request. |
226 DCHECK_EQ(size, RoundDown(size, kAlignment)); | 298 DCHECK_EQ(size, RoundDown(size, kAlignment)); |
227 DCHECK(limit_ < position_ || | 299 DCHECK(limit_ < position_ || |
228 reinterpret_cast<uintptr_t>(limit_) - | 300 reinterpret_cast<uintptr_t>(limit_) - |
229 reinterpret_cast<uintptr_t>(position_) < | 301 reinterpret_cast<uintptr_t>(position_) < |
230 size); | 302 size); |
231 | 303 |
232 // Compute the new segment size. We use a 'high water mark' | 304 // Only normal segments here |
233 // strategy, where we increase the segment size every time we expand | 305 DCHECK_LE(size, kMaximumSegmentSize + 0); |
234 // except that we employ a maximum segment size when we delete. This | 306 |
235 // is to avoid excessive malloc() and free() overhead. | 307 size_t new_size = CalculateSegmentSize(size); |
236 Segment* head = segment_head_; | 308 const size_t old_size = |
237 const size_t old_size = (head == nullptr) ? 0 : head->size(); | 309 (segment_head_ == nullptr) ? 0 : segment_head_->size(); |
238 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; | 310 new_size = Max(new_size, old_size << 1); |
239 const size_t new_size_no_overhead = size + (old_size << 1); | 311 new_size = Min(new_size, kMaximumSegmentSize); |
240 size_t new_size = kSegmentOverhead + new_size_no_overhead; | 312 |
241 const size_t min_new_size = kSegmentOverhead + size; | 313 // Rounding up must not push us past the maximum segment size. |
242 // Guard against integer overflow. | 314 DCHECK_LE(new_size, kMaximumSegmentSize + 0); |
243 if (new_size_no_overhead < size || new_size < kSegmentOverhead) { | 315 |
244 V8::FatalProcessOutOfMemory("Zone"); | |
245 return nullptr; | |
246 } | |
247 if (new_size < kMinimumSegmentSize) { | |
248 new_size = kMinimumSegmentSize; | |
249 } else if (new_size > kMaximumSegmentSize) { | |
250 // Limit the size of new segments to avoid growing the segment size | |
251 // exponentially, thus putting pressure on contiguous virtual address space. | |
252 // All the while making sure to allocate a segment large enough to hold the | |
253 // requested size. | |
254 new_size = Max(min_new_size, kMaximumSegmentSize); | |
255 } | |
256 if (new_size > INT_MAX) { | |
257 V8::FatalProcessOutOfMemory("Zone"); | |
258 return nullptr; | |
259 } | |
260 Segment* segment = NewSegment(new_size); | 316 Segment* segment = NewSegment(new_size); |
261 if (segment == nullptr) { | 317 |
262 V8::FatalProcessOutOfMemory("Zone"); | 318 // Put in front of the segment list |
263 return nullptr; | 319 segment->set_next(segment_head_); |
264 } | 320 segment_head_ = segment; |
321 | |
322 // Normal segments must not be bigger than the alignment size | |
323 DCHECK_LE(segment->size(), kSegmentAlignmentSize + 0); | |
jochen (gone - plz use gerrit)
2016/09/02 11:53:42
why + 0?
heimbuef
2016/09/05 12:38:14
The Macro will not compile with a constant, but wi
| |
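The truncated reply presumably refers to the usual static-const/ODR pitfall (an assumption, not something the thread confirms): DCHECK-style macros bind their operands to const references, which ODR-uses an in-class static const member and then requires an out-of-line definition at link time, while `kMaximumSegmentSize + 0` passes a temporary. A standalone sketch:

```cpp
#include <cstddef>

struct Limits {
  static const size_t kMax = 32 * 1024;  // declared in-class, never defined
};

// Stand-in for a DCHECK_LE-style helper that takes const references.
void CheckLE(const size_t& a, const size_t& b) { (void)(a <= b); }

void Caller(size_t size) {
  // CheckLE(size, Limits::kMax);    // ODR-uses kMax; may fail to link
  CheckLE(size, Limits::kMax + 0);   // passes a temporary; always links
}
```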
265 | 324 |
266 // Recompute 'top' and 'limit' based on the new segment. | 325 // Recompute 'top' and 'limit' based on the new segment. |
267 Address result = RoundUp(segment->start(), kAlignment); | 326 Address result = RoundUp(segment->start(), kAlignment); |
268 position_ = result + size; | 327 position_ = result + size; |
269 // Check for address overflow. | 328 // Check for address overflow. |
270 // (Should not happen since the segment is guaranteed to accommodate | 329 // (Should not happen since the segment is guaranteed to accommodate |
271 // size bytes + header and alignment padding) | 330 // size bytes + header and alignment padding) |
272 DCHECK(reinterpret_cast<uintptr_t>(position_) >= | 331 DCHECK_GE(reinterpret_cast<uintptr_t>(position_), |
273 reinterpret_cast<uintptr_t>(result)); | 332 reinterpret_cast<uintptr_t>(result)); |
333 DCHECK_EQ(GetZoneFromPointer(segment), this); | |
334 DCHECK_EQ(GetZoneFromPointer(result), this); | |
335 DCHECK_EQ(GetZoneFromPointer(segment->end() - 1), this); | |
274 limit_ = segment->end(); | 336 limit_ = segment->end(); |
275 DCHECK(position_ <= limit_); | 337 DCHECK(position_ <= limit_); |
276 return result; | 338 return result; |
277 } | 339 } |
278 | 340 |
341 size_t Zone::CalculateSegmentSize(const size_t requested) { | |
342 if (requested > INT_MAX) { | |
jochen (gone - plz use gerrit)
2016/09/02 11:53:42
why? isn't INT_MAX 2^31 - 1 on Windows, even on 64
heimbuef
2016/09/05 12:38:14
Done.
| |
343 V8::FatalProcessOutOfMemory("Zone"); | |
344 } | |
345 | |
346 return RoundUp(requested + sizeof(Segment) + kAlignment, kMinimumSegmentSize); | |
347 } | |
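As a worked example of the size calculation (the constants here are assumptions for illustration, not the values defined in zone.h): with an 8 KB minimum segment size, 8-byte alignment, and a 32-byte Segment header, a 10000-byte request becomes 10000 + 32 + 8 = 10040 bytes, which rounds up to 16384. NewNormalSegment() then takes the maximum of that and twice the previous segment's size, capped at kMaximumSegmentSize.

```cpp
#include <cstddef>
#include <cstdio>

// Assumed values for the example only; the real constants live in zone.h.
constexpr size_t kMinSeg = 8 * 1024;
constexpr size_t kHeader = 32;  // stand-in for sizeof(Segment)
constexpr size_t kAlign = 8;

constexpr size_t RoundUpTo(size_t x, size_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  size_t requested = 10000;
  size_t segment_size = RoundUpTo(requested + kHeader + kAlign, kMinSeg);
  std::printf("%zu\n", segment_size);  // 16384
  return 0;
}
```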
348 | |
279 } // namespace internal | 349 } // namespace internal |
280 } // namespace v8 | 350 } // namespace v8 |