Chromium Code Reviews

Unified Diff: src/zone.cc

Issue 2299753002: Made zone segments aligned in memory and included a pointer to the zone in the header. Larger objec…
Patch Set: Fix for Windows. Created 4 years, 3 months ago.
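The patch's central trick: every segment now begins at an address that is a multiple of the segment alignment, and the segment header stores a back-pointer to its owning Zone. Masking the low bits of any pointer handed out by the zone therefore recovers the segment header, and from it the zone (Zone::GetZoneFromPointer in the diff below). A minimal, self-contained sketch of the technique, using hypothetical names and an assumed 8 KiB alignment rather than V8's actual constants:

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Assumed alignment; V8 defines its own kSegmentAlignmentSize/Mask in zone.h.
constexpr uintptr_t kSegmentAlignment = 1u << 13;  // 8 KiB
constexpr uintptr_t kSegmentMask = ~(kSegmentAlignment - 1);

struct Zone;  // opaque here; only the pointer identity matters

struct SegmentHeader {
  Zone* zone;  // back-pointer placed at the start of every segment
};

// Clearing the low alignment bits of any interior pointer yields the
// aligned segment base, i.e. the header.
SegmentHeader* SegmentFromPointer(const void* ptr) {
  return reinterpret_cast<SegmentHeader*>(reinterpret_cast<uintptr_t>(ptr) &
                                          kSegmentMask);
}

Zone* ZoneFromPointer(const void* ptr) { return SegmentFromPointer(ptr)->zone; }

int main() {
  // aligned_alloc (C++17) guarantees the base is a multiple of the alignment.
  void* segment = std::aligned_alloc(kSegmentAlignment, kSegmentAlignment);
  assert(segment != nullptr);
  Zone* fake_zone = reinterpret_cast<Zone*>(0x1234);  // placeholder identity
  static_cast<SegmentHeader*>(segment)->zone = fake_zone;

  // Any address inside the segment maps back to the same header.
  char* object = static_cast<char*>(segment) + sizeof(SegmentHeader) + 100;
  assert(ZoneFromPointer(object) == fake_zone);

  std::free(segment);
  return 0;
}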
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/zone.h"
 
 #include <cstring>
 
 #include "src/v8.h"
 
(...skipping 25 matching lines...)
 } while (false)
 
 const size_t kASanRedzoneBytes = 0;
 
 #endif  // V8_USE_ADDRESS_SANITIZER
 
 }  // namespace
 
 
 // Segments represent chunks of memory: They have starting address
-// (encoded in the this pointer) and a size in bytes. Segments are
+// (encoded in the this pointer) and a VirtualMemory instance. Segments are
 // chained together forming a LIFO structure with the newest segment
-// available as segment_head_. Segments are allocated using malloc()
-// and de-allocated using free().
+// available as segment_head_. Segments are allocated aligned via the
+// VirtualMemory instance and released using it.
 
 class Segment {
  public:
-  void Initialize(Segment* next, size_t size) {
-    next_ = next;
+  void Initialize(Zone* zone, v8::base::VirtualMemory* virtual_memory,
+                  size_t size) {
+    DCHECK_EQ(reinterpret_cast<uintptr_t>(this) & Zone::kSegmentAlignmentMask,
+              reinterpret_cast<uintptr_t>(this));
+
+    next_ = nullptr;
+    zone_ = zone;
+    virtual_memory_.Reset();
+    virtual_memory_.TakeControl(virtual_memory);
     size_ = size;
   }
 
+  Zone* zone() const { return zone_; }
   Segment* next() const { return next_; }
-  void clear_next() { next_ = nullptr; }
+  void set_next(Segment* const value) { next_ = value; }
 
   size_t size() const { return size_; }
-  size_t capacity() const { return size_ - sizeof(Segment); }
+
+  size_t capacity() const { return size() - sizeof(Segment); }
 
   Address start() const { return address(sizeof(Segment)); }
-  Address end() const { return address(size_); }
+  Address end() const { return address(size()); }
+
+  bool is_big_object_segment() const {
+    return size() > Zone::kMaximumSegmentSize;
+  }
+
+  void Release() {
+#ifdef ENABLE_HANDLE_ZAPPING
+    // We are going to zap the memory the segment is stored in, so we
+    // need to save the virtual memory information to be able to release
+    // it.
+    v8::base::VirtualMemory vm = v8::base::VirtualMemory();
+    vm.TakeControl(&virtual_memory_);
+    // Un-poison first so the zapping doesn't trigger ASan complaints.
+    ASAN_UNPOISON_MEMORY_REGION(vm.address(), vm.size());
+    // Zap the entire current segment (including the header).
+    memset(vm.address(), kZapDeadByte, vm.size());
+
+    vm.Release();
+#else
+    virtual_memory_.Release();
+#endif
+  }
+
+  void Reset() {
+    // Un-poison so that neither the zapping nor the reuse triggers ASan
+    // complaints.
+    ASAN_UNPOISON_MEMORY_REGION(virtual_memory_.address(),
+                                virtual_memory_.size());
+#ifdef ENABLE_HANDLE_ZAPPING
+    // Zap the entire current segment (including the header).
+    memset(reinterpret_cast<void*>(start()), kZapDeadByte, capacity());
+#endif
+    next_ = nullptr;
+  }
 
  private:
+#ifdef ENABLE_HANDLE_ZAPPING
+  // Constant byte value used for zapping dead memory in debug mode.
+  static const unsigned char kZapDeadByte = 0xcd;
+#endif
+
   // Computes the address of the nth byte in this segment.
   Address address(size_t n) const { return Address(this) + n; }
 
+  Zone* zone_;
   Segment* next_;
+  v8::base::VirtualMemory virtual_memory_;
+
+  // The beginning of the aligned memory of the segment.

    Jakob Kummerow 2016/09/05 16:04:04: This comment does not match the field name.
    heimbuef 2016/09/07 11:54:22: Done.

   size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(Segment);
 };
 
 Zone::Zone(base::AccountingAllocator* allocator)
     : allocation_size_(0),
       segment_bytes_allocated_(0),
       position_(0),
       limit_(0),
       allocator_(allocator),
       segment_head_(nullptr) {}
 
 Zone::~Zone() {
   DeleteAll();
   DeleteKeptSegment();
 
   DCHECK(segment_bytes_allocated_ == 0);
 }
 
+Segment* Zone::GetZoneSegmentFromPointer(const void* ptr) {
+  return reinterpret_cast<Segment*>(reinterpret_cast<uintptr_t>(ptr) &
+                                    kSegmentAlignmentMask);
+}
+
+Zone* Zone::GetZoneFromPointer(const void* ptr) {
+  return GetZoneSegmentFromPointer(ptr)->zone();
+}
 
 void* Zone::New(size_t size) {
   // Round up the requested size to fit the alignment.
   size = RoundUp(size, kAlignment);
 
   // If the allocation size is divisible by 8 then we return an 8-byte aligned
   // address.
   if (kPointerSize == 4 && kAlignment == 4) {
     position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
   } else {
     DCHECK(kAlignment >= kPointerSize);
   }
 
   // Check if the requested size is available without expanding.
   Address result = position_;
 
+  // In case the requested size is zero, we still want to return a pointer
+  // to a valid segment, so the zone is obtainable from it.
+  if (size == 0) {
+    // There has to be a normal segment to reference.
+    if (segment_head_ == nullptr || segment_head_->is_big_object_segment()) {
+      // We create a segment of minimal size.
+      result = NewNormalSegment(kAlignment);
+    }
+
+    DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment());
+    DCHECK_EQ(GetZoneFromPointer(result), this);
+    return reinterpret_cast<void*>(result);
+  }
+
+  // Large objects are a special case and get their own segment to live in.
+  if (CalculateSegmentSize(size) > kMaximumSegmentSize) {
+    result = NewLargeObjectSegment(size);
+    DCHECK(GetZoneSegmentFromPointer(result)->is_big_object_segment());
+    return reinterpret_cast<void*>(result);
+  }
+
   const size_t size_with_redzone = size + kASanRedzoneBytes;
   const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_);
   const uintptr_t position = reinterpret_cast<uintptr_t>(position_);
   // position_ > limit_ can be true after the alignment correction above.
   if (limit < position || size_with_redzone > limit - position) {
-    result = NewExpand(size_with_redzone);
+    result = NewNormalSegment(size_with_redzone);
   } else {
     position_ += size_with_redzone;
   }
 
   Address redzone_position = result + size;
-  DCHECK(redzone_position + kASanRedzoneBytes == position_);
+  DCHECK_EQ(redzone_position + kASanRedzoneBytes, position_);
   ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
 
   // Check that the result has the proper alignment and return it.
   DCHECK(IsAddressAligned(result, kAlignment, 0));
+  DCHECK(!GetZoneSegmentFromPointer(result)->is_big_object_segment());
+  DCHECK_EQ(GetZoneFromPointer(result), this);
   allocation_size_ += size;
   return reinterpret_cast<void*>(result);
 }
 
 
 void Zone::DeleteAll() {
-#ifdef DEBUG
-  // Constant byte value used for zapping dead memory in debug mode.
-  static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
   // Find a segment with a suitable size to keep around.
   Segment* keep = nullptr;
   // Traverse the chained list of segments, zapping (in debug mode)
   // and freeing every segment except the one we wish to keep.
   for (Segment* current = segment_head_; current;) {
     Segment* next = current->next();
     if (!keep && current->size() <= kMaximumKeptSegmentSize) {
       // Unlink the segment we wish to keep from the list.
       keep = current;
-      keep->clear_next();
+      keep->Reset();
     } else {
-      size_t size = current->size();
-#ifdef DEBUG
-      // Un-poison first so the zapping doesn't trigger ASan complaints.
-      ASAN_UNPOISON_MEMORY_REGION(current, size);
-      // Zap the entire current segment (including the header).
-      memset(current, kZapDeadByte, size);
-#endif
-      DeleteSegment(current, size);
+      segment_bytes_allocated_ -= current->size();
+      allocator_->ChangeCurrentMemoryUsage(
+          -static_cast<int64_t>(current->size()));
+      current->Release();
     }
     current = next;
   }
 
   // If we have found a segment we want to keep, we must recompute the
   // variables 'position' and 'limit' to prepare for future allocate
   // attempts. Otherwise, we must clear the position and limit to
   // force a new segment to be allocated on demand.
   if (keep) {
     Address start = keep->start();
     position_ = RoundUp(start, kAlignment);
     limit_ = keep->end();
-    // Un-poison so we can re-use the segment later.
-    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
-#ifdef DEBUG
-    // Zap the contents of the kept segment (but not the header).
-    memset(start, kZapDeadByte, keep->capacity());
-#endif
   } else {
     position_ = limit_ = 0;
   }
 
   allocation_size_ = 0;
   // Update the head segment to be the kept segment (if any).
   segment_head_ = keep;
 }
 
 
 void Zone::DeleteKeptSegment() {
-#ifdef DEBUG
-  // Constant byte value used for zapping dead memory in debug mode.
-  static const unsigned char kZapDeadByte = 0xcd;
-#endif
-
   DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
   if (segment_head_ != nullptr) {
-    size_t size = segment_head_->size();
-#ifdef DEBUG
-    // Un-poison first so the zapping doesn't trigger ASan complaints.
-    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
-    // Zap the entire kept segment (including the header).
-    memset(segment_head_, kZapDeadByte, size);
-#endif
-    DeleteSegment(segment_head_, size);
+    segment_bytes_allocated_ -= segment_head_->size();
+    allocator_->ChangeCurrentMemoryUsage(
+        -static_cast<int64_t>(segment_head_->size()));
+    segment_head_->Release();
     segment_head_ = nullptr;
   }
 
   DCHECK(segment_bytes_allocated_ == 0);
 }
 
 
-// Creates a new segment, sets it size, and pushes it to the front
-// of the segment chain. Returns the new segment.
 Segment* Zone::NewSegment(size_t size) {
-  Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size));
-  segment_bytes_allocated_ += size;
-  if (result != nullptr) {
-    result->Initialize(segment_head_, size);
-    segment_head_ = result;
+  v8::base::VirtualMemory vm(size, kSegmentAlignmentSize);
+
+  if (!vm.IsReserved()) {
+    V8::FatalProcessOutOfMemory("Zone");
+    return nullptr;
   }
+
+  auto base = reinterpret_cast<uintptr_t>(vm.address()) & kSegmentAlignmentMask;

    Jakob Kummerow 2016/09/05 16:04:04: don't use auto
    heimbuef 2016/09/07 11:54:22: Done.

+
+  // On Windows, the address can actually be off.

    Jakob Kummerow 2016/09/05 16:04:04: it's not exactly *off*... // On Windows, VirtualM
    heimbuef 2016/09/07 11:54:22: Done.

+  if (base != reinterpret_cast<uintptr_t>(vm.address())) {
+    // Address is not aligned.
+    base += kSegmentAlignmentSize;
+  }
+
+  // The address of the end of the virtual memory.
+  auto end = reinterpret_cast<uintptr_t>(vm.address()) + vm.size();

    Jakob Kummerow 2016/09/05 16:04:04: don't use auto
    heimbuef 2016/09/07 11:54:22: Done.

+
+  // Check whether the virtual memory is big enough to fit our aligned chunk.
+  DCHECK_LE(base + size, end);
+
+  // In case the virtual memory is too big, we want to use as much of it as
+  // possible. In normal segments, the segment alignment size is the upper
+  // limit.
+  if (size <= kSegmentAlignmentSize) {
+    size = Min(end - base, kSegmentAlignmentSize);
+  }
+
+  if (!v8::base::VirtualMemory::CommitRegion(reinterpret_cast<void*>(base),
+                                             size, false)) {
+    V8::FatalProcessOutOfMemory("Zone");
+    return nullptr;
+  }
+
+  Segment* result = reinterpret_cast<Segment*>(base);
+
+  result->Initialize(this, &vm, size);
+
+  segment_bytes_allocated_ += result->size();
+  allocator_->ChangeCurrentMemoryUsage(result->size());
+
   return result;
 }
 
-
-// Deletes the given segment. Does not touch the segment chain.
-void Zone::DeleteSegment(Segment* segment, size_t size) {
-  segment_bytes_allocated_ -= size;
-  allocator_->Free(segment, size);
+Address Zone::NewLargeObjectSegment(size_t size) {
+  size_t new_size = CalculateSegmentSize(size);
+  Segment* segment = NewSegment(new_size);
+
+  if (segment_head_ == nullptr) {
+    // This is the only case in which a large object segment becomes head of
+    // the segment list.
+    segment_head_ = segment;
+  } else {
+    // Large object segments should be inserted second into the list when
+    // possible.
+    segment->set_next(segment_head_->next());
+    segment_head_->set_next(segment);
+  }
+
+  Address result = RoundUp(segment->start(), kAlignment);
+  DCHECK_EQ(GetZoneFromPointer(segment), this);
+  DCHECK_EQ(GetZoneFromPointer(result), this);
+  return result;
 }
 
-
-Address Zone::NewExpand(size_t size) {
+Address Zone::NewNormalSegment(size_t size) {
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
   DCHECK_EQ(size, RoundDown(size, kAlignment));
   DCHECK(limit_ < position_ ||
          reinterpret_cast<uintptr_t>(limit_) -
                  reinterpret_cast<uintptr_t>(position_) <
              size);
 
-  // Compute the new segment size. We use a 'high water mark'
-  // strategy, where we increase the segment size every time we expand
-  // except that we employ a maximum segment size when we delete. This
-  // is to avoid excessive malloc() and free() overhead.
-  Segment* head = segment_head_;
-  const size_t old_size = (head == nullptr) ? 0 : head->size();
-  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
-  const size_t new_size_no_overhead = size + (old_size << 1);
-  size_t new_size = kSegmentOverhead + new_size_no_overhead;
-  const size_t min_new_size = kSegmentOverhead + size;
-  // Guard against integer overflow.
-  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
-    V8::FatalProcessOutOfMemory("Zone");
-    return nullptr;
-  }
-  if (new_size < kMinimumSegmentSize) {
-    new_size = kMinimumSegmentSize;
-  } else if (new_size > kMaximumSegmentSize) {
-    // Limit the size of new segments to avoid growing the segment size
-    // exponentially, thus putting pressure on contiguous virtual address space.
-    // All the while making sure to allocate a segment large enough to hold the
-    // requested size.
-    new_size = Max(min_new_size, kMaximumSegmentSize);
-  }
-  if (new_size > INT_MAX) {
-    V8::FatalProcessOutOfMemory("Zone");
-    return nullptr;
-  }
+  DCHECK_LE(size, kMaximumSegmentSize + 0);
+
+  size_t new_size = CalculateSegmentSize(size);
+  const size_t old_size =
+      (segment_head_ == nullptr) ? 0 : segment_head_->size();
+  new_size = Max(new_size, old_size << 1);
+  new_size = Min(new_size, kMaximumSegmentSize);
+
+  DCHECK_LE(new_size, kMaximumSegmentSize + 0);
+
   Segment* segment = NewSegment(new_size);
-  if (segment == nullptr) {
-    V8::FatalProcessOutOfMemory("Zone");
-    return nullptr;
-  }
+
+  // Put segment in front of the segment list.
+  segment->set_next(segment_head_);
+  segment_head_ = segment;
+
+  // Normal segments must not be bigger than the alignment size.
+  DCHECK_LE(segment->size(), kSegmentAlignmentSize + 0);
 
   // Recompute 'top' and 'limit' based on the new segment.
   Address result = RoundUp(segment->start(), kAlignment);
   position_ = result + size;
   // Check for address overflow.
   // (Should not happen since the segment is guaranteed to accommodate
   // size bytes + header and alignment padding.)
-  DCHECK(reinterpret_cast<uintptr_t>(position_) >=
-         reinterpret_cast<uintptr_t>(result));
+  DCHECK_GE(reinterpret_cast<uintptr_t>(position_),
+            reinterpret_cast<uintptr_t>(result));
+  DCHECK_EQ(GetZoneFromPointer(segment), this);
+  DCHECK_EQ(GetZoneFromPointer(result), this);
+  DCHECK_EQ(GetZoneFromPointer(segment->end() - 1), this);
   limit_ = segment->end();
   DCHECK(position_ <= limit_);
   return result;
 }
 
+size_t Zone::CalculateSegmentSize(const size_t requested) {
+  if (UINTPTR_MAX - (sizeof(Segment) + kAlignment) < requested) {
+    V8::FatalProcessOutOfMemory("Zone");
+  }
+
+  return RoundUp(requested + sizeof(Segment) + kAlignment, kMinimumSegmentSize);
+}
+
 }  // namespace internal
 }  // namespace v8
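NewSegment above reserves memory aligned to kSegmentAlignmentSize and, when the returned base does not land on an alignment boundary (which, as the review thread notes, can happen on Windows), bumps the base up to the next boundary inside the over-sized reservation. The round-up arithmetic can be checked in isolation; a sketch assuming an 8 KiB alignment rather than V8's actual constant:

#include <cassert>
#include <cstdint>

// Assumed value for illustration; V8's constant lives in src/zone.h.
constexpr uintptr_t kSegmentAlignmentSize = 1u << 13;
constexpr uintptr_t kSegmentAlignmentMask = ~(kSegmentAlignmentSize - 1);

// Returns the first alignment boundary at or above reserved_base,
// mirroring the fixup in Zone::NewSegment.
uintptr_t AlignBase(uintptr_t reserved_base) {
  uintptr_t base = reserved_base & kSegmentAlignmentMask;
  if (base != reserved_base) {
    // The reservation did not start on a boundary; skip to the next one.
    base += kSegmentAlignmentSize;
  }
  return base;
}

int main() {
  assert(AlignBase(0x40000000) == 0x40000000);  // already aligned: unchanged
  assert(AlignBase(0x40000001) == 0x40002000);  // rounded up to next boundary
  return 0;
}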
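The growth policy that replaces the old 'high water mark' strategy is also easy to see standalone: CalculateSegmentSize rounds the request plus header and alignment overhead up to a multiple of the minimum segment size, and NewNormalSegment then doubles the previous segment's size but clamps the result at kMaximumSegmentSize. A sketch with assumed constants (V8's real values are declared in src/zone.h; kOverhead stands in for sizeof(Segment) + kAlignment):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Assumed values for illustration; not V8's actual constants.
constexpr size_t kMinimumSegmentSize = 8 * 1024;
constexpr size_t kMaximumSegmentSize = 1024 * 1024;
constexpr size_t kOverhead = 64;  // stand-in for sizeof(Segment) + kAlignment

// Round the request plus bookkeeping overhead up to the minimum segment size.
size_t CalculateSegmentSize(size_t requested) {
  size_t with_overhead = requested + kOverhead;
  return ((with_overhead + kMinimumSegmentSize - 1) / kMinimumSegmentSize) *
         kMinimumSegmentSize;
}

int main() {
  size_t old_size = 0;
  // Simulate ten successive normal-segment allocations for 1 KiB requests:
  // sizes double each time until the cap is reached.
  for (int i = 0; i < 10; i++) {
    size_t new_size = CalculateSegmentSize(1024);
    new_size = std::max(new_size, old_size << 1);        // grow geometrically
    new_size = std::min(new_size, kMaximumSegmentSize);  // but cap the growth
    std::printf("segment %d: %zu bytes\n", i, new_size);
    old_size = new_size;
  }
  return 0;
}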