Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(151)

Side by Side Diff: src/zone/zone.cc

Issue 2692473002: Merged: Ensure we align zone memory at 8 byte boundaries on all platforms (Closed)
Patch Set: Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/zone/zone.h ('k') | test/unittests/BUILD.gn » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/zone/zone.h" 5 #include "src/zone/zone.h"
6 6
7 #include <cstring> 7 #include <cstring>
8 8
9 #include "src/v8.h" 9 #include "src/v8.h"
10 10
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
55 Zone::~Zone() { 55 Zone::~Zone() {
56 allocator_->ZoneDestruction(this); 56 allocator_->ZoneDestruction(this);
57 57
58 DeleteAll(); 58 DeleteAll();
59 59
60 DCHECK(segment_bytes_allocated_ == 0); 60 DCHECK(segment_bytes_allocated_ == 0);
61 } 61 }
62 62
63 void* Zone::New(size_t size) { 63 void* Zone::New(size_t size) {
64 // Round up the requested size to fit the alignment. 64 // Round up the requested size to fit the alignment.
65 size = RoundUp(size, kAlignment); 65 size = RoundUp(size, kAlignmentInBytes);
66
67 // If the allocation size is divisible by 8 then we return an 8-byte aligned
68 // address.
69 if (kPointerSize == 4 && kAlignment == 4) {
70 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
71 } else {
72 DCHECK(kAlignment >= kPointerSize);
73 }
74 66
75 // Check if the requested size is available without expanding. 67 // Check if the requested size is available without expanding.
76 Address result = position_; 68 Address result = position_;
77 69
78 const size_t size_with_redzone = size + kASanRedzoneBytes; 70 const size_t size_with_redzone = size + kASanRedzoneBytes;
79 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); 71 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_);
80 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); 72 const uintptr_t position = reinterpret_cast<uintptr_t>(position_);
81 // position_ > limit_ can be true after the alignment correction above. 73 // position_ > limit_ can be true after the alignment correction above.
82 if (limit < position || size_with_redzone > limit - position) { 74 if (limit < position || size_with_redzone > limit - position) {
83 result = NewExpand(size_with_redzone); 75 result = NewExpand(size_with_redzone);
84 } else { 76 } else {
85 position_ += size_with_redzone; 77 position_ += size_with_redzone;
86 } 78 }
87 79
88 Address redzone_position = result + size; 80 Address redzone_position = result + size;
89 DCHECK(redzone_position + kASanRedzoneBytes == position_); 81 DCHECK(redzone_position + kASanRedzoneBytes == position_);
90 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); 82 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
91 83
92 // Check that the result has the proper alignment and return it. 84 // Check that the result has the proper alignment and return it.
93 DCHECK(IsAddressAligned(result, kAlignment, 0)); 85 DCHECK(IsAddressAligned(result, kAlignmentInBytes, 0));
94 allocation_size_ += size; 86 allocation_size_ += size;
95 return reinterpret_cast<void*>(result); 87 return reinterpret_cast<void*>(result);
96 } 88 }
97 89
98 void Zone::DeleteAll() { 90 void Zone::DeleteAll() {
99 // Traverse the chained list of segments and return them all to the allocator. 91 // Traverse the chained list of segments and return them all to the allocator.
100 for (Segment* current = segment_head_; current;) { 92 for (Segment* current = segment_head_; current;) {
101 Segment* next = current->next(); 93 Segment* next = current->next();
102 size_t size = current->size(); 94 size_t size = current->size();
103 95
(...skipping 20 matching lines...) Expand all
124 result->set_zone(this); 116 result->set_zone(this);
125 result->set_next(segment_head_); 117 result->set_next(segment_head_);
126 segment_head_ = result; 118 segment_head_ = result;
127 } 119 }
128 return result; 120 return result;
129 } 121 }
130 122
131 Address Zone::NewExpand(size_t size) { 123 Address Zone::NewExpand(size_t size) {
132 // Make sure the requested size is already properly aligned and that 124 // Make sure the requested size is already properly aligned and that
133 // there isn't enough room in the Zone to satisfy the request. 125 // there isn't enough room in the Zone to satisfy the request.
134 DCHECK_EQ(size, RoundDown(size, kAlignment)); 126 DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
135 DCHECK(limit_ < position_ || 127 DCHECK(limit_ < position_ ||
136 reinterpret_cast<uintptr_t>(limit_) - 128 reinterpret_cast<uintptr_t>(limit_) -
137 reinterpret_cast<uintptr_t>(position_) < 129 reinterpret_cast<uintptr_t>(position_) <
138 size); 130 size);
139 131
140 // Compute the new segment size. We use a 'high water mark' 132 // Compute the new segment size. We use a 'high water mark'
141 // strategy, where we increase the segment size every time we expand 133 // strategy, where we increase the segment size every time we expand
142 // except that we employ a maximum segment size when we delete. This 134 // except that we employ a maximum segment size when we delete. This
143 // is to avoid excessive malloc() and free() overhead. 135 // is to avoid excessive malloc() and free() overhead.
144 Segment* head = segment_head_; 136 Segment* head = segment_head_;
145 const size_t old_size = (head == nullptr) ? 0 : head->size(); 137 const size_t old_size = (head == nullptr) ? 0 : head->size();
146 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; 138 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
147 const size_t new_size_no_overhead = size + (old_size << 1); 139 const size_t new_size_no_overhead = size + (old_size << 1);
148 size_t new_size = kSegmentOverhead + new_size_no_overhead; 140 size_t new_size = kSegmentOverhead + new_size_no_overhead;
149 const size_t min_new_size = kSegmentOverhead + size; 141 const size_t min_new_size = kSegmentOverhead + size;
150 // Guard against integer overflow. 142 // Guard against integer overflow.
151 if (new_size_no_overhead < size || new_size < kSegmentOverhead) { 143 if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
152 V8::FatalProcessOutOfMemory("Zone"); 144 V8::FatalProcessOutOfMemory("Zone");
153 return nullptr; 145 return nullptr;
154 } 146 }
155 if (new_size < kMinimumSegmentSize) { 147 if (new_size < kMinimumSegmentSize) {
156 new_size = kMinimumSegmentSize; 148 new_size = kMinimumSegmentSize;
157 } else if (new_size > kMaximumSegmentSize) { 149 } else if (new_size > kMaximumSegmentSize) {
158 // Limit the size of new segments to avoid growing the segment size 150 // Limit the size of new segments to avoid growing the segment size
159 // exponentially, thus putting pressure on contiguous virtual address space. 151 // exponentially, thus putting pressure on contiguous virtual address space.
160 // All the while making sure to allocate a segment large enough to hold the 152 // All the while making sure to allocate a segment large enough to hold the
161 // requested size. 153 // requested size.
162 new_size = Max(min_new_size, kMaximumSegmentSize); 154 new_size = Max(min_new_size, kMaximumSegmentSize);
163 } 155 }
164 if (new_size > INT_MAX) { 156 if (new_size > INT_MAX) {
165 V8::FatalProcessOutOfMemory("Zone"); 157 V8::FatalProcessOutOfMemory("Zone");
166 return nullptr; 158 return nullptr;
167 } 159 }
168 Segment* segment = NewSegment(new_size); 160 Segment* segment = NewSegment(new_size);
169 if (segment == nullptr) { 161 if (segment == nullptr) {
170 V8::FatalProcessOutOfMemory("Zone"); 162 V8::FatalProcessOutOfMemory("Zone");
171 return nullptr; 163 return nullptr;
172 } 164 }
173 165
174 // Recompute 'top' and 'limit' based on the new segment. 166 // Recompute 'top' and 'limit' based on the new segment.
175 Address result = RoundUp(segment->start(), kAlignment); 167 Address result = RoundUp(segment->start(), kAlignmentInBytes);
176 position_ = result + size; 168 position_ = result + size;
177 // Check for address overflow. 169 // Check for address overflow.
 178 // (Should not happen since the segment is guaranteed to accommodate 170 // (Should not happen since the segment is guaranteed to accommodate
179 // size bytes + header and alignment padding) 171 // size bytes + header and alignment padding)
180 DCHECK(reinterpret_cast<uintptr_t>(position_) >= 172 DCHECK(reinterpret_cast<uintptr_t>(position_) >=
181 reinterpret_cast<uintptr_t>(result)); 173 reinterpret_cast<uintptr_t>(result));
182 limit_ = segment->end(); 174 limit_ = segment->end();
183 DCHECK(position_ <= limit_); 175 DCHECK(position_ <= limit_);
184 return result; 176 return result;
185 } 177 }
186 178
187 } // namespace internal 179 } // namespace internal
188 } // namespace v8 180 } // namespace v8
OLDNEW
« no previous file with comments | « src/zone/zone.h ('k') | test/unittests/BUILD.gn » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698