OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/zone/zone.h" | 5 #include "src/zone/zone.h" |
6 | 6 |
7 #include <cstring> | 7 #include <cstring> |
8 | 8 |
9 #include "src/utils.h" | 9 #include "src/utils.h" |
10 #include "src/v8.h" | 10 #include "src/v8.h" |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
56 Zone::~Zone() { | 56 Zone::~Zone() { |
57 allocator_->ZoneDestruction(this); | 57 allocator_->ZoneDestruction(this); |
58 | 58 |
59 DeleteAll(); | 59 DeleteAll(); |
60 | 60 |
61 DCHECK(segment_bytes_allocated_ == 0); | 61 DCHECK(segment_bytes_allocated_ == 0); |
62 } | 62 } |
63 | 63 |
64 void* Zone::New(size_t size) { | 64 void* Zone::New(size_t size) { |
65 // Round up the requested size to fit the alignment. | 65 // Round up the requested size to fit the alignment. |
66 size = RoundUp(size, kAlignment); | 66 size = RoundUp(size, kAlignmentInBytes); |
67 | |
68 // If the allocation size is divisible by 8 then we return an 8-byte aligned | |
69 // address. | |
70 if (kPointerSize == 4 && kAlignment == 4) { | |
71 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); | |
72 } else { | |
73 DCHECK(kAlignment >= kPointerSize); | |
74 } | |
75 | 67 |
76 // Check if the requested size is available without expanding. | 68 // Check if the requested size is available without expanding. |
77 Address result = position_; | 69 Address result = position_; |
78 | 70 |
79 const size_t size_with_redzone = size + kASanRedzoneBytes; | 71 const size_t size_with_redzone = size + kASanRedzoneBytes; |
80 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); | 72 const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_); |
81 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); | 73 const uintptr_t position = reinterpret_cast<uintptr_t>(position_); |
82 // position_ > limit_ can be true after the alignment correction above. | 74 // position_ > limit_ can be true after the alignment correction above. |
83 if (limit < position || size_with_redzone > limit - position) { | 75 if (limit < position || size_with_redzone > limit - position) { |
84 result = NewExpand(size_with_redzone); | 76 result = NewExpand(size_with_redzone); |
85 } else { | 77 } else { |
86 position_ += size_with_redzone; | 78 position_ += size_with_redzone; |
87 } | 79 } |
88 | 80 |
89 Address redzone_position = result + size; | 81 Address redzone_position = result + size; |
90 DCHECK(redzone_position + kASanRedzoneBytes == position_); | 82 DCHECK(redzone_position + kASanRedzoneBytes == position_); |
91 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); | 83 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); |
92 | 84 |
93 // Check that the result has the proper alignment and return it. | 85 // Check that the result has the proper alignment and return it. |
94 DCHECK(IsAddressAligned(result, kAlignment, 0)); | 86 DCHECK(IsAddressAligned(result, kAlignmentInBytes, 0)); |
95 allocation_size_ += size; | 87 allocation_size_ += size; |
96 return reinterpret_cast<void*>(result); | 88 return reinterpret_cast<void*>(result); |
97 } | 89 } |
98 | 90 |
99 void Zone::DeleteAll() { | 91 void Zone::DeleteAll() { |
100 // Traverse the chained list of segments and return them all to the allocator. | 92 // Traverse the chained list of segments and return them all to the allocator. |
101 for (Segment* current = segment_head_; current;) { | 93 for (Segment* current = segment_head_; current;) { |
102 Segment* next = current->next(); | 94 Segment* next = current->next(); |
103 size_t size = current->size(); | 95 size_t size = current->size(); |
104 | 96 |
(...skipping 20 matching lines...) Expand all Loading... |
125 result->set_zone(this); | 117 result->set_zone(this); |
126 result->set_next(segment_head_); | 118 result->set_next(segment_head_); |
127 segment_head_ = result; | 119 segment_head_ = result; |
128 } | 120 } |
129 return result; | 121 return result; |
130 } | 122 } |
131 | 123 |
132 Address Zone::NewExpand(size_t size) { | 124 Address Zone::NewExpand(size_t size) { |
133 // Make sure the requested size is already properly aligned and that | 125 // Make sure the requested size is already properly aligned and that |
134 // there isn't enough room in the Zone to satisfy the request. | 126 // there isn't enough room in the Zone to satisfy the request. |
135 DCHECK_EQ(size, RoundDown(size, kAlignment)); | 127 DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes)); |
136 DCHECK(limit_ < position_ || | 128 DCHECK(limit_ < position_ || |
137 reinterpret_cast<uintptr_t>(limit_) - | 129 reinterpret_cast<uintptr_t>(limit_) - |
138 reinterpret_cast<uintptr_t>(position_) < | 130 reinterpret_cast<uintptr_t>(position_) < |
139 size); | 131 size); |
140 | 132 |
141 // Compute the new segment size. We use a 'high water mark' | 133 // Compute the new segment size. We use a 'high water mark' |
142 // strategy, where we increase the segment size every time we expand | 134 // strategy, where we increase the segment size every time we expand |
143 // except that we employ a maximum segment size when we delete. This | 135 // except that we employ a maximum segment size when we delete. This |
144 // is to avoid excessive malloc() and free() overhead. | 136 // is to avoid excessive malloc() and free() overhead. |
145 Segment* head = segment_head_; | 137 Segment* head = segment_head_; |
146 const size_t old_size = (head == nullptr) ? 0 : head->size(); | 138 const size_t old_size = (head == nullptr) ? 0 : head->size(); |
147 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; | 139 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes; |
148 const size_t new_size_no_overhead = size + (old_size << 1); | 140 const size_t new_size_no_overhead = size + (old_size << 1); |
149 size_t new_size = kSegmentOverhead + new_size_no_overhead; | 141 size_t new_size = kSegmentOverhead + new_size_no_overhead; |
150 const size_t min_new_size = kSegmentOverhead + size; | 142 const size_t min_new_size = kSegmentOverhead + size; |
151 // Guard against integer overflow. | 143 // Guard against integer overflow. |
152 if (new_size_no_overhead < size || new_size < kSegmentOverhead) { | 144 if (new_size_no_overhead < size || new_size < kSegmentOverhead) { |
153 V8::FatalProcessOutOfMemory("Zone"); | 145 V8::FatalProcessOutOfMemory("Zone"); |
154 return nullptr; | 146 return nullptr; |
155 } | 147 } |
156 if (new_size < kMinimumSegmentSize) { | 148 if (new_size < kMinimumSegmentSize) { |
157 new_size = kMinimumSegmentSize; | 149 new_size = kMinimumSegmentSize; |
158 } else if (new_size > kMaximumSegmentSize) { | 150 } else if (new_size > kMaximumSegmentSize) { |
159 // Limit the size of new segments to avoid growing the segment size | 151 // Limit the size of new segments to avoid growing the segment size |
160 // exponentially, thus putting pressure on contiguous virtual address space. | 152 // exponentially, thus putting pressure on contiguous virtual address space. |
161 // All the while making sure to allocate a segment large enough to hold the | 153 // All the while making sure to allocate a segment large enough to hold the |
162 // requested size. | 154 // requested size. |
163 new_size = Max(min_new_size, kMaximumSegmentSize); | 155 new_size = Max(min_new_size, kMaximumSegmentSize); |
164 } | 156 } |
165 if (new_size > INT_MAX) { | 157 if (new_size > INT_MAX) { |
166 V8::FatalProcessOutOfMemory("Zone"); | 158 V8::FatalProcessOutOfMemory("Zone"); |
167 return nullptr; | 159 return nullptr; |
168 } | 160 } |
169 Segment* segment = NewSegment(new_size); | 161 Segment* segment = NewSegment(new_size); |
170 if (segment == nullptr) { | 162 if (segment == nullptr) { |
171 V8::FatalProcessOutOfMemory("Zone"); | 163 V8::FatalProcessOutOfMemory("Zone"); |
172 return nullptr; | 164 return nullptr; |
173 } | 165 } |
174 | 166 |
175 // Recompute 'top' and 'limit' based on the new segment. | 167 // Recompute 'top' and 'limit' based on the new segment. |
176 Address result = RoundUp(segment->start(), kAlignment); | 168 Address result = RoundUp(segment->start(), kAlignmentInBytes); |
177 position_ = result + size; | 169 position_ = result + size; |
178 // Check for address overflow. | 170 // Check for address overflow. |
179 // (Should not happen since the segment is guaranteed to accommodate | 171 // (Should not happen since the segment is guaranteed to accommodate |
180 // size bytes + header and alignment padding) | 172 // size bytes + header and alignment padding) |
181 DCHECK(reinterpret_cast<uintptr_t>(position_) >= | 173 DCHECK(reinterpret_cast<uintptr_t>(position_) >= |
182 reinterpret_cast<uintptr_t>(result)); | 174 reinterpret_cast<uintptr_t>(result)); |
183 limit_ = segment->end(); | 175 limit_ = segment->end(); |
184 DCHECK(position_ <= limit_); | 176 DCHECK(position_ <= limit_); |
185 return result; | 177 return result; |
186 } | 178 } |
187 | 179 |
188 } // namespace internal | 180 } // namespace internal |
189 } // namespace v8 | 181 } // namespace v8 |
OLD | NEW |