Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/zone.h" | 5 #include "src/zone.h" |
| 6 | 6 |
| 7 #include <cstring> | 7 #include <cstring> |
| 8 | 8 |
| 9 #include "src/v8.h" | 9 #include "src/v8.h" |
| 10 | 10 |
| 11 #ifdef V8_USE_ADDRESS_SANITIZER | 11 #ifdef V8_USE_ADDRESS_SANITIZER |
| 12 #include <sanitizer/asan_interface.h> | 12 #include <sanitizer/asan_interface.h> |
| 13 #endif // V8_USE_ADDRESS_SANITIZER | 13 #endif // V8_USE_ADDRESS_SANITIZER |
| 14 | 14 |
| 15 namespace v8 { | 15 namespace v8 { |
| 16 namespace internal { | 16 namespace internal { |
| 17 | 17 |
| 18 namespace { | 18 namespace { |
| 19 | 19 |
| 20 #if V8_USE_ADDRESS_SANITIZER | 20 #if V8_USE_ADDRESS_SANITIZER |
| 21 | 21 |
| 22 const int kASanRedzoneBytes = 24; // Must be a multiple of 8. | 22 const size_t kASanRedzoneBytes = 24; // Must be a multiple of 8. |
| 23 | 23 |
| 24 #else | 24 #else |
| 25 | 25 |
| 26 #define ASAN_POISON_MEMORY_REGION(start, size) \ | 26 #define ASAN_POISON_MEMORY_REGION(start, size) \ |
| 27 do { \ | 27 do { \ |
| 28 USE(start); \ | 28 USE(start); \ |
| 29 USE(size); \ | 29 USE(size); \ |
| 30 } while (false) | 30 } while (false) |
| 31 | 31 |
| 32 #define ASAN_UNPOISON_MEMORY_REGION(start, size) \ | 32 #define ASAN_UNPOISON_MEMORY_REGION(start, size) \ |
| 33 do { \ | 33 do { \ |
| 34 USE(start); \ | 34 USE(start); \ |
| 35 USE(size); \ | 35 USE(size); \ |
| 36 } while (false) | 36 } while (false) |
| 37 | 37 |
| 38 const int kASanRedzoneBytes = 0; | 38 const size_t kASanRedzoneBytes = 0; |
| 39 | 39 |
| 40 #endif // V8_USE_ADDRESS_SANITIZER | 40 #endif // V8_USE_ADDRESS_SANITIZER |
| 41 | 41 |
| 42 } // namespace | 42 } // namespace |
| 43 | 43 |
| 44 | 44 |
| 45 // Segments represent chunks of memory: They have starting address | 45 // Segments represent chunks of memory: They have starting address |
| 46 // (encoded in the this pointer) and a size in bytes. Segments are | 46 // (encoded in the this pointer) and a size in bytes. Segments are |
| 47 // chained together forming a LIFO structure with the newest segment | 47 // chained together forming a LIFO structure with the newest segment |
| 48 // available as segment_head_. Segments are allocated using malloc() | 48 // available as segment_head_. Segments are allocated using malloc() |
| 49 // and de-allocated using free(). | 49 // and de-allocated using free(). |
| 50 | 50 |
| 51 class Segment { | 51 class Segment { |
| 52 public: | 52 public: |
| 53 void Initialize(Segment* next, int size) { | 53 void Initialize(Segment* next, size_t size) { |
| 54 next_ = next; | 54 next_ = next; |
| 55 size_ = size; | 55 size_ = size; |
| 56 } | 56 } |
| 57 | 57 |
| 58 Segment* next() const { return next_; } | 58 Segment* next() const { return next_; } |
| 59 void clear_next() { next_ = nullptr; } | 59 void clear_next() { next_ = nullptr; } |
| 60 | 60 |
| 61 int size() const { return size_; } | 61 size_t size() const { return size_; } |
| 62 int capacity() const { return size_ - sizeof(Segment); } | 62 size_t capacity() const { return size_ - sizeof(Segment); } |
| 63 | 63 |
| 64 Address start() const { return address(sizeof(Segment)); } | 64 Address start() const { return address(sizeof(Segment)); } |
| 65 Address end() const { return address(size_); } | 65 Address end() const { return address(size_); } |
| 66 | 66 |
| 67 private: | 67 private: |
| 68 // Computes the address of the nth byte in this segment. | 68 // Computes the address of the nth byte in this segment. |
| 69 Address address(int n) const { | 69 Address address(size_t n) const { return Address(this) + n; } |
| 70 return Address(this) + n; | |
| 71 } | |
| 72 | 70 |
| 73 Segment* next_; | 71 Segment* next_; |
| 74 int size_; | 72 size_t size_; |
| 75 }; | 73 }; |
| 76 | 74 |
| 77 | 75 |
| 78 Zone::Zone() | 76 Zone::Zone() |
| 79 : allocation_size_(0), | 77 : allocation_size_(0), |
| 80 segment_bytes_allocated_(0), | 78 segment_bytes_allocated_(0), |
| 81 position_(0), | 79 position_(0), |
| 82 limit_(0), | 80 limit_(0), |
| 83 segment_head_(nullptr) {} | 81 segment_head_(nullptr) {} |
| 84 | 82 |
| 85 | 83 |
| 86 Zone::~Zone() { | 84 Zone::~Zone() { |
| 87 DeleteAll(); | 85 DeleteAll(); |
| 88 DeleteKeptSegment(); | 86 DeleteKeptSegment(); |
| 89 | 87 |
| 90 DCHECK(segment_bytes_allocated_ == 0); | 88 DCHECK(segment_bytes_allocated_ == 0); |
| 91 } | 89 } |
| 92 | 90 |
| 93 | 91 |
| 94 void* Zone::New(int size) { | 92 void* Zone::New(size_t size) { |
| 95 // Round up the requested size to fit the alignment. | 93 // Round up the requested size to fit the alignment. |
| 96 size = RoundUp(size, kAlignment); | 94 size = RoundUp(size, kAlignment); |
| 97 | 95 |
| 98 // If the allocation size is divisible by 8 then we return an 8-byte aligned | 96 // If the allocation size is divisible by 8 then we return an 8-byte aligned |
| 99 // address. | 97 // address. |
| 100 if (kPointerSize == 4 && kAlignment == 4) { | 98 if (kPointerSize == 4 && kAlignment == 4) { |
| 101 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); | 99 position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); |
| 102 } else { | 100 } else { |
| 103 DCHECK(kAlignment >= kPointerSize); | 101 DCHECK(kAlignment >= kPointerSize); |
| 104 } | 102 } |
| 105 | 103 |
| 106 // Check if the requested size is available without expanding. | 104 // Check if the requested size is available without expanding. |
| 107 Address result = position_; | 105 Address result = position_; |
| 108 | 106 |
| 109 const int size_with_redzone = size + kASanRedzoneBytes; | 107 const size_t size_with_redzone = size + kASanRedzoneBytes; |
| 110 if (size_with_redzone > limit_ - position_) { | 108 if (limit_ < position_ + size_with_redzone) { |
|
Jakob Kummerow
2016/04/28 12:16:14
This is not just a cosmetic change! The addition can overflow, whereas the original subtraction could not.
| |
| 111 result = NewExpand(size_with_redzone); | 109 result = NewExpand(size_with_redzone); |
| 112 } else { | 110 } else { |
| 113 position_ += size_with_redzone; | 111 position_ += size_with_redzone; |
| 114 } | 112 } |
| 115 | 113 |
| 116 Address redzone_position = result + size; | 114 Address redzone_position = result + size; |
| 117 DCHECK(redzone_position + kASanRedzoneBytes == position_); | 115 DCHECK(redzone_position + kASanRedzoneBytes == position_); |
| 118 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); | 116 ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); |
| 119 | 117 |
| 120 // Check that the result has the proper alignment and return it. | 118 // Check that the result has the proper alignment and return it. |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 134 Segment* keep = nullptr; | 132 Segment* keep = nullptr; |
| 135 // Traverse the chained list of segments, zapping (in debug mode) | 133 // Traverse the chained list of segments, zapping (in debug mode) |
| 136 // and freeing every segment except the one we wish to keep. | 134 // and freeing every segment except the one we wish to keep. |
| 137 for (Segment* current = segment_head_; current;) { | 135 for (Segment* current = segment_head_; current;) { |
| 138 Segment* next = current->next(); | 136 Segment* next = current->next(); |
| 139 if (!keep && current->size() <= kMaximumKeptSegmentSize) { | 137 if (!keep && current->size() <= kMaximumKeptSegmentSize) { |
| 140 // Unlink the segment we wish to keep from the list. | 138 // Unlink the segment we wish to keep from the list. |
| 141 keep = current; | 139 keep = current; |
| 142 keep->clear_next(); | 140 keep->clear_next(); |
| 143 } else { | 141 } else { |
| 144 int size = current->size(); | 142 size_t size = current->size(); |
| 145 #ifdef DEBUG | 143 #ifdef DEBUG |
| 146 // Un-poison first so the zapping doesn't trigger ASan complaints. | 144 // Un-poison first so the zapping doesn't trigger ASan complaints. |
| 147 ASAN_UNPOISON_MEMORY_REGION(current, size); | 145 ASAN_UNPOISON_MEMORY_REGION(current, size); |
| 148 // Zap the entire current segment (including the header). | 146 // Zap the entire current segment (including the header). |
| 149 memset(current, kZapDeadByte, size); | 147 memset(current, kZapDeadByte, size); |
| 150 #endif | 148 #endif |
| 151 DeleteSegment(current, size); | 149 DeleteSegment(current, size); |
| 152 } | 150 } |
| 153 current = next; | 151 current = next; |
| 154 } | 152 } |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 178 | 176 |
| 179 | 177 |
| 180 void Zone::DeleteKeptSegment() { | 178 void Zone::DeleteKeptSegment() { |
| 181 #ifdef DEBUG | 179 #ifdef DEBUG |
| 182 // Constant byte value used for zapping dead memory in debug mode. | 180 // Constant byte value used for zapping dead memory in debug mode. |
| 183 static const unsigned char kZapDeadByte = 0xcd; | 181 static const unsigned char kZapDeadByte = 0xcd; |
| 184 #endif | 182 #endif |
| 185 | 183 |
| 186 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); | 184 DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); |
| 187 if (segment_head_ != nullptr) { | 185 if (segment_head_ != nullptr) { |
| 188 int size = segment_head_->size(); | 186 size_t size = segment_head_->size(); |
| 189 #ifdef DEBUG | 187 #ifdef DEBUG |
| 190 // Un-poison first so the zapping doesn't trigger ASan complaints. | 188 // Un-poison first so the zapping doesn't trigger ASan complaints. |
| 191 ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); | 189 ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); |
| 192 // Zap the entire kept segment (including the header). | 190 // Zap the entire kept segment (including the header). |
| 193 memset(segment_head_, kZapDeadByte, size); | 191 memset(segment_head_, kZapDeadByte, size); |
| 194 #endif | 192 #endif |
| 195 DeleteSegment(segment_head_, size); | 193 DeleteSegment(segment_head_, size); |
| 196 segment_head_ = nullptr; | 194 segment_head_ = nullptr; |
| 197 } | 195 } |
| 198 | 196 |
| 199 DCHECK(segment_bytes_allocated_ == 0); | 197 DCHECK(segment_bytes_allocated_ == 0); |
| 200 } | 198 } |
| 201 | 199 |
| 202 | 200 |
| 203 // Creates a new segment, sets its size, and pushes it to the front | 201 // Creates a new segment, sets its size, and pushes it to the front |
| 204 // of the segment chain. Returns the new segment. | 202 // of the segment chain. Returns the new segment. |
| 205 Segment* Zone::NewSegment(int size) { | 203 Segment* Zone::NewSegment(size_t size) { |
| 206 Segment* result = reinterpret_cast<Segment*>(Malloced::New(size)); | 204 Segment* result = reinterpret_cast<Segment*>(Malloced::New(size)); |
| 207 segment_bytes_allocated_ += size; | 205 segment_bytes_allocated_ += size; |
| 208 if (result != nullptr) { | 206 if (result != nullptr) { |
| 209 result->Initialize(segment_head_, size); | 207 result->Initialize(segment_head_, size); |
| 210 segment_head_ = result; | 208 segment_head_ = result; |
| 211 } | 209 } |
| 212 return result; | 210 return result; |
| 213 } | 211 } |
| 214 | 212 |
| 215 | 213 |
| 216 // Deletes the given segment. Does not touch the segment chain. | 214 // Deletes the given segment. Does not touch the segment chain. |
| 217 void Zone::DeleteSegment(Segment* segment, int size) { | 215 void Zone::DeleteSegment(Segment* segment, size_t size) { |
| 218 segment_bytes_allocated_ -= size; | 216 segment_bytes_allocated_ -= size; |
| 219 Malloced::Delete(segment); | 217 Malloced::Delete(segment); |
| 220 } | 218 } |
| 221 | 219 |
| 222 | 220 |
| 223 Address Zone::NewExpand(int size) { | 221 Address Zone::NewExpand(size_t size) { |
| 224 // Make sure the requested size is already properly aligned and that | 222 // Make sure the requested size is already properly aligned and that |
| 225 // there isn't enough room in the Zone to satisfy the request. | 223 // there isn't enough room in the Zone to satisfy the request. |
| 226 DCHECK(size == RoundDown(size, kAlignment)); | 224 DCHECK_EQ(size, RoundDown(size, kAlignment)); |
| 227 DCHECK(size > limit_ - position_); | 225 DCHECK_LT(limit_, position_ + size); |
| 228 | 226 |
| 229 // Compute the new segment size. We use a 'high water mark' | 227 // Compute the new segment size. We use a 'high water mark' |
| 230 // strategy, where we increase the segment size every time we expand | 228 // strategy, where we increase the segment size every time we expand |
| 231 // except that we employ a maximum segment size when we delete. This | 229 // except that we employ a maximum segment size when we delete. This |
| 232 // is to avoid excessive malloc() and free() overhead. | 230 // is to avoid excessive malloc() and free() overhead. |
| 233 Segment* head = segment_head_; | 231 Segment* head = segment_head_; |
| 234 const size_t old_size = (head == nullptr) ? 0 : head->size(); | 232 const size_t old_size = (head == nullptr) ? 0 : head->size(); |
| 235 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; | 233 static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; |
| 236 const size_t new_size_no_overhead = size + (old_size << 1); | 234 const size_t new_size_no_overhead = size + (old_size << 1); |
| 237 size_t new_size = kSegmentOverhead + new_size_no_overhead; | 235 size_t new_size = kSegmentOverhead + new_size_no_overhead; |
| 238 const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size); | 236 const size_t min_new_size = kSegmentOverhead + size; |
| 239 // Guard against integer overflow. | 237 // Guard against integer overflow. |
| 240 if (new_size_no_overhead < static_cast<size_t>(size) || | 238 if (new_size_no_overhead < size || new_size < kSegmentOverhead) { |
| 241 new_size < static_cast<size_t>(kSegmentOverhead)) { | |
| 242 V8::FatalProcessOutOfMemory("Zone"); | 239 V8::FatalProcessOutOfMemory("Zone"); |
| 243 return nullptr; | 240 return nullptr; |
| 244 } | 241 } |
| 245 if (new_size < static_cast<size_t>(kMinimumSegmentSize)) { | 242 if (new_size < kMinimumSegmentSize) { |
| 246 new_size = kMinimumSegmentSize; | 243 new_size = kMinimumSegmentSize; |
| 247 } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) { | 244 } else if (new_size > kMaximumSegmentSize) { |
| 248 // Limit the size of new segments to avoid growing the segment size | 245 // Limit the size of new segments to avoid growing the segment size |
| 249 // exponentially, thus putting pressure on contiguous virtual address space. | 246 // exponentially, thus putting pressure on contiguous virtual address space. |
| 250 // All the while making sure to allocate a segment large enough to hold the | 247 // All the while making sure to allocate a segment large enough to hold the |
| 251 // requested size. | 248 // requested size. |
| 252 new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize)); | 249 new_size = Max(min_new_size, kMaximumSegmentSize); |
| 253 } | 250 } |
| 254 if (new_size > INT_MAX) { | 251 if (new_size > INT_MAX) { |
| 255 V8::FatalProcessOutOfMemory("Zone"); | 252 V8::FatalProcessOutOfMemory("Zone"); |
| 256 return nullptr; | 253 return nullptr; |
| 257 } | 254 } |
| 258 Segment* segment = NewSegment(static_cast<int>(new_size)); | 255 Segment* segment = NewSegment(new_size); |
| 259 if (segment == nullptr) { | 256 if (segment == nullptr) { |
| 260 V8::FatalProcessOutOfMemory("Zone"); | 257 V8::FatalProcessOutOfMemory("Zone"); |
| 261 return nullptr; | 258 return nullptr; |
| 262 } | 259 } |
| 263 | 260 |
| 264 // Recompute 'top' and 'limit' based on the new segment. | 261 // Recompute 'top' and 'limit' based on the new segment. |
| 265 Address result = RoundUp(segment->start(), kAlignment); | 262 Address result = RoundUp(segment->start(), kAlignment); |
| 266 position_ = result + size; | 263 position_ = result + size; |
| 267 // Check for address overflow. | 264 // Check for address overflow. |
| 268 // (Should not happen since the segment is guaranteed to accommodate | 265 // (Should not happen since the segment is guaranteed to accommodate |
| 269 // size bytes + header and alignment padding) | 266 // size bytes + header and alignment padding) |
| 270 DCHECK_GE(reinterpret_cast<uintptr_t>(position_), | 267 DCHECK_GE(reinterpret_cast<uintptr_t>(position_), |
| 271 reinterpret_cast<uintptr_t>(result)); | 268 reinterpret_cast<uintptr_t>(result)); |
| 272 limit_ = segment->end(); | 269 limit_ = segment->end(); |
| 273 DCHECK(position_ <= limit_); | 270 DCHECK(position_ <= limit_); |
| 274 return result; | 271 return result; |
| 275 } | 272 } |
| 276 | 273 |
| 277 } // namespace internal | 274 } // namespace internal |
| 278 } // namespace v8 | 275 } // namespace v8 |
| OLD | NEW |