| OLD | NEW | 
|    1 // Copyright 2012 the V8 project authors. All rights reserved. |    1 // Copyright 2012 the V8 project authors. All rights reserved. | 
|    2 // Use of this source code is governed by a BSD-style license that can be |    2 // Use of this source code is governed by a BSD-style license that can be | 
|    3 // found in the LICENSE file. |    3 // found in the LICENSE file. | 
|    4  |    4  | 
|    5 #include <string.h> |    5 #include "src/zone.h" | 
|    6  |    6  | 
|    7 #include "src/v8.h" |    7 #include <cstring> | 
|    8 #include "src/zone-inl.h" |    8  | 
 |    9 #ifdef V8_USE_ADDRESS_SANITIZER | 
 |   10 #include <sanitizer/asan_interface.h> | 
 |   11 #endif  // V8_USE_ADDRESS_SANITIZER | 
|    9  |   12  | 
|   10 namespace v8 { |   13 namespace v8 { | 
|   11 namespace internal { |   14 namespace internal { | 
|   12  |   15  | 
 |   16 namespace { | 
 |   17  | 
 |   18 #ifdef V8_USE_ADDRESS_SANITIZER | 
 |   19  | 
 |   20 const int kASanRedzoneBytes = 24;  // Must be a multiple of 8. | 
 |   21  | 
 |   22 #else | 
 |   23  | 
 |   24 #define ASAN_POISON_MEMORY_REGION(start, size) \ | 
 |   25   do {                                         \ | 
 |   26     USE(start);                                \ | 
 |   27     USE(size);                                 \ | 
 |   28   } while (false) | 
 |   29  | 
 |   30 #define ASAN_UNPOISON_MEMORY_REGION(start, size) \ | 
 |   31   do {                                           \ | 
 |   32     USE(start);                                  \ | 
 |   33     USE(size);                                   \ | 
 |   34   } while (false) | 
 |   35  | 
 |   36 const int kASanRedzoneBytes = 0; | 
 |   37  | 
 |   38 #endif  // V8_USE_ADDRESS_SANITIZER | 
 |   39  | 
 |   40 }  // namespace | 
 |   41  | 
|   13  |   42  | 
|   14 // Segments represent chunks of memory: They have starting address |   43 // Segments represent chunks of memory: They have a starting address | 
|   15 // (encoded in the this pointer) and a size in bytes. Segments are |   44 // (encoded in the this pointer) and a size in bytes. Segments are | 
|   16 // chained together forming a LIFO structure with the newest segment |   45 // chained together forming a LIFO structure with the newest segment | 
|   17 // available as segment_head_. Segments are allocated using malloc() |   46 // available as segment_head_. Segments are allocated using malloc() | 
|   18 // and de-allocated using free(). |   47 // and de-allocated using free(). | 
|   19  |   48  | 
|   20 class Segment { |   49 class Segment { | 
|   21  public: |   50  public: | 
|   22   void Initialize(Segment* next, int size) { |   51   void Initialize(Segment* next, int size) { | 
|   23     next_ = next; |   52     next_ = next; | 
|   24     size_ = size; |   53     size_ = size; | 
|   25   } |   54   } | 
|   26  |   55  | 
|   27   Segment* next() const { return next_; } |   56   Segment* next() const { return next_; } | 
|   28   void clear_next() { next_ = NULL; } |   57   void clear_next() { next_ = nullptr; } | 
|   29  |   58  | 
|   30   int size() const { return size_; } |   59   int size() const { return size_; } | 
|   31   int capacity() const { return size_ - sizeof(Segment); } |   60   int capacity() const { return size_ - sizeof(Segment); } | 
|   32  |   61  | 
|   33   Address start() const { return address(sizeof(Segment)); } |   62   Address start() const { return address(sizeof(Segment)); } | 
|   34   Address end() const { return address(size_); } |   63   Address end() const { return address(size_); } | 
|   35  |   64  | 
|   36  private: |   65  private: | 
|   37   // Computes the address of the nth byte in this segment. |   66   // Computes the address of the nth byte in this segment. | 
|   38   Address address(int n) const { |   67   Address address(int n) const { | 
|   39     return Address(this) + n; |   68     return Address(this) + n; | 
|   40   } |   69   } | 
|   41  |   70  | 
|   42   Segment* next_; |   71   Segment* next_; | 
|   43   int size_; |   72   int size_; | 
|   44 }; |   73 }; | 
|   45  |   74  | 
|   46  |   75  | 
|   47 Zone::Zone() |   76 Zone::Zone() | 
|   48     : allocation_size_(0), |   77     : allocation_size_(0), | 
|   49       segment_bytes_allocated_(0), |   78       segment_bytes_allocated_(0), | 
|   50       position_(0), |   79       position_(0), | 
|   51       limit_(0), |   80       limit_(0), | 
|   52       segment_head_(NULL) {} |   81       segment_head_(nullptr) {} | 
|   53  |   82  | 
|   54  |   83  | 
|   55 Zone::~Zone() { |   84 Zone::~Zone() { | 
|   56   DeleteAll(); |   85   DeleteAll(); | 
|   57   DeleteKeptSegment(); |   86   DeleteKeptSegment(); | 
|   58  |   87  | 
|   59   DCHECK(segment_bytes_allocated_ == 0); |   88   DCHECK(segment_bytes_allocated_ == 0); | 
|   60 } |   89 } | 
|   61  |   90  | 
|   62  |   91  | 
|   63 void* Zone::New(int size) { |   92 void* Zone::New(int size) { | 
|   64   // Round up the requested size to fit the alignment. |   93   // Round up the requested size to fit the alignment. | 
|   65   size = RoundUp(size, kAlignment); |   94   size = RoundUp(size, kAlignment); | 
|   66  |   95  | 
|   67   // If the allocation size is divisible by 8 then we return an 8-byte aligned |   96   // If the allocation size is divisible by 8 then we return an 8-byte aligned | 
|   68   // address. |   97   // address. | 
|   69   if (kPointerSize == 4 && kAlignment == 4) { |   98   if (kPointerSize == 4 && kAlignment == 4) { | 
|   70     position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); |   99     position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4); | 
|   71   } else { |  100   } else { | 
|   72     DCHECK(kAlignment >= kPointerSize); |  101     DCHECK(kAlignment >= kPointerSize); | 
|   73   } |  102   } | 
|   74  |  103  | 
|   75   // Check if the requested size is available without expanding. |  104   // Check if the requested size is available without expanding. | 
|   76   Address result = position_; |  105   Address result = position_; | 
|   77  |  106  | 
|   78   int size_with_redzone = |  107   const int size_with_redzone = size + kASanRedzoneBytes; | 
|   79 #ifdef V8_USE_ADDRESS_SANITIZER |  | 
|   80       size + kASanRedzoneBytes; |  | 
|   81 #else |  | 
|   82       size; |  | 
|   83 #endif |  | 
|   84  |  | 
|   85   if (size_with_redzone > limit_ - position_) { |  108   if (size_with_redzone > limit_ - position_) { | 
|   86      result = NewExpand(size_with_redzone); |  109     result = NewExpand(size_with_redzone); | 
|   87   } else { |  110   } else { | 
|   88      position_ += size_with_redzone; |  111     position_ += size_with_redzone; | 
|   89   } |  112   } | 
|   90  |  113  | 
|   91 #ifdef V8_USE_ADDRESS_SANITIZER |  | 
|   92   Address redzone_position = result + size; |  114   Address redzone_position = result + size; | 
|   93   DCHECK(redzone_position + kASanRedzoneBytes == position_); |  115   DCHECK(redzone_position + kASanRedzoneBytes == position_); | 
|   94   ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); |  116   ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes); | 
|   95 #endif |  | 
|   96  |  117  | 
|   97   // Check that the result has the proper alignment and return it. |  118   // Check that the result has the proper alignment and return it. | 
|   98   DCHECK(IsAddressAligned(result, kAlignment, 0)); |  119   DCHECK(IsAddressAligned(result, kAlignment, 0)); | 
|   99   allocation_size_ += size; |  120   allocation_size_ += size; | 
|  100   return reinterpret_cast<void*>(result); |  121   return reinterpret_cast<void*>(result); | 
|  101 } |  122 } | 
|  102  |  123  | 
|  103  |  124  | 
|  104 void Zone::DeleteAll() { |  125 void Zone::DeleteAll() { | 
|  105 #ifdef DEBUG |  126 #ifdef DEBUG | 
|  106   // Constant byte value used for zapping dead memory in debug mode. |  127   // Constant byte value used for zapping dead memory in debug mode. | 
|  107   static const unsigned char kZapDeadByte = 0xcd; |  128   static const unsigned char kZapDeadByte = 0xcd; | 
|  108 #endif |  129 #endif | 
|  109  |  130  | 
|  110   // Find a segment with a suitable size to keep around. |  131   // Find a segment with a suitable size to keep around. | 
|  111   Segment* keep = NULL; |  132   Segment* keep = nullptr; | 
|  112   // Traverse the chained list of segments, zapping (in debug mode) |  133   // Traverse the chained list of segments, zapping (in debug mode) | 
|  113   // and freeing every segment except the one we wish to keep. |  134   // and freeing every segment except the one we wish to keep. | 
|  114   for (Segment* current = segment_head_; current != NULL; ) { |  135   for (Segment* current = segment_head_; current;) { | 
|  115     Segment* next = current->next(); |  136     Segment* next = current->next(); | 
|  116     if (keep == NULL && current->size() <= kMaximumKeptSegmentSize) { |  137     if (!keep && current->size() <= kMaximumKeptSegmentSize) { | 
|  117       // Unlink the segment we wish to keep from the list. |  138       // Unlink the segment we wish to keep from the list. | 
|  118       keep = current; |  139       keep = current; | 
|  119       keep->clear_next(); |  140       keep->clear_next(); | 
|  120     } else { |  141     } else { | 
|  121       int size = current->size(); |  142       int size = current->size(); | 
|  122 #ifdef DEBUG |  143 #ifdef DEBUG | 
|  123       // Un-poison first so the zapping doesn't trigger ASan complaints. |  144       // Un-poison first so the zapping doesn't trigger ASan complaints. | 
|  124       ASAN_UNPOISON_MEMORY_REGION(current, size); |  145       ASAN_UNPOISON_MEMORY_REGION(current, size); | 
|  125       // Zap the entire current segment (including the header). |  146       // Zap the entire current segment (including the header). | 
|  126       memset(current, kZapDeadByte, size); |  147       memset(current, kZapDeadByte, size); | 
|  127 #endif |  148 #endif | 
|  128       DeleteSegment(current, size); |  149       DeleteSegment(current, size); | 
|  129     } |  150     } | 
|  130     current = next; |  151     current = next; | 
|  131   } |  152   } | 
|  132  |  153  | 
|  133   // If we have found a segment we want to keep, we must recompute the |  154   // If we have found a segment we want to keep, we must recompute the | 
|  134   // variables 'position' and 'limit' to prepare for future allocate |  155   // variables 'position' and 'limit' to prepare for future allocate | 
|  135   // attempts. Otherwise, we must clear the position and limit to |  156   // attempts. Otherwise, we must clear the position and limit to | 
|  136   // force a new segment to be allocated on demand. |  157   // force a new segment to be allocated on demand. | 
|  137   if (keep != NULL) { |  158   if (keep) { | 
|  138     Address start = keep->start(); |  159     Address start = keep->start(); | 
|  139     position_ = RoundUp(start, kAlignment); |  160     position_ = RoundUp(start, kAlignment); | 
|  140     limit_ = keep->end(); |  161     limit_ = keep->end(); | 
|  141     // Un-poison so we can re-use the segment later. |  162     // Un-poison so we can re-use the segment later. | 
|  142     ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity()); |  163     ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity()); | 
|  143 #ifdef DEBUG |  164 #ifdef DEBUG | 
|  144     // Zap the contents of the kept segment (but not the header). |  165     // Zap the contents of the kept segment (but not the header). | 
|  145     memset(start, kZapDeadByte, keep->capacity()); |  166     memset(start, kZapDeadByte, keep->capacity()); | 
|  146 #endif |  167 #endif | 
|  147   } else { |  168   } else { | 
|  148     position_ = limit_ = 0; |  169     position_ = limit_ = 0; | 
|  149   } |  170   } | 
|  150  |  171  | 
|  151   allocation_size_ = 0; |  172   allocation_size_ = 0; | 
|  152   // Update the head segment to be the kept segment (if any). |  173   // Update the head segment to be the kept segment (if any). | 
|  153   segment_head_ = keep; |  174   segment_head_ = keep; | 
|  154 } |  175 } | 
|  155  |  176  | 
|  156  |  177  | 
|  157 void Zone::DeleteKeptSegment() { |  178 void Zone::DeleteKeptSegment() { | 
|  158 #ifdef DEBUG |  179 #ifdef DEBUG | 
|  159   // Constant byte value used for zapping dead memory in debug mode. |  180   // Constant byte value used for zapping dead memory in debug mode. | 
|  160   static const unsigned char kZapDeadByte = 0xcd; |  181   static const unsigned char kZapDeadByte = 0xcd; | 
|  161 #endif |  182 #endif | 
|  162  |  183  | 
|  163   DCHECK(segment_head_ == NULL || segment_head_->next() == NULL); |  184   DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr); | 
|  164   if (segment_head_ != NULL) { |  185   if (segment_head_ != nullptr) { | 
|  165     int size = segment_head_->size(); |  186     int size = segment_head_->size(); | 
|  166 #ifdef DEBUG |  187 #ifdef DEBUG | 
|  167     // Un-poison first so the zapping doesn't trigger ASan complaints. |  188     // Un-poison first so the zapping doesn't trigger ASan complaints. | 
|  168     ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); |  189     ASAN_UNPOISON_MEMORY_REGION(segment_head_, size); | 
|  169     // Zap the entire kept segment (including the header). |  190     // Zap the entire kept segment (including the header). | 
|  170     memset(segment_head_, kZapDeadByte, size); |  191     memset(segment_head_, kZapDeadByte, size); | 
|  171 #endif |  192 #endif | 
|  172     DeleteSegment(segment_head_, size); |  193     DeleteSegment(segment_head_, size); | 
|  173     segment_head_ = NULL; |  194     segment_head_ = nullptr; | 
|  174   } |  195   } | 
|  175  |  196  | 
|  176   DCHECK(segment_bytes_allocated_ == 0); |  197   DCHECK(segment_bytes_allocated_ == 0); | 
|  177 } |  198 } | 
|  178  |  199  | 
|  179  |  200  | 
|  180 // Creates a new segment, sets it size, and pushes it to the front |  201 // Creates a new segment, sets its size, and pushes it to the front | 
|  181 // of the segment chain. Returns the new segment. |  202 // of the segment chain. Returns the new segment. | 
|  182 Segment* Zone::NewSegment(int size) { |  203 Segment* Zone::NewSegment(int size) { | 
|  183   Segment* result = reinterpret_cast<Segment*>(Malloced::New(size)); |  204   Segment* result = reinterpret_cast<Segment*>(Malloced::New(size)); | 
|  184   adjust_segment_bytes_allocated(size); |  205   segment_bytes_allocated_ += size; | 
|  185   if (result != NULL) { |  206   if (result != nullptr) { | 
|  186     result->Initialize(segment_head_, size); |  207     result->Initialize(segment_head_, size); | 
|  187     segment_head_ = result; |  208     segment_head_ = result; | 
|  188   } |  209   } | 
|  189   return result; |  210   return result; | 
|  190 } |  211 } | 
|  191  |  212  | 
|  192  |  213  | 
|  193 // Deletes the given segment. Does not touch the segment chain. |  214 // Deletes the given segment. Does not touch the segment chain. | 
|  194 void Zone::DeleteSegment(Segment* segment, int size) { |  215 void Zone::DeleteSegment(Segment* segment, int size) { | 
|  195   adjust_segment_bytes_allocated(-size); |  216   segment_bytes_allocated_ -= size; | 
|  196   Malloced::Delete(segment); |  217   Malloced::Delete(segment); | 
|  197 } |  218 } | 
|  198  |  219  | 
|  199  |  220  | 
|  200 Address Zone::NewExpand(int size) { |  221 Address Zone::NewExpand(int size) { | 
|  201   // Make sure the requested size is already properly aligned and that |  222   // Make sure the requested size is already properly aligned and that | 
|  202   // there isn't enough room in the Zone to satisfy the request. |  223   // there isn't enough room in the Zone to satisfy the request. | 
|  203   DCHECK(size == RoundDown(size, kAlignment)); |  224   DCHECK(size == RoundDown(size, kAlignment)); | 
|  204   DCHECK(size > limit_ - position_); |  225   DCHECK(size > limit_ - position_); | 
|  205  |  226  | 
|  206   // Compute the new segment size. We use a 'high water mark' |  227   // Compute the new segment size. We use a 'high water mark' | 
|  207   // strategy, where we increase the segment size every time we expand |  228   // strategy, where we increase the segment size every time we expand | 
|  208   // except that we employ a maximum segment size when we delete. This |  229   // except that we employ a maximum segment size when we delete. This | 
|  209   // is to avoid excessive malloc() and free() overhead. |  230   // is to avoid excessive malloc() and free() overhead. | 
|  210   Segment* head = segment_head_; |  231   Segment* head = segment_head_; | 
|  211   const size_t old_size = (head == NULL) ? 0 : head->size(); |  232   const size_t old_size = (head == nullptr) ? 0 : head->size(); | 
|  212   static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; |  233   static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; | 
|  213   const size_t new_size_no_overhead = size + (old_size << 1); |  234   const size_t new_size_no_overhead = size + (old_size << 1); | 
|  214   size_t new_size = kSegmentOverhead + new_size_no_overhead; |  235   size_t new_size = kSegmentOverhead + new_size_no_overhead; | 
|  215   const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size); |  236   const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size); | 
|  216   // Guard against integer overflow. |  237   // Guard against integer overflow. | 
|  217   if (new_size_no_overhead < static_cast<size_t>(size) || |  238   if (new_size_no_overhead < static_cast<size_t>(size) || | 
|  218       new_size < static_cast<size_t>(kSegmentOverhead)) { |  239       new_size < static_cast<size_t>(kSegmentOverhead)) { | 
|  219     V8::FatalProcessOutOfMemory("Zone"); |  240     FatalProcessOutOfMemory("Zone"); | 
|  220     return NULL; |  241     return nullptr; | 
|  221   } |  242   } | 
|  222   if (new_size < static_cast<size_t>(kMinimumSegmentSize)) { |  243   if (new_size < static_cast<size_t>(kMinimumSegmentSize)) { | 
|  223     new_size = kMinimumSegmentSize; |  244     new_size = kMinimumSegmentSize; | 
|  224   } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) { |  245   } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) { | 
|  225     // Limit the size of new segments to avoid growing the segment size |  246     // Limit the size of new segments to avoid growing the segment size | 
|  226     // exponentially, thus putting pressure on contiguous virtual address space. |  247     // exponentially, thus putting pressure on contiguous virtual address space. | 
|  227     // All the while making sure to allocate a segment large enough to hold the |  248     // All the while making sure to allocate a segment large enough to hold the | 
|  228     // requested size. |  249     // requested size. | 
|  229     new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize)); |  250     new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize)); | 
|  230   } |  251   } | 
|  231   if (new_size > INT_MAX) { |  252   if (new_size > INT_MAX) { | 
|  232     V8::FatalProcessOutOfMemory("Zone"); |  253     FatalProcessOutOfMemory("Zone"); | 
|  233     return NULL; |  254     return nullptr; | 
|  234   } |  255   } | 
|  235   Segment* segment = NewSegment(static_cast<int>(new_size)); |  256   Segment* segment = NewSegment(static_cast<int>(new_size)); | 
|  236   if (segment == NULL) { |  257   if (segment == nullptr) { | 
|  237     V8::FatalProcessOutOfMemory("Zone"); |  258     FatalProcessOutOfMemory("Zone"); | 
|  238     return NULL; |  259     return nullptr; | 
|  239   } |  260   } | 
|  240  |  261  | 
|  241   // Recompute 'top' and 'limit' based on the new segment. |  262   // Recompute 'top' and 'limit' based on the new segment. | 
|  242   Address result = RoundUp(segment->start(), kAlignment); |  263   Address result = RoundUp(segment->start(), kAlignment); | 
|  243   position_ = result + size; |  264   position_ = result + size; | 
|  244   // Check for address overflow. |  265   // Check for address overflow. | 
|  245   // (Should not happen since the segment is guaranteed to accomodate |  266   // (Should not happen since the segment is guaranteed to accommodate | 
|  246   // size bytes + header and alignment padding) |  267   // size bytes + header and alignment padding) | 
|  247   if (reinterpret_cast<uintptr_t>(position_) |  268   DCHECK_GE(reinterpret_cast<uintptr_t>(position_), | 
|  248       < reinterpret_cast<uintptr_t>(result)) { |  269             reinterpret_cast<uintptr_t>(result)); | 
|  249     V8::FatalProcessOutOfMemory("Zone"); |  | 
|  250     return NULL; |  | 
|  251   } |  | 
|  252   limit_ = segment->end(); |  270   limit_ = segment->end(); | 
|  253   DCHECK(position_ <= limit_); |  271   DCHECK(position_ <= limit_); | 
|  254   return result; |  272   return result; | 
|  255 } |  273 } | 
|  256  |  274  | 
|  257  |  275 }  // namespace internal | 
|  258 } }  // namespace v8::internal |  276 }  // namespace v8 | 
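
The point of hoisting the ASan handling into the anonymous namespace (new lines 16-41) is that Zone::New can now poison the trailing redzone unconditionally: in non-ASan builds ASAN_POISON_MEMORY_REGION expands to a no-op and kASanRedzoneBytes is 0, so the same code path degenerates to a plain bump allocation with no #ifdefs. A minimal standalone sketch of that pattern, not V8 code: USE_ASAN, BumpAlloc, kRedzoneBytes, and the buffer/offset parameters are illustrative stand-ins for V8's build define, Zone::New, and the segment memory.

    // Sketch only: USE_ASAN stands in for a build-level define such as
    // V8_USE_ADDRESS_SANITIZER set by the build system.
    #ifdef USE_ASAN
    #include <sanitizer/asan_interface.h>
    static const int kRedzoneBytes = 24;  // must be a multiple of 8
    #else
    // No-op fallbacks so callers never need #ifdefs around poisoning.
    #define ASAN_POISON_MEMORY_REGION(start, size) ((void)(start), (void)(size))
    #define ASAN_UNPOISON_MEMORY_REGION(start, size) ((void)(start), (void)(size))
    static const int kRedzoneBytes = 0;
    #endif

    // Bump-allocates |size| bytes from |buffer| at *offset and poisons the
    // redzone that follows the returned block. The caller must ensure the
    // buffer has room for size + kRedzoneBytes. Under a non-ASan build the
    // redzone is empty and the poison call compiles away.
    static char* BumpAlloc(char* buffer, int* offset, int size) {
      char* result = buffer + *offset;
      *offset += size + kRedzoneBytes;
      ASAN_POISON_MEMORY_REGION(result + size, kRedzoneBytes);
      return result;
    }

With this arrangement, the deallocation side only needs to call ASAN_UNPOISON_MEMORY_REGION before zapping or reusing memory, which is exactly what the right-hand column of Zone::DeleteAll and Zone::DeleteKeptSegment does.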