OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 55 matching lines...)
66 int size_; | 66 int size_; |
67 }; | 67 }; |
68 | 68 |
69 | 69 |
70 Zone::Zone(Isolate* isolate) | 70 Zone::Zone(Isolate* isolate) |
71 : zone_excess_limit_(256 * MB), | 71 : zone_excess_limit_(256 * MB), |
72 allocation_size_(0), | 72 allocation_size_(0), |
73 segment_bytes_allocated_(0), | 73 segment_bytes_allocated_(0), |
74 position_(0), | 74 position_(0), |
75 limit_(0), | 75 limit_(0), |
76 scope_nesting_(0), | |
77 segment_head_(NULL), | 76 segment_head_(NULL), |
78 isolate_(isolate) { | 77 isolate_(isolate) { |
79 } | 78 } |
80 | 79 |
81 | 80 |
82 ZoneScope::~ZoneScope() { | 81 Zone::~Zone() { |
83 if (ShouldDeleteOnExit()) zone_->DeleteAll(); | 82 #ifdef DEBUG |
84 zone_->scope_nesting_--; | 83 // Constant byte value used for zapping dead memory in debug mode. |
| 84 static const unsigned char kZapDeadByte = 0xcd; |
| 85 #endif |
| 86 |
| 87 // Traverse the chained list of segments, zapping |
| 88 // (in debug mode) and freeing every segment |
| 89 Segment* current = segment_head_; |
| 90 while (current != NULL) { |
| 91 Segment* next = current->next(); |
| 92 int size = current->size(); |
| 93 #ifdef DEBUG |
| 94 // Zap the entire current segment (including the header). |
| 95 memset(current, kZapDeadByte, size); |
| 96 #endif |
| 97 DeleteSegment(current, size); |
| 98 current = next; |
| 99 } |
| 100 |
| 101 // We must clear the position and limit to force |
| 102 // a new segment to be allocated on demand. |
| 103 position_ = limit_ = 0; |
| 104 |
| 105 // Update the head segment. |
| 106 segment_head_ = NULL; |
85 } | 107 } |
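
The new Zone::~Zone() takes over the teardown that ZoneScope and DeleteAll used to coordinate: walk the intrusive segment chain, zap each block in debug builds, and free it. A minimal standalone sketch of that traversal pattern follows (hypothetical Segment and FreeChain names, not V8's actual types); the key detail is that the link must be read before the current block is zapped or freed, because the link lives inside the block being destroyed.

#include <cstdlib>
#include <cstring>

// Hypothetical stand-in for V8's Segment header: the first bytes of
// every malloc'd block hold the link and the block size.
struct Segment {
  Segment* next;  // previously allocated segment
  int size;       // total block size, header included
};

static const unsigned char kZapDeadByte = 0xcd;

void FreeChain(Segment* head) {
  Segment* current = head;
  while (current != NULL) {
    Segment* next = current->next;  // save the link first
#ifdef DEBUG
    memset(current, kZapDeadByte, current->size);  // zap header and contents
#endif
    free(current);
    current = next;
  }
}
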
86 | 108 |
87 | 109 |
88 // Creates a new segment, sets its size, and pushes it to the front | 110 // Creates a new segment, sets its size, and pushes it to the front |
89 // of the segment chain. Returns the new segment. | 111 // of the segment chain. Returns the new segment. |
90 Segment* Zone::NewSegment(int size) { | 112 Segment* Zone::NewSegment(int size) { |
91 Segment* result = reinterpret_cast<Segment*>(Malloced::New(size)); | 113 Segment* result = reinterpret_cast<Segment*>(Malloced::New(size)); |
92 adjust_segment_bytes_allocated(size); | 114 adjust_segment_bytes_allocated(size); |
93 if (result != NULL) { | 115 if (result != NULL) { |
94 result->Initialize(segment_head_, size); | 116 result->Initialize(segment_head_, size); |
95 segment_head_ = result; | 117 segment_head_ = result; |
96 } | 118 } |
97 return result; | 119 return result; |
98 } | 120 } |
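
NewSegment lays the Segment header at the very start of the freshly malloc'd block and links it in ahead of the current head, so the segment list is a simple LIFO whose head is always the newest segment. A sketch of that push-front step, reusing the hypothetical Segment struct from the sketch above:

// Allocate 'size' bytes, put the header at the front of the block, and
// link it in ahead of the current head (hypothetical helper, not V8's API).
Segment* PushFrontSegment(Segment** head, int size) {
  Segment* result = static_cast<Segment*>(malloc(size));
  if (result != NULL) {
    result->next = *head;  // old head becomes the link
    result->size = size;
    *head = result;        // new segment becomes the head
  }
  return result;
}
// Usable memory then starts right after the header, at
// reinterpret_cast<char*>(result) + sizeof(Segment).
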
99 | 121 |
100 | 122 |
101 // Deletes the given segment. Does not touch the segment chain. | 123 // Deletes the given segment. Does not touch the segment chain. |
102 void Zone::DeleteSegment(Segment* segment, int size) { | 124 void Zone::DeleteSegment(Segment* segment, int size) { |
103 adjust_segment_bytes_allocated(-size); | 125 adjust_segment_bytes_allocated(-size); |
104 Malloced::Delete(segment); | 126 Malloced::Delete(segment); |
105 } | 127 } |
106 | 128 |
107 | 129 |
108 void Zone::DeleteAll() { | |
109 #ifdef DEBUG | |
110 // Constant byte value used for zapping dead memory in debug mode. | |
111 static const unsigned char kZapDeadByte = 0xcd; | |
112 #endif | |
113 | |
114 // Find a segment with a suitable size to keep around. | |
115 Segment* keep = segment_head_; | |
116 while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) { | |
117 keep = keep->next(); | |
118 } | |
119 | |
120 // Traverse the chained list of segments, zapping (in debug mode) | |
121 // and freeing every segment except the one we wish to keep. | |
122 Segment* current = segment_head_; | |
123 while (current != NULL) { | |
124 Segment* next = current->next(); | |
125 if (current == keep) { | |
126 // Unlink the segment we wish to keep from the list. | |
127 current->clear_next(); | |
128 } else { | |
129 int size = current->size(); | |
130 #ifdef DEBUG | |
131 // Zap the entire current segment (including the header). | |
132 memset(current, kZapDeadByte, size); | |
133 #endif | |
134 DeleteSegment(current, size); | |
135 } | |
136 current = next; | |
137 } | |
138 | |
139 // If we have found a segment we want to keep, we must recompute the | |
140 // variables 'position' and 'limit' to prepare for future allocate | |
141 // attempts. Otherwise, we must clear the position and limit to | |
142 // force a new segment to be allocated on demand. | |
143 if (keep != NULL) { | |
144 Address start = keep->start(); | |
145 position_ = RoundUp(start, kAlignment); | |
146 limit_ = keep->end(); | |
147 #ifdef DEBUG | |
148 // Zap the contents of the kept segment (but not the header). | |
149 memset(start, kZapDeadByte, keep->capacity()); | |
150 #endif | |
151 } else { | |
152 position_ = limit_ = 0; | |
153 } | |
154 | |
155 // Update the head segment to be the kept segment (if any). | |
156 segment_head_ = keep; | |
157 } | |
158 | |
159 | |
160 void Zone::DeleteKeptSegment() { | |
161 if (segment_head_ != NULL) { | |
162 DeleteSegment(segment_head_, segment_head_->size()); | |
163 segment_head_ = NULL; | |
164 } | |
165 } | |
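
The removed DeleteAll reset the bump pointer of the kept segment with position_ = RoundUp(start, kAlignment), so the next allocation begins on an aligned boundary just past the segment header. For reference, the usual way to round an address up to a power-of-two alignment looks like the sketch below (V8's own RoundUp may be implemented differently, but the effect on position_ is the same):

#include <stdint.h>

// Round 'value' up to the next multiple of 'alignment', which must be a
// power of two. Illustrative helper, not V8's RoundUp.
inline uintptr_t RoundUpSketch(uintptr_t value, uintptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}
// e.g. with an alignment of 8: RoundUpSketch(0x1003, 8) == 0x1008,
// and an already aligned 0x1008 stays at 0x1008.
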
166 | |
167 | |
168 Address Zone::NewExpand(int size) { | 130 Address Zone::NewExpand(int size) { |
169 // Make sure the requested size is already properly aligned and that | 131 // Make sure the requested size is already properly aligned and that |
170 // there isn't enough room in the Zone to satisfy the request. | 132 // there isn't enough room in the Zone to satisfy the request. |
171 ASSERT(size == RoundDown(size, kAlignment)); | 133 ASSERT(size == RoundDown(size, kAlignment)); |
172 ASSERT(size > limit_ - position_); | 134 ASSERT(size > limit_ - position_); |
173 | 135 |
174 // Compute the new segment size. We use a 'high water mark' | 136 // Compute the new segment size. We use a 'high water mark' |
175 // strategy, where we increase the segment size every time we expand | 137 // strategy, where we increase the segment size every time we expand |
176 // except that we employ a maximum segment size when we delete. This | 138 // except that we employ a maximum segment size when we delete. This |
177 // is to avoid excessive malloc() and free() overhead. | 139 // is to avoid excessive malloc() and free() overhead. |
(...skipping 30 matching lines...)
208 V8::FatalProcessOutOfMemory("Zone"); | 170 V8::FatalProcessOutOfMemory("Zone"); |
209 return NULL; | 171 return NULL; |
210 } | 172 } |
211 limit_ = segment->end(); | 173 limit_ = segment->end(); |
212 ASSERT(position_ <= limit_); | 174 ASSERT(position_ <= limit_); |
213 return result; | 175 return result; |
214 } | 176 } |
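
NewExpand is the slow path behind the zone's bump-pointer allocator: the fast path (Zone::New, which is not part of this diff) simply advances position_ and only calls NewExpand once the request no longer fits between position_ and limit_. The sketch below illustrates that split together with a plausible stand-in for the 'high water mark' growth policy; the constants and the doubling rule are assumptions for illustration, and the real computation lives in the lines elided above.

#include <stdint.h>
#include <cstddef>
#include <algorithm>

// Fast path sketch: bump the pointer, fall back when the segment is full.
struct BumpRegion {
  uintptr_t position_;
  uintptr_t limit_;

  void* New(int size) {
    size = (size + 7) & ~7;  // round up to an assumed kAlignment of 8
    if (size > static_cast<int>(limit_ - position_)) {
      return NULL;  // the real code calls NewExpand(size) here
    }
    void* result = reinterpret_cast<void*>(position_);
    position_ += size;
    return result;
  }
};

// Growth policy sketch: grow the segment size on every expansion, cap it
// at a maximum, but always leave room for the request plus overhead.
static const int kMinimumSegmentSizeSketch = 8 * 1024;     // assumed value
static const int kMaximumSegmentSizeSketch = 1024 * 1024;  // assumed value

int NextSegmentSizeSketch(int last_segment_size, int requested_bytes,
                          int header_and_alignment_overhead) {
  int grown = std::max(2 * last_segment_size, kMinimumSegmentSizeSketch);
  grown = std::min(grown, kMaximumSegmentSizeSketch);
  return std::max(grown, requested_bytes + header_and_alignment_overhead);
}

With position_ == limit_ == 0, the very first request falls through to the slow path, which is exactly why the destructor above clears both fields to force a new segment to be allocated on demand.
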
215 | 177 |
216 | 178 |
217 } } // namespace v8::internal | 179 } } // namespace v8::internal |