Chromium Code Reviews

Unified Diff: src/zone/zone.cc

Issue 2400343002: Revert of Pool implementation for zone segments (Closed)
Patch Set: Created 4 years, 2 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/zone/zone.h"

 #include <cstring>

 #include "src/v8.h"

(...skipping 33 matching lines...)
 Zone::Zone(AccountingAllocator* allocator)
     : allocation_size_(0),
       segment_bytes_allocated_(0),
       position_(0),
       limit_(0),
       allocator_(allocator),
       segment_head_(nullptr) {}

 Zone::~Zone() {
   DeleteAll();
+  DeleteKeptSegment();

   DCHECK(segment_bytes_allocated_ == 0);
 }

 void* Zone::New(size_t size) {
   // Round up the requested size to fit the alignment.
   size = RoundUp(size, kAlignment);

   // If the allocation size is divisible by 8 then we return an 8-byte aligned
   // address.
(...skipping 20 matching lines...)
   DCHECK(redzone_position + kASanRedzoneBytes == position_);
   ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);

   // Check that the result has the proper alignment and return it.
   DCHECK(IsAddressAligned(result, kAlignment, 0));
   allocation_size_ += size;
   return reinterpret_cast<void*>(result);
 }

 void Zone::DeleteAll() {
-  // Traverse the chained list of segments and return them all to the allocator.
+  // Find a segment with a suitable size to keep around.
+  Segment* keep = nullptr;
+  // Traverse the chained list of segments, zapping (in debug mode)
+  // and freeing every segment except the one we wish to keep.
   for (Segment* current = segment_head_; current;) {
     Segment* next = current->next();
-    size_t size = current->size();
-
-    // Un-poison the segment content so we can re-use or zap it later.
-    ASAN_UNPOISON_MEMORY_REGION(current->start(), current->capacity());
-
-    segment_bytes_allocated_ -= size;
-    allocator_->ReturnSegment(current);
+    if (!keep && current->size() <= kMaximumKeptSegmentSize) {
+      // Unlink the segment we wish to keep from the list.
+      keep = current;
+      keep->set_next(nullptr);
+    } else {
+      size_t size = current->size();
+#ifdef DEBUG
+      // Un-poison first so the zapping doesn't trigger ASan complaints.
+      ASAN_UNPOISON_MEMORY_REGION(current, size);
+#endif
+      current->ZapContents();
+      segment_bytes_allocated_ -= size;
+      allocator_->FreeSegment(current);
+    }
     current = next;
   }

-  position_ = limit_ = 0;
+  // If we have found a segment we want to keep, we must recompute the
+  // variables 'position' and 'limit' to prepare for future allocate
+  // attempts. Otherwise, we must clear the position and limit to
+  // force a new segment to be allocated on demand.
+  if (keep) {
+    Address start = keep->start();
+    position_ = RoundUp(start, kAlignment);
+    limit_ = keep->end();
+    // Un-poison so we can re-use the segment later.
+    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
+    keep->ZapContents();
+  } else {
+    position_ = limit_ = 0;
+  }

   allocation_size_ = 0;
   // Update the head segment to be the kept segment (if any).
-  segment_head_ = nullptr;
+  segment_head_ = keep;
+}
+
+void Zone::DeleteKeptSegment() {
+  DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
+  if (segment_head_ != nullptr) {
+    size_t size = segment_head_->size();
+#ifdef DEBUG
+    // Un-poison first so the zapping doesn't trigger ASan complaints.
+    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
+#endif
+    segment_head_->ZapContents();
+    segment_bytes_allocated_ -= size;
+    allocator_->FreeSegment(segment_head_);
+    segment_head_ = nullptr;
+  }
+
+  DCHECK(segment_bytes_allocated_ == 0);
 }

 // Creates a new segment, sets it size, and pushes it to the front
 // of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(size_t requested_size) {
-  Segment* result = allocator_->GetSegment(requested_size);
-  DCHECK_GE(result->size(), requested_size);
-  segment_bytes_allocated_ += result->size();
+Segment* Zone::NewSegment(size_t size) {
+  Segment* result = allocator_->AllocateSegment(size);
+  segment_bytes_allocated_ += size;
   if (result != nullptr) {
-    result->set_zone(this);
-    result->set_next(segment_head_);
+    result->Initialize(segment_head_, size, this);
     segment_head_ = result;
   }
   return result;
 }

 Address Zone::NewExpand(size_t size) {
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
   DCHECK_EQ(size, RoundDown(size, kAlignment));
   DCHECK(limit_ < position_ ||
(...skipping 43 matching lines...)
   // size bytes + header and alignment padding)
   DCHECK(reinterpret_cast<uintptr_t>(position_) >=
          reinterpret_cast<uintptr_t>(result));
   limit_ = segment->end();
   DCHECK(position_ <= limit_);
   return result;
 }

 }  // namespace internal
 }  // namespace v8
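
What the revert restores, in outline: Zone::DeleteAll() now frees every segment except one of at most kMaximumKeptSegmentSize bytes, rewinding position_ and limit_ onto that segment so the next allocation can reuse it, and Zone::DeleteKeptSegment() (called from the destructor after DeleteAll()) releases the cached segment so the DCHECK on segment_bytes_allocated_ holds. The sketch below is a minimal, self-contained illustration of that keep-one-segment pattern, not V8 code: SketchArena, its size constants, and the std::malloc-backed segments are invented stand-ins for the real AccountingAllocator and Segment types.

// sketch_arena.cc -- hypothetical illustration only; not part of V8.
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>

namespace sketch {

struct Segment {
  Segment* next;
  size_t size;  // total bytes, including this header
  uintptr_t start() const { return reinterpret_cast<uintptr_t>(this + 1); }
  uintptr_t end() const { return reinterpret_cast<uintptr_t>(this) + size; }
};

class SketchArena {
 public:
  ~SketchArena() {
    Reset();        // frees all but at most one small segment
    ReleaseKept();  // analogue of Zone::DeleteKeptSegment()
  }

  // Bump-pointer allocation, analogous in spirit to Zone::New().
  void* Allocate(size_t size) {
    size = RoundUp(size, kAlignment);
    if (size > limit_ - position_) Expand(size);
    uintptr_t result = position_;
    position_ += size;
    return reinterpret_cast<void*>(result);
  }

  // Analogue of Zone::DeleteAll(): free every segment except one small one,
  // and rewind position/limit onto it so it can be reused immediately.
  void Reset() {
    Segment* keep = nullptr;
    for (Segment* current = head_; current;) {
      Segment* next = current->next;
      if (!keep && current->size <= kMaximumKeptSegmentSize) {
        keep = current;       // unlink the segment we wish to keep
        keep->next = nullptr;
      } else {
        std::free(current);
      }
      current = next;
    }
    head_ = keep;
    position_ = keep ? RoundUp(keep->start(), kAlignment) : 0;
    limit_ = keep ? keep->end() : 0;
  }

  // Analogue of Zone::DeleteKeptSegment(): drop the cached segment too.
  void ReleaseKept() {
    std::free(head_);  // free(nullptr) is a no-op
    head_ = nullptr;
    position_ = limit_ = 0;
  }

 private:
  static constexpr size_t kAlignment = 8;
  static constexpr size_t kSegmentSize = 8 * 1024;              // invented
  static constexpr size_t kMaximumKeptSegmentSize = 64 * 1024;  // invented

  static uintptr_t RoundUp(uintptr_t value, size_t alignment) {
    uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
    return (value + mask) & ~mask;
  }

  // Allocates a fresh segment and makes it the list head (error handling
  // omitted for brevity).
  void Expand(size_t min_size) {
    size_t size = RoundUp(sizeof(Segment) + min_size, kSegmentSize);
    Segment* segment = static_cast<Segment*>(std::malloc(size));
    segment->next = head_;
    segment->size = size;
    head_ = segment;
    position_ = RoundUp(segment->start(), kAlignment);
    limit_ = segment->end();
  }

  Segment* head_ = nullptr;
  uintptr_t position_ = 0;
  uintptr_t limit_ = 0;
};

}  // namespace sketch

int main() {
  sketch::SketchArena arena;
  void* first = arena.Allocate(32);
  arena.Reset();  // the single small segment is kept around
  void* second = arena.Allocate(32);
  std::cout << "segment reused after Reset(): " << std::boolalpha
            << (first == second) << "\n";
}

As in the restored code, the payoff of keeping one small segment is that a zone which is repeatedly emptied and refilled does not go back to the underlying allocator for its first segment after every reset.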
