Chromium Code Reviews

Unified Diff: base/memory/discardable_memory_allocator_android.cc

Issue 183763037: Update AshmemRegion::highest_allocated_chunk_ when merging free chunks. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Remove unnecessary allocator instance in unit test (created 6 years, 9 months ago)
 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/memory/discardable_memory_allocator_android.h"

 #include <algorithm>
 #include <cmath>
 #include <set>
 #include <utility>
(...skipping 109 matching lines...)
     int fd;
     void* base;
     if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
       return scoped_ptr<AshmemRegion>();
     return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
   }

   ~AshmemRegion() {
     const bool result = internal::CloseAshmemRegion(fd_, size_, base_);
     DCHECK(result);
+    DCHECK(!highest_allocated_chunk_);
   }

   // Returns a new instance of DiscardableMemory whose size is greater or equal
   // than |actual_size| (which is expected to be greater or equal than
   // |client_requested_size|).
   // Allocation works as follows:
   // 1) Reuse a previously freed chunk and return it if it succeeded. See
   // ReuseFreeChunk_Locked() below for more information.
   // 2) If no free chunk could be reused and the region is not big enough for
   // the requested size then NULL is returned.
   // 3) If there is enough room in the ashmem region then a new chunk is
   // returned. This new chunk starts at |offset_| which is the end of the
   // previously highest chunk in the region.
   scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size,
                                                 size_t actual_size) {
     DCHECK_LE(client_requested_size, actual_size);
     allocator_->lock_.AssertAcquired();
+
+    // Check that the |highest_allocated_chunk_| field doesn't contain a stale
+    // pointer. It should point to either a free chunk or a used chunk.
+    DCHECK(!highest_allocated_chunk_ ||
+           address_to_free_chunk_map_.find(highest_allocated_chunk_) !=
+               address_to_free_chunk_map_.end() ||
+           used_to_previous_chunk_map_.find(highest_allocated_chunk_) !=
+               used_to_previous_chunk_map_.end());
+
     scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked(
         client_requested_size, actual_size);
     if (memory)
       return memory.Pass();
+
     if (size_ - offset_ < actual_size) {
       // This region does not have enough space left to hold the requested size.
       return scoped_ptr<DiscardableMemory>();
     }
+
     void* const address = static_cast<char*>(base_) + offset_;
     memory.reset(
         new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size));
+
     used_to_previous_chunk_map_.insert(
         std::make_pair(address, highest_allocated_chunk_));
     highest_allocated_chunk_ = address;
     offset_ += actual_size;
     DCHECK_LE(offset_, size_);
     return memory.Pass();
   }

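The comment above Allocate_Locked() describes a three-step strategy: recycle a previously freed chunk, fail if the unused tail of the region is too small, otherwise carve a new chunk at |offset_|. The following is a minimal standalone sketch of that order only, with made-up names (SimpleRegion, ReuseFreeChunk) and none of the locking or chunk bookkeeping of the real class:

#include <cstddef>

// Simplified model of the allocation order documented above; not the
// Chromium implementation.
struct SimpleRegion {
  char* base;     // Start of the region's mapping.
  size_t size;    // Total size of the region.
  size_t offset;  // End of the highest chunk handed out so far.

  // Stand-in for ReuseFreeChunk_Locked(); the real code searches the free
  // chunk bookkeeping, here we simply report that nothing could be recycled.
  void* ReuseFreeChunk(size_t /* bytes */) { return NULL; }

  void* Allocate(size_t bytes) {
    void* const recycled = ReuseFreeChunk(bytes);
    if (recycled)                // 1) Reuse a previously freed chunk.
      return recycled;
    if (size - offset < bytes)   // 2) Not enough room left in the region.
      return NULL;
    void* const address = base + offset;
    offset += bytes;             // 3) Carve a new chunk at the end.
    return address;
  }
};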
   void OnChunkDeletion(void* chunk, size_t size) {
     AutoLock auto_lock(allocator_->lock_);
     MergeAndAddFreeChunk_Locked(chunk, size);
     // Note that |this| might be deleted beyond this point.
   }

  private:
   struct FreeChunk {
     FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {}

     explicit FreeChunk(size_t size)
         : previous_chunk(NULL),
           start(NULL),
           size(size) {
     }

     FreeChunk(void* previous_chunk, void* start, size_t size)
         : previous_chunk(previous_chunk),
           start(start),
           size(size) {
-      DCHECK_NE(previous_chunk, start);
+      DCHECK_LT(previous_chunk, start);
     }

     void* const previous_chunk;
     void* const start;
     const size_t size;

     bool is_null() const { return !start; }

     bool operator<(const FreeChunk& other) const {
       return size < other.size;
(...skipping 84 matching lines...)
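Because operator< above compares sizes only, the multiset of free chunks is ordered by size, and the member comments further down note that recycling works in O(log N). A hypothetical sketch of such a size-keyed lookup (the Chunk struct and FindBestFit() are illustrative, not the real ReuseFreeChunk_Locked(), which lives in the lines skipped here), assuming a lower_bound()-style best-fit search:

#include <cstddef>
#include <set>

// Illustrative only: free chunks keyed by size so that the smallest chunk
// that is still large enough can be found with lower_bound().
struct Chunk {
  void* start;
  size_t size;
  bool operator<(const Chunk& other) const { return size < other.size; }
};

// Returns the best-fitting free chunk for |requested| bytes, or
// free_chunks->end() if no free chunk is large enough.
std::multiset<Chunk>::iterator FindBestFit(std::multiset<Chunk>* free_chunks,
                                           size_t requested) {
  const Chunk key = { NULL, requested };
  return free_chunks->lower_bound(key);
}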
     allocator_->lock_.AssertAcquired();
     size_t new_free_chunk_size = size;
     // Merge with the previous chunk.
     void* first_free_chunk = chunk;
     DCHECK(!used_to_previous_chunk_map_.empty());
     const hash_map<void*, void*>::iterator previous_chunk_it =
         used_to_previous_chunk_map_.find(chunk);
     DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end());
     void* previous_chunk = previous_chunk_it->second;
     used_to_previous_chunk_map_.erase(previous_chunk_it);
+
     if (previous_chunk) {
       const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
       if (!free_chunk.is_null()) {
         new_free_chunk_size += free_chunk.size;
         first_free_chunk = previous_chunk;
+        if (chunk == highest_allocated_chunk_)
+          highest_allocated_chunk_ = previous_chunk;
+
         // There should not be more contiguous previous free chunks.
         previous_chunk = free_chunk.previous_chunk;
         DCHECK(!address_to_free_chunk_map_.count(previous_chunk));
       }
     }
+
     // Merge with the next chunk if free and present.
     void* next_chunk = static_cast<char*>(chunk) + size;
     const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk);
     if (!next_free_chunk.is_null()) {
       new_free_chunk_size += next_free_chunk.size;
+      if (next_free_chunk.start == highest_allocated_chunk_)
+        highest_allocated_chunk_ = first_free_chunk;
+
       // Same as above.
       DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) +
                                                next_free_chunk.size));
     }
+
     const bool whole_ashmem_region_is_free =
         used_to_previous_chunk_map_.empty();
     if (!whole_ashmem_region_is_free) {
       AddFreeChunk_Locked(
           FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
       return;
     }
+
     // The whole ashmem region is free thus it can be deleted.
     DCHECK_EQ(base_, first_free_chunk);
+    DCHECK_EQ(base_, highest_allocated_chunk_);
     DCHECK(free_chunks_.empty());
     DCHECK(address_to_free_chunk_map_.empty());
     DCHECK(used_to_previous_chunk_map_.empty());
+    highest_allocated_chunk_ = NULL;
     allocator_->DeleteAshmemRegion_Locked(this);  // Deletes |this|.
   }

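The two highest_allocated_chunk_ updates added above keep that pointer on the start of the merged free range whenever the chunk being released, or the free chunk just after it, was tracked as the highest chunk; otherwise the pointer could be left pointing into the middle of a merged free chunk, which is exactly what the new DCHECK in Allocate_Locked() guards against. A stripped-down, self-contained model of that invariant (TinyRegion and its Release() method are made up for illustration and are not the Chromium code):

#include <cassert>

// Minimal model of the invariant: when the highest chunk is freed and merges
// backwards with a free neighbour, the tracker must move to the start of the
// merged range.
struct TinyRegion {
  void* highest_allocated_chunk;

  // |chunk| is being released and ends up in a free range starting at
  // |merged_start| (merged_start == chunk when there is no free neighbour
  // before it).
  void Release(void* chunk, void* merged_start) {
    if (chunk == highest_allocated_chunk)
      highest_allocated_chunk = merged_start;
  }
};

int main() {
  char region[8192];
  void* const a = region;         // First chunk.
  void* const b = region + 4096;  // Second and highest chunk.
  TinyRegion r = { b };

  r.Release(a, a);  // Free A: B is still the highest allocated chunk.
  r.Release(b, a);  // Free B: it merges backwards, so the tracker moves to A.
  assert(r.highest_allocated_chunk == a);
  return 0;
}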
   void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
     allocator_->lock_.AssertAcquired();
     const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
         free_chunk);
     address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it));
     // Update the next used contiguous chunk, if any, since its previous chunk
     // may have changed due to free chunks merging/splitting.
(...skipping 28 matching lines...)
     const FreeChunk free_chunk(*free_chunk_it);
     address_to_free_chunk_map_.erase(free_chunk_it->start);
     free_chunks_.erase(free_chunk_it);
     return free_chunk;
   }

   const int fd_;
   const size_t size_;
   void* const base_;
   DiscardableMemoryAllocator* const allocator_;
+  // Points to the chunk with the highest address in the region. This pointer
+  // needs to be carefully updated when chunks are merged/split.
   void* highest_allocated_chunk_;
   // Points to the end of |highest_allocated_chunk_|.
   size_t offset_;
   // Allows free chunks recycling (lookup, insertion and removal) in O(log N).
   // Note that FreeChunk values are indexed by their size and also note that
   // multiple free chunks can have the same size (which is why multiset<> is
   // used instead of e.g. set<>).
   std::multiset<FreeChunk> free_chunks_;
   // Used while merging free contiguous chunks to erase free chunks (from their
   // start address) in constant time. Note that multiset<>::{insert,erase}()
(...skipping 79 matching lines...)
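The member comments above describe two indexes over the same free chunks: the multiset ordered by size for recycling, and a map from start address to multiset iterator so that merging can erase a chunk given only its address. Below is a simplified sketch of that double bookkeeping (std::map standing in for hash_map, bare sizes standing in for FreeChunk values, names made up); it relies on multiset iterators remaining valid when other elements are inserted or erased, which is a property of std::multiset:

#include <cstddef>
#include <map>
#include <set>

// Illustrative double index over free chunks; not the Chromium code.
class FreeChunkIndex {
 public:
  void Add(void* start, size_t size) {
    by_address_[start] = by_size_.insert(size);
  }

  // Removes the free chunk starting at |start| and returns its size, or 0 if
  // no free chunk starts there.
  size_t Remove(void* start) {
    const std::map<void*, std::multiset<size_t>::iterator>::iterator it =
        by_address_.find(start);
    if (it == by_address_.end())
      return 0;
    const size_t size = *it->second;
    by_size_.erase(it->second);
    by_address_.erase(it);
    return size;
  }

 private:
  std::multiset<size_t> by_size_;
  std::map<void*, std::multiset<size_t>::iterator> by_address_;
};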
   DCHECK_LE(ashmem_regions_.size(), 5U);
   const ScopedVector<AshmemRegion>::iterator it = std::find(
       ashmem_regions_.begin(), ashmem_regions_.end(), region);
   DCHECK_NE(ashmem_regions_.end(), it);
   std::swap(*it, ashmem_regions_.back());
   ashmem_regions_.pop_back();
 }

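The code above removes the region with the swap-and-pop idiom: std::find() locates it, it is swapped with the last element, and the vector is popped, avoiding a shift of the remaining regions (presumably acceptable because their order does not matter). The same idiom on a plain std::vector, as a small generic sketch (SwapAndErase is a made-up name):

#include <algorithm>
#include <vector>

// Generic swap-and-pop erase, mirroring the std::find/std::swap/pop_back
// sequence above; illustrative only.
template <typename T>
void SwapAndErase(std::vector<T>* v, const T& value) {
  const typename std::vector<T>::iterator it =
      std::find(v->begin(), v->end(), value);
  if (it == v->end())
    return;
  std::swap(*it, v->back());
  v->pop_back();
}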
 }  // namespace internal
 }  // namespace base