OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_memory_allocator_android.h" | 5 #include "base/memory/discardable_memory_allocator_android.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <cmath> | 8 #include <cmath> |
9 #include <set> | 9 #include <set> |
10 #include <utility> | 10 #include <utility> |
(...skipping 28 matching lines...) |
39 // currently the maximum allowed). If the client requests 4096 bytes and a free | 39 // currently the maximum allowed). If the client requests 4096 bytes and a free |
40 // chunk of 8192 bytes is available then the free chunk gets split into two | 40 // chunk of 8192 bytes is available then the free chunk gets split into two |
41 // pieces to minimize fragmentation (since 8192 - 4096 = 4096 which is greater | 41 // pieces to minimize fragmentation (since 8192 - 4096 = 4096 which is greater |
42 // than 4095). | 42 // than 4095). |
43 // TODO(pliard): tune this if splitting chunks too often leads to performance | 43 // TODO(pliard): tune this if splitting chunks too often leads to performance |
44 // issues. | 44 // issues. |
45 const size_t kMaxChunkFragmentationBytes = 4096 - 1; | 45 const size_t kMaxChunkFragmentationBytes = 4096 - 1; |
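As a minimal sketch of the rule described in the comment above (the helper name ShouldSplitChunk is hypothetical; the actual splitting logic lives in the AshmemRegion code skipped in this diff):

  #include <cstddef>

  const size_t kMaxChunkFragmentationBytes = 4096 - 1;  // As defined above.

  // Hypothetical helper mirroring the comment above: reusing |chunk_size|
  // bytes for a |requested_size| allocation should split the chunk when the
  // leftover would exceed kMaxChunkFragmentationBytes.
  bool ShouldSplitChunk(size_t chunk_size, size_t requested_size) {
    return chunk_size - requested_size > kMaxChunkFragmentationBytes;
  }
  // ShouldSplitChunk(8192, 4096) is true: 8192 - 4096 = 4096 > 4095.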
46 | 46 |
47 const size_t kMinAshmemRegionSize = 32 * 1024 * 1024; | 47 const size_t kMinAshmemRegionSize = 32 * 1024 * 1024; |
48 | 48 |
| 49 // Returns 0 if the provided size is too large to be aligned. |
| 50 size_t AlignToNextPage(size_t size) { |
| 51 const size_t kPageSize = 4096; |
| 52 DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); |
| 53 if (size > std::numeric_limits<size_t>::max() - kPageSize + 1) |
| 54 return 0; |
| 55 const size_t mask = ~(kPageSize - 1); |
| 56 return (size + kPageSize - 1) & mask; |
| 57 } |
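The overflow guard in AlignToNextPage above deserves a worked example: any size greater than SIZE_MAX - kPageSize + 1 would wrap around when rounded up, so 0 is returned as a failure value. A standalone, illustration-only check of the same arithmetic (assuming a 4096-byte page, as the DCHECK asserts):

  #include <cassert>
  #include <cstddef>
  #include <limits>

  // Standalone copy of the alignment logic, for illustration only.
  size_t AlignToNextPage(size_t size) {
    const size_t kPageSize = 4096;
    if (size > std::numeric_limits<size_t>::max() - kPageSize + 1)
      return 0;  // Rounding up would overflow size_t.
    return (size + kPageSize - 1) & ~(kPageSize - 1);
  }

  int main() {
    assert(AlignToNextPage(1) == 4096);     // Rounds up to one page.
    assert(AlignToNextPage(4096) == 4096);  // Exact multiple is unchanged.
    assert(AlignToNextPage(4097) == 8192);  // Spills into a second page.
    assert(AlignToNextPage(std::numeric_limits<size_t>::max()) == 0);  // Overflow.
    return 0;
  }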
| 58 |
49 } // namespace | 59 } // namespace |
50 | 60 |
51 namespace internal { | 61 namespace internal { |
52 | 62 |
53 class DiscardableMemoryAllocator::DiscardableAshmemChunk | 63 class DiscardableMemoryAllocator::DiscardableAshmemChunk |
54 : public DiscardableMemory { | 64 : public DiscardableMemory { |
55 public: | 65 public: |
56 // Note that |ashmem_region| must outlive |this|. | 66 // Note that |ashmem_region| must outlive |this|. |
57 DiscardableAshmemChunk(AshmemRegion* ashmem_region, | 67 DiscardableAshmemChunk(AshmemRegion* ashmem_region, |
58 int fd, | 68 int fd, |
(...skipping 40 matching lines...) |
99 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); | 109 DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); |
100 }; | 110 }; |
101 | 111 |
102 class DiscardableMemoryAllocator::AshmemRegion { | 112 class DiscardableMemoryAllocator::AshmemRegion { |
103 public: | 113 public: |
104 // Note that |allocator| must outlive |this|. | 114 // Note that |allocator| must outlive |this|. |
105 static scoped_ptr<AshmemRegion> Create( | 115 static scoped_ptr<AshmemRegion> Create( |
106 size_t size, | 116 size_t size, |
107 const std::string& name, | 117 const std::string& name, |
108 DiscardableMemoryAllocator* allocator) { | 118 DiscardableMemoryAllocator* allocator) { |
109 DCHECK_EQ(size, internal::AlignToNextPage(size)); | 119 DCHECK_EQ(size, AlignToNextPage(size)); |
110 int fd; | 120 int fd; |
111 void* base; | 121 void* base; |
112 if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base)) | 122 if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base)) |
113 return scoped_ptr<AshmemRegion>(); | 123 return scoped_ptr<AshmemRegion>(); |
114 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); | 124 return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); |
115 } | 125 } |
116 | 126 |
117 virtual ~AshmemRegion() { | 127 virtual ~AshmemRegion() { |
118 const bool result = internal::CloseAshmemRegion(fd_, size_, base_); | 128 const bool result = internal::CloseAshmemRegion(fd_, size_, base_); |
119 DCHECK(result); | 129 DCHECK(result); |
(...skipping 244 matching lines...) |
364 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { | 374 DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { |
365 if (locked_) | 375 if (locked_) |
366 internal::UnlockAshmemRegion(fd_, offset_, size_, address_); | 376 internal::UnlockAshmemRegion(fd_, offset_, size_, address_); |
367 ashmem_region_->OnChunkDeletion(address_, size_); | 377 ashmem_region_->OnChunkDeletion(address_, size_); |
368 } | 378 } |
369 | 379 |
370 DiscardableMemoryAllocator::DiscardableMemoryAllocator( | 380 DiscardableMemoryAllocator::DiscardableMemoryAllocator( |
371 const std::string& name, | 381 const std::string& name, |
372 size_t ashmem_region_size) | 382 size_t ashmem_region_size) |
373 : name_(name), | 383 : name_(name), |
374 ashmem_region_size_(std::max(kMinAshmemRegionSize, ashmem_region_size)) { | 384 ashmem_region_size_( |
| 385 std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))), |
| 386 last_ashmem_region_size_(0) { |
375 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); | 387 DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize); |
376 } | 388 } |
377 | 389 |
378 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { | 390 DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { |
379 DCHECK(thread_checker_.CalledOnValidThread()); | 391 DCHECK(thread_checker_.CalledOnValidThread()); |
380 DCHECK(ashmem_regions_.empty()); | 392 DCHECK(ashmem_regions_.empty()); |
381 } | 393 } |
382 | 394 |
383 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate( | 395 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate( |
384 size_t size) { | 396 size_t size) { |
385 const size_t aligned_size = internal::AlignToNextPage(size); | 397 const size_t aligned_size = AlignToNextPage(size); |
386 if (!aligned_size) | 398 if (!aligned_size) |
387 return scoped_ptr<DiscardableMemory>(); | 399 return scoped_ptr<DiscardableMemory>(); |
388 // TODO(pliard): make this function less naive by e.g. moving the free chunks | 400 // TODO(pliard): make this function less naive by e.g. moving the free chunks |
389 // multiset to the allocator itself in order to further decrease | 401 // multiset to the allocator itself in order to further decrease |
390 // fragmentation and speed up allocation. Note that there should not be more | 402 // fragmentation and speed up allocation. Note that there should not be more |
391 // than a handful (at most 5) of AshmemRegion instances in practice though. | 403 // than a handful (at most 5) of AshmemRegion instances in practice though. |
392 AutoLock auto_lock(lock_); | 404 AutoLock auto_lock(lock_); |
393 DCHECK_LE(ashmem_regions_.size(), 5U); | 405 DCHECK_LE(ashmem_regions_.size(), 5U); |
394 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); | 406 for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); |
395 it != ashmem_regions_.end(); ++it) { | 407 it != ashmem_regions_.end(); ++it) { |
396 scoped_ptr<DiscardableMemory> memory( | 408 scoped_ptr<DiscardableMemory> memory( |
397 (*it)->Allocate_Locked(size, aligned_size)); | 409 (*it)->Allocate_Locked(size, aligned_size)); |
398 if (memory) | 410 if (memory) |
399 return memory.Pass(); | 411 return memory.Pass(); |
400 } | 412 } |
401 // The creation of the (large) ashmem region might fail if the address space | 413 // The creation of the (large) ashmem region might fail if the address space |
402 // is too fragmented. If creation fails, the allocator retries by | 414 // is too fragmented. If creation fails, the allocator retries by |
403 // repeatedly halving the size. | 415 // repeatedly halving the size. |
404 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); | 416 const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size); |
405 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); | 417 for (size_t region_size = std::max(ashmem_region_size_, aligned_size); |
406 region_size >= min_region_size; region_size /= 2) { | 418 region_size >= min_region_size; |
| 419 region_size = AlignToNextPage(region_size / 2)) { |
407 scoped_ptr<AshmemRegion> new_region( | 420 scoped_ptr<AshmemRegion> new_region( |
408 AshmemRegion::Create(region_size, name_.c_str(), this)); | 421 AshmemRegion::Create(region_size, name_.c_str(), this)); |
409 if (!new_region) | 422 if (!new_region) |
410 continue; | 423 continue; |
| 424 last_ashmem_region_size_ = region_size; |
411 ashmem_regions_.push_back(new_region.release()); | 425 ashmem_regions_.push_back(new_region.release()); |
412 return ashmem_regions_.back()->Allocate_Locked(size, aligned_size); | 426 return ashmem_regions_.back()->Allocate_Locked(size, aligned_size); |
413 } | 427 } |
414 // TODO(pliard): consider adding a histogram to see how often this happens. | 428 // TODO(pliard): consider adding a histogram to see how often this happens. |
415 return scoped_ptr<DiscardableMemory>(); | 429 return scoped_ptr<DiscardableMemory>(); |
416 } | 430 } |
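For a concrete view of the fallback loop above, assume a hypothetical allocator constructed with a 64 MB region size and a one-page request: creation is attempted at 64 MB, then 32 MB, and the loop then exits because the next halved size (16 MB) drops below min_region_size (32 MB here). A sketch of just that size sequence, using plain region_size /= 2, which matches AlignToNextPage(region_size / 2) for these page-multiple sizes:

  #include <algorithm>
  #include <cstdio>

  int main() {
    const size_t kMinAshmemRegionSize = 32 * 1024 * 1024;  // As defined above.
    const size_t aligned_size = 4096;                 // One-page request.
    const size_t initial_size = 64 * 1024 * 1024;     // Hypothetical value.
    const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size);
    for (size_t region_size = std::max(initial_size, aligned_size);
         region_size >= min_region_size; region_size /= 2)
      printf("trying %zu MB\n", region_size / (1024 * 1024));
    // Prints: "trying 64 MB" then "trying 32 MB".
    return 0;
  }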
417 | 431 |
| 432 size_t DiscardableMemoryAllocator::last_ashmem_region_size() const { |
| 433 AutoLock auto_lock(lock_); |
| 434 return last_ashmem_region_size_; |
| 435 } |
| 436 |
418 void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked( | 437 void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked( |
419 AshmemRegion* region) { | 438 AshmemRegion* region) { |
420 lock_.AssertAcquired(); | 439 lock_.AssertAcquired(); |
421 // Note that there should not be more than a handful of ashmem region | 440 // Note that there should not be more than a handful of ashmem region |
422 // instances in |ashmem_regions_|. | 441 // instances in |ashmem_regions_|. |
423 DCHECK_LE(ashmem_regions_.size(), 5U); | 442 DCHECK_LE(ashmem_regions_.size(), 5U); |
424 const ScopedVector<AshmemRegion>::iterator it = std::find( | 443 const ScopedVector<AshmemRegion>::iterator it = std::find( |
425 ashmem_regions_.begin(), ashmem_regions_.end(), region); | 444 ashmem_regions_.begin(), ashmem_regions_.end(), region); |
426 DCHECK_NE(ashmem_regions_.end(), it); | 445 DCHECK_NE(ashmem_regions_.end(), it); |
427 std::swap(*it, ashmem_regions_.back()); | 446 std::swap(*it, ashmem_regions_.back()); |
428 ashmem_regions_.pop_back(); | 447 ashmem_regions_.pop_back(); |
429 } | 448 } |
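DeleteAshmemRegion_Locked above erases with the swap-and-pop idiom: swapping the target with the last element and popping makes removal O(1) instead of shifting every later element, which is safe because the order of |ashmem_regions_| carries no meaning. In generic form (illustration only; not part of the patch):

  #include <algorithm>
  #include <vector>

  // Order-destroying O(1) erase: move the victim to the back, then pop.
  template <typename T>
  void SwapAndPopErase(std::vector<T>* v,
                       typename std::vector<T>::iterator it) {
    std::swap(*it, v->back());
    v->pop_back();
  }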
430 | 449 |
431 } // namespace internal | 450 } // namespace internal |
432 } // namespace base | 451 } // namespace base |