OLD | NEW |
---|---|
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/memory/discardable_memory.h" | 5 #include "base/memory/discardable_memory_android.h" |
6 | 6 |
7 #include <sys/mman.h> | 7 #include <sys/mman.h> |
8 #include <sys/resource.h> | |
9 #include <sys/time.h> | |
8 #include <unistd.h> | 10 #include <unistd.h> |
9 | 11 |
10 #include "base/basictypes.h" | 12 #include "base/basictypes.h" |
11 #include "base/compiler_specific.h" | 13 #include "base/compiler_specific.h" |
12 #include "base/file_util.h" | 14 #include "base/file_util.h" |
13 #include "base/lazy_instance.h" | 15 #include "base/lazy_instance.h" |
14 #include "base/logging.h" | 16 #include "base/logging.h" |
17 #include "base/memory/discardable_memory.h" | |
18 #include "base/memory/discardable_memory_allocator_android.h" | |
15 #include "base/posix/eintr_wrapper.h" | 19 #include "base/posix/eintr_wrapper.h" |
16 #include "base/synchronization/lock.h" | 20 #include "base/synchronization/lock.h" |
17 #include "third_party/ashmem/ashmem.h" | 21 #include "third_party/ashmem/ashmem.h" |
18 | 22 |
19 namespace base { | 23 namespace base { |
20 namespace { | 24 namespace { |
21 | 25 |
22 // Protects |g_num_discardable_memory| below. | 26 const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; |
23 base::LazyInstance<base::Lock>::Leaky g_discardable_memory_lock = | |
24 LAZY_INSTANCE_INITIALIZER; | |
25 | 27 |
26 // Total number of discardable memory in the process. | 28 struct GlobalContext { |
27 int g_num_discardable_memory = 0; | 29 GlobalContext() |
30 : ashmem_fd_limit(GetSoftFDLimit()), | |
31 allocator(kAshmemAllocatorName), | |
32 ashmem_fd_count_(0) { | |
33 } | |
28 | 34 |
29 // Upper limit on the number of discardable memory to avoid hitting file | 35 const int ashmem_fd_limit; |
30 // descriptor limit. | 36 internal::DiscardableMemoryAllocator allocator; |
31 const int kDiscardableMemoryNumLimit = 128; | 37 Lock lock; |
38 | |
39 int ashmem_fd_count() const { | |
40 lock.AssertAcquired(); | |
41 return ashmem_fd_count_; | |
42 } | |
43 | |
44 void decrement_ashmem_fd_count() { | |
45 lock.AssertAcquired(); | |
46 --ashmem_fd_count_; | |
47 } | |
48 | |
49 void increment_ashmem_fd_count() { | |
50 lock.AssertAcquired(); | |
51 ++ashmem_fd_count_; | |
52 } | |
53 | |
54 private: | |
55 static int GetSoftFDLimit() { | |
56 struct rlimit limit_info; | |
57 if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0) | |
58 return 128; | |
59 // Allow 25% of file descriptor capacity for ashmem. | |
60 return limit_info.rlim_cur / 4; | |
61 } | |
62 | |
63 int ashmem_fd_count_; | |
64 }; | |
65 | |
66 LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER; | |
67 | |
68 // This is the default implementation of DiscardableMemory on Android which is | |
69 // used when file descriptor usage is under the soft limit. When file descriptor | |
70 // usage gets too high the discardable memory allocator is used instead. See | |
71 // ShouldUseAllocator() below for more details. | |
72 class DiscardableMemoryAndroidSimple : public DiscardableMemory { | |
73 public: | |
74 DiscardableMemoryAndroidSimple(int fd, void* address, size_t size) | |
75 : fd_(fd), | |
76 memory_(address), | |
77 size_(size) { | |
78 DCHECK_GE(fd_, 0); | |
79 DCHECK(memory_); | |
80 } | |
81 | |
82 virtual ~DiscardableMemoryAndroidSimple() { | |
83 internal::CloseAshmemRegion(fd_, size_, memory_); | |
84 } | |
85 | |
86 // DiscardableMemory: | |
87 virtual LockDiscardableMemoryStatus Lock() OVERRIDE { | |
88 return internal::LockAshmemRegion(fd_, 0, size_, memory_); | |
89 } | |
90 | |
91 virtual void Unlock() OVERRIDE { | |
92 internal::UnlockAshmemRegion(fd_, 0, size_, memory_); | |
93 } | |
94 | |
95 virtual void* Memory() const OVERRIDE { | |
96 return memory_; | |
97 } | |
98 | |
99 private: | |
100 const int fd_; | |
101 void* const memory_; | |
102 const size_t size_; | |
103 | |
104 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple); | |
105 }; | |
106 | |
107 int GetCurrentNumberOfAshmemFDs() { | |
108 AutoLock lock(g_context.Get().lock); | |
109 return g_context.Get().ashmem_fd_count(); | |
110 } | |
111 | |
112 // Allocation can happen in two ways: | |
113 // - Each client-requested allocation is backed by an individual ashmem region. | |
 114 // This allows deleting ashmem regions individually by closing the ashmem file | |
willchan no longer on Chromium
2013/11/28 22:51:55
grammar nit: s/to delete/deleting/
Philippe
2013/11/29 12:41:05
Oops, thanks :)
| |
115 // descriptor. This is the default path that is taken when file descriptor usage | |
116 // allows us to do so. | |
willchan no longer on Chromium
2013/11/28 22:51:55
Or when the allocation size would require an entir
Philippe
2013/11/29 12:41:05
Done.
| |
117 // - Allocations are performed by the global allocator when file descriptor | |
118 // usage gets too high. This still allows unpinning but does not allow deleting | |
 119 // (i.e. releasing the physical pages backing) individual regions. | |
willchan no longer on Chromium
2013/11/28 22:51:55
spelling nits:
* s/physycal/physical/
* s/individi
willchan no longer on Chromium
2013/11/28 22:51:55
spelling nits:
s/physycal/physical/
s/individiual/
Philippe
2013/11/29 12:41:05
Oops, I told you I was retarded :)
| |
120 bool ShouldUseAllocator(size_t size) { | |
121 const float kMaxFDUsageRateForNormalAllocations = 0.9; | |
122 const float kMaxFDUsageRateForVeryLargeAllocations = 0.98; | |
123 const int current_ashmem_fd_count = GetCurrentNumberOfAshmemFDs(); | |
124 const int ashmem_fd_limit = g_context.Get().ashmem_fd_limit; | |
125 if (current_ashmem_fd_count > | |
126 kMaxFDUsageRateForVeryLargeAllocations * ashmem_fd_limit) { | |
127 // FD usage is too high no matter how big the requested size is. | |
willchan no longer on Chromium
2013/11/28 22:51:55
I don't fully understand this algorithm. The alloc
Philippe
2013/11/29 12:41:05
You're right, the allocator would very likely crea
| |
128 return true; | |
129 } | |
130 // TODO(pliard): consider tuning the size threshold below. For instance we | |
131 // might want to make it a fraction of kMinAshmemRegionSize and also | |
132 // systematically have small allocations go through the allocator to allow big | |
 133 // allocations to systematically go through individual ashmem regions. | |
willchan no longer on Chromium
2013/11/28 22:51:55
spelling nit: s/individial/individual/
Philippe
2013/11/29 12:41:05
Done.
Philippe
2013/11/29 12:41:05
Done.
| |
134 if (size > internal::DiscardableMemoryAllocator::kMinAshmemRegionSize) | |
willchan no longer on Chromium
2013/11/28 22:51:55
size >= ... perhaps? Even more precise would be si
Philippe
2013/11/29 12:41:05
Agreed for the layering violation and also page al
| |
135 return false; | |
136 | |
137 return current_ashmem_fd_count > | |
138 kMaxFDUsageRateForNormalAllocations * ashmem_fd_limit; | |
139 } | |
140 | |
141 } // namespace | |
142 | |
143 namespace internal { | |
144 | |
145 size_t AlignToNextPage(size_t size) { | |
146 const size_t kPageSize = 4096; | |
147 DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); | |
148 const size_t mask = ~(kPageSize - 1); | |
149 return (size + kPageSize - 1) & mask; | |
willchan no longer on Chromium
2013/11/28 22:51:55
I'm not sure, but this might be a security issue d
Philippe
2013/11/29 12:41:05
Great catch!
| |
150 } | |
32 | 151 |
33 bool CreateAshmemRegion(const char* name, | 152 bool CreateAshmemRegion(const char* name, |
34 size_t size, | 153 size_t size, |
35 int* out_fd, | 154 int* out_fd, |
36 void** out_address) { | 155 void** out_address) { |
37 base::AutoLock lock(g_discardable_memory_lock.Get()); | 156 AutoLock lock(g_context.Get().lock); |
38 if (g_num_discardable_memory + 1 > kDiscardableMemoryNumLimit) | 157 if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit) |
39 return false; | 158 return false; |
40 int fd = ashmem_create_region(name, size); | 159 int fd = ashmem_create_region(name, size); |
41 if (fd < 0) { | 160 if (fd < 0) { |
42 DLOG(ERROR) << "ashmem_create_region() failed"; | 161 DLOG(ERROR) << "ashmem_create_region() failed"; |
43 return false; | 162 return false; |
44 } | 163 } |
45 file_util::ScopedFD fd_closer(&fd); | 164 file_util::ScopedFD fd_closer(&fd); |
46 | 165 |
47 const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); | 166 const int err = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); |
48 if (err < 0) { | 167 if (err < 0) { |
49 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; | 168 DLOG(ERROR) << "Error " << err << " when setting protection of ashmem"; |
50 return false; | 169 return false; |
51 } | 170 } |
52 | 171 |
53 // There is a problem using MAP_PRIVATE here. As we are constantly calling | 172 // There is a problem using MAP_PRIVATE here. As we are constantly calling |
54 // Lock() and Unlock(), data could get lost if they are not written to the | 173 // Lock() and Unlock(), data could get lost if they are not written to the |
55 // underlying file when Unlock() gets called. | 174 // underlying file when Unlock() gets called. |
56 void* const address = mmap( | 175 void* const address = mmap( |
57 NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); | 176 NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); |
58 if (address == MAP_FAILED) { | 177 if (address == MAP_FAILED) { |
59 DPLOG(ERROR) << "Failed to map memory."; | 178 DPLOG(ERROR) << "Failed to map memory."; |
60 return false; | 179 return false; |
61 } | 180 } |
62 | 181 |
63 ignore_result(fd_closer.release()); | 182 ignore_result(fd_closer.release()); |
64 ++g_num_discardable_memory; | 183 g_context.Get().increment_ashmem_fd_count(); |
65 *out_fd = fd; | 184 *out_fd = fd; |
66 *out_address = address; | 185 *out_address = address; |
67 return true; | 186 return true; |
68 } | 187 } |
69 | 188 |
70 bool DeleteAshmemRegion(int fd, size_t size, void* address) { | 189 bool CloseAshmemRegion(int fd, size_t size, void* address) { |
71 base::AutoLock lock(g_discardable_memory_lock.Get()); | 190 AutoLock lock(g_context.Get().lock); |
72 --g_num_discardable_memory; | 191 g_context.Get().decrement_ashmem_fd_count(); |
73 if (munmap(address, size) == -1) { | 192 if (munmap(address, size) == -1) { |
74 DPLOG(ERROR) << "Failed to unmap memory."; | 193 DPLOG(ERROR) << "Failed to unmap memory."; |
75 close(fd); | 194 close(fd); |
76 return false; | 195 return false; |
77 } | 196 } |
78 return close(fd) == 0; | 197 return close(fd) == 0; |
79 } | 198 } |
80 | 199 |
81 LockDiscardableMemoryStatus LockAshmemRegion(int fd, | 200 LockDiscardableMemoryStatus LockAshmemRegion(int fd, |
82 size_t off, | 201 size_t off, |
83 size_t size, | 202 size_t size, |
84 const void* address) { | 203 const void* address) { |
85 const int result = ashmem_pin_region(fd, off, size); | 204 const int result = ashmem_pin_region(fd, off, size); |
86 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); | 205 DCHECK_EQ(0, mprotect(address, size, PROT_READ | PROT_WRITE)); |
87 return result == ASHMEM_WAS_PURGED ? | 206 return result == ASHMEM_WAS_PURGED ? |
88 DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS; | 207 DISCARDABLE_MEMORY_PURGED : DISCARDABLE_MEMORY_SUCCESS; |
89 } | 208 } |
90 | 209 |
91 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { | 210 bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { |
92 const int failed = ashmem_unpin_region(fd, off, size); | 211 const int failed = ashmem_unpin_region(fd, off, size); |
93 if (failed) | 212 if (failed) |
94 DLOG(ERROR) << "Failed to unpin memory."; | 213 DLOG(ERROR) << "Failed to unpin memory."; |
95 // This allows us to catch accesses to unlocked memory. | 214 // This allows us to catch accesses to unlocked memory. |
96 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); | 215 DCHECK_EQ(0, mprotect(address, size, PROT_NONE)); |
97 return !failed; | 216 return !failed; |
98 } | 217 } |
99 | 218 |
100 class DiscardableMemoryAndroid : public DiscardableMemory { | 219 } // namespace internal |
101 public: | |
102 DiscardableMemoryAndroid(int fd, void* address, size_t size) | |
103 : fd_(fd), | |
104 memory_(address), | |
105 size_(size) { | |
106 DCHECK_GE(fd_, 0); | |
107 DCHECK(memory_); | |
108 } | |
109 | |
110 virtual ~DiscardableMemoryAndroid() { | |
111 DeleteAshmemRegion(fd_, size_, memory_); | |
112 } | |
113 | |
114 // DiscardableMemory: | |
115 virtual LockDiscardableMemoryStatus Lock() OVERRIDE { | |
116 return LockAshmemRegion(fd_, 0, size_, memory_); | |
117 } | |
118 | |
119 virtual void Unlock() OVERRIDE { | |
120 UnlockAshmemRegion(fd_, 0, size_, memory_); | |
121 } | |
122 | |
123 virtual void* Memory() const OVERRIDE { | |
124 return memory_; | |
125 } | |
126 | |
127 private: | |
128 const int fd_; | |
129 void* const memory_; | |
130 const size_t size_; | |
131 | |
132 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroid); | |
133 }; | |
134 | |
135 } // namespace | |
136 | 220 |
137 // static | 221 // static |
138 bool DiscardableMemory::SupportedNatively() { | 222 bool DiscardableMemory::SupportedNatively() { |
139 return true; | 223 return true; |
140 } | 224 } |
141 | 225 |
142 // static | 226 // static |
143 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( | 227 scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( |
144 size_t size) { | 228 size_t size) { |
229 GlobalContext* const global_context = g_context.Pointer(); | |
230 if (ShouldUseAllocator(size)) | |
willchan no longer on Chromium
2013/11/28 22:51:55
It occurs to me now that all this checking of the
Philippe
2013/11/29 12:41:05
Yeah this is indeed slightly racy. I think it shou
| |
231 return global_context->allocator.Allocate(size); | |
145 // Pinning & unpinning works with page granularity therefore align the size | 232 // Pinning & unpinning works with page granularity therefore align the size |
146 // upfront. | 233 // upfront. |
147 const size_t kPageSize = 4096; | 234 const size_t aligned_size = internal::AlignToNextPage(size); |
148 const size_t mask = ~(kPageSize - 1); | |
149 size = (size + kPageSize - 1) & mask; | |
150 int fd; | 235 int fd; |
151 void* address; | 236 void* address; |
152 if (!CreateAshmemRegion("", size, &fd, &address)) | 237 if (!internal::CreateAshmemRegion("", aligned_size, &fd, &address)) { |
153 return scoped_ptr<DiscardableMemory>(); | 238 // Fallback to the allocator which might be more likely to succeed. |
239 return global_context->allocator.Allocate(size); | |
240 } | |
154 return scoped_ptr<DiscardableMemory>( | 241 return scoped_ptr<DiscardableMemory>( |
155 new DiscardableMemoryAndroid(fd, address, size)); | 242 new DiscardableMemoryAndroidSimple(fd, address, aligned_size)); |
156 } | 243 } |
157 | 244 |
158 // static | 245 // static |
159 bool DiscardableMemory::PurgeForTestingSupported() { | 246 bool DiscardableMemory::PurgeForTestingSupported() { |
160 return false; | 247 return false; |
161 } | 248 } |
162 | 249 |
163 // static | 250 // static |
164 void DiscardableMemory::PurgeForTesting() { | 251 void DiscardableMemory::PurgeForTesting() { |
165 NOTIMPLEMENTED(); | 252 NOTIMPLEMENTED(); |
166 } | 253 } |
167 | 254 |
168 } // namespace base | 255 } // namespace base |
OLD | NEW |