/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBitmapHeap.h"
#include "SkBitmap.h"
#include "SkTSearch.h"
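
// Overview (summary of the code in this file, not authoritative docs):
// SkBitmapHeap stores bitmaps in fixed "slots" so clients can refer to them by
// index. Bitmaps are deduplicated via a sorted lookup table keyed on
// generation id, pixel origin, and dimensions; when a preferred size is set,
// an LRU list decides which unreferenced entries may be evicted.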
SkBitmapHeapEntry::SkBitmapHeapEntry()
    : fSlot(-1)
    , fRefCount(0)
    , fBytesAllocated(0) {
}

SkBitmapHeapEntry::~SkBitmapHeapEntry() {
    SkASSERT(0 == fRefCount);
}

void SkBitmapHeapEntry::addReferences(int count) {
    if (0 == fRefCount) {
        // If there are no current owners then the heap manager
        // will be the only one able to modify it, so it does not
        // need to be an atomic operation.
        fRefCount = count;
    } else {
        sk_atomic_add(&fRefCount, count);
    }
}
///////////////////////////////////////////////////////////////////////////////

// Compare SkIPoints by reinterpreting their two 32-bit coordinates as a single
// 64-bit value. LookupEntry::Less below only needs a consistent total order,
// not a particular lexicographic one.
static bool operator<(const SkIPoint& a, const SkIPoint& b) {
    return *(const int64_t*)&a < *(const int64_t*)&b;
}

static bool operator>(const SkIPoint& a, const SkIPoint& b) {
    return *(const int64_t*)&a > *(const int64_t*)&b;
}
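
// Order lookup keys by generation id, then pixel origin, then width, then
// height. This is the comparator that keeps fLookupTable sorted for SkTSearch.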
bool SkBitmapHeap::LookupEntry::Less(const SkBitmapHeap::LookupEntry& a,
                                     const SkBitmapHeap::LookupEntry& b) {
    if (a.fGenerationId < b.fGenerationId) {
        return true;
    } else if (a.fGenerationId > b.fGenerationId) {
        return false;
    } else if (a.fPixelOrigin < b.fPixelOrigin) {
        return true;
    } else if (a.fPixelOrigin > b.fPixelOrigin) {
        return false;
    } else if (a.fWidth < b.fWidth) {
        return true;
    } else if (a.fWidth > b.fWidth) {
        return false;
    } else if (a.fHeight < b.fHeight) {
        return true;
    }
    return false;
}
///////////////////////////////////////////////////////////////////////////////

SkBitmapHeap::SkBitmapHeap(int32_t preferredSize, int32_t ownerCount)
    : INHERITED()
    , fExternalStorage(nullptr)
    , fMostRecentlyUsed(nullptr)
    , fLeastRecentlyUsed(nullptr)
    , fPreferredCount(preferredSize)
    , fOwnerCount(ownerCount)
    , fBytesAllocated(0)
    , fDeferAddingOwners(false) {
}

SkBitmapHeap::SkBitmapHeap(ExternalStorage* storage, int32_t preferredSize)
    : INHERITED()
    , fExternalStorage(storage)
    , fMostRecentlyUsed(nullptr)
    , fLeastRecentlyUsed(nullptr)
    , fPreferredCount(preferredSize)
    , fOwnerCount(IGNORE_OWNERS)
    , fBytesAllocated(0)
    , fDeferAddingOwners(false) {
    SkSafeRef(storage);
}
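
// In debug builds the destructor checks the byte accounting: every slot that
// is not on the unused list is still counted in fBytesAllocated, and after
// subtracting those bytes (plus the per-entry overhead) the total must be 0.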
SkBitmapHeap::~SkBitmapHeap() {
    SkDEBUGCODE(
    for (int i = 0; i < fStorage.count(); i++) {
        bool unused = false;
        for (int j = 0; j < fUnusedSlots.count(); j++) {
            if (fUnusedSlots[j] == fStorage[i]->fSlot) {
                unused = true;
                break;
            }
        }
        if (!unused) {
            fBytesAllocated -= fStorage[i]->fBytesAllocated;
        }
    }
    fBytesAllocated -= (fStorage.count() * sizeof(SkBitmapHeapEntry));
    )
    SkASSERT(0 == fBytesAllocated);
    fStorage.deleteAll();
    SkSafeUnref(fExternalStorage);
    fLookupTable.deleteAll();
}
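
// Unlink the entry from the doubly linked MRU/LRU list, patching the head/tail
// pointers and its neighbors' links. The caller either re-appends the entry
// (moving it to the most-recently-used position) or deletes it.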
void SkBitmapHeap::removeFromLRU(SkBitmapHeap::LookupEntry* entry) {
    if (fMostRecentlyUsed == entry) {
        fMostRecentlyUsed = entry->fLessRecentlyUsed;
        if (nullptr == fMostRecentlyUsed) {
            SkASSERT(fLeastRecentlyUsed == entry);
            fLeastRecentlyUsed = nullptr;
        } else {
            fMostRecentlyUsed->fMoreRecentlyUsed = nullptr;
        }
    } else {
        // Remove entry from its prior place, and make sure to cover the hole.
        if (fLeastRecentlyUsed == entry) {
            SkASSERT(entry->fMoreRecentlyUsed != nullptr);
            fLeastRecentlyUsed = entry->fMoreRecentlyUsed;
        }
        // Since we have already considered the case where entry is the most
        // recently used, it must have a more recently used at this point.
        SkASSERT(entry->fMoreRecentlyUsed != nullptr);
        entry->fMoreRecentlyUsed->fLessRecentlyUsed = entry->fLessRecentlyUsed;

        if (entry->fLessRecentlyUsed != nullptr) {
            SkASSERT(fLeastRecentlyUsed != entry);
            entry->fLessRecentlyUsed->fMoreRecentlyUsed = entry->fMoreRecentlyUsed;
        }
    }
    entry->fMoreRecentlyUsed = nullptr;
}
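
// Make the entry the most-recently-used node. Called both when a new entry is
// inserted and when an existing entry gets a lookup hit.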
void SkBitmapHeap::appendToLRU(SkBitmapHeap::LookupEntry* entry) {
    if (fMostRecentlyUsed != nullptr) {
        SkASSERT(nullptr == fMostRecentlyUsed->fMoreRecentlyUsed);
        fMostRecentlyUsed->fMoreRecentlyUsed = entry;
        entry->fLessRecentlyUsed = fMostRecentlyUsed;
    }
    fMostRecentlyUsed = entry;
    if (nullptr == fLeastRecentlyUsed) {
        fLeastRecentlyUsed = entry;
    }
}
// iterate through our LRU cache and try to find an entry to evict
SkBitmapHeap::LookupEntry* SkBitmapHeap::findEntryToReplace(const SkBitmap& replacement) {
    SkASSERT(fPreferredCount != UNLIMITED_SIZE);
    SkASSERT(fStorage.count() >= fPreferredCount);

    SkBitmapHeap::LookupEntry* iter = fLeastRecentlyUsed;
    while (iter != nullptr) {
        SkBitmapHeapEntry* heapEntry = fStorage[iter->fStorageSlot];
        if (heapEntry->fRefCount > 0) {
            // If the least recently used bitmap has not been released by its
            // owners, then by our LRU ordering no more recently used entry can
            // have been fully released yet either.
            return nullptr;
        }
        if (replacement.getGenerationID() == iter->fGenerationId) {
            // Do not replace a bitmap with a new one using the same
            // pixel ref. Instead look for a different one that will
            // potentially free up more space.
            iter = iter->fMoreRecentlyUsed;
        } else {
            return iter;
        }
    }
    return nullptr;
}
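
// Walk from the least recently used end, evicting unreferenced bitmaps until
// either bytesToFree bytes have been reclaimed or a still-referenced entry is
// reached. Freed slots are pushed onto fUnusedSlots for reuse; the number of
// bytes actually freed is returned.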
size_t SkBitmapHeap::freeMemoryIfPossible(size_t bytesToFree) {
    if (UNLIMITED_SIZE == fPreferredCount) {
        return 0;
    }
    LookupEntry* iter = fLeastRecentlyUsed;
    size_t origBytesAllocated = fBytesAllocated;
    // Purge starting from LRU until a non-evictable bitmap is found or until
    // everything is evicted.
    while (iter != nullptr) {
        SkBitmapHeapEntry* heapEntry = fStorage[iter->fStorageSlot];
        if (heapEntry->fRefCount > 0) {
            break;
        }
        LookupEntry* next = iter->fMoreRecentlyUsed;
        this->removeEntryFromLookupTable(iter);
        // Free the pixel memory. removeEntryFromLookupTable already reduced
        // fBytesAllocated properly.
        heapEntry->fBitmap.reset();
        // Add to list of unused slots which can be reused in the future.
        fUnusedSlots.push(heapEntry->fSlot);
        iter = next;
        if (origBytesAllocated - fBytesAllocated >= bytesToFree) {
            break;
        }
    }

    if (fLeastRecentlyUsed != iter) {
        // There was at least one eviction.
        fLeastRecentlyUsed = iter;
        if (nullptr == fLeastRecentlyUsed) {
            // Everything was evicted
            fMostRecentlyUsed = nullptr;
            fBytesAllocated -= (fStorage.count() * sizeof(SkBitmapHeapEntry));
            fStorage.deleteAll();
            fUnusedSlots.reset();
            SkASSERT(0 == fBytesAllocated);
        } else {
            fLeastRecentlyUsed->fLessRecentlyUsed = nullptr;
        }
    }

    return origBytesAllocated - fBytesAllocated;
}
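
// Binary-search fLookupTable for indexEntry. On a miss, a copy of indexEntry
// is inserted at its sorted position and that index is returned; on a hit, the
// matching SkBitmapHeapEntry is optionally returned through 'entry'.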
int SkBitmapHeap::findInLookupTable(const LookupEntry& indexEntry, SkBitmapHeapEntry** entry) {
    int index = SkTSearch<const LookupEntry, LookupEntry::Less>(
                                             (const LookupEntry**)fLookupTable.begin(),
                                             fLookupTable.count(),
                                             &indexEntry, sizeof(void*));

    if (index < 0) {
        // not found; insert a new entry into the lookup table
        index = ~index;
        *fLookupTable.insert(index) = new LookupEntry(indexEntry);
    } else if (entry != nullptr) {
        // populate the entry if needed
        *entry = fStorage[fLookupTable[index]->fStorageSlot];
    }

    return index;
}
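
// Store a usable copy of originalBitmap in copiedBitmap: immutable bitmaps are
// shared cheaply, empty bitmaps are reset, and mutable bitmaps are deep-copied
// so later changes by the caller cannot affect the heap's copy.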
bool SkBitmapHeap::copyBitmap(const SkBitmap& originalBitmap, SkBitmap& copiedBitmap) {
    SkASSERT(!fExternalStorage);

    // If the bitmap is mutable, we need to do a deep copy, since the
    // caller may modify it afterwards.
    if (originalBitmap.isImmutable()) {
        copiedBitmap = originalBitmap;
// TODO if we have the pixel ref in the heap we could pass it here to avoid a potential deep copy
//  else if (sharedPixelRef != nullptr) {
//      copiedBitmap = orig;
//      copiedBitmap.setPixelRef(sharedPixelRef, originalBitmap.pixelRefOffset());
    } else if (originalBitmap.empty()) {
        copiedBitmap.reset();
    } else if (!originalBitmap.deepCopyTo(&copiedBitmap)) {
        return false;
    }
    copiedBitmap.setImmutable();
    return true;
}
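
// Remove the lookup-table record for an entry that is being evicted and
// subtract its bitmap's bytes from fBytesAllocated. Returns the index the
// record occupied so callers can adjust any saved search index.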
int SkBitmapHeap::removeEntryFromLookupTable(LookupEntry* entry) {
    // remove the bitmap index for the deleted entry
    SkDEBUGCODE(int count = fLookupTable.count();)
    int index = this->findInLookupTable(*entry, nullptr);
    // Verify that findInLookupTable found an existing entry rather than adding
    // a new entry to the lookup table.
    SkASSERT(count == fLookupTable.count());
    fBytesAllocated -= fStorage[entry->fStorageSlot]->fBytesAllocated;
    delete fLookupTable[index];
    fLookupTable.remove(index);
    return index;
}
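
// Insert a bitmap into the heap and return its slot (or INVALID_SLOT on
// failure). If an equivalent bitmap is already stored, its owner references
// and LRU position are refreshed and its existing slot is returned. Otherwise
// a slot is obtained by evicting an LRU entry, reusing an unused slot, or
// appending new storage, and the bitmap is copied into it (or handed to the
// external storage, when one is set).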
int32_t SkBitmapHeap::insert(const SkBitmap& originalBitmap) {
    SkBitmapHeapEntry* entry = nullptr;
    int searchIndex = this->findInLookupTable(LookupEntry(originalBitmap), &entry);

    if (entry) {
        // Already had a copy of the bitmap in the heap.
        if (fOwnerCount != IGNORE_OWNERS) {
            if (fDeferAddingOwners) {
                *fDeferredEntries.append() = entry->fSlot;
            } else {
                entry->addReferences(fOwnerCount);
            }
        }
        if (fPreferredCount != UNLIMITED_SIZE) {
            LookupEntry* lookupEntry = fLookupTable[searchIndex];
            if (lookupEntry != fMostRecentlyUsed) {
                this->removeFromLRU(lookupEntry);
                this->appendToLRU(lookupEntry);
            }
        }
        return entry->fSlot;
    }

    // decide if we need to evict an existing heap entry or create a new one
    if (fPreferredCount != UNLIMITED_SIZE && fStorage.count() >= fPreferredCount) {
        // iterate through our LRU cache and try to find an entry to evict
        LookupEntry* lookupEntry = this->findEntryToReplace(originalBitmap);
        if (lookupEntry != nullptr) {
            // we found an entry to evict
            entry = fStorage[lookupEntry->fStorageSlot];
            // Remove it from the LRU. The new entry will be added to the LRU later.
            this->removeFromLRU(lookupEntry);
            int index = this->removeEntryFromLookupTable(lookupEntry);

            // update the current search index now that we have removed one
            if (index < searchIndex) {
                searchIndex--;
            }
        }
    }

    // if we didn't have an entry yet we need to create one
    if (!entry) {
        if (fPreferredCount != UNLIMITED_SIZE && fUnusedSlots.count() > 0) {
            int slot;
            fUnusedSlots.pop(&slot);
            entry = fStorage[slot];
        } else {
            entry = new SkBitmapHeapEntry;
            fStorage.append(1, &entry);
            entry->fSlot = fStorage.count() - 1;
            fBytesAllocated += sizeof(SkBitmapHeapEntry);
        }
    }

    // create a copy of the bitmap
    bool copySucceeded;
    if (fExternalStorage) {
        copySucceeded = fExternalStorage->insert(originalBitmap, entry->fSlot);
    } else {
        copySucceeded = copyBitmap(originalBitmap, entry->fBitmap);
    }

    // if the copy failed then we must abort
    if (!copySucceeded) {
        // delete the index
        delete fLookupTable[searchIndex];
        fLookupTable.remove(searchIndex);
        // If entry is the last slot in storage, it is safe to delete it.
        if (fStorage.count() - 1 == entry->fSlot) {
            // free the slot
            fStorage.remove(entry->fSlot);
            fBytesAllocated -= sizeof(SkBitmapHeapEntry);
            delete entry;
        } else {
            fUnusedSlots.push(entry->fSlot);
        }
        return INVALID_SLOT;
    }

    // update the index with the appropriate slot in the heap
    fLookupTable[searchIndex]->fStorageSlot = entry->fSlot;

    // compute the space taken by this entry
    // TODO if there is a shared pixel ref don't count it
    // If the SkBitmap does not share an SkPixelRef with an SkBitmap already
    // in the SharedHeap, also include the size of its pixels.
    entry->fBytesAllocated = originalBitmap.getSize();

    // add the bytes from this entry to the total count
    fBytesAllocated += entry->fBytesAllocated;

    if (fOwnerCount != IGNORE_OWNERS) {
        if (fDeferAddingOwners) {
            *fDeferredEntries.append() = entry->fSlot;
        } else {
            entry->addReferences(fOwnerCount);
        }
    }
    if (fPreferredCount != UNLIMITED_SIZE) {
        this->appendToLRU(fLookupTable[searchIndex]);
    }
    return entry->fSlot;
}
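
// While deferral is active, insert() records slots in fDeferredEntries instead
// of adding owner references immediately. endAddingOwnersDeferral() then
// either applies those references in one pass (add == true) or drops them.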
void SkBitmapHeap::deferAddingOwners() {
    fDeferAddingOwners = true;
}

void SkBitmapHeap::endAddingOwnersDeferral(bool add) {
    if (add) {
        for (int i = 0; i < fDeferredEntries.count(); i++) {
            SkASSERT(fOwnerCount != IGNORE_OWNERS);
            SkBitmapHeapEntry* heapEntry = this->getEntry(fDeferredEntries[i]);
            SkASSERT(heapEntry != nullptr);
            heapEntry->addReferences(fOwnerCount);
        }
    }
    fDeferAddingOwners = false;
    fDeferredEntries.reset();
}