OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2013 Google Inc. | 2 * Copyright 2013 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkScaledImageCache_DEFINED | 8 #ifndef SkScaledImageCache_DEFINED |
9 #define SkScaledImageCache_DEFINED | 9 #define SkScaledImageCache_DEFINED |
10 | 10 |
11 #include "SkBitmap.h" | 11 #include "SkBitmap.h" |
12 | 12 |
13 class SkDiscardableMemory; | 13 class SkDiscardableMemory; |
14 class SkMipMap; | 14 class SkMipMap; |
15 | 15 |
16 /** | 16 /** |
17 * Cache object for bitmaps (with possible scale in X Y as part of the key). | 17 * Cache object for bitmaps (with possible scale in X Y as part of the key). |
18 * | 18 * |
19 * Multiple caches can be instantiated, but each instance is not implicitly | 19 * Multiple caches can be instantiated, but each instance is not implicitly |
20 * thread-safe, so if a given instance is to be shared across threads, the | 20 * thread-safe, so if a given instance is to be shared across threads, the |
21 * caller must manage the access itself (e.g. via a mutex). | 21 * caller must manage the access itself (e.g. via a mutex). |
22 * | 22 * |
23 * As a convenience, a global instance is also defined, which can be safely | 23 * As a convenience, a global instance is also defined, which can be safely |
24 * access across threads via the static methods (e.g. FindAndLock, etc.). | 24 * access across threads via the static methods (e.g. FindAndLock, etc.). |
25 */ | 25 */ |
26 class SkScaledImageCache { | 26 class SkScaledImageCache { |
27 public: | 27 public: |
28 struct ID; | |
29 | |
30 struct Key { | 28 struct Key { |
31 // Call this to access your private contents. Must not use the address after calling init() | 29 // Call this to access your private contents. Must not use the address after calling init() |
32 void* writableContents() { return this + 1; } | 30 void* writableContents() { return this + 1; } |
33 | 31 |
34 // must call this after your private data has been written. | 32 // must call this after your private data has been written. |
35 // length must be a multiple of 4 | 33 // length must be a multiple of 4 |
36 void init(size_t length); | 34 void init(size_t length); |
37 | 35 |
38 // This is only valid after having called init(). | 36 // This is only valid after having called init(). |
39 uint32_t hash() const { return fHash; } | 37 uint32_t hash() const { return fHash; } |
40 | 38 |
41 bool operator==(const Key& other) const { | 39 bool operator==(const Key& other) const { |
42 const uint32_t* a = this->as32(); | 40 const uint32_t* a = this->as32(); |
43 const uint32_t* b = other.as32(); | 41 const uint32_t* b = other.as32(); |
44 for (int i = 0; i < fCount32; ++i) { | 42 for (int i = 0; i < fCount32; ++i) { |
45 if (a[i] != b[i]) { | 43 if (a[i] != b[i]) { |
46 return false; | 44 return false; |
47 } | 45 } |
48 } | 46 } |
49 return true; | 47 return true; |
50 } | 48 } |
51 | 49 |
52 // delete using sk_free | |
53 Key* clone() const; | |
54 | |
55 private: | 50 private: |
56 // store fCount32 first, so we don't consider it in operator< | 51 // store fCount32 first, so we don't consider it in operator< |
57 int32_t fCount32; // 2 + user contents count32 | 52 int32_t fCount32; // 2 + user contents count32 |
58 uint32_t fHash; | 53 uint32_t fHash; |
59 /* uint32_t fContents32[] */ | 54 /* uint32_t fContents32[] */ |
60 | 55 |
61 const uint32_t* as32() const { return (const uint32_t*)this; } | 56 const uint32_t* as32() const { return (const uint32_t*)this; } |
62 const uint32_t* as32SkipCount() const { return this->as32() + 1; } | 57 const uint32_t* as32SkipCount() const { return this->as32() + 1; } |
63 }; | 58 }; |
64 | 59 |
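For illustration only (not part of this patch; the type and field names are hypothetical): a concrete key can derive from Key, lay its POD fields out directly after the base so they sit where writableContents() points, and call init() once they are written. The 12 bytes of payload keep the length a multiple of 4, as init() requires.

    #include "SkScaledImageCache.h"

    // Hypothetical key: generation ID of the source bitmap plus the scale
    // factors that were applied to it.
    struct ScaledBitmapKey : public SkScaledImageCache::Key {
        uint32_t fGenID;
        SkScalar fScaleX;
        SkScalar fScaleY;

        ScaledBitmapKey(uint32_t genID, SkScalar sx, SkScalar sy)
            : fGenID(genID), fScaleX(sx), fScaleY(sy) {
            // Hash and compare everything from fGenID to the end of the struct.
            this->init(sizeof(*this) - sizeof(SkScaledImageCache::Key));
        }
    };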
| 60 struct Rec { |
| 61 typedef SkScaledImageCache::Key Key; |
| 62 |
| 63 Rec() : fLockCount(1) {} |
| 64 virtual ~Rec() {} |
| 65 |
| 66 uint32_t getHash() const { return this->getKey().hash(); } |
| 67 |
| 68 virtual const Key& getKey() const = 0; |
| 69 virtual size_t bytesUsed() const = 0; |
| 70 |
| 71 // for SkTDynamicHash::Traits |
| 72 static uint32_t Hash(const Key& key) { return key.hash(); } |
| 73 static const Key& GetKey(const Rec& rec) { return rec.getKey(); } |
| 74 |
| 75 private: |
| 76 Rec* fNext; |
| 77 Rec* fPrev; |
| 78 int32_t fLockCount; |
| 79 int32_t fPad; |
| 80 |
| 81 friend class SkScaledImageCache; |
| 82 }; |
| 83 |
| 84 typedef const Rec* ID; |
| 85 |
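A matching record sketch (hypothetical, building on the ScaledBitmapKey above): a Rec bundles its key with the cached payload and reports the bytes it pins, which the cache presumably weighs against the byte limit; ownership of added records is assumed to pass to the cache, given purgeRec() and the virtual destructor.

    // Hypothetical record holding one scaled bitmap.
    struct ScaledBitmapRec : public SkScaledImageCache::Rec {
        ScaledBitmapRec(const ScaledBitmapKey& key, const SkBitmap& bitmap)
            : fKey(key), fBitmap(bitmap) {}

        virtual const Key& getKey() const { return fKey; }
        virtual size_t bytesUsed() const {
            return sizeof(fKey) + fBitmap.getSize();
        }

        ScaledBitmapKey fKey;
        SkBitmap        fBitmap;
    };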
65 /** | 86 /** |
66 * Returns a locked/pinned SkDiscardableMemory instance for the specified | 87 * Returns a locked/pinned SkDiscardableMemory instance for the specified |
67 * number of bytes, or NULL on failure. | 88 * number of bytes, or NULL on failure. |
68 */ | 89 */ |
69 typedef SkDiscardableMemory* (*DiscardableFactory)(size_t bytes); | 90 typedef SkDiscardableMemory* (*DiscardableFactory)(size_t bytes); |
70 | 91 |
71 /* | 92 /* |
72 * The following static methods are thread-safe wrappers around a global | 93 * The following static methods are thread-safe wrappers around a global |
73 * instance of this cache. | 94 * instance of this cache. |
74 */ | 95 */ |
75 | 96 |
76 static ID* FindAndLock(const Key&, SkBitmap* result); | 97 static const Rec* FindAndLock(const Key& key); |
77 static ID* AddAndLock(const Key&, const SkBitmap& result); | 98 static const Rec* AddAndLock(Rec*); |
78 | 99 static void Add(Rec*); |
79 static ID* FindAndLock(const Key&, const SkMipMap** result); | 100 static void Unlock(ID); |
80 static ID* AddAndLock(const Key&, const SkMipMap* result); | |
81 | |
82 static void Unlock(ID*); | |
83 | 101 |
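A possible caller of the reworked global API (hypothetical helper, reusing the sketches above): look the record up by key, add a freshly built one on a miss, copy the payload out, and always Unlock() the pointer when done.

    // Hypothetical: fetch a scaled bitmap from the global cache, inserting
    // 'scaled' if no record exists yet for this key. Returns true on a hit.
    static bool fetch_or_add(const ScaledBitmapKey& key, const SkBitmap& scaled,
                             SkBitmap* result) {
        const SkScaledImageCache::Rec* rec = SkScaledImageCache::FindAndLock(key);
        bool wasCached = (NULL != rec);
        if (!wasCached) {
            // Assumed: the cache takes ownership and returns the record locked,
            // just as FindAndLock() would.
            rec = SkScaledImageCache::AddAndLock(new ScaledBitmapRec(key, scaled));
        }
        *result = static_cast<const ScaledBitmapRec*>(rec)->fBitmap;  // shares pixels
        SkScaledImageCache::Unlock(rec);  // rec must not be used after this point
        return wasCached;
    }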
84 static size_t GetTotalBytesUsed(); | 102 static size_t GetTotalBytesUsed(); |
85 static size_t GetTotalByteLimit(); | 103 static size_t GetTotalByteLimit(); |
86 static size_t SetTotalByteLimit(size_t newLimit); | 104 static size_t SetTotalByteLimit(size_t newLimit); |
87 | 105 |
88 static size_t SetSingleAllocationByteLimit(size_t); | 106 static size_t SetSingleAllocationByteLimit(size_t); |
89 static size_t GetSingleAllocationByteLimit(); | 107 static size_t GetSingleAllocationByteLimit(); |
90 | 108 |
91 static SkBitmap::Allocator* GetAllocator(); | 109 static SkBitmap::Allocator* GetAllocator(); |
92 | 110 |
(...skipping 15 matching lines...) |
108 | 126 |
109 /** | 127 /** |
110 * Construct the cache, allocating memory with malloc, and respect the | 128 * Construct the cache, allocating memory with malloc, and respect the |
111 * byteLimit, purging automatically when a new image is added to the cache | 129 * byteLimit, purging automatically when a new image is added to the cache |
112 * that pushes the total bytesUsed over the limit. Note: The limit can be | 130 * that pushes the total bytesUsed over the limit. Note: The limit can be |
113 * changed at runtime with setTotalByteLimit. | 131 * changed at runtime with setTotalByteLimit. |
114 */ | 132 */ |
115 explicit SkScaledImageCache(size_t byteLimit); | 133 explicit SkScaledImageCache(size_t byteLimit); |
116 ~SkScaledImageCache(); | 134 ~SkScaledImageCache(); |
117 | 135 |
118 /** | 136 const Rec* findAndLock(const Key& key); |
119 * Search the cache for a matching key. If found, return its bitmap and return its ID pointer. | 137 const Rec* addAndLock(Rec*); |
120 * Use the returned ID to unlock the cache when you are done using outBitmap. | 138 void add(Rec*); |
121 * | |
122 * If a match is not found, outBitmap will be unmodifed, and NULL will be returned. |
123 */ | |
124 ID* findAndLock(const Key& key, SkBitmap* outBitmap); | |
125 ID* findAndLock(const Key& key, const SkMipMap** returnedMipMap); | |
126 | |
127 /** | |
128 * To add a new bitmap (or mipMap) to the cache, call | |
129 * AddAndLock. Use the returned ptr to unlock the cache when you | |
130 * are done using scaled. | |
131 */ | |
132 ID* addAndLock(const Key&, const SkBitmap& bitmap); | |
133 ID* addAndLock(const Key&, const SkMipMap* mipMap); | |
134 | 139 |
135 /** | 140 /** |
136 * Given a non-null ID ptr returned by either findAndLock or addAndLock, | 141 * Given a non-null ID ptr returned by either findAndLock or addAndLock, |
137 * this releases the associated resources to be available to be purged | 142 * this releases the associated resources to be available to be purged |
138 * if needed. After this, the cached bitmap should no longer be | 143 * if needed. After this, the cached bitmap should no longer be |
139 * referenced by the caller. | 144 * referenced by the caller. |
140 */ | 145 */ |
141 void unlock(ID*); | 146 void unlock(ID); |
142 | 147 |
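As the class comment at the top notes, a non-global instance is not thread-safe on its own; a minimal sketch of caller-side locking, assuming Skia's usual SK_DECLARE_STATIC_MUTEX / SkAutoMutexAcquire helpers (the byte budget, names, and wrapper functions here are made up):

    SK_DECLARE_STATIC_MUTEX(gMyCacheMutex);
    static SkScaledImageCache gMyCache(8 * 1024 * 1024);  // hypothetical 8MB budget

    static const SkScaledImageCache::Rec* find_locked(const SkScaledImageCache::Key& key) {
        SkAutoMutexAcquire am(gMyCacheMutex);  // caller-provided synchronization
        return gMyCache.findAndLock(key);      // NULL if the key is not present
    }

    static void release_locked(SkScaledImageCache::ID id) {
        SkAutoMutexAcquire am(gMyCacheMutex);
        gMyCache.unlock(id);
    }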
143 size_t getTotalBytesUsed() const { return fTotalBytesUsed; } | 148 size_t getTotalBytesUsed() const { return fTotalBytesUsed; } |
144 size_t getTotalByteLimit() const { return fTotalByteLimit; } | 149 size_t getTotalByteLimit() const { return fTotalByteLimit; } |
145 | 150 |
146 /** | 151 /** |
147 * This is respected by SkBitmapProcState::possiblyScaleImage. | 152 * This is respected by SkBitmapProcState::possiblyScaleImage. |
148 * 0 is no maximum at all; this is the default. | 153 * 0 is no maximum at all; this is the default. |
149 * setSingleAllocationByteLimit() returns the previous value. | 154 * setSingleAllocationByteLimit() returns the previous value. |
150 */ | 155 */ |
151 size_t setSingleAllocationByteLimit(size_t maximumAllocationSize); | 156 size_t setSingleAllocationByteLimit(size_t maximumAllocationSize); |
152 size_t getSingleAllocationByteLimit() const; | 157 size_t getSingleAllocationByteLimit() const; |
153 /** | 158 /** |
154 * Set the maximum number of bytes available to this cache. If the current | 159 * Set the maximum number of bytes available to this cache. If the current |
155 * cache exceeds this new value, it will be purged to try to fit within | 160 * cache exceeds this new value, it will be purged to try to fit within |
156 * this new limit. | 161 * this new limit. |
157 */ | 162 */ |
158 size_t setTotalByteLimit(size_t newLimit); | 163 size_t setTotalByteLimit(size_t newLimit); |
159 | 164 |
160 SkBitmap::Allocator* allocator() const { return fAllocator; }; | 165 SkBitmap::Allocator* allocator() const { return fAllocator; }; |
161 | 166 |
162 /** | 167 /** |
163 * Call SkDebugf() with diagnostic information about the state of the cache | 168 * Call SkDebugf() with diagnostic information about the state of the cache |
164 */ | 169 */ |
165 void dump() const; | 170 void dump() const; |
166 | 171 |
167 public: | |
168 struct Rec; | |
169 private: | 172 private: |
170 Rec* fHead; | 173 Rec* fHead; |
171 Rec* fTail; | 174 Rec* fTail; |
172 | 175 |
173 class Hash; | 176 class Hash; |
174 Hash* fHash; | 177 Hash* fHash; |
175 | 178 |
176 DiscardableFactory fDiscardableFactory; | 179 DiscardableFactory fDiscardableFactory; |
177 // the allocator is NULL or one that matches discardables | 180 // the allocator is NULL or one that matches discardables |
178 SkBitmap::Allocator* fAllocator; | 181 SkBitmap::Allocator* fAllocator; |
179 | 182 |
180 size_t fTotalBytesUsed; | 183 size_t fTotalBytesUsed; |
181 size_t fTotalByteLimit; | 184 size_t fTotalByteLimit; |
182 size_t fSingleAllocationByteLimit; | 185 size_t fSingleAllocationByteLimit; |
183 int fCount; | 186 int fCount; |
184 | 187 |
185 Rec* findAndLock(const Key& key); | |
186 ID* addAndLock(Rec* rec); | |
187 | |
188 void purgeRec(Rec*); | 188 void purgeRec(Rec*); |
189 void purgeAsNeeded(); | 189 void purgeAsNeeded(); |
190 | 190 |
191 // linklist management | 191 // linklist management |
192 void moveToHead(Rec*); | 192 void moveToHead(Rec*); |
193 void addToHead(Rec*); | 193 void addToHead(Rec*); |
194 void detach(Rec*); | 194 void detach(Rec*); |
195 | 195 |
196 void init(); // called by constructors | 196 void init(); // called by constructors |
197 | 197 |
198 #ifdef SK_DEBUG | 198 #ifdef SK_DEBUG |
199 void validate() const; | 199 void validate() const; |
200 #else | 200 #else |
201 void validate() const {} | 201 void validate() const {} |
202 #endif | 202 #endif |
203 }; | 203 }; |
204 #endif | 204 #endif |