OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2014 Google, Inc | 2 * Copyright 2014 Google, Inc |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkSmallAllocator_DEFINED | 8 #ifndef SkSmallAllocator_DEFINED |
9 #define SkSmallAllocator_DEFINED | 9 #define SkSmallAllocator_DEFINED |
10 | 10 |
(...skipping 99 matching lines...)
110 return NULL; | 110 return NULL; |
111 } | 111 } |
112 const size_t storageRemaining = SkAlign4(kTotalBytes) - fStorageUsed; | 112 const size_t storageRemaining = SkAlign4(kTotalBytes) - fStorageUsed; |
113 storageRequired = SkAlign4(storageRequired); | 113 storageRequired = SkAlign4(storageRequired); |
114 Rec* rec = &fRecs[fNumObjects]; | 114 Rec* rec = &fRecs[fNumObjects]; |
115 if (storageRequired > storageRemaining) { | 115 if (storageRequired > storageRemaining) { |
116 // Allocate on the heap. Ideally we want to avoid this situation, | 116 // Allocate on the heap. Ideally we want to avoid this situation, |
117 // but we're not sure we can catch all callers, so handle it but | 117 // but we're not sure we can catch all callers, so handle it but |
118 // assert false in debug mode. | 118 // assert false in debug mode. |
119 SkASSERT(false); | 119 SkASSERT(false); |
120 rec->fStorageSize = 0; | |
121 rec->fHeapStorage = sk_malloc_throw(storageRequired); | 120 rec->fHeapStorage = sk_malloc_throw(storageRequired); |
122 rec->fObj = static_cast<void*>(rec->fHeapStorage); | 121 rec->fObj = static_cast<void*>(rec->fHeapStorage); |
123 } else { | 122 } else { |
124 // There is space in fStorage. | 123 // There is space in fStorage. |
125 rec->fStorageSize = storageRequired; | |
126 rec->fHeapStorage = NULL; | 124 rec->fHeapStorage = NULL; |
127 SkASSERT(SkIsAlign4(fStorageUsed)); | 125 SkASSERT(SkIsAlign4(fStorageUsed)); |
128 rec->fObj = static_cast<void*>(fStorage + (fStorageUsed / 4)); | 126 rec->fObj = static_cast<void*>(fStorage + (fStorageUsed / 4)); |
129 fStorageUsed += storageRequired; | 127 fStorageUsed += storageRequired; |
130 } | 128 } |
131 rec->fKillProc = destroyT<T>; | 129 rec->fKillProc = destroyT<T>; |
132 fNumObjects++; | 130 fNumObjects++; |
133 return rec->fObj; | 131 return rec->fObj; |
134 } | 132 } |
135 | 133 |
136 /* | |
137 * Free the memory reserved last without calling the destructor. | |
138 * Can be used in a nested way, i.e. after reserving A and B, calling | |
139 * freeLast once will free B and calling it again will free A. | |
140 */ | |
141 void freeLast() { | |
142 SkASSERT(fNumObjects > 0); | |
143 Rec* rec = &fRecs[fNumObjects - 1]; | |
144 sk_free(rec->fHeapStorage); | |
145 fStorageUsed -= rec->fStorageSize; | |
146 | |
147 fNumObjects--; | |
148 } | |
149 | |
150 private: | 134 private: |
151 struct Rec { | 135 struct Rec { |
152 size_t fStorageSize; // 0 if allocated on heap | 136 void* fObj; |
153 void* fObj; | 137 void* fHeapStorage; |
154 void* fHeapStorage; | 138 void (*fKillProc)(void*); |
155 void (*fKillProc)(void*); | |
156 }; | 139 }; |
157 | 140 |
158 // Number of bytes used so far. | 141 // Number of bytes used so far. |
159 size_t fStorageUsed; | 142 size_t fStorageUsed; |
160 // Pad the storage size to be 4-byte aligned. | 143 // Pad the storage size to be 4-byte aligned. |
161 uint32_t fStorage[SkAlign4(kTotalBytes) >> 2]; | 144 uint32_t fStorage[SkAlign4(kTotalBytes) >> 2]; |
162 uint32_t fNumObjects; | 145 uint32_t fNumObjects; |
163 Rec fRecs[kMaxObjects]; | 146 Rec fRecs[kMaxObjects]; |
164 }; | 147 }; |
165 | 148 |
166 #endif // SkSmallAllocator_DEFINED | 149 #endif // SkSmallAllocator_DEFINED |
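
For context, below is a minimal self-contained sketch (not Skia's actual header) of the reserve path this diff touches: the request is rounded up to 4 bytes, carved out of the inline fStorage buffer when it fits, and placed on the heap otherwise. The SmallAllocatorSketch class, Align4 helper, and main() are simplified stand-ins for illustration; only the field names and the arithmetic mirror the diff.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Stand-in for SkAlign4: round up to the next multiple of 4.
constexpr size_t Align4(size_t x) { return (x + 3) & ~size_t(3); }

template <size_t kTotalBytes, size_t kMaxObjects>
class SmallAllocatorSketch {
public:
    SmallAllocatorSketch() : fStorageUsed(0), fNumObjects(0) {}

    ~SmallAllocatorSketch() {
        // Release any heap fallbacks. The real class also runs the
        // recorded fKillProc destructors, which this sketch omits.
        for (size_t i = 0; i < fNumObjects; ++i) {
            free(fRecs[i].fHeapStorage);
        }
    }

    void* reserve(size_t storageRequired) {
        if (fNumObjects >= kMaxObjects) {
            return nullptr;
        }
        const size_t storageRemaining = Align4(kTotalBytes) - fStorageUsed;
        storageRequired = Align4(storageRequired);
        Rec* rec = &fRecs[fNumObjects];
        if (storageRequired > storageRemaining) {
            // Heap fallback, as in the diff (Skia asserts here in debug builds).
            rec->fHeapStorage = malloc(storageRequired);
            rec->fObj = rec->fHeapStorage;
        } else {
            rec->fHeapStorage = nullptr;
            assert(fStorageUsed % 4 == 0);  // mirrors SkASSERT(SkIsAlign4(...))
            // fStorage is a uint32_t array, so index by words, not bytes.
            rec->fObj = fStorage + (fStorageUsed / 4);
            fStorageUsed += storageRequired;
        }
        fNumObjects++;
        return rec->fObj;
    }

private:
    struct Rec {
        void* fObj;
        void* fHeapStorage;
    };

    size_t   fStorageUsed;
    uint32_t fStorage[Align4(kTotalBytes) >> 2];
    size_t   fNumObjects;
    Rec      fRecs[kMaxObjects];
};

int main() {
    SmallAllocatorSketch<16, 4> alloc;
    void* a = alloc.reserve(10);  // rounded to 12, served from fStorage
    void* b = alloc.reserve(10);  // only 4 bytes remain, falls back to the heap
    printf("a=%p b=%p\n", a, b);
}

Note that the removed Rec::fStorageSize existed only so the removed freeLast() could subtract the right amount from fStorageUsed; with freeLast() gone, per-reservation sizes no longer need to be tracked, which is why this CL deletes both together.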