| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2014 Google, Inc | 2 * Copyright 2014 Google, Inc |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef SkSmallAllocator_DEFINED | 8 #ifndef SkSmallAllocator_DEFINED |
| 9 #define SkSmallAllocator_DEFINED | 9 #define SkSmallAllocator_DEFINED |
| 10 | 10 |
| (...skipping 99 matching lines...) |
| 110 return NULL; | 110 return NULL; |
| 111 } | 111 } |
| 112 const size_t storageRemaining = SkAlign4(kTotalBytes) - fStorageUsed; | 112 const size_t storageRemaining = SkAlign4(kTotalBytes) - fStorageUsed; |
| 113 storageRequired = SkAlign4(storageRequired); | 113 storageRequired = SkAlign4(storageRequired); |
| 114 Rec* rec = &fRecs[fNumObjects]; | 114 Rec* rec = &fRecs[fNumObjects]; |
| 115 if (storageRequired > storageRemaining) { | 115 if (storageRequired > storageRemaining) { |
| 116 // Allocate on the heap. Ideally we want to avoid this situation, | 116 // Allocate on the heap. Ideally we want to avoid this situation, |
| 117 // but we're not sure we can catch all callers, so handle it but | 117 // but we're not sure we can catch all callers, so handle it but |
| 118 // assert false in debug mode. | 118 // assert false in debug mode. |
| 119 SkASSERT(false); | 119 SkASSERT(false); |
| 120 rec->fStorageSize = 0; |
| 120 rec->fHeapStorage = sk_malloc_throw(storageRequired); | 121 rec->fHeapStorage = sk_malloc_throw(storageRequired); |
| 121 rec->fObj = static_cast<void*>(rec->fHeapStorage); | 122 rec->fObj = static_cast<void*>(rec->fHeapStorage); |
| 122 } else { | 123 } else { |
| 123 // There is space in fStorage. | 124 // There is space in fStorage. |
| 125 rec->fStorageSize = storageRequired; |
| 124 rec->fHeapStorage = NULL; | 126 rec->fHeapStorage = NULL; |
| 125 SkASSERT(SkIsAlign4(fStorageUsed)); | 127 SkASSERT(SkIsAlign4(fStorageUsed)); |
| 126 rec->fObj = static_cast<void*>(fStorage + (fStorageUsed / 4)); | 128 rec->fObj = static_cast<void*>(fStorage + (fStorageUsed / 4)); |
| 127 fStorageUsed += storageRequired; | 129 fStorageUsed += storageRequired; |
| 128 } | 130 } |
| 129 rec->fKillProc = destroyT<T>; | 131 rec->fKillProc = destroyT<T>; |
| 130 fNumObjects++; | 132 fNumObjects++; |
| 131 return rec->fObj; | 133 return rec->fObj; |
| 132 } | 134 } |
| 133 | 135 |
| 136 /* |
| 137 * Free the memory reserved last without calling the destructor. |
| 138 * Can be used in a nested way, i.e. after reserving A and B, calling |
| 139 * freeLast once will free B and calling it again will free A. |
| 140 */ |
| 141 void freeLast() { |
| 142 SkASSERT(fNumObjects > 0); |
| 143 Rec* rec = &fRecs[fNumObjects - 1]; |
| 144 sk_free(rec->fHeapStorage); |
| 145 fStorageUsed -= rec->fStorageSize; |
| 146 |
| 147 fNumObjects--; |
| 148 } |
| 149 |
| 134 private: | 150 private: |
| 135 struct Rec { | 151 struct Rec { |
| 152 size_t fStorageSize; // 0 if allocated on heap |
| 136 void* fObj; | 153 void* fObj; |
| 137 void* fHeapStorage; | 154 void* fHeapStorage; |
| 138 void (*fKillProc)(void*); | 155 void (*fKillProc)(void*); |
| 139 }; | 156 }; |
| 140 | 157 |
| 141 // Number of bytes used so far. | 158 // Number of bytes used so far. |
| 142 size_t fStorageUsed; | 159 size_t fStorageUsed; |
| 143 // Pad the storage size to be 4-byte aligned. | 160 // Pad the storage size to be 4-byte aligned. |
| 144 uint32_t fStorage[SkAlign4(kTotalBytes) >> 2]; | 161 uint32_t fStorage[SkAlign4(kTotalBytes) >> 2]; |
| 145 uint32_t fNumObjects; | 162 uint32_t fNumObjects; |
| 146 Rec fRecs[kMaxObjects]; | 163 Rec fRecs[kMaxObjects]; |
| 147 }; | 164 }; |
| 148 | 165 |
| 149 #endif // SkSmallAllocator_DEFINED | 166 #endif // SkSmallAllocator_DEFINED |
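
The patch threads a new per-record `fStorageSize` through the allocator so the inline-storage cursor can be rolled back: records that fit in `fStorage` remember how many aligned bytes they consumed, while heap spills record 0, which makes the `fStorageUsed -= rec->fStorageSize` in `freeLast()` a no-op for them. Below is a minimal standalone sketch of that bookkeeping, not the Skia API: `SketchAllocator`, `kMaxObjects`, and the 64-byte buffer are illustrative assumptions, and the real class is a template (`SkSmallAllocator<kMaxObjects, kTotalBytes>`) that also constructs objects and runs `fKillProc` destructors, which the sketch omits.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Standalone sketch of the bookkeeping this CL introduces (hypothetical
// names; not the Skia interface).
class SketchAllocator {
public:
    SketchAllocator() : fStorageUsed(0), fNumObjects(0) {}

    void* reserve(size_t bytes) {
        assert(fNumObjects < kMaxObjects);
        bytes = (bytes + 3) & ~size_t(3);              // SkAlign4 equivalent
        Rec* rec = &fRecs[fNumObjects++];
        if (bytes > sizeof(fStorage) - fStorageUsed) {
            rec->fStorageSize = 0;                     // 0 marks a heap spill
            rec->fHeapStorage = std::malloc(bytes);
            rec->fObj = rec->fHeapStorage;
        } else {
            rec->fStorageSize = bytes;
            rec->fHeapStorage = nullptr;
            rec->fObj = fStorage + fStorageUsed / 4;   // fStorage is uint32_t[]
            fStorageUsed += bytes;
        }
        return rec->fObj;
    }

    // Mirrors the freeLast() added in this CL: LIFO rollback, no destructor.
    void freeLast() {
        assert(fNumObjects > 0);
        Rec* rec = &fRecs[--fNumObjects];
        std::free(rec->fHeapStorage);                  // no-op when nullptr
        fStorageUsed -= rec->fStorageSize;             // 0 for heap spills
    }

private:
    static const size_t kMaxObjects = 8;
    struct Rec {
        size_t fStorageSize;  // bytes taken from fStorage; 0 if on the heap
        void*  fObj;
        void*  fHeapStorage;
    };

    size_t   fStorageUsed;
    uint32_t fStorage[16];    // 64 bytes of inline storage
    size_t   fNumObjects;
    Rec      fRecs[kMaxObjects];
};
```

Storing 0 for heap spills is the design point that keeps `freeLast()` branch-free: the same two statements (free the heap pointer, subtract the inline size) are correct regardless of where the last reservation landed.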
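As a quick check of the nested semantics described in the new comment block (after reserving A and B, the first `freeLast()` releases B and the second releases A), here is a hypothetical driver against the sketch above:

```cpp
int main() {
    SketchAllocator alloc;

    void* a = alloc.reserve(16);   // A: fits inline, cursor moves to 16
    void* b = alloc.reserve(100);  // B: exceeds the 64-byte buffer, heap spill

    alloc.freeLast();              // releases B's heap block; cursor unchanged
    alloc.freeLast();              // rolls the cursor back to 0, "freeing" A

    // The inline cursor is back at the start, so the next reservation
    // reuses A's bytes.
    void* c = alloc.reserve(16);
    assert(c == a);
    (void)b;
    return 0;
}
```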