OLD | NEW |
| (Empty) |
1 | |
2 /* | |
3 * Copyright 2006 The Android Open Source Project | |
4 * | |
5 * Use of this source code is governed by a BSD-style license that can be | |
6 * found in the LICENSE file. | |
7 */ | |
8 | |
9 | |
10 #ifndef SkTemplates_DEFINED | |
11 #define SkTemplates_DEFINED | |
12 | |
13 #include "../private/SkTLogic.h" | |
14 #include "SkMath.h" | |
15 #include "SkTypes.h" | |
16 #include <limits.h> | |
17 #include <new> | |
18 | |
19 /** \file SkTemplates.h | |
20 | |
21 This file contains light-weight template classes for type-safe and exception
-safe | |
22 resource management. | |
23 */ | |
24 | |
/**
 *  Marks a local variable as known to be unused (to avoid warnings).
 *  Note that this does *not* prevent the local variable from being optimized away.
 */
template <typename T> inline void sk_ignore_unused_variable(const T&) {}
30 | |
namespace skstd {

// Local stand-ins for std::move/std::forward. remove_reference_t and
// is_lvalue_reference are supplied by ../private/SkTLogic.h (included above).

/** Cast t to an rvalue reference so it can be moved from. */
template <typename T> inline remove_reference_t<T>&& move(T&& t) {
    return static_cast<remove_reference_t<T>&&>(t);
}

/** Forward an lvalue as either an lvalue or an rvalue, depending on T. */
template <typename T> inline T&& forward(remove_reference_t<T>& t) /*noexcept*/ {
    return static_cast<T&&>(t);
}
/** Forward an rvalue as an rvalue; statically rejects forwarding it as an lvalue. */
template <typename T> inline T&& forward(remove_reference_t<T>&& t) /*noexcept*/ {
    static_assert(!is_lvalue_reference<T>::value,
                  "Forwarding an rvalue reference as an lvalue reference is not allowed.");
    return static_cast<T&&>(t);
}

} // namespace skstd
47 | |
///@{
/** SkTConstType<T, CONST>::type will be 'const T' if CONST is true, 'T' otherwise. */
template <typename T, bool CONST> struct SkTConstType {
    typedef T type;         // primary template: CONST == false, leave T unqualified
};
template <typename T> struct SkTConstType<T, true> {
    typedef const T type;   // specialization: CONST == true, add const
};
///@}
57 | |
/**
 *  Returns a pointer to a D which comes immediately after S[count].
 */
template <typename D, typename S> static D* SkTAfter(S* ptr, size_t count = 1) {
    S* end = ptr + count;
    return reinterpret_cast<D*>(end);
}
64 | |
/**
 *  Returns a pointer to a D which comes byteOffset bytes after S.
 */
template <typename D, typename S> static D* SkTAddOffset(S* ptr, size_t byteOffset) {
    // The intermediate char* has the same const-ness as D as this produces better error messages.
    // This relies on the fact that reinterpret_cast can add constness, but cannot remove it.
    // NOTE(review): SkTIsConst is presumably provided by one of the headers included above
    // (SkTypes.h / SkTLogic.h) — not visible in this file.
    return reinterpret_cast<D*>(
        reinterpret_cast<typename SkTConstType<char, SkTIsConst<D>::value>::type*>(ptr) + byteOffset
    );
}
75 | |
76 /** \class SkAutoTCallVProc | |
77 | |
78 Call a function when this goes out of scope. The template uses two | |
79 parameters, the object, and a function that is to be called in the destructo
r. | |
80 If detach() is called, the object reference is set to null. If the object | |
81 reference is null when the destructor is called, we do not call the | |
82 function. | |
83 */ | |
84 template <typename T, void (*P)(T*)> class SkAutoTCallVProc : SkNoncopyable { | |
85 public: | |
86 SkAutoTCallVProc(T* obj): fObj(obj) {} | |
87 ~SkAutoTCallVProc() { if (fObj) P(fObj); } | |
88 | |
89 operator T*() const { return fObj; } | |
90 T* operator->() const { SkASSERT(fObj); return fObj; } | |
91 | |
92 T* detach() { T* obj = fObj; fObj = NULL; return obj; } | |
93 void reset(T* obj = NULL) { | |
94 if (fObj != obj) { | |
95 if (fObj) { | |
96 P(fObj); | |
97 } | |
98 fObj = obj; | |
99 } | |
100 } | |
101 private: | |
102 T* fObj; | |
103 }; | |
104 | |
105 /** \class SkAutoTCallIProc | |
106 | |
107 Call a function when this goes out of scope. The template uses two | |
108 parameters, the object, and a function that is to be called in the destructor. | |
109 If detach() is called, the object reference is set to null. If the object | |
110 reference is null when the destructor is called, we do not call the | |
111 function. | |
112 */ | |
113 template <typename T, int (*P)(T*)> class SkAutoTCallIProc : SkNoncopyable { | |
114 public: | |
115 SkAutoTCallIProc(T* obj): fObj(obj) {} | |
116 ~SkAutoTCallIProc() { if (fObj) P(fObj); } | |
117 | |
118 operator T*() const { return fObj; } | |
119 T* operator->() const { SkASSERT(fObj); return fObj; } | |
120 | |
121 T* detach() { T* obj = fObj; fObj = NULL; return obj; } | |
122 private: | |
123 T* fObj; | |
124 }; | |
125 | |
126 /** \class SkAutoTDelete | |
127 An SkAutoTDelete<T> is like a T*, except that the destructor of SkAutoTDelete<
T> | |
128 automatically deletes the pointer it holds (if any). That is, SkAutoTDelete<T
> | |
129 owns the T object that it points to. Like a T*, an SkAutoTDelete<T> may hold | |
130 either NULL or a pointer to a T object. Also like T*, SkAutoTDelete<T> is | |
131 thread-compatible, and once you dereference it, you get the threadsafety | |
132 guarantees of T. | |
133 | |
134 The size of a SkAutoTDelete is small: sizeof(SkAutoTDelete<T>) == sizeof(T*) | |
135 */ | |
136 template <typename T> class SkAutoTDelete : SkNoncopyable { | |
137 public: | |
138 SkAutoTDelete(T* obj = NULL) : fObj(obj) {} | |
139 ~SkAutoTDelete() { SkDELETE(fObj); } | |
140 | |
141 T* get() const { return fObj; } | |
142 operator T*() const { return fObj; } | |
143 T& operator*() const { SkASSERT(fObj); return *fObj; } | |
144 T* operator->() const { SkASSERT(fObj); return fObj; } | |
145 | |
146 void reset(T* obj) { | |
147 if (fObj != obj) { | |
148 SkDELETE(fObj); | |
149 fObj = obj; | |
150 } | |
151 } | |
152 | |
153 /** | |
154 * Delete the owned object, setting the internal pointer to NULL. | |
155 */ | |
156 void free() { | |
157 SkDELETE(fObj); | |
158 fObj = NULL; | |
159 } | |
160 | |
161 /** | |
162 * Transfer ownership of the object to the caller, setting the internal | |
163 * pointer to NULL. Note that this differs from get(), which also returns | |
164 * the pointer, but it does not transfer ownership. | |
165 */ | |
166 T* detach() { | |
167 T* obj = fObj; | |
168 fObj = NULL; | |
169 return obj; | |
170 } | |
171 | |
172 void swap(SkAutoTDelete* that) { | |
173 SkTSwap(fObj, that->fObj); | |
174 } | |
175 | |
176 private: | |
177 T* fObj; | |
178 }; | |
179 | |
180 // Calls ~T() in the destructor. | |
181 template <typename T> class SkAutoTDestroy : SkNoncopyable { | |
182 public: | |
183 SkAutoTDestroy(T* obj = NULL) : fObj(obj) {} | |
184 ~SkAutoTDestroy() { | |
185 if (fObj) { | |
186 fObj->~T(); | |
187 } | |
188 } | |
189 | |
190 T* get() const { return fObj; } | |
191 T& operator*() const { SkASSERT(fObj); return *fObj; } | |
192 T* operator->() const { SkASSERT(fObj); return fObj; } | |
193 | |
194 private: | |
195 T* fObj; | |
196 }; | |
197 | |
198 template <typename T> class SkAutoTDeleteArray : SkNoncopyable { | |
199 public: | |
200 SkAutoTDeleteArray(T array[]) : fArray(array) {} | |
201 ~SkAutoTDeleteArray() { SkDELETE_ARRAY(fArray); } | |
202 | |
203 T* get() const { return fArray; } | |
204 void free() { SkDELETE_ARRAY(fArray); fArray = NULL; } | |
205 T* detach() { T* array = fArray; fArray = NULL; return array; } | |
206 | |
207 void reset(T array[]) { | |
208 if (fArray != array) { | |
209 SkDELETE_ARRAY(fArray); | |
210 fArray = array; | |
211 } | |
212 } | |
213 | |
214 private: | |
215 T* fArray; | |
216 }; | |
217 | |
218 /** Allocate an array of T elements, and free the array in the destructor | |
219 */ | |
220 template <typename T> class SkAutoTArray : SkNoncopyable { | |
221 public: | |
222 SkAutoTArray() { | |
223 fArray = NULL; | |
224 SkDEBUGCODE(fCount = 0;) | |
225 } | |
226 /** Allocate count number of T elements | |
227 */ | |
228 explicit SkAutoTArray(int count) { | |
229 SkASSERT(count >= 0); | |
230 fArray = NULL; | |
231 if (count) { | |
232 fArray = SkNEW_ARRAY(T, count); | |
233 } | |
234 SkDEBUGCODE(fCount = count;) | |
235 } | |
236 | |
237 /** Reallocates given a new count. Reallocation occurs even if new count equ
als old count. | |
238 */ | |
239 void reset(int count) { | |
240 SkDELETE_ARRAY(fArray); | |
241 SkASSERT(count >= 0); | |
242 fArray = NULL; | |
243 if (count) { | |
244 fArray = SkNEW_ARRAY(T, count); | |
245 } | |
246 SkDEBUGCODE(fCount = count;) | |
247 } | |
248 | |
249 ~SkAutoTArray() { | |
250 SkDELETE_ARRAY(fArray); | |
251 } | |
252 | |
253 /** Return the array of T elements. Will be NULL if count == 0 | |
254 */ | |
255 T* get() const { return fArray; } | |
256 | |
257 /** Return the nth element in the array | |
258 */ | |
259 T& operator[](int index) const { | |
260 SkASSERT((unsigned)index < (unsigned)fCount); | |
261 return fArray[index]; | |
262 } | |
263 | |
264 void swap(SkAutoTArray& other) { | |
265 SkTSwap(fArray, other.fArray); | |
266 SkDEBUGCODE(SkTSwap(fCount, other.fCount)); | |
267 } | |
268 | |
269 private: | |
270 T* fArray; | |
271 SkDEBUGCODE(int fCount;) | |
272 }; | |
273 | |
/** Wraps SkAutoTArray, with room for up to N elements preallocated
 */
template <int N, typename T> class SkAutoSTArray : SkNoncopyable {
public:
    /** Initialize with no objects */
    SkAutoSTArray() {
        fArray = NULL;
        fCount = 0;
    }

    /** Allocate count number of T elements
     */
    SkAutoSTArray(int count) {
        fArray = NULL;
        fCount = 0;
        this->reset(count);
    }

    ~SkAutoSTArray() {
        // reset(0) destroys all elements and frees any heap allocation.
        this->reset(0);
    }

    /** Destroys previous objects in the array and default constructs count number of objects */
    void reset(int count) {
        // Destroy the existing elements, walking backwards from the end.
        T* start = fArray;
        T* iter = start + fCount;
        while (iter > start) {
            (--iter)->~T();
        }

        if (fCount != count) {
            if (fCount > N) {
                // 'fArray' was allocated last time so free it now
                SkASSERT((T*) fStorage != fArray);
                sk_free(fArray);
            }

            if (count > N) {
                // Guard against overflow of count * sizeof(T) in a size_t.
                const uint64_t size64 = sk_64_mul(count, sizeof(T));
                const size_t size = static_cast<size_t>(size64);
                if (size != size64) {
                    sk_out_of_memory();
                }
                fArray = (T*) sk_malloc_throw(size);
            } else if (count > 0) {
                // Small enough to use the preallocated inline storage.
                fArray = (T*) fStorage;
            } else {
                fArray = NULL;
            }

            fCount = count;
        }

        // Default-construct the count new elements in place.
        iter = fArray;
        T* stop = fArray + count;
        while (iter < stop) {
            SkNEW_PLACEMENT(iter++, T);
        }
    }

    /** Return the number of T elements in the array
     */
    int count() const { return fCount; }

    /** Return the array of T elements. Will be NULL if count == 0
     */
    T* get() const { return fArray; }

    /** Return the nth element in the array
     */
    T& operator[](int index) const {
        SkASSERT(index < fCount);
        return fArray[index];
    }

private:
    int     fCount;
    T*      fArray;
    // since we come right after fArray, fStorage should be properly aligned
    char    fStorage[N * sizeof(T)];
};
355 | |
356 /** Manages an array of T elements, freeing the array in the destructor. | |
357 * Does NOT call any constructors/destructors on T (T must be POD). | |
358 */ | |
359 template <typename T> class SkAutoTMalloc : SkNoncopyable { | |
360 public: | |
361 /** Takes ownership of the ptr. The ptr must be a value which can be passed
to sk_free. */ | |
362 explicit SkAutoTMalloc(T* ptr = NULL) { | |
363 fPtr = ptr; | |
364 } | |
365 | |
366 /** Allocates space for 'count' Ts. */ | |
367 explicit SkAutoTMalloc(size_t count) { | |
368 fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW); | |
369 } | |
370 | |
371 ~SkAutoTMalloc() { | |
372 sk_free(fPtr); | |
373 } | |
374 | |
375 /** Resize the memory area pointed to by the current ptr preserving contents
. */ | |
376 void realloc(size_t count) { | |
377 fPtr = reinterpret_cast<T*>(sk_realloc_throw(fPtr, count * sizeof(T))); | |
378 } | |
379 | |
380 /** Resize the memory area pointed to by the current ptr without preserving
contents. */ | |
381 void reset(size_t count) { | |
382 sk_free(fPtr); | |
383 fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW); | |
384 } | |
385 | |
386 T* get() const { return fPtr; } | |
387 | |
388 operator T*() { | |
389 return fPtr; | |
390 } | |
391 | |
392 operator const T*() const { | |
393 return fPtr; | |
394 } | |
395 | |
396 T& operator[](int index) { | |
397 return fPtr[index]; | |
398 } | |
399 | |
400 const T& operator[](int index) const { | |
401 return fPtr[index]; | |
402 } | |
403 | |
404 /** | |
405 * Transfer ownership of the ptr to the caller, setting the internal | |
406 * pointer to NULL. Note that this differs from get(), which also returns | |
407 * the pointer, but it does not transfer ownership. | |
408 */ | |
409 T* detach() { | |
410 T* ptr = fPtr; | |
411 fPtr = NULL; | |
412 return ptr; | |
413 } | |
414 | |
415 private: | |
416 T* fPtr; | |
417 }; | |
418 | |
/** Like SkAutoTMalloc, but with room for N elements of inline storage that is
 *  used whenever the requested count fits. T's constructors/destructors are
 *  never run (see the union comment below), so T should be POD.
 */
template <size_t N, typename T> class SkAutoSTMalloc : SkNoncopyable {
public:
    // Default: point at the inline storage; nothing will need freeing.
    SkAutoSTMalloc() : fPtr(fTStorage) {}

    // Heap-allocate only when count exceeds the inline capacity N.
    SkAutoSTMalloc(size_t count) {
        if (count > N) {
            fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW | SK_MALLOC_TEMP);
        } else {
            fPtr = fTStorage;
        }
    }

    ~SkAutoSTMalloc() {
        // Free only when the buffer was heap-allocated.
        if (fPtr != fTStorage) {
            sk_free(fPtr);
        }
    }

    // doesn't preserve contents
    T* reset(size_t count) {
        if (fPtr != fTStorage) {
            sk_free(fPtr);
        }
        if (count > N) {
            fPtr = (T*)sk_malloc_throw(count * sizeof(T));
        } else {
            fPtr = fTStorage;
        }
        return fPtr;
    }

    T* get() const { return fPtr; }

    operator T*() {
        return fPtr;
    }

    operator const T*() const {
        return fPtr;
    }

    T& operator[](int index) {
        return fPtr[index];
    }

    const T& operator[](int index) const {
        return fPtr[index];
    }

    // Reallocs the array, can be used to shrink the allocation.  Makes no attempt to be intelligent
    void realloc(size_t count) {
        if (count > N) {
            if (fPtr == fTStorage) {
                // Growing off the inline storage: heap-allocate and copy the
                // inline contents over.
                fPtr = (T*)sk_malloc_throw(count * sizeof(T));
                memcpy(fPtr, fTStorage, N * sizeof(T));
            } else {
                fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T));
            }
        } else if (fPtr != fTStorage) {
            // NOTE(review): once heap-allocated, shrinking to <= N keeps the
            // (realloc'd) heap buffer rather than migrating back to fTStorage.
            fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T));
        }
    }

private:
    T* fPtr;
    union {
        // Sized in uint32_t units to round N*sizeof(T) up to a 4-byte multiple.
        uint32_t fStorage32[(N*sizeof(T) + 3) >> 2];
        T        fTStorage[1];   // do NOT want to invoke T::T()
    };
};
489 | |
490 ////////////////////////////////////////////////////////////////////////////////
////////////////// | |
491 | |
/**
 *  Pass the object and the storage that was offered during SkInPlaceNewCheck, and this will
 *  safely destroy (and free if it was dynamically allocated) the object.
 */
template <typename T> void SkInPlaceDeleteCheck(T* obj, void* storage) {
    if (storage != obj) {
        // Heap-allocated by SkInPlaceNewCheck: delete normally.
        SkDELETE(obj);
    } else {
        // Constructed inside the caller-provided storage: run the destructor only.
        obj->~T();
    }
}
503 | |
504 /** | |
505 * Allocates T, using storage if it is large enough, and allocating on the heap
(via new) if | |
506 * storage is not large enough. | |
507 * | |
508 * obj = SkInPlaceNewCheck<Type>(storage, size); | |
509 * ... | |
510 * SkInPlaceDeleteCheck(obj, storage); | |
511 */ | |
512 template <typename T> T* SkInPlaceNewCheck(void* storage, size_t size) { | |
513 return (sizeof(T) <= size) ? new (storage) T : SkNEW(T); | |
514 } | |
515 | |
516 template <typename T, typename A1, typename A2, typename A3> | |
517 T* SkInPlaceNewCheck(void* storage, size_t size, const A1& a1, const A2& a2, con
st A3& a3) { | |
518 return (sizeof(T) <= size) ? new (storage) T(a1, a2, a3) : SkNEW_ARGS(T, (a1
, a2, a3)); | |
519 } | |
520 | |
521 /** | |
522 * Reserves memory that is aligned on double and pointer boundaries. | |
523 * Hopefully this is sufficient for all practical purposes. | |
524 */ | |
525 template <size_t N> class SkAlignedSStorage : SkNoncopyable { | |
526 public: | |
527 size_t size() const { return N; } | |
528 void* get() { return fData; } | |
529 const void* get() const { return fData; } | |
530 | |
531 private: | |
532 union { | |
533 void* fPtr; | |
534 double fDouble; | |
535 char fData[N]; | |
536 }; | |
537 }; | |
538 | |
/**
 * Reserves memory that is aligned on double and pointer boundaries.
 * Hopefully this is sufficient for all practical purposes. Otherwise,
 * we have to do some arcane trickery to determine alignment of non-POD
 * types. Lifetime of the memory is the lifetime of the object.
 */
template <int N, typename T> class SkAlignedSTStorage : SkNoncopyable {
public:
    /**
     * Returns void* because this object does not initialize the
     * memory. Use placement new for types that require a cons.
     */
    void* get() { return fStorage.get(); }
    const void* get() const { return fStorage.get(); }
private:
    // Delegates to the byte-count variant for the actual aligned buffer.
    SkAlignedSStorage<sizeof(T)*N> fStorage;
};
556 | |
557 #endif | |
OLD | NEW |