OLD | NEW |
---|---|
1 | 1 |
2 /* | 2 /* |
3 * Copyright 2006 The Android Open Source Project | 3 * Copyright 2006 The Android Open Source Project |
4 * | 4 * |
5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
7 */ | 7 */ |
8 | 8 |
9 | 9 |
10 #ifndef SkTemplates_DEFINED | 10 #ifndef SkTemplates_DEFINED |
(...skipping 394 matching lines...) | |
405 return ptr; | 405 return ptr; |
406 } | 406 } |
407 | 407 |
408 private: | 408 private: |
409 T* fPtr; | 409 T* fPtr; |
410 }; | 410 }; |
411 | 411 |
412 template <size_t N, typename T> class SkAutoSTMalloc : SkNoncopyable { | 412 template <size_t N, typename T> class SkAutoSTMalloc : SkNoncopyable { |
413 public: | 413 public: |
414 SkAutoSTMalloc() { | 414 SkAutoSTMalloc() { |
415 fPtr = NULL; | 415 fPtr = NULL; |
mtklein
2015/04/08 13:20:05
Does it seem weird that we treat count == 0 differently?
416 } | 416 } |
417 | 417 |
418 SkAutoSTMalloc(size_t count) { | 418 SkAutoSTMalloc(size_t count) { |
419 if (count > N) { | 419 if (count > N) { |
420 fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW | SK_MALLOC_TEMP); | 420 fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW | SK_MALLOC_TEMP); |
421 } else if (count) { | 421 } else if (count) { |
422 fPtr = fTStorage; | 422 fPtr = fTStorage; |
423 } else { | 423 } else { |
424 fPtr = NULL; | 424 fPtr = NULL; |
425 } | 425 } |
426 } | 426 } |
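For context on mtklein's question above: the constructor distinguishes three cases, count > N (heap allocation), 0 < count <= N (the inline fTStorage), and count == 0 (fPtr left NULL rather than pointed at the unused stack reserve). A minimal standalone sketch of that decision, not Skia code, just the same branch structure:

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical mirror of SkAutoSTMalloc<N, T>'s constructor logic: which
// backing store does a given count select?
template <size_t N, typename T>
const char* backing_for(size_t count) {
    if (count > N) {
        return "heap";   // sk_malloc_flags(count * sizeof(T), ...)
    } else if (count) {
        return "stack";  // fPtr = fTStorage
    } else {
        return "null";   // fPtr = NULL -- the count == 0 case being questioned
    }
}

int main() {
    std::printf("count=0 -> %s\n", backing_for<4, int>(0));  // null
    std::printf("count=3 -> %s\n", backing_for<4, int>(3));  // stack
    std::printf("count=9 -> %s\n", backing_for<4, int>(9));  // heap
    return 0;
}
```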
427 | 427 |
428 ~SkAutoSTMalloc() { | 428 ~SkAutoSTMalloc() { |
429 if (fPtr != fTStorage) { | 429 if (fPtr != fTStorage) { |
430 sk_free(fPtr); | 430 sk_free(fPtr); |
431 } | 431 } |
432 } | 432 } |
433 | 433 |
434 // doesn't preserve contents | 434 // doesn't preserve contents |
435 T* reset(size_t count) { | 435 T* reset(size_t count) { |
436 if (fPtr != fTStorage) { | 436 if (fPtr != fTStorage) { |
437 sk_free(fPtr); | 437 sk_free(fPtr); |
438 } | 438 } |
439 if (count > N) { | 439 if (count > N) { |
440 fPtr = (T*)sk_malloc_flags(count * sizeof(T), SK_MALLOC_THROW | SK_MALLOC_TEMP); | 440 fPtr = (T*)sk_malloc_throw(count * sizeof(T)); |
441 } else if (count) { | 441 } else if (count) { |
442 fPtr = fTStorage; | 442 fPtr = fTStorage; |
443 } else { | 443 } else { |
444 fPtr = NULL; | 444 fPtr = NULL; |
445 } | 445 } |
446 return fPtr; | 446 return fPtr; |
447 } | 447 } |
448 | 448 |
449 T* get() const { return fPtr; } | 449 T* get() const { return fPtr; } |
450 | 450 |
451 operator T*() { | 451 operator T*() { |
452 return fPtr; | 452 return fPtr; |
453 } | 453 } |
454 | 454 |
455 operator const T*() const { | 455 operator const T*() const { |
456 return fPtr; | 456 return fPtr; |
457 } | 457 } |
458 | 458 |
459 T& operator[](int index) { | 459 T& operator[](int index) { |
460 return fPtr[index]; | 460 return fPtr[index]; |
461 } | 461 } |
462 | 462 |
463 const T& operator[](int index) const { | 463 const T& operator[](int index) const { |
464 return fPtr[index]; | 464 return fPtr[index]; |
465 } | 465 } |
466 | 466 |
467 // Reallocs the array, can be used to shrink the allocation. Makes no attempt to be intelligent | |
468 void realloc(size_t count) { | |
469 if (count > N) { | |
470 if (fPtr == fTStorage) { | |
471 fPtr = (T*)sk_malloc_throw(count * sizeof(T)); | |
472 memcpy(fPtr, fTStorage, N * sizeof(T)); | |
473 } else { | |
474 fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T)); | |
475 } | |
476 } else if (!fPtr) { | |
477 fPtr = fTStorage; | |
478 } else if (fPtr != fTStorage) { | |
479 fPtr = (T*)sk_realloc_throw(fPtr, count * sizeof(T)); | |
480 } | |
481 } | |
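Unlike reset() above, which deliberately does not preserve contents, the new realloc() keeps existing data: on the stack-to-heap transition it memcpy's the N inline elements, and on heap-to-heap it defers to sk_realloc_throw (note it never migrates a heap block back into fTStorage, even when count drops to N or below). A usage sketch, assuming the Skia headers are on the include path:

```cpp
#include "SkTemplates.h"

// Sketch only: grow an SkAutoSTMalloc past its stack reserve while keeping
// the values already written, then shrink it again.
static void grow_and_shrink() {
    SkAutoSTMalloc<4, int> storage(4);   // count <= N: backed by fTStorage
    for (int i = 0; i < 4; ++i) {
        storage[i] = i;
    }
    storage.realloc(16);                 // stack -> heap: first 4 ints are memcpy'd over
    storage[15] = 42;                    // new tail is uninitialized until written
    storage.realloc(8);                  // heap -> heap: shrinks via sk_realloc_throw
}                                        // destructor frees the heap block
```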
482 | |
467 private: | 483 private: |
468 T* fPtr; | 484 T* fPtr; |
469 union { | 485 union { |
470 uint32_t fStorage32[(N*sizeof(T) + 3) >> 2]; | 486 uint32_t fStorage32[(N*sizeof(T) + 3) >> 2]; |
471 T fTStorage[1]; // do NOT want to invoke T::T() | 487 T fTStorage[1]; // do NOT want to invoke T::T() |
472 }; | 488 }; |
473 }; | 489 }; |
474 | 490 |
475 /** | 491 /** |
476 * Reserves memory that is aligned on double and pointer boundaries. | 492 * Reserves memory that is aligned on double and pointer boundaries. |
(...skipping 22 matching lines...) | |
499 /** | 515 /** |
500 * Returns void* because this object does not initialize the | 516 * Returns void* because this object does not initialize the |
501 * memory. Use placement new for types that require a cons. | 517 * memory. Use placement new for types that require a cons. |
502 */ | 518 */ |
503 void* get() { return fStorage.get(); } | 519 void* get() { return fStorage.get(); } |
504 private: | 520 private: |
505 SkAlignedSStorage<sizeof(T)*N> fStorage; | 521 SkAlignedSStorage<sizeof(T)*N> fStorage; |
506 }; | 522 }; |
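As the comment on get() says, SkAlignedSTStorage only reserves suitably aligned, uninitialized bytes; a caller that needs a constructed T uses placement new (and later runs the destructor itself). A generic sketch of that pattern, not tied to the Skia types:

```cpp
#include <new>  // placement new

struct Point {
    float x, y;
    Point(float x_, float y_) : x(x_), y(y_) {}
};

// Hypothetical stand-in for the raw storage get() returns: aligned,
// uninitialized bytes that were never run through Point's constructor.
alignas(Point) static unsigned char gStorage[sizeof(Point)];

static Point* make_point() {
    return new (gStorage) Point(1.0f, 2.0f);  // construct in place
}

static void destroy_point(Point* p) {
    p->~Point();  // the caller is responsible for destruction too
}
```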
507 | 523 |
508 #endif | 524 #endif |