Index: src/core/SkRecord.h
diff --git a/src/core/SkRecord.h b/src/core/SkRecord.h
index 6019e0340683c9f2077b04c3868ace677c0e2687..521fd38014fc889a7a71469f596e9ee38004961c 100644
--- a/src/core/SkRecord.h
+++ b/src/core/SkRecord.h
@@ -30,7 +30,7 @@ class SkRecord : SkNoncopyable {
         kFirstReserveCount = 64 / sizeof(void*),
     };
 public:
-    SkRecord() : fCount(0), fReserved(0) {}
+    SkRecord() : fCount(0), fReserved(0), fBytesAllocated(0) {}
 
     ~SkRecord() {
         Destroyer destroyer;
@@ -68,6 +68,7 @@ public:
     template <typename T>
     T* alloc(size_t count = 1) {
         // Bump up to the next pointer width if needed, so all allocations start pointer-aligned.
+        fBytesAllocated += sizeof(T) * count;
         return (T*)fAlloc.alloc(sizeof(T) * count, SK_MALLOC_THROW);
     }
 
@@ -113,6 +114,10 @@ public:
         return fRecords[i].set(this->allocCommand<T>());
     }
 
+    size_t bytesUsed() const { return fBytesAllocated +
+                                      fReserved * (sizeof(Record) + sizeof(Type8)) +
+                                      sizeof(SkRecord); }
+
 private:
     // Implementation notes!
     //
@@ -231,6 +236,9 @@ private:
     // fCount and fReserved measure both fRecords and fTypes, which always grow in lock step.
     unsigned fCount;
     unsigned fReserved;
+
+    // counts bytes we've requested in fAlloc; *not* bytes in fRecords or fTypes
+    unsigned fBytesAllocated;
 };
 
 #endif//SkRecord_DEFINED
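
For readers skimming the patch, here is a minimal standalone sketch of the accounting pattern it introduces, assuming nothing about Skia beyond what the diff shows: bump a byte counter every time the arena hands out memory, then report that counter together with the per-slot bookkeeping and the object's own footprint. TinyRecord, fStorage, and main below are illustrative stand-ins, not Skia code.

#include <cstddef>
#include <cstdio>
#include <memory>
#include <vector>

class TinyRecord {
public:
    TinyRecord() : fBytesAllocated(0) {}

    // Like SkRecord::alloc<T>(): count the bytes requested before handing them out.
    template <typename T>
    T* alloc(size_t count = 1) {
        fBytesAllocated += sizeof(T) * count;
        fStorage.push_back(std::make_unique<char[]>(sizeof(T) * count));
        return reinterpret_cast<T*>(fStorage.back().get());
    }

    // Same shape as the new bytesUsed(): dynamic bytes requested so far, plus the
    // per-slot bookkeeping (here the vector's entries), plus the object itself.
    size_t bytesUsed() const {
        return fBytesAllocated
             + fStorage.capacity() * sizeof(std::unique_ptr<char[]>)
             + sizeof(TinyRecord);
    }

private:
    std::vector<std::unique_ptr<char[]>> fStorage;
    size_t fBytesAllocated;
};

int main() {
    TinyRecord rec;
    rec.alloc<int>(16);    // adds 16 * sizeof(int) to the counter
    rec.alloc<double>(4);  // adds 4 * sizeof(double)
    std::printf("approx bytes used: %zu\n", rec.bytesUsed());
    return 0;
}

As the new field's comment in the diff says, the counter tracks bytes requested from fAlloc, not the allocator's internal overhead, so bytesUsed() is a rough estimate rather than an exact heap measurement.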