OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkRecord_DEFINED | 8 #ifndef SkRecord_DEFINED |
9 #define SkRecord_DEFINED | 9 #define SkRecord_DEFINED |
10 | 10 |
(...skipping 12 matching lines...) | |
23 // SkRecord often looks like it's compatible with any type T, but really it's compatible with any | 23 // SkRecord often looks like it's compatible with any type T, but really it's compatible with any |
24 // type T which has a static const SkRecords::Type kType. That is to say, SkRecord is compatible | 24 // type T which has a static const SkRecords::Type kType. That is to say, SkRecord is compatible |
25 // only with SkRecords::* structs defined in SkRecords.h. Your compiler will helpfully yell if you | 25 // only with SkRecords::* structs defined in SkRecords.h. Your compiler will helpfully yell if you |
26 // get this wrong. | 26 // get this wrong. |
27 | 27 |
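The comment above pins down the contract: a type works with SkRecord only if it carries a static const SkRecords::Type kType tag. SkRecords.h is not part of this diff, so the struct below is only a hypothetical sketch of that shape; the DrawRect name, its fields, and the DrawRect_Type enumerator are illustrative assumptions, not quotes from the patch.

    // Hypothetical sketch of an SkRecords-style command (names assumed, not from this patch).
    namespace SkRecords {
        struct DrawRect {
            static const Type kType = DrawRect_Type;  // the tag SkRecord dispatches on
            SkPaint paint;
            SkRect  rect;
        };
    }  // namespace SkRecords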
28 class SkRecord : SkNoncopyable { | 28 class SkRecord : SkNoncopyable { |
29 enum { | 29 enum { |
30 kFirstReserveCount = 64 / sizeof(void*), | 30 kFirstReserveCount = 64 / sizeof(void*), |
31 }; | 31 }; |
32 public: | 32 public: |
33 SkRecord() : fCount(0), fReserved(0) {} | 33 SkRecord() : fCount(0), fReserved(0), fBytesAllocated(0) {} |
34 | 34 |
35 ~SkRecord() { | 35 ~SkRecord() { |
36 Destroyer destroyer; | 36 Destroyer destroyer; |
37 for (unsigned i = 0; i < this->count(); i++) { | 37 for (unsigned i = 0; i < this->count(); i++) { |
38 this->mutate<void>(i, destroyer); | 38 this->mutate<void>(i, destroyer); |
39 } | 39 } |
40 } | 40 } |
41 | 41 |
42 // Returns the number of canvas commands in this SkRecord. | 42 // Returns the number of canvas commands in this SkRecord. |
43 unsigned count() const { return fCount; } | 43 unsigned count() const { return fCount; } |
(...skipping 17 matching lines...) | |
61 SkASSERT(i < this->count()); | 61 SkASSERT(i < this->count()); |
62 return fRecords[i].mutate<R>(fTypes[i], f); | 62 return fRecords[i].mutate<R>(fTypes[i], f); |
63 } | 63 } |
64 // TODO: It'd be nice to infer R from F for visit and mutate if we ever get std::result_of. | 64 // TODO: It'd be nice to infer R from F for visit and mutate if we ever get std::result_of. |
65 | 65 |
66 // Allocate contiguous space for count Ts, to be freed when the SkRecord is destroyed. | 66 // Allocate contiguous space for count Ts, to be freed when the SkRecord is destroyed. |
67 // Here T can be any class, not just those from SkRecords. Throws on failure. | 67 // Here T can be any class, not just those from SkRecords. Throws on failure. |
68 template <typename T> | 68 template <typename T> |
69 T* alloc(size_t count = 1) { | 69 T* alloc(size_t count = 1) { |
70 // Bump up to the next pointer width if needed, so all allocations start pointer-aligned. | 70 // Bump up to the next pointer width if needed, so all allocations start pointer-aligned. |
71 fBytesAllocated += sizeof(T) * count; | |
71 return (T*)fAlloc.alloc(sizeof(T) * count, SK_MALLOC_THROW); | 72 return (T*)fAlloc.alloc(sizeof(T) * count, SK_MALLOC_THROW); |
72 } | 73 } |
73 | 74 |
74 // Add a new command of type T to the end of this SkRecord. | 75 // Add a new command of type T to the end of this SkRecord. |
75 // You are expected to placement new an object of type T onto this pointer. | 76 // You are expected to placement new an object of type T onto this pointer. |
76 template <typename T> | 77 template <typename T> |
77 T* append() { | 78 T* append() { |
78 if (fCount == fReserved) { | 79 if (fCount == fReserved) { |
79 fReserved = SkTMax<unsigned>(kFirstReserveCount, fReserved*2); | 80 fReserved = SkTMax<unsigned>(kFirstReserveCount, fReserved*2); |
80 fRecords.realloc(fReserved); | 81 fRecords.realloc(fReserved); |
(...skipping 25 matching lines...) | |
106 T* replace(unsigned i, const SkRecords::Adopted<Existing>& proofOfAdoption) { | 107 T* replace(unsigned i, const SkRecords::Adopted<Existing>& proofOfAdoption) { |
107 SkASSERT(i < this->count()); | 108 SkASSERT(i < this->count()); |
108 | 109 |
109 SkASSERT(Existing::kType == fTypes[i]); | 110 SkASSERT(Existing::kType == fTypes[i]); |
110 SkASSERT(proofOfAdoption == fRecords[i].ptr<Existing>()); | 111 SkASSERT(proofOfAdoption == fRecords[i].ptr<Existing>()); |
111 | 112 |
112 fTypes[i] = T::kType; | 113 fTypes[i] = T::kType; |
113 return fRecords[i].set(this->allocCommand<T>()); | 114 return fRecords[i].set(this->allocCommand<T>()); |
114 } | 115 } |
115 | 116 |
117 size_t bytesUsed() const { return fBytesAllocated + | |
118 fReserved * (sizeof(Record) + sizeof(Type8)) + |
119 sizeof(SkRecord); } | |
120 | |
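A rough sense of what the new method reports: the bytes requested through alloc() (tracked in fBytesAllocated), plus the reserved capacity of the two side arrays, plus the object itself. A hypothetical worked example, assuming a 64-bit build where sizeof(Record) is 8 and sizeof(Type8) is 1:

    // Hypothetical accounting after ~100 appended commands that requested 4000 bytes from fAlloc.
    // fReserved grows 8 -> 16 -> 32 -> 64 -> 128 (kFirstReserveCount is 64/sizeof(void*) == 8 here).
    //   bytesUsed() == 4000                // fBytesAllocated: bytes requested from fAlloc
    //                + 128 * (8 + 1)       // fRecords and fTypes capacity, not just the used slots
    //                + sizeof(SkRecord);   // the struct itself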
116 private: | 121 private: |
117 // Implementation notes! | 122 // Implementation notes! |
118 // | 123 // |
119 // Logically an SkRecord is structured as an array of pointers into a big chunk of memory where | 124 // Logically an SkRecord is structured as an array of pointers into a big chunk of memory where |
120 // records representing each canvas draw call are stored: | 125 // records representing each canvas draw call are stored: |
121 // | 126 // |
122 // fRecords: [*][*][*]... | 127 // fRecords: [*][*][*]... |
123 // | | | | 128 // | | | |
124 // | | | | 129 // | | | |
125 // | | +---------------------------------------+ | 130 // | | +---------------------------------------+ |
(...skipping 98 matching lines...) | |
224 // | 229 // |
225 // fRecords and fTypes need to be data structures that can append fixed length data, and need to | 230 // fRecords and fTypes need to be data structures that can append fixed length data, and need to |
226 // support efficient random access and forward iteration. (They don't need to be contiguous.) | 231 // support efficient random access and forward iteration. (They don't need to be contiguous.) |
227 | 232 |
228 SkVarAlloc fAlloc; | 233 SkVarAlloc fAlloc; |
229 SkAutoTMalloc<Record> fRecords; | 234 SkAutoTMalloc<Record> fRecords; |
230 SkAutoTMalloc<Type8> fTypes; | 235 SkAutoTMalloc<Type8> fTypes; |
231 // fCount and fReserved measure both fRecords and fTypes, which always grow in lock step. | 236 // fCount and fReserved measure both fRecords and fTypes, which always grow in lock step. |
232 unsigned fCount; | 237 unsigned fCount; |
233 unsigned fReserved; | 238 unsigned fReserved; |
239 | |
240 // counts bytes we've requested in fAlloc; *not* bytes in fRecords or fTypes | |
241 unsigned fBytesAllocated; | |
mtklein (2014/11/17 23:15:33):
Now that I'm thinking about it, it seems counterpr
chrishtr (2014/11/17 23:22:13):
It will only be when we want to collect debug info
234 }; | 242 }; |
235 | 243 |
236 #endif//SkRecord_DEFINED | 244 #endif//SkRecord_DEFINED |
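For context on how the new accounting might be consumed, here is a minimal caller-side sketch. SkRecorder and its constructor come from the surrounding Skia tree rather than from this patch, so treat the exact signatures as assumptions:

    // Minimal sketch, assuming SkRecorder(SkRecord*, int, int) from SkRecorder.h.
    #include "SkRecord.h"
    #include "SkRecorder.h"

    size_t approximateRecordSize(int w, int h) {
        SkRecord record;
        SkRecorder recorder(&record, w, h);   // an SkCanvas that appends commands into 'record'
        recorder.drawRect(SkRect::MakeWH(SkIntToScalar(w), SkIntToScalar(h)),
                          SkPaint());
        return record.bytesUsed();            // alloc'd bytes + array capacity + sizeof(SkRecord)
    }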