/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrBuffer_DEFINED
#define GrBuffer_DEFINED

#include "GrGpuResource.h"

class GrGpu;

class GrBuffer : public GrGpuResource {
public:
    /**
     * Computes a scratch key for a buffer with a "dynamic" access pattern. (Buffers with "static"
     * and "stream" access patterns are disqualified by nature from being cached and reused.)
     *
     * @param type  intended usage type of the buffer.
     * @param size  size of the buffer in bytes.
     * @param key   receives the computed scratch key.
     */
    static void ComputeScratchKeyForDynamicBuffer(GrBufferType type, size_t size,
                                                  GrScratchKey* key) {
        static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
        // Key layout: one 32-bit word for the type plus enough 32-bit words to hold a size_t
        // (two words on 64-bit platforms, one on 32-bit platforms).
        GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
        // TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just
        // a chunk of memory we can use/reuse for any type of data. We really only need to
        // differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
        builder[0] = type;
        builder[1] = (uint32_t)size;  // Low 32 bits of the size.
        if (sizeof(size_t) > 4) {
            builder[2] = (uint32_t)((uint64_t)size >> 32);  // High 32 bits, 64-bit platforms only.
        }
    }

    /** Returns the usage type this buffer was created with. */
    GrBufferType type() const { return fType; }

    /** Returns the access pattern this buffer was created with. */
    GrAccessPattern accessPattern() const { return fAccessPattern; }

    /**
     * Returns true if the buffer is a wrapper around a CPU array. If true it
     * indicates that map will always succeed and will be free.
     */
    bool isCPUBacked() const { return fCPUBacked; }

    /**
     * Maps the buffer to be written by the CPU.
     *
     * The previous content of the buffer is invalidated. It is an error
     * to draw from the buffer while it is mapped. It may fail if the backend
     * doesn't support mapping the buffer. If the buffer is CPU backed then
     * it will always succeed and is a free operation. Once a buffer is mapped,
     * subsequent calls to map() are ignored.
     *
     * Note that buffer mapping does not go through GrContext and therefore is
     * not serialized with other operations.
     *
     * @return a pointer to the data or nullptr if the map fails.
     */
    void* map() {
        if (!fMapPtr) {
            // The backend subclass is expected to set fMapPtr on success and
            // leave it null on failure.
            this->onMap();
        }
        return fMapPtr;
    }

    /**
     * Unmaps the buffer.
     *
     * The pointer returned by the previous map call will no longer be valid.
     * It is an error to call unmap() on a buffer that is not mapped.
     */
    void unmap() {
        SkASSERT(fMapPtr);
        this->onUnmap();
        fMapPtr = nullptr;
    }

    /**
     * Returns the same ptr that map() returned at time of map or nullptr if
     * the buffer is not mapped.
     *
     * @return ptr to mapped buffer data or nullptr if buffer is not mapped.
     */
    void* mapPtr() const { return fMapPtr; }

    /**
     Queries whether the buffer has been mapped.

     @return true if the buffer is mapped, false otherwise.
     */
    bool isMapped() const { return SkToBool(fMapPtr); }

    /**
     * Updates the buffer data.
     *
     * The size of the buffer will be preserved. The src data will be
     * placed at the beginning of the buffer and any remaining contents will
     * be undefined. srcSizeInBytes must be <= to the buffer size.
     *
     * The buffer must not be mapped.
     *
     * Note that buffer updates do not go through GrContext and therefore are
     * not serialized with other operations.
     *
     * @return returns true if the update succeeds, false otherwise.
     */
    bool updateData(const void* src, size_t srcSizeInBytes) {
        SkASSERT(!this->isMapped());
        SkASSERT(srcSizeInBytes <= fGpuMemorySize);
        return this->onUpdateData(src, srcSizeInBytes);
    }

protected:
    /**
     * Constructs a buffer resource. Only backend subclasses create instances.
     *
     * A scratch key (enabling reuse through the resource cache) is installed
     * only for GPU-backed, dynamic-access buffers whose size is a power of
     * two -- presumably the pow2 restriction limits the number of distinct
     * cached sizes; TODO confirm against the resource cache policy.
     */
    GrBuffer(GrGpu* gpu, GrBufferType type, size_t gpuMemorySize, GrAccessPattern accessPattern,
             bool cpuBacked)
        : INHERITED(gpu, kCached_LifeCycle),
          fMapPtr(nullptr),
          fType(type),
          fGpuMemorySize(gpuMemorySize),  // TODO: Zero for cpu backed buffers?
          fAccessPattern(accessPattern),
          fCPUBacked(cpuBacked) {
        if (!fCPUBacked && SkIsPow2(fGpuMemorySize) && kDynamic_GrAccessPattern == fAccessPattern) {
            GrScratchKey key;
            ComputeScratchKeyForDynamicBuffer(fType, fGpuMemorySize, &key);
            this->setScratchKey(key);
        }
    }

    // Set by the backend in onMap(); null whenever the buffer is unmapped.
    void* fMapPtr;

private:
    // Reports the size given at construction to the base class for budgeting.
    virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }

    // Backend hooks implementing map()/unmap()/updateData(); see the public
    // methods above for their contracts.
    virtual void onMap() = 0;
    virtual void onUnmap() = 0;
    virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;

    GrBufferType    fType;
    size_t          fGpuMemorySize;
    GrAccessPattern fAccessPattern;
    bool            fCPUBacked;

    typedef GrGpuResource INHERITED;
};

#endif