| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrBuffer_DEFINED | 8 #ifndef GrBuffer_DEFINED |
| 9 #define GrBuffer_DEFINED | 9 #define GrBuffer_DEFINED |
| 10 | 10 |
| 11 #include "GrGpuResource.h" | 11 #include "GrGpuResource.h" |
| 12 | 12 |
| 13 class GrGpu; | 13 class GrGpu; |
| 14 | 14 |
| 15 class GrBuffer : public GrGpuResource { | 15 class GrBuffer : public GrGpuResource { |
| 16 public: | 16 public: |
| 17 /** | 17 /** |
| 18     * Computes a scratch key for a buffer with a "dynamic" access pattern. (Buffers with "static" | 18     * Creates a client-side buffer. |
| 19     * and "stream" access patterns are disqualified by nature from being cached and reused.) | |
| 20 */ | 19 */ |
| 21     static void ComputeScratchKeyForDynamicBuffer(size_t size, GrBufferType intendedType, | 20     static SK_WARN_UNUSED_RESULT GrBuffer* CreateCPUBacked(GrGpu*, size_t sizeInBytes, GrBufferType, |
| 22                                                   GrScratchKey* key) { | 21                                                             const void* data = nullptr); |
| 23         static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType(); | 22 |
| 24 GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4); | 23 /** |
| 25         // TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just | 24     * Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with |
| 26         // a chunk of memory we can use/reuse for any type of data. We really only need to | 25     * "static" and "stream" patterns are disqualified by nature from being cached and reused.) |
| 27         // differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types. | 26     */ |
| 28         builder[0] = intendedType; | 27     static void ComputeScratchKeyForDynamicVBO(size_t size, GrBufferType, GrScratchKey*); |
| 29 builder[1] = (uint32_t)size; | |
| 30 if (sizeof(size_t) > 4) { | |
| 31 builder[2] = (uint32_t)((uint64_t)size >> 32); | |
| 32 } | |
| 33 } | |
| 34 | 28 |
| 35 GrAccessPattern accessPattern() const { return fAccessPattern; } | 29 GrAccessPattern accessPattern() const { return fAccessPattern; } |
| 30 size_t sizeInBytes() const { return fSizeInBytes; } |
| 36 | 31 |
| 37 /** | 32 /** |
| 38 * Returns true if the buffer is a wrapper around a CPU array. If true it | 33 * Returns true if the buffer is a wrapper around a CPU array. If true it |
| 39 * indicates that map will always succeed and will be free. | 34 * indicates that map will always succeed and will be free. |
| 40 */ | 35 */ |
| 41 bool isCPUBacked() const { return fCPUBacked; } | 36 bool isCPUBacked() const { return SkToBool(fCPUData); } |
| 37 size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); } |
| 42 | 38 |
| 43 /** | 39 /** |
| 44 * Maps the buffer to be written by the CPU. | 40 * Maps the buffer to be written by the CPU. |
| 45 * | 41 * |
| 46 * The previous content of the buffer is invalidated. It is an error | 42 * The previous content of the buffer is invalidated. It is an error |
| 47 * to draw from the buffer while it is mapped. It may fail if the backend | 43 * to draw from the buffer while it is mapped. It may fail if the backend |
| 48 * doesn't support mapping the buffer. If the buffer is CPU backed then | 44 * doesn't support mapping the buffer. If the buffer is CPU backed then |
| 49 * it will always succeed and is a free operation. Once a buffer is mapped, | 45 * it will always succeed and is a free operation. Once a buffer is mapped, |
| 50 * subsequent calls to map() are ignored. | 46 * subsequent calls to map() are ignored. |
| 51 * | 47 * |
| (...skipping 44 matching lines...) | |
| 96 * | 92 * |
| 97 * The buffer must not be mapped. | 93 * The buffer must not be mapped. |
| 98 * | 94 * |
| 99 * Note that buffer updates do not go through GrContext and therefore are | 95 * Note that buffer updates do not go through GrContext and therefore are |
| 100 * not serialized with other operations. | 96 * not serialized with other operations. |
| 101 * | 97 * |
| 102 * @return returns true if the update succeeds, false otherwise. | 98 * @return returns true if the update succeeds, false otherwise. |
| 103 */ | 99 */ |
| 104 bool updateData(const void* src, size_t srcSizeInBytes) { | 100 bool updateData(const void* src, size_t srcSizeInBytes) { |
| 105 SkASSERT(!this->isMapped()); | 101 SkASSERT(!this->isMapped()); |
| 106 SkASSERT(srcSizeInBytes <= fGpuMemorySize); | 102 SkASSERT(srcSizeInBytes <= fSizeInBytes); |
| 107 return this->onUpdateData(src, srcSizeInBytes); | 103 return this->onUpdateData(src, srcSizeInBytes); |
| 108 } | 104 } |
| 109 | 105 |
| 110 protected: | 106 ~GrBuffer() override { |
| 111 GrBuffer(GrGpu* gpu, size_t gpuMemorySize, GrBufferType intendedType, | 107 sk_free(fCPUData); |
| 112 GrAccessPattern accessPattern, bool cpuBacked) | |
| 113 : INHERITED(gpu), | |
| 114 fMapPtr(nullptr), | |
| 115 fGpuMemorySize(gpuMemorySize), // TODO: Zero for cpu backed buffers? | |
| 116 fAccessPattern(accessPattern), | |
| 117 fCPUBacked(cpuBacked), | |
| 118 fIntendedType(intendedType) { | |
| 119 } | 108 } |
| 120 | 109 |
| 121 void computeScratchKey(GrScratchKey* key) const override { | 110 protected: |
| 122         if (!fCPUBacked && SkIsPow2(fGpuMemorySize) && kDynamic_GrAccessPattern == fAccessPattern) { | 111     GrBuffer(GrGpu*, size_t sizeInBytes, GrBufferType, GrAccessPattern); |
| 123             ComputeScratchKeyForDynamicBuffer(fGpuMemorySize, fIntendedType, key); | |
| 124 } | |
| 125 } | |
| 126 | 112 |
| 127 void* fMapPtr; | 113 void* fMapPtr; |
| 128 | 114 |
| 129 private: | 115 private: |
| 130 size_t onGpuMemorySize() const override { return fGpuMemorySize; } | 116 /** |
| 117 * Internal constructor to make a CPU-backed buffer. |
| 118 */ |
| 119 GrBuffer(GrGpu*, size_t sizeInBytes, GrBufferType, void* cpuData); |
| 131 | 120 |
| 132 virtual void onMap() = 0; | 121 virtual void onMap() { SkASSERT(this->isCPUBacked()); fMapPtr = fCPUData; } |
| 133 virtual void onUnmap() = 0; | 122 virtual void onUnmap() { SkASSERT(this->isCPUBacked()); } |
| 134 virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0; | 123 virtual bool onUpdateData(const void* src, size_t srcSizeInBytes); |
| 135 | 124 |
| 136     size_t fGpuMemorySize; | 125     size_t onGpuMemorySize() const override { return fSizeInBytes; } // TODO: zero for cpu backed? |
| 126 void computeScratchKey(GrScratchKey* key) const override; |
| 127 |
| 128 size_t fSizeInBytes; |
| 137 GrAccessPattern fAccessPattern; | 129 GrAccessPattern fAccessPattern; |
| 138 bool fCPUBacked; | 130 void* fCPUData; |
| 139 GrBufferType fIntendedType; | 131 GrBufferType fIntendedType; |
| 132 |
| 140 typedef GrGpuResource INHERITED; | 133 typedef GrGpuResource INHERITED; |
| 141 }; | 134 }; |
| 142 | 135 |
| 143 #endif | 136 #endif |
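
A minimal usage sketch of the public API on the NEW side of this diff. It only exercises entry points visible in the header above; unmap(), isMapped(), and the void* return of map() sit in the 44 unchanged lines elided from this page, and the caller, data, and sizes are purely illustrative, so treat this as a sketch rather than code from the CL:

```cpp
#include "GrBuffer.h"

#include <string.h>

// Sketch only (not part of this CL): fill a GrBuffer either by mapping it or,
// if the backend can't map, by falling back to updateData().
static bool fill_buffer(GrBuffer* buffer, const void* src, size_t srcSize) {
    SkASSERT(srcSize <= buffer->sizeInBytes());   // sizeInBytes() is new in this CL
    if (void* dst = buffer->map()) {
        // For CPU-backed buffers the header guarantees mapping always succeeds
        // and is free; map() invalidates the previous contents.
        memcpy(dst, src, srcSize);
        buffer->unmap();                          // unmap() is among the elided lines
        return true;
    }
    // map() may fail on backends that don't support it; updateData() must not be
    // called while the buffer is mapped.
    return buffer->updateData(src, srcSize);
}
```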
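Because onMap()/onUnmap()/onUpdateData() stop being pure virtual, the base class can now service CPU-backed buffers itself; the new onMap()/onUnmap() bodies are visible above, but onUpdateData() moves out of line into GrBuffer.cpp, which this page does not show. A plausible reading consistent with the header, offered purely as an assumption, is a straight copy into fCPUData:

```cpp
#include "GrBuffer.h"

#include <string.h>

// Guess at the out-of-line default, consistent with the header shown above;
// the real body lives in GrBuffer.cpp, which this diff does not include.
bool GrBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    SkASSERT(this->isCPUBacked());            // GPU-backed subclasses override this
    memcpy(fCPUData, src, srcSizeInBytes);    // updateData() already validated the size
    return true;
}
```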