Chromium Code Reviews

Diff: src/gpu/GrLayerCache.h

Issue 433553002: Add CTM to the cached layers' key and reduce render target pingponging in layer pre-rendering (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Fix overlength lines (created 6 years, 4 months ago)
 /*
  * Copyright 2014 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #ifndef GrLayerCache_DEFINED
 #define GrLayerCache_DEFINED

(...skipping 26 matching lines...)

     GrAtlas::ClientPlotUsage fPlotUsage;
 };

 // GrCachedLayer encapsulates the caching information for a single saveLayer.
 //
 // Atlased layers get a ref to the backing GrTexture while non-atlased layers
 // get a ref to the GrTexture in which they reside. In both cases 'fRect'
 // contains the layer's extent in its texture.
 // Atlased layers also get a pointer to the plot in which they reside.
-// For non-atlased layers the lock field just corresponds to locking in
-// the resource cache. For atlased layers it implements an additional level
+// For non-atlased layers, the lock field just corresponds to locking in
+// the resource cache. For atlased layers, it implements an additional level
 // of locking to allow atlased layers to be reused multiple times.
 struct GrCachedLayer {
 public:
     // For SkTDynamicHash
     struct Key {
-        Key(uint32_t pictureID, int layerID) : fPictureID(pictureID), fLayerID(layerID) {}
+        Key(uint32_t pictureID, int start, int stop, const SkMatrix& ctm)
+            : fPictureID(pictureID)
+            , fStart(start)
+            , fStop(stop)
+            , fCTM(ctm) {
+        }

         bool operator==(const Key& other) const {
-            return fPictureID == other.fPictureID && fLayerID == other.fLayerID;
+            return fPictureID == other.fPictureID &&
+                   fStart == other.fStart &&
+                   fStop == other.fStop &&
+                   fCTM.cheapEqualTo(other.fCTM);
         }

-        uint32_t getPictureID() const { return fPictureID; }
-        int getLayerID() const { return fLayerID; }
+        uint32_t pictureID() const { return fPictureID; }
+        int start() const { return fStart; }
+        int stop() const { return fStop; }
+        const SkMatrix& ctm() const { return fCTM; }

     private:
         // ID of the picture of which this layer is a part
         const uint32_t fPictureID;
-        // fLayerID is the index of this layer in the picture (one of 0 .. #layers).
-        const int fLayerID;
+        // The range of commands in the picture this layer represents
+        const int fStart;
+        const int fStop;
+        // The CTM applied to this layer in the picture
+        const SkMatrix fCTM;
     };

     static const Key& GetKey(const GrCachedLayer& layer) { return layer.fKey; }
     static uint32_t Hash(const Key& key) {
-        return SkChecksum::Mix((key.getPictureID() << 16) | key.getLayerID());
+        return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));

bsalomon 2014/07/30 21:23:09: assert that key is tightly packed? what if matrix
robertphillips 2014/07/31 13:23:36: I have added the assert and forced the computation

     }
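Note on the exchange above: SkChecksum::Murmur3 hashes the raw bytes of Key, which is only stable if the struct is tightly packed and carries no uninitialized bits. A minimal stand-alone sketch of that requirement, using hypothetical names rather than this patch's code:

// Hypothetical sketch, not code from this patch: a byte-wise hash of a key
// struct must not pick up padding bytes, or two equal keys can hash
// differently. A compile-time size check catches accidental padding.
#include <cstdint>
#include <cstddef>

struct PackedKey {
    uint32_t fPictureID;
    int32_t  fStart;
    int32_t  fStop;
    float    fCTM[9];   // stand-in for the SkMatrix scalars
};

static_assert(sizeof(PackedKey) ==
                  sizeof(uint32_t) + 2 * sizeof(int32_t) + 9 * sizeof(float),
              "PackedKey must be tightly packed for byte-wise hashing");

uint32_t HashKeyBytes(const PackedKey& key) {
    // Stand-in for SkChecksum::Murmur3: any hash over &key / sizeof(key)
    // has the same "no padding, no uninitialized bits" requirement.
    const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&key);
    uint32_t hash = 0;
    for (size_t i = 0; i < sizeof(PackedKey); ++i) {
        hash = hash * 31u + bytes[i];
    }
    return hash;
}

This lines up with the reply above, which mentions adding an assert and forcing a computation before the key is hashed.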

     // GrCachedLayer proper
-    GrCachedLayer(uint32_t pictureID, int layerID)
-        : fKey(pictureID, layerID)
+    GrCachedLayer(uint32_t pictureID, int start, int stop, const SkMatrix& ctm)
+        : fKey(pictureID, start, stop, ctm)
         , fTexture(NULL)
         , fRect(GrIRect16::MakeEmpty())
         , fPlot(NULL)
         , fLocked(false) {
         SkASSERT(SK_InvalidGenID != pictureID && layerID >= 0);
     }

     ~GrCachedLayer() {
         SkSafeUnref(fTexture);
     }

-    uint32_t pictureID() const { return fKey.getPictureID(); }
-    int layerID() const { return fKey.getLayerID(); }
+    uint32_t pictureID() const { return fKey.pictureID(); }
+    int start() const { return fKey.start(); }
+    int stop() const { return fKey.stop(); }
+    const SkMatrix& ctm() const { return fKey.ctm(); }

     void setTexture(GrTexture* texture, const GrIRect16& rect) {
         SkRefCnt_SafeAssign(fTexture, texture);
         fRect = rect;
     }
     GrTexture* texture() { return fTexture; }
     const GrIRect16& rect() const { return fRect; }

     void setPlot(GrPlot* plot) {
         SkASSERT(NULL == fPlot);
(...skipping 43 matching lines...)
 // classes.
 class GrLayerCache {
 public:
     GrLayerCache(GrContext*);
     ~GrLayerCache();

     // As a cache, the GrLayerCache can be ordered to free up all its cached
     // elements by the GrContext
     void freeAll();

-    GrCachedLayer* findLayer(const SkPicture* picture, int layerID);
-    GrCachedLayer* findLayerOrCreate(const SkPicture* picture, int layerID);
+    GrCachedLayer* findLayer(const SkPicture* picture, int start, int stop, const SkMatrix& ctm);
+    GrCachedLayer* findLayerOrCreate(const SkPicture* picture,
+                                     int start, int stop,
+                                     const SkMatrix& ctm);

     // Inform the cache that layer's cached image is now required. Return true
     // if it was found in the ResourceCache and doesn't need to be regenerated.
     // If false is returned the caller should (re)render the layer into the
     // newly acquired texture.
     bool lock(GrCachedLayer* layer, const GrTextureDesc& desc);

     // Inform the cache that layer's cached image is not currently required
     void unlock(GrCachedLayer* layer);

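The lock()/unlock() contract documented above implies a caller-side pattern roughly like the following. This is an illustrative sketch against this header, with a hypothetical helper, not code from this patch:

#include "GrLayerCache.h"

// Hypothetical helper that (re)renders the layer's content into
// layer->texture() at layer->rect(); assumed to exist for this sketch.
static void renderLayerToTexture(GrCachedLayer* layer);

static void drawWithCachedLayer(GrLayerCache* cache, GrCachedLayer* layer,
                                const GrTextureDesc& desc) {
    if (!cache->lock(layer, desc)) {
        // Cache miss: a texture was acquired but holds no valid content,
        // so the caller must (re)render the layer before compositing it.
        renderLayerToTexture(layer);
    }
    // ... composite layer->texture() into the destination render target ...
    cache->unlock(layer);   // the cached image is no longer required
}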
(...skipping 33 matching lines...)

     // This implements a plot-centric locking mechanism (since the atlas
     // backing texture is always locked). Each layer that is locked (i.e.,
     // needed for the current rendering) in a plot increments the plot lock
     // count for that plot. Similarly, once a rendering is complete all the
     // layers used in it decrement the lock count for the used plots.
     // Plots with a 0 lock count are open for recycling/purging.
     int fPlotLocks[kNumPlotsX * kNumPlotsY];
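The counting scheme described in the comment above amounts to member helpers like these; this is an illustrative sketch, not the patch's actual implementation in GrLayerCache.cpp:

// Hypothetical sketch of maintaining fPlotLocks as described above.
void incPlotLock(int plotIdx) { ++fPlotLocks[plotIdx]; }
void decPlotLock(int plotIdx) {
    SkASSERT(fPlotLocks[plotIdx] > 0);   // never unlock an unlocked plot
    --fPlotLocks[plotIdx];
}
// A plot may be recycled or purged only when no locked layer resides in it.
bool plotIsPurgeable(int plotIdx) const { return 0 == fPlotLocks[plotIdx]; }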

     void initAtlas();
-    GrCachedLayer* createLayer(const SkPicture* picture, int layerID);
+    GrCachedLayer* createLayer(const SkPicture* picture, int start, int stop, const SkMatrix& ctm);

     // Remove all the layers (and unlock any resources) associated with 'pictureID'
     void purge(uint32_t pictureID);

     static bool PlausiblyAtlasable(int width, int height) {
         return width <= kPlotWidth && height <= kPlotHeight;
     }

     // Try to find a purgeable plot and clear it out. Return true if a plot
     // was purged; false otherwise.
     bool purgePlot();

     // for testing
     friend class TestingAccess;
     int numLayers() const { return fLayerHash.count(); }
 };

 #endif
