Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #if SK_SUPPORT_GPU | 8 #if SK_SUPPORT_GPU |
| 9 #include "GrLayerHoister.h" | 9 #include "GrLayerHoister.h" |
| 10 #include "GrRecordReplaceDraw.h" | 10 #include "GrRecordReplaceDraw.h" |
| 11 #endif | 11 #endif |
| 12 | 12 |
| 13 #include "SkCanvas.h" | 13 #include "SkCanvas.h" |
| 14 #include "SkMultiPictureDraw.h" | 14 #include "SkMultiPictureDraw.h" |
| 15 #include "SkPicture.h" | 15 #include "SkPicture.h" |
| 16 | 16 |
| 17 void SkMultiPictureDraw::DrawData::init(SkCanvas* canvas, const SkPicture* picture, | |
| 18 const SkMatrix* matrix, const SkPaint* paint) { | |
| 19 fPicture = SkRef(picture); | |
| 20 fCanvas = SkRef(canvas); | |
| 21 if (matrix) { | |
| 22 fMatrix = *matrix; | |
| 23 } else { | |
| 24 fMatrix.setIdentity(); | |
| 25 } | |
| 26 if (paint) { | |
| 27 fPaint = SkNEW_ARGS(SkPaint, (*paint)); | |
| 28 } else { | |
| 29 fPaint = NULL; | |
| 30 } | |
| 31 } | |
| 32 | |
| 33 void SkMultiPictureDraw::DrawData::Reset(SkTDArray<DrawData>& data) { | |
| 34 for (int i = 0; i < data.count(); ++i) { | |
| 35 data[i].fPicture->unref(); | |
| 36 data[i].fCanvas->unref(); | |
| 37 SkDELETE(data[i].fPaint); | |
| 38 } | |
| 39 data.rewind(); | |
| 40 } | |
| 41 | |
| 17 SkMultiPictureDraw::SkMultiPictureDraw(int reserve) { | 42 SkMultiPictureDraw::SkMultiPictureDraw(int reserve) { |
| 18 if (reserve > 0) { | 43 if (reserve > 0) { |
| 19 fDrawData.setReserve(reserve); | 44 fGPUDrawData.setReserve(reserve); |
| 45 fThreadSafeDrawData.setReserve(reserve); | |
| 20 } | 46 } |
| 21 } | 47 } |
| 22 | 48 |
| 23 void SkMultiPictureDraw::reset() { | 49 void SkMultiPictureDraw::reset() { |
| robertphillips, 2014/10/28 20:44:58: DrawData::Reset(fGPUDrawData); ? (a sketch of this suggestion follows the diff) | |
| 24 for (int i = 0; i < fDrawData.count(); ++i) { | 50 fGPUDrawData.reset(); |
| 25 fDrawData[i].picture->unref(); | 51 fThreadSafeDrawData.reset(); |
| 26 fDrawData[i].canvas->unref(); | |
| 27 SkDELETE(fDrawData[i].paint); | |
| 28 } | |
| 29 | |
| 30 fDrawData.rewind(); | |
| 31 } | 52 } |
| 32 | 53 |
| 33 void SkMultiPictureDraw::add(SkCanvas* canvas, | 54 void SkMultiPictureDraw::add(SkCanvas* canvas, |
| 34 const SkPicture* picture, | 55 const SkPicture* picture, |
| 35 const SkMatrix* matrix, | 56 const SkMatrix* matrix, |
| 36 const SkPaint* paint) { | 57 const SkPaint* paint) { |
| 37 if (NULL == canvas || NULL == picture) { | 58 if (NULL == canvas || NULL == picture) { |
| 38 SkDEBUGFAIL("parameters to SkMultiPictureDraw::add should be non-NULL"); | 59 SkDEBUGFAIL("parameters to SkMultiPictureDraw::add should be non-NULL"); |
| 39 return; | 60 return; |
| 40 } | 61 } |
| 41 | 62 |
| 42 DrawData* data = fDrawData.append(); | 63 SkTDArray<DrawData>& array = canvas->getGrContext() ? fGPUDrawData : fThreadSafeDrawData; |
| 43 | 64 array.append()->init(canvas, picture, matrix, paint); |
| 44 data->picture = SkRef(picture); | |
| 45 data->canvas = SkRef(canvas); | |
| 46 if (matrix) { | |
| 47 data->matrix = *matrix; | |
| 48 } else { | |
| 49 data->matrix.setIdentity(); | |
| 50 } | |
| 51 if (paint) { | |
| 52 data->paint = SkNEW_ARGS(SkPaint, (*paint)); | |
| 53 } else { | |
| 54 data->paint = NULL; | |
| 55 } | |
| 56 } | 65 } |
| 57 | 66 |
| 58 #undef SK_IGNORE_GPU_LAYER_HOISTING | 67 #undef SK_IGNORE_GPU_LAYER_HOISTING |
| 59 #define SK_IGNORE_GPU_LAYER_HOISTING 1 | 68 #define SK_IGNORE_GPU_LAYER_HOISTING 1 |
| 60 | 69 |
| 70 #include "SkTaskGroup.h" | |
| 71 | |
| 72 struct PictureDrawRunnable : public SkRunnable { | |
| 73 SkCanvas* fCanvas; | |
| 74 const SkPicture* fPicture; | |
| 75 const SkMatrix* fMatrix; | |
| 76 const SkPaint* fPaint; | |
| 77 | |
| 78 PictureDrawRunnable* init(SkCanvas* canvas, const SkPicture* picture, const SkMatrix* matrix, | |
| 79 const SkPaint* paint) { | |
| 80 // no need to ref/copy these, as the caller ensures they all survive our task. | |
| 81 fCanvas = canvas; | |
| 82 fPicture = picture; | |
| 83 fMatrix = matrix; | |
| 84 fPaint = paint; | |
| 85 return this; | |
| 86 } | |
| 87 | |
| 88 virtual void run() SK_OVERRIDE { | |
| 89 fCanvas->drawPicture(fPicture, fMatrix, fPaint); | |
| 90 } | |
| 91 }; | |
| 92 | |
| 61 void SkMultiPictureDraw::draw() { | 93 void SkMultiPictureDraw::draw() { |
| 94 SkTaskGroup group; | |
| 95 | |
| 96 // Queue up the non-gpu tasks first, so they can execute in parallel while we then | |
| 97 // handle the gpu tasks. | |
| 98 if (fThreadSafeDrawData.count() > 0) { | |
| 99 const int count = fThreadSafeDrawData.count(); | |
| 100 SkAutoSTArray<32, PictureDrawRunnable> pdr(count); | |
| 101 for (int i = 0; i < count; ++i) { | |
| 102 const DrawData& data = fThreadSafeDrawData[i]; | |
| 103 group.add(pdr[i].init(data.fCanvas, data.fPicture, &data.fMatrix, data.fPaint)); | |
| 104 } | |
| 105 } | |
| 106 | |
| 107 // Enter the GPU-only section | |
| 108 | |
| 109 const int count = fGPUDrawData.count(); | |
| 110 if (0 == count) { | |
| robertphillips, 2014/10/28 20:44:58: add: group.wait(); this->reset(); to fulfill con (a sketch of this suggestion follows the diff) | |
| 111 return; | |
| 112 } | |
| 62 | 113 |
| 63 #ifndef SK_IGNORE_GPU_LAYER_HOISTING | 114 #ifndef SK_IGNORE_GPU_LAYER_HOISTING |
| 64 GrContext* context = NULL; | 115 GrContext* context = fGPUDrawData[0].fCanvas->getGrContext(); |
| 116 SkASSERT(context); | |
| 65 | 117 |
| 66 // Start by collecting all the layers that are going to be atlased and render | 118 // Start by collecting all the layers that are going to be atlased and render |
| 67 // them (if necessary). Hoisting the free floating layers is deferred until | 119 // them (if necessary). Hoisting the free floating layers is deferred until |
| 68 // drawing the canvas that requires them. | 120 // drawing the canvas that requires them. |
| 69 SkTDArray<GrHoistedLayer> atlasedNeedRendering, atlasedRecycled; | 121 SkTDArray<GrHoistedLayer> atlasedNeedRendering, atlasedRecycled; |
| 70 | 122 |
| 71 for (int i = 0; i < fDrawData.count(); ++i) { | 123 for (int i = 0; i < count; ++i) { |
| 72 if (fDrawData[i].canvas->getGrContext() && | 124 const DrawData& data = fGPUDrawData[i]; |
| 73 !fDrawData[i].paint && fDrawData[i].matrix.isIdentity()) { | 125 // we only expect 1 context for all the canvases |
| 74 SkASSERT(NULL == context || context == fDrawData[i].canvas->getGrContext()); | 126 SkASSERT(data.fCanvas->getGrContext() == context); |
| 75 context = fDrawData[i].canvas->getGrContext(); | |
| 76 | 127 |
| 128 if (!data.fPaint && data.fMatrix.isIdentity()) { | |
| 77 // TODO: this path always tries to optimize pictures. Should we | 129 // TODO: this path always tries to optimize pictures. Should we |
| 78 // switch to this API approach (vs. SkCanvas::EXPERIMENTAL_optimize)? | 130 // switch to this API approach (vs. SkCanvas::EXPERIMENTAL_optimize)? |
| 79 fDrawData[i].canvas->EXPERIMENTAL_optimize(fDrawData[i].picture); | 131 data.fCanvas->EXPERIMENTAL_optimize(data.fPicture); |
| 80 | 132 |
| 81 SkRect clipBounds; | 133 SkRect clipBounds; |
| 82 if (!fDrawData[i].canvas->getClipBounds(&clipBounds)) { | 134 if (!data.fCanvas->getClipBounds(&clipBounds)) { |
| 83 continue; | 135 continue; |
| 84 } | 136 } |
| 85 | 137 |
| 86 // TODO: sorting the cacheable layers from smallest to largest | 138 // TODO: sorting the cacheable layers from smallest to largest |
| 87 // would improve the packing and reduce the number of swaps | 139 // would improve the packing and reduce the number of swaps |
| 88 // TODO: another optimization would be to make a first pass to | 140 // TODO: another optimization would be to make a first pass to |
| 89 // lock any required layer that is already in the atlas | 141 // lock any required layer that is already in the atlas |
| 90 GrLayerHoister::FindLayersToAtlas(context, fDrawData[i].picture, | 142 GrLayerHoister::FindLayersToAtlas(context, data.fPicture, |
| 91 clipBounds, | 143 clipBounds, |
| 92 &atlasedNeedRendering, &atlasedRecycled); | 144 &atlasedNeedRendering, &atlasedRecycled); |
| 93 } | 145 } |
| 94 } | 146 } |
| 95 | 147 |
| 96 if (NULL != context) { | 148 GrLayerHoister::DrawLayersToAtlas(context, atlasedNeedRendering); |
| 97 GrLayerHoister::DrawLayersToAtlas(context, atlasedNeedRendering); | |
| 98 } | |
| 99 | 149 |
| 100 SkTDArray<GrHoistedLayer> needRendering, recycled; | 150 SkTDArray<GrHoistedLayer> needRendering, recycled; |
| 101 #endif | 151 #endif |
| 102 | 152 |
| 103 for (int i = 0; i < fDrawData.count(); ++i) { | 153 for (int i = 0; i < count; ++i) { |
| 154 const DrawData& data = fGPUDrawData[i]; | |
| 155 SkCanvas* canvas = data.fCanvas; | |
| 156 const SkPicture* picture = data.fPicture; | |
| 157 | |
| 104 #ifndef SK_IGNORE_GPU_LAYER_HOISTING | 158 #ifndef SK_IGNORE_GPU_LAYER_HOISTING |
| 105 if (fDrawData[i].canvas->getGrContext() && | 159 if (!data.fPaint && data.fMatrix.isIdentity()) { |
| 106 !fDrawData[i].paint && fDrawData[i].matrix.isIdentity()) { | |
| 107 | 160 |
| 108 SkRect clipBounds; | 161 SkRect clipBounds; |
| 109 if (!fDrawData[i].canvas->getClipBounds(&clipBounds)) { | 162 if (!canvas->getClipBounds(&clipBounds)) { |
| 110 continue; | 163 continue; |
| 111 } | 164 } |
| 112 | 165 |
| 113 // Find the layers required by this canvas. It will return atlased | 166 // Find the layers required by this canvas. It will return atlased |
| 114 // layers in the 'recycled' list since they have already been drawn. | 167 // layers in the 'recycled' list since they have already been drawn. |
| 115 GrLayerHoister::FindLayersToHoist(context, fDrawData[i].picture, | 168 GrLayerHoister::FindLayersToHoist(context, picture, |
| 116 clipBounds, &needRendering, &recycled); | 169 clipBounds, &needRendering, &recycled); |
| 117 | 170 |
| 118 GrLayerHoister::DrawLayers(context, needRendering); | 171 GrLayerHoister::DrawLayers(context, needRendering); |
| 119 | 172 |
| 120 GrReplacements replacements; | 173 GrReplacements replacements; |
| 121 | 174 |
| 122 GrLayerHoister::ConvertLayersToReplacements(needRendering, &replacements); | 175 GrLayerHoister::ConvertLayersToReplacements(needRendering, &replacements); |
| 123 GrLayerHoister::ConvertLayersToReplacements(recycled, &replacements); | 176 GrLayerHoister::ConvertLayersToReplacements(recycled, &replacements); |
| 124 | 177 |
| 125 const SkMatrix initialMatrix = fDrawData[i].canvas->getTotalMatrix(); | 178 const SkMatrix initialMatrix = canvas->getTotalMatrix(); |
| 126 | 179 |
| 127 // Render the entire picture using new layers | 180 // Render the entire picture using new layers |
| 128 GrRecordReplaceDraw(fDrawData[i].picture, fDrawData[i].canvas, | 181 GrRecordReplaceDraw(picture, canvas, &replacements, initialMatrix, NULL); |
| 129 &replacements, initialMatrix, NULL); | |
| 130 | 182 |
| 131 GrLayerHoister::UnlockLayers(context, needRendering); | 183 GrLayerHoister::UnlockLayers(context, needRendering); |
| 132 GrLayerHoister::UnlockLayers(context, recycled); | 184 GrLayerHoister::UnlockLayers(context, recycled); |
| 133 | 185 |
| 134 needRendering.rewind(); | 186 needRendering.rewind(); |
| 135 recycled.rewind(); | 187 recycled.rewind(); |
| 136 } else | 188 } else |
| 137 #endif | 189 #endif |
| 138 { | 190 { |
| 139 fDrawData[i].canvas->drawPicture(fDrawData[i].picture, | 191 canvas->drawPicture(picture, &data.fMatrix, data.fPaint); |
| 140 &fDrawData[i].matrix, | |
| 141 fDrawData[i].paint); | |
| 142 } | 192 } |
| 143 } | 193 } |
| 144 | 194 |
| 145 #ifndef SK_IGNORE_GPU_LAYER_HOISTING | 195 #ifndef SK_IGNORE_GPU_LAYER_HOISTING |
| 146 if (NULL != context) { | 196 GrLayerHoister::UnlockLayers(context, atlasedNeedRendering); |
| 147 GrLayerHoister::UnlockLayers(context, atlasedNeedRendering); | 197 GrLayerHoister::UnlockLayers(context, atlasedRecycled); |
| 148 GrLayerHoister::UnlockLayers(context, atlasedRecycled); | |
| 149 } | |
| 150 #endif | 198 #endif |
| 151 | 199 |
| 200 // we need to explicitly wait, as we will tear-down our objects (DrawData) when we call reset() | |
| 201 // and our tasks just have shallow pointers to those. Otherwise, we could just rely on group's | |
| 202 // destructor to perform the wait. | |
| 203 group.wait(); | |
| 152 this->reset(); | 204 this->reset(); |
| 153 } | 205 } |
| 154 | 206 |
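The first review comment suggests routing `SkMultiPictureDraw::reset()` through the `DrawData::Reset()` helper introduced by this patch rather than calling `reset()` on the arrays directly, presumably so the per-entry unrefs and paint deletion from the old code are not lost. A minimal sketch of what that suggestion might look like, reusing only names that appear in the diff (this is not the landed code):

```cpp
void SkMultiPictureDraw::reset() {
    // DrawData::Reset() unrefs each entry's fPicture/fCanvas, deletes the
    // copied fPaint, and rewinds the array, so nothing owned by the entries
    // is leaked when the draw lists are cleared.
    DrawData::Reset(fGPUDrawData);
    DrawData::Reset(fThreadSafeDrawData);
}
```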
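The second review comment asks for `group.wait()` and `this->reset()` on the early-out path taken when there is no GPU work. A sketch of just that fragment of `SkMultiPictureDraw::draw()` with the suggested lines added (excerpted from the function in the diff, not a standalone program):

```cpp
    const int count = fGPUDrawData.count();
    if (0 == count) {
        // The runnables queued on 'group' hold shallow pointers into
        // fThreadSafeDrawData, so wait for them before tearing anything down.
        group.wait();
        // Match the cleanup draw() performs on its normal exit path.
        this->reset();
        return;
    }
```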