/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkTextureCompressor_Blitter_DEFINED
#define SkTextureCompressor_Blitter_DEFINED

(...skipping 37 matching lines...)
//
//     // The function used to compress an A8 block. The layout of the
//     // block is also expected to be in row-major order.
//     static void CompressA8Horizontal(uint8_t* dst, const uint8_t* src, int srcRowBytes);
//
#if PEDANTIC_BLIT_RECT
//     // The function used to update an already compressed block. This will
//     // most likely be implementation dependent. The mask variable will have
//     // 0xFF in positions where the block should be updated and 0 in positions
//     // where it shouldn't. src contains an uncompressed buffer of pixels.
//     static void UpdateBlock(uint8_t* dst, const uint8_t* src, int srcRowBytes,
//                             const uint8_t* mask);
#endif
// };
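//
// As a point of reference, a minimal CompressorType satisfying the portion of
// the contract visible above might look like the sketch below. The struct name
// NoopA8Compressor and its fixed 4x4 block size are hypothetical and exist only
// for illustration; the elided lines above may require additional entry points.
//
//     struct NoopA8Compressor {
//         // "Compresses" a 4x4 row-major A8 block by copying it verbatim, so
//         // the encoded block is simply 16 bytes of alpha.
//         static void CompressA8Horizontal(uint8_t* dst, const uint8_t* src, int srcRowBytes) {
//             for (int row = 0; row < 4; ++row) {
//                 memcpy(dst + row*4, src + row*srcRowBytes, 4);
//             }
//         }
//
//     #if PEDANTIC_BLIT_RECT
//         // Overwrites only the texels whose mask byte is 0xFF, leaving the
//         // rest of the previously encoded block untouched.
//         static void UpdateBlock(uint8_t* dst, const uint8_t* src, int srcRowBytes,
//                                 const uint8_t* mask) {
//             for (int row = 0; row < 4; ++row) {
//                 for (int col = 0; col < 4; ++col) {
//                     if (mask[row*4 + col]) {
//                         dst[row*4 + col] = src[row*srcRowBytes + col];
//                     }
//                 }
//             }
//         }
//     #endif
//     };
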
template<int BlockDim, int EncodedBlockSize, typename CompressorType>
class SkTCompressedAlphaBlitter : public SkBlitter {
public:
    SkTCompressedAlphaBlitter(int width, int height, void *compressedBuffer)
        // 0x7FFE is one less than the largest positive 16-bit int. We use it for
        // debugging to make sure that we're properly setting the nextX distance
        // in flushRuns().
#ifdef SK_DEBUG
        : fCalledOnceWithNonzeroY(false)
        , fBlitMaskCalled(false),
#else
        :
#endif
        kLongestRun(0x7FFE), kZeroAlpha(0)
        , fNextRun(0)
        , fWidth(width)
        , fHeight(height)
(...skipping 53 matching lines...)
        fBufferedRuns[fNextRun].fRuns = runs;
        fBufferedRuns[fNextRun].fX = x;
        fBufferedRuns[fNextRun].fY = y;

        // Once we've buffered a full block of consecutive scanlines that don't
        // violate our assumptions, it's time to flush them...
        if (BlockDim == ++fNextRun) {
            this->flushRuns();
        }
    }

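    // For context, the run-length layout consumed above follows the usual
    // SkBlitter::blitAntiH() convention (defined by SkBlitter, not by this
    // file): runs[i] holds the number of consecutive pixels that share the
    // coverage value antialias[i], the next run starts at index i + runs[i],
    // and a run length of zero terminates the scanline. The arrays below are a
    // hypothetical example of that encoding, not data used by this blitter:
    //
    //     // 5 pixels at full coverage, then 3 pixels at half coverage, then stop.
    //     int16_t runs[9]      = { 5, 0, 0, 0, 0, 3, 0, 0, 0 };
    //     SkAlpha antialias[9] = { 0xFF, 0, 0, 0, 0, 0x80, 0, 0, 0 };
    //
    // kLongestRun (0x7FFE) above is simply one maximal run that covers an
    // entire row at a single alpha value (the row width is assumed to be at
    // most 0x7FFE).
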
    // Blit a vertical run of pixels with a constant alpha value.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        // This function is currently not implemented. It is not explicitly
        // required by the contract, but if at some time a code path runs into
        // this function (which is entirely possible), it needs to be implemented.
        //
        // TODO (krajcevski):
        // This function will be most easily implemented in one of two ways:
        // 1. Buffer each vertical column value and then construct a list
        //    of alpha values and output all of the blocks at once. This only
(...skipping 11 matching lines...)
    bool fCalledOnceWithNonzeroY;
#endif
    void blitRect(int x, int y, int width, int height) override {

        // Assumptions:
        SkASSERT(0 == x);
        SkASSERT(width <= fWidth);

        // Make sure that we're only ever bracketing calls to blitAntiH.
        SkASSERT((0 == y) || (!fCalledOnceWithNonzeroY && (fCalledOnceWithNonzeroY = true)));

#if !(PEDANTIC_BLIT_RECT)
        for (int i = 0; i < height; ++i) {
            const SkAlpha kFullAlpha = 0xFF;
            this->blitAntiH(x, y+i, &kFullAlpha, &kLongestRun);
        }
#else
        const int startBlockX = (x / BlockDim) * BlockDim;
        const int startBlockY = (y / BlockDim) * BlockDim;

        const int endBlockX = ((x + width) / BlockDim) * BlockDim;
        const int endBlockY = ((y + height) / BlockDim) * BlockDim;

        // If start and end are the same, then we only need to update a single block...
        if (startBlockY == endBlockY && startBlockX == endBlockX) {
            uint8_t mask[BlockDim*BlockDim];
            memset(mask, 0, sizeof(mask));

            const int xoff = x - startBlockX;
            SkASSERT((xoff + width) <= BlockDim);

            const int yoff = y - startBlockY;
            SkASSERT((yoff + height) <= BlockDim);

            for (int j = 0; j < height; ++j) {
                memset(mask + (j + yoff)*BlockDim + xoff, 0xFF, width);
            }

            uint8_t* dst = this->getBlock(startBlockX, startBlockY);
            CompressorType::UpdateBlock(dst, mask, BlockDim, mask);

        // If start and end are the same in the y dimension, then we can freely update an
        // entire row of blocks...
        } else if (startBlockY == endBlockY) {
(...skipping 278 matching lines...)
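    // The pedantic blitRect() path above (before the elided lines) snaps the
    // dirty rect to the block grid by rounding each edge down to a multiple of
    // BlockDim. A standalone sketch of that arithmetic (the helper name
    // blockBoundsForRect is hypothetical, shown only to make the rounding
    // explicit):
    //
    //     void blockBoundsForRect(int x, int y, int w, int h, int dim,
    //                             int* startX, int* startY, int* endX, int* endY) {
    //         *startX = (x / dim) * dim;        // block column containing the left edge
    //         *startY = (y / dim) * dim;        // block row containing the top edge
    //         *endX   = ((x + w) / dim) * dim;  // block-aligned x of the rect's far corner
    //         *endY   = ((y + h) / dim) * dim;  // block-aligned y of the rect's far corner
    //     }
    //
    // For example, with dim == 4 a rect at (x=5, y=1, w=6, h=2) yields
    // startBlock == (4, 0) and endBlock == (8, 0): the rect straddles two block
    // columns but only one block row, so it is handled by the
    // "startBlockY == endBlockY" branch above.
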
        //      c1 c2 c3 c4
        // -----------------------------------------------------------------------
        // ...  |  |  |  |  |  ----> fBufferedRuns[0]
        // -----------------------------------------------------------------------
        // ...  |  |  |  |  |  ----> fBufferedRuns[1]
        // -----------------------------------------------------------------------
        // ...  |  |  |  |  |  ----> fBufferedRuns[2]
        // -----------------------------------------------------------------------
        // ...  |  |  |  |  |  ----> fBufferedRuns[3]
        // -----------------------------------------------------------------------
        //
        // curX -- the macro X value that we've gotten to.
        // c[BlockDim] -- the buffers that represent the columns of the current block
        //                that we're operating on
        // curAlphaColumn -- buffer containing the column of alpha values from fBufferedRuns.
        // nextX -- for each run, the next point at which we need to update curAlphaColumn
        //          after the value of curX.
        // finalX -- the minimum of all the nextX values.
        //
        // curX advances to finalX outputting any blocks that it passes along
        // the way. Since finalX will not change when we reach the end of a
(...skipping 217 matching lines...)
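    // A minimal sketch of the finalX bookkeeping described in the comment
    // above: finalX is just the smallest nextX across the BlockDim buffered
    // runs, so curX can advance to it without skipping an alpha change in any
    // of the buffered scanlines. The helper below is illustrative only and is
    // not part of this class:
    //
    //     int computeFinalX(const int nextX[], int blockDim) {
    //         int finalX = nextX[0];
    //         for (int i = 1; i < blockDim; ++i) {
    //             if (nextX[i] < finalX) {
    //                 finalX = nextX[i];
    //             }
    //         }
    //         return finalX;
    //     }
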
                                        mask, BlockDim, mask);
        }
    }
#endif // PEDANTIC_BLIT_RECT

};

} // namespace SkTextureCompressor

#endif // SkTextureCompressor_Blitter_DEFINED
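
A usage sketch, under the same assumptions as the hypothetical NoopA8Compressor
above: the blitter writes one EncodedBlockSize chunk per BlockDim x BlockDim
block of the mask into a caller-provided buffer, so the caller sizes that buffer
from the block counts. The sizing and the blitRect() call below are illustrative,
not taken from this file.

    void exampleCompressA8Mask() {
        const int kBlockDim = 4;
        const int kEncodedBlockSize = 16;      // one raw 4x4 A8 block per block for NoopA8Compressor
        const int kWidth = 16, kHeight = 16;   // multiples of kBlockDim keep the sketch simple

        const int numBlocks = (kWidth / kBlockDim) * (kHeight / kBlockDim);
        uint8_t* compressed = new uint8_t[numBlocks * kEncodedBlockSize];

        {
            SkTextureCompressor::SkTCompressedAlphaBlitter<kBlockDim, kEncodedBlockSize,
                                                           NoopA8Compressor>
                blitter(kWidth, kHeight, compressed);
            blitter.blitRect(0, 0, kWidth, kHeight);   // cover the whole mask at full alpha
        }   // the blitter is done writing once it goes out of scope

        // ... hand `compressed` to the texture upload path, then:
        delete[] compressed;
    }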