OLD | NEW |
---|---|
(Empty) | |
1 /* | |
2 * Copyright 2014 Google Inc. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license that can be | |
5 * found in the LICENSE file. | |
6 */ | |
7 | |
robertphillips
2014/07/28 17:51:36
This needs a 'T' in its name to let everyone know
krajcevski
2014/07/28 18:18:31
Done.
| |
8 #ifndef SkTextureCompressor_Blitter_DEFINED | |
9 #define SkTextureCompressor_Blitter_DEFINED | |
10 | |
11 #include "SkTypes.h" | |
12 #include "SkBlitter.h" | |
13 | |
14 namespace SkTextureCompressor { | |
15 | |
// The function used to compress a kBlockSize x kBlockSize block of 8-bit
// alpha values: 'block' holds the source alphas (in column-major order) and
// the compressed result is written to 'dst'. This function is expected to be
// used as a template argument to SkCompressedAlphaBlitter.
typedef void (*CompressA8Proc)(uint8_t* dst, const uint8_t block[]);
20 | |
21 // This class implements a blitter that blits directly into a buffer that will | |
22 // be used as an compressed alpha texture. We compute this buffer by | |
23 // buffering scan lines and then outputting them all at once. The number of | |
24 // scan lines buffered is controlled by kBlockSize | |
25 template<int kBlockSize, int kEncodedBlockSize, CompressA8Proc kCompressionProc> | |
26 class SkCompressedAlphaBlitter : public SkBlitter { | |
27 public: | |
28 SkCompressedAlphaBlitter(int width, int height, void *compressedBuffer) | |
29 // 0x7FFE is one minus the largest positive 16-bit int. We use it for | |
30 // debugging to make sure that we're properly setting the nextX distance | |
31 // in flushRuns(). | |
32 : kLongestRun(0x7FFE), kZeroAlpha(0) | |
33 , fNextRun(0) | |
34 , fWidth(width) | |
35 , fHeight(height) | |
36 , fBuffer(compressedBuffer) | |
37 { | |
38 SkASSERT((width % kBlockSize) == 0); | |
39 SkASSERT((height % kBlockSize) == 0); | |
40 } | |
41 | |
42 virtual ~SkCompressedAlphaBlitter() { this->flushRuns(); } | |
43 | |
44 // Blit a horizontal run of one or more pixels. | |
45 virtual void blitH(int x, int y, int width) SK_OVERRIDE { | |
46 // This function is intended to be called from any standard RGB | |
47 // buffer, so we should never encounter it. However, if some code | |
48 // path does end up here, then this needs to be investigated. | |
49 SkFAIL("Not implemented!"); | |
50 } | |
51 | |
52 // Blit a horizontal run of antialiased pixels; runs[] is a *sparse* | |
53 // zero-terminated run-length encoding of spans of constant alpha values. | |
54 virtual void blitAntiH(int x, int y, | |
55 const SkAlpha antialias[], | |
56 const int16_t runs[]) SK_OVERRIDE { | |
57 // Make sure that the new row to blit is either the first | |
58 // row that we're blitting, or it's exactly the next scan row | |
59 // since the last row that we blit. This is to ensure that when | |
60 // we go to flush the runs, that they are all the same four | |
61 // runs. | |
62 if (fNextRun > 0 && | |
63 ((x != fBufferedRuns[fNextRun-1].fX) || | |
64 (y-1 != fBufferedRuns[fNextRun-1].fY))) { | |
65 this->flushRuns(); | |
66 } | |
67 | |
68 // Align the rows to a block boundary. If we receive rows that | |
69 // are not on a block boundary, then fill in the preceding runs | |
70 // with zeros. We do this by producing a single RLE that says | |
71 // that we have 0x7FFE pixels of zero (0x7FFE = 32766). | |
72 const int row = y & ~3; | |
73 while ((row + fNextRun) < y) { | |
74 fBufferedRuns[fNextRun].fAlphas = &kZeroAlpha; | |
75 fBufferedRuns[fNextRun].fRuns = &kLongestRun; | |
76 fBufferedRuns[fNextRun].fX = 0; | |
77 fBufferedRuns[fNextRun].fY = row + fNextRun; | |
78 ++fNextRun; | |
79 } | |
80 | |
81 // Make sure that our assumptions aren't violated... | |
82 SkASSERT(fNextRun == (y & 3)); | |
83 SkASSERT(fNextRun == 0 || fBufferedRuns[fNextRun - 1].fY < y); | |
84 | |
85 // Set the values of the next run | |
86 fBufferedRuns[fNextRun].fAlphas = antialias; | |
87 fBufferedRuns[fNextRun].fRuns = runs; | |
88 fBufferedRuns[fNextRun].fX = x; | |
89 fBufferedRuns[fNextRun].fY = y; | |
90 | |
91 // If we've output four scanlines in a row that don't violate our | |
92 // assumptions, then it's time to flush them... | |
robertphillips
2014/07/28 17:51:36
4?
krajcevski
2014/07/28 18:18:31
Done.
| |
93 if (4 == ++fNextRun) { | |
94 this->flushRuns(); | |
95 } | |
96 } | |
97 | |
98 // Blit a vertical run of pixels with a constant alpha value. | |
99 virtual void blitV(int x, int y, int height, SkAlpha alpha) SK_OVERRIDE { | |
100 // This function is currently not implemented. It is not explicitly | |
101 // required by the contract, but if at some time a code path runs into | |
102 // this function (which is entirely possible), it needs to be implemente d. | |
103 // | |
104 // TODO (krajcevski): | |
105 // This function will be most easily implemented in one of two ways: | |
106 // 1. Buffer each vertical column value and then construct a list | |
107 // of alpha values and output all of the blocks at once. This only | |
108 // requires a write to the compressed buffer | |
109 // 2. Replace the indices of each block with the proper indices based | |
110 // on the alpha value. This requires a read and write of the compress ed | |
111 // buffer, but much less overhead. | |
112 SkFAIL("Not implemented!"); | |
113 } | |
114 | |
115 // Blit a solid rectangle one or more pixels wide. | |
116 virtual void blitRect(int x, int y, int width, int height) SK_OVERRIDE { | |
117 // Analogous to blitRow, this function is intended for RGB targets | |
118 // and should never be called by this blitter. Any calls to this functio n | |
119 // are probably a bug and should be investigated. | |
120 SkFAIL("Not implemented!"); | |
121 } | |
122 | |
123 // Blit a rectangle with one alpha-blended column on the left, | |
124 // width (zero or more) opaque pixels, and one alpha-blended column | |
125 // on the right. The result will always be at least two pixels wide. | |
126 virtual void blitAntiRect(int x, int y, int width, int height, | |
127 SkAlpha leftAlpha, SkAlpha rightAlpha) SK_OVERRIDE { | |
128 // This function is currently not implemented. It is not explicitly | |
129 // required by the contract, but if at some time a code path runs into | |
130 // this function (which is entirely possible), it needs to be implemente d. | |
131 // | |
132 // TODO (krajcevski): | |
133 // This function will be most easily implemented as follows: | |
134 // 1. If width/height are smaller than a block, then update the | |
135 // indices of the affected blocks. | |
136 // 2. If width/height are larger than a block, then construct a 9-patch | |
137 // of block encodings that represent the rectangle, and write them | |
138 // to the compressed buffer as necessary. Whether or not the blocks | |
139 // are overwritten by zeros or just their indices are updated is up | |
140 // to debate. | |
141 SkFAIL("Not implemented!"); | |
142 } | |
143 | |
144 // Blit a pattern of pixels defined by a rectangle-clipped mask; | |
145 // typically used for text. | |
146 virtual void blitMask(const SkMask&, const SkIRect& clip) SK_OVERRIDE { | |
147 // This function is currently not implemented. It is not explicitly | |
148 // required by the contract, but if at some time a code path runs into | |
149 // this function (which is entirely possible), it needs to be implemente d. | |
150 // | |
151 // TODO (krajcevski): | |
152 // This function will be most easily implemented in the same way as | |
153 // blitAntiRect above. | |
154 SkFAIL("Not implemented!"); | |
155 } | |
156 | |
157 // If the blitter just sets a single value for each pixel, return the | |
158 // bitmap it draws into, and assign value. If not, return NULL and ignore | |
159 // the value parameter. | |
160 virtual const SkBitmap* justAnOpaqueColor(uint32_t* value) SK_OVERRIDE { | |
161 return NULL; | |
162 } | |
163 | |
164 /** | |
165 * Compressed texture blitters only really work correctly if they get | |
robertphillips
2014/07/28 17:51:36
four?
krajcevski
2014/07/28 18:18:31
Done.
| |
166 * four blocks at a time. That being said, this blitter tries it's best | |
167 * to preserve semantics if blitAntiH doesn't get called in too many | |
168 * weird ways... | |
169 */ | |
170 virtual int requestRowsPreserved() const { return kBlockSize; } | |
171 | |
172 private: | |
173 static const int kPixelsPerBlock = kBlockSize * kBlockSize; | |
174 | |
175 // The longest possible run of pixels that this blitter will receive. | |
176 // This is initialized in the constructor to 0x7FFE, which is one less | |
177 // than the largest positive 16-bit integer. We make sure that it's one | |
178 // less for debugging purposes. We also don't make this variable static | |
179 // in order to make sure that we can construct a valid pointer to it. | |
180 const int16_t kLongestRun; | |
181 | |
182 // Usually used in conjunction with kLongestRun. This is initialized to | |
183 // zero. | |
184 const SkAlpha kZeroAlpha; | |
185 | |
186 // This is the information that we buffer whenever we're asked to blit | |
187 // a row with this blitter. | |
188 struct BufferedRun { | |
189 const SkAlpha* fAlphas; | |
190 const int16_t* fRuns; | |
191 int fX, fY; | |
192 } fBufferedRuns[kBlockSize]; | |
193 | |
194 // The next row [0, kBlockSize) that we need to blit. | |
195 int fNextRun; | |
196 | |
197 // The width and height of the image that we're blitting | |
198 const int fWidth; | |
199 const int fHeight; | |
200 | |
201 // The compressed buffer that we're blitting into. It is assumed that the bu ffer | |
202 // is large enough to store a compressed image of size fWidth*fHeight. | |
203 void* const fBuffer; | |
204 | |
205 // Various utility functions | |
206 int blocksWide() const { return fWidth / kBlockSize; } | |
207 int blocksTall() const { return fHeight / kBlockSize; } | |
208 int totalBlocks() const { return (fWidth * fHeight) / kPixelsPerBlock; } | |
209 | |
210 // Returns the block index for the block containing pixel (x, y). Block | |
211 // indices start at zero and proceed in raster order. | |
212 int getBlockOffset(int x, int y) const { | |
213 SkASSERT(x < fWidth); | |
214 SkASSERT(y < fHeight); | |
215 const int blockCol = x / kBlockSize; | |
216 const int blockRow = y / kBlockSize; | |
217 return blockRow * this->blocksWide() + blockCol; | |
218 } | |
219 | |
220 // Returns a pointer to the block containing pixel (x, y) | |
221 uint8_t *getBlock(int x, int y) const { | |
222 uint8_t* ptr = reinterpret_cast<uint8_t*>(fBuffer); | |
223 return ptr + kEncodedBlockSize*this->getBlockOffset(x, y); | |
224 } | |
225 | |
robertphillips
2014/07/28 17:51:37
blockColN ?
krajcevski
2014/07/28 18:18:30
Done.
| |
226 // Updates the block whose columns are stored in blockColN. curAlphai is exp ected | |
227 // to store, as an integer, the four alpha values that will be placed within each | |
228 // of the columns in the range [col, col+colsLeft). | |
229 typedef uint32_t Column[kBlockSize/4]; | |
230 typedef uint32_t Block[kBlockSize][kBlockSize/4]; | |
231 inline void updateBlockColumns(Block block, const int col, | |
232 const int colsLeft, const Column curAlphai) { | |
233 SkASSERT(NULL != block); | |
234 SkASSERT(col + colsLeft <= 4); | |
235 | |
236 for (int i = col; i < (col + colsLeft); ++i) { | |
237 memcpy(block[i], curAlphai, sizeof(Column)); | |
238 } | |
239 } | |
240 | |
241 // The following function writes the buffered runs to compressed blocks. | |
242 // If fNextRun < kBlockSize, then we fill the runs that we haven't buffered with | |
243 // the constant zero buffer. | |
244 void flushRuns() { | |
245 // If we don't have any runs, then just return. | |
246 if (0 == fNextRun) { | |
247 return; | |
248 } | |
249 | |
250 #ifndef NDEBUG | |
251 // Make sure that if we have any runs, they all match | |
252 for (int i = 1; i < fNextRun; ++i) { | |
253 SkASSERT(fBufferedRuns[i].fY == fBufferedRuns[i-1].fY + 1); | |
254 SkASSERT(fBufferedRuns[i].fX == fBufferedRuns[i-1].fX); | |
255 } | |
256 #endif | |
257 | |
robertphillips
2014/07/28 17:51:36
don't ?
krajcevski
2014/07/28 18:18:31
Done.
| |
258 // If we dont have as many runs as we have rows, fill in the remaining | |
259 // runs with constant zeros. | |
260 for (int i = fNextRun; i < kBlockSize; ++i) { | |
261 fBufferedRuns[i].fY = fBufferedRuns[0].fY + i; | |
262 fBufferedRuns[i].fX = fBufferedRuns[0].fX; | |
263 fBufferedRuns[i].fAlphas = &kZeroAlpha; | |
264 fBufferedRuns[i].fRuns = &kLongestRun; | |
265 } | |
266 | |
267 // Make sure that our assumptions aren't violated. | |
268 SkASSERT(fNextRun > 0 && fNextRun <= kBlockSize); | |
269 SkASSERT((fBufferedRuns[0].fY % kBlockSize) == 0); | |
270 | |
271 // The following logic walks kBlockSize rows at a time and outputs compr essed | |
272 // blocks to the buffer passed into the constructor. | |
273 // We do the following: | |
274 // | |
275 // c1 c2 c3 c4 | |
276 // --------------------------------------------------------------------- -- | |
277 // ... | | | | | ----> fBufferedRuns[0] | |
278 // --------------------------------------------------------------------- -- | |
279 // ... | | | | | ----> fBufferedRuns[1] | |
280 // --------------------------------------------------------------------- -- | |
281 // ... | | | | | ----> fBufferedRuns[2] | |
282 // --------------------------------------------------------------------- -- | |
283 // ... | | | | | ----> fBufferedRuns[3] | |
284 // --------------------------------------------------------------------- -- | |
285 // | |
286 // curX -- the macro X value that we've gotten to. | |
287 // c[kBlockSize] -- the buffers that represent the columns of the curren t block | |
288 // that we're operating on | |
289 // curAlphaColumn -- buffer containing the column of alpha values from f BufferedRuns. | |
290 // nextX -- for each run, the next point at which we need to update curA lphaColumn | |
291 // after the value of curX. | |
292 // finalX -- the minimum of all the nextX values. | |
293 // | |
294 // curX advances to finalX outputting any blocks that it passes along | |
295 // the way. Since finalX will not change when we reach the end of a | |
296 // run, the termination criteria will be whenever curX == finalX at the | |
297 // end of a loop. | |
298 | |
299 // Setup: | |
robertphillips
2014/07/28 17:51:37
put sk_bzero's on their own line ?
krajcevski
2014/07/28 18:18:30
Done.
| |
300 Block block; sk_bzero(block, sizeof(block)); | |
301 Column curAlphaColumn; sk_bzero(curAlphaColumn, sizeof(curAlphaColumn)); | |
302 | |
303 SkAlpha *curAlpha = reinterpret_cast<SkAlpha*>(&curAlphaColumn); | |
304 | |
305 int nextX[kBlockSize]; | |
306 for (int i = 0; i < kBlockSize; ++i) { | |
307 nextX[i] = 0x7FFFFF; | |
308 } | |
309 | |
310 uint8_t* outPtr = this->getBlock(fBufferedRuns[0].fX, fBufferedRuns[0].f Y); | |
311 | |
312 // Populate the first set of runs and figure out how far we need to | |
313 // advance on the first step | |
314 int curX = 0; | |
315 int finalX = 0xFFFFF; | |
316 for (int i = 0; i < kBlockSize; ++i) { | |
317 nextX[i] = *(fBufferedRuns[i].fRuns); | |
318 curAlpha[i] = *(fBufferedRuns[i].fAlphas); | |
319 | |
320 finalX = SkMin32(nextX[i], finalX); | |
321 } | |
322 | |
323 // Make sure that we have a valid right-bound X value | |
324 SkASSERT(finalX < 0xFFFFF); | |
325 | |
326 // Run the blitter... | |
327 while (curX != finalX) { | |
328 SkASSERT(finalX >= curX); | |
329 | |
330 // Do we need to populate the rest of the block? | |
331 if ((finalX - (kBlockSize*(curX / kBlockSize))) >= kBlockSize) { | |
332 const int col = curX % kBlockSize; | |
333 const int colsLeft = kBlockSize - col; | |
334 SkASSERT(curX + colsLeft <= finalX); | |
335 | |
336 this->updateBlockColumns(block, col, colsLeft, curAlphaColumn); | |
337 | |
338 // Write this block | |
robertphillips
2014/07/28 17:51:37
The 'k' prefix is usually reserved for comments. I
krajcevski
2014/07/28 18:18:31
Done.
| |
339 kCompressionProc(outPtr, reinterpret_cast<uint8_t*>(block)); | |
340 outPtr += kEncodedBlockSize; | |
341 curX += colsLeft; | |
342 } | |
343 | |
344 // If we can advance even further, then just keep memsetting the blo ck | |
345 if ((finalX - curX) >= kBlockSize) { | |
346 SkASSERT((curX % kBlockSize) == 0); | |
347 | |
348 const int col = 0; | |
349 const int colsLeft = kBlockSize; | |
350 | |
351 this->updateBlockColumns(block, col, colsLeft, curAlphaColumn); | |
352 | |
353 // While we can keep advancing, just keep writing the block. | |
354 uint8_t lastBlock[kEncodedBlockSize]; | |
355 kCompressionProc(lastBlock, reinterpret_cast<uint8_t*>(block)); | |
356 while((finalX - curX) >= kBlockSize) { | |
357 memcpy(outPtr, lastBlock, kEncodedBlockSize); | |
358 outPtr += kEncodedBlockSize; | |
359 curX += kBlockSize; | |
360 } | |
361 } | |
362 | |
363 // If we haven't advanced within the block then do so. | |
364 if (curX < finalX) { | |
365 const int col = curX % kBlockSize; | |
366 const int colsLeft = finalX - curX; | |
367 | |
368 this->updateBlockColumns(block, col, colsLeft, curAlphaColumn); | |
369 curX += colsLeft; | |
370 } | |
371 | |
372 SkASSERT(curX == finalX); | |
373 | |
374 // Figure out what the next advancement is... | |
375 for (int i = 0; i < kBlockSize; ++i) { | |
376 if (nextX[i] == finalX) { | |
377 const int16_t run = *(fBufferedRuns[i].fRuns); | |
378 fBufferedRuns[i].fRuns += run; | |
379 fBufferedRuns[i].fAlphas += run; | |
380 curAlpha[i] = *(fBufferedRuns[i].fAlphas); | |
381 nextX[i] += *(fBufferedRuns[i].fRuns); | |
382 } | |
383 } | |
384 | |
385 finalX = 0xFFFFF; | |
386 for (int i = 0; i < kBlockSize; ++i) { | |
387 finalX = SkMin32(nextX[i], finalX); | |
388 } | |
389 } | |
390 | |
391 // If we didn't land on a block boundary, output the block... | |
392 if ((curX % kBlockSize) > 1) { | |
393 kCompressionProc(outPtr, reinterpret_cast<uint8_t*>(block)); | |
394 } | |
395 | |
396 fNextRun = 0; | |
397 } | |
398 }; | |
399 | |
400 } // namespace SkTextureCompressor | |
401 | |
402 #endif // SkTextureCompressor_Blitter_DEFINED | |
OLD | NEW |