| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * | 7 * |
| 8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
| (...skipping 59 matching lines...) |
| 70 uint8_t blendR = blendChannel(SkGetPackedR32(src), srcA, SkGetPackedR32(dst), | 70 uint8_t blendR = blendChannel(SkGetPackedR32(src), srcA, SkGetPackedR32(dst), |
| 71 dstFactorA, scale); | 71 dstFactorA, scale); |
| 72 uint8_t blendG = blendChannel(SkGetPackedG32(src), srcA, SkGetPackedG32(dst), | 72 uint8_t blendG = blendChannel(SkGetPackedG32(src), srcA, SkGetPackedG32(dst), |
| 73 dstFactorA, scale); | 73 dstFactorA, scale); |
| 74 uint8_t blendB = blendChannel(SkGetPackedB32(src), srcA, SkGetPackedB32(dst), | 74 uint8_t blendB = blendChannel(SkGetPackedB32(src), srcA, SkGetPackedB32(dst), |
| 75 dstFactorA, scale); | 75 dstFactorA, scale); |
| 76 | 76 |
| 77 return SkPackARGB32NoCheck(blendA, blendR, blendG, blendB); | 77 return SkPackARGB32NoCheck(blendA, blendR, blendG, blendB); |
| 78 } | 78 } |
| 79 | 79 |
| 80 // Returns two point ranges (<left, width> pairs) at row 'canvasY', that belong to 'src' but not 'dst'. | 80 // Returns two point ranges (<left, width> pairs) at row |canvasY| which belong |
| 81 // A point range is empty if the corresponding width is 0. | 81 // to |src| but not |dst|. A range is empty if its width is 0. |
| 82 inline void findBlendRangeAtRow(const blink::IntRect& src, | 82 inline void findBlendRangeAtRow(const blink::IntRect& src, |
| 83 const blink::IntRect& dst, | 83 const blink::IntRect& dst, |
| 84 int canvasY, | 84 int canvasY, |
| 85 int& left1, | 85 int& left1, |
| 86 int& width1, | 86 int& width1, |
| 87 int& left2, | 87 int& left2, |
| 88 int& width2) { | 88 int& width2) { |
| 89 ASSERT_WITH_SECURITY_IMPLICATION(canvasY >= src.y() && canvasY < src.maxY()); | 89 ASSERT_WITH_SECURITY_IMPLICATION(canvasY >= src.y() && canvasY < src.maxY()); |
| 90 left1 = -1; | 90 left1 = -1; |
| 91 width1 = 0; | 91 width1 = 0; |
| (...skipping 200 matching lines...) |
| 292 ASSERT(!prevRect.contains(IntRect(IntPoint(), size()))); | 292 ASSERT(!prevRect.contains(IntRect(IntPoint(), size()))); |
| 293 buffer.zeroFillFrameRect(prevRect); | 293 buffer.zeroFillFrameRect(prevRect); |
| 294 } | 294 } |
| 295 | 295 |
| 296 m_frameBackgroundHasAlpha = | 296 m_frameBackgroundHasAlpha = |
| 297 prevBuffer.hasAlpha() || | 297 prevBuffer.hasAlpha() || |
| 298 (prevBuffer.getDisposalMethod() == ImageFrame::DisposeOverwriteBgcolor); | 298 (prevBuffer.getDisposalMethod() == ImageFrame::DisposeOverwriteBgcolor); |
| 299 } | 299 } |
| 300 | 300 |
| 301 buffer.setStatus(ImageFrame::FramePartial); | 301 buffer.setStatus(ImageFrame::FramePartial); |
| 302 // The buffer is transparent outside the decoded area while the image is loading. | 302 // The buffer is transparent outside the decoded area while the image is |
| 303 // The correct value of 'hasAlpha' for the frame will be set when it is fully decoded. | 303 // loading. The correct alpha value for the frame will be set when it is fully |
| 304 // decoded. |
| 304 buffer.setHasAlpha(true); | 305 buffer.setHasAlpha(true); |
| 305 return true; | 306 return true; |
| 306 } | 307 } |
| 307 | 308 |
| 308 size_t WEBPImageDecoder::clearCacheExceptFrame(size_t clearExceptFrame) { | 309 size_t WEBPImageDecoder::clearCacheExceptFrame(size_t clearExceptFrame) { |
| 309 // If |clearExceptFrame| has status FrameComplete, we preserve that frame. | 310 // If |clearExceptFrame| has status FrameComplete, we preserve that frame. |
| 310 // Otherwise, we preserve a previous frame with status FrameComplete whose data is required | 311 // Otherwise, we preserve the most recent previous frame with status |
| 311 // to decode |clearExceptFrame|, either in initFrameBuffer() or ApplyPostProcessing(). | 312 // FrameComplete whose data will be required to decode |clearExceptFrame|, |
| 312 // All other frames can be cleared. | 313 // either in initFrameBuffer() or ApplyPostProcessing(). All other frames can |
| 314 // be cleared. |
| 313 while ((clearExceptFrame < m_frameBufferCache.size()) && | 315 while ((clearExceptFrame < m_frameBufferCache.size()) && |
| 314 (m_frameBufferCache[clearExceptFrame].getStatus() != | 316 (m_frameBufferCache[clearExceptFrame].getStatus() != |
| 315 ImageFrame::FrameComplete)) | 317 ImageFrame::FrameComplete)) |
| 316 clearExceptFrame = | 318 clearExceptFrame = |
| 317 m_frameBufferCache[clearExceptFrame].requiredPreviousFrameIndex(); | 319 m_frameBufferCache[clearExceptFrame].requiredPreviousFrameIndex(); |
| 318 | 320 |
| 319 return ImageDecoder::clearCacheExceptFrame(clearExceptFrame); | 321 return ImageDecoder::clearCacheExceptFrame(clearExceptFrame); |
| 320 } | 322 } |
| 321 | 323 |
| 322 void WEBPImageDecoder::clearFrameBuffer(size_t frameIndex) { | 324 void WEBPImageDecoder::clearFrameBuffer(size_t frameIndex) { |
| 323 if (m_demux && m_demuxState >= WEBP_DEMUX_PARSED_HEADER && | 325 if (m_demux && m_demuxState >= WEBP_DEMUX_PARSED_HEADER && |
| 324 m_frameBufferCache[frameIndex].getStatus() == ImageFrame::FramePartial) { | 326 m_frameBufferCache[frameIndex].getStatus() == ImageFrame::FramePartial) { |
| 325 // Clear the decoder state so that this partial frame can be decoded again when requested. | 327 // Clear the decoder state so that this partial frame can be decoded again |
| 328 // when requested. |
| 326 clearDecoder(); | 329 clearDecoder(); |
| 327 } | 330 } |
| 328 ImageDecoder::clearFrameBuffer(frameIndex); | 331 ImageDecoder::clearFrameBuffer(frameIndex); |
| 329 } | 332 } |
| 330 | 333 |
| 331 void WEBPImageDecoder::readColorProfile() { | 334 void WEBPImageDecoder::readColorProfile() { |
| 332 WebPChunkIterator chunkIterator; | 335 WebPChunkIterator chunkIterator; |
| 333 if (!WebPDemuxGetChunk(m_demux, "ICCP", 1, &chunkIterator)) { | 336 if (!WebPDemuxGetChunk(m_demux, "ICCP", 1, &chunkIterator)) { |
| 334 WebPDemuxReleaseChunkIterator(&chunkIterator); | 337 WebPDemuxReleaseChunkIterator(&chunkIterator); |
| 335 return; | 338 return; |
| (...skipping 33 matching lines...) |
| 369 uint8_t* pixel = row; | 372 uint8_t* pixel = row; |
| 370 for (int x = 0; x < width; ++x, pixel += 4) { | 373 for (int x = 0; x < width; ++x, pixel += 4) { |
| 371 const int canvasX = left + x; | 374 const int canvasX = left + x; |
| 372 buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2], | 375 buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2], |
| 373 pixel[3]); | 376 pixel[3]); |
| 374 } | 377 } |
| 375 } | 378 } |
| 376 } | 379 } |
| 377 #endif // USE(QCMSLIB) | 380 #endif // USE(QCMSLIB) |
| 378 | 381 |
| 379 // During the decoding of current frame, we may have set some pixels to be transparent (i.e. alpha < 255). | 382 // During the decoding of the current frame, we may have set some pixels to be |
| 380 // However, the value of each of these pixels should have been determined by blending it against the value | 383 // transparent (i.e. alpha < 255). If the alpha blend source was |
| 381 // of that pixel in the previous frame if alpha blend source was 'BlendAtopPreviousFrame'. So, we correct these | 384 // 'BlendAtopPreviousFrame', the values of these pixels should be determined |
| 382 // pixels based on disposal method of the previous frame and the previous frame buffer. | 385 // by blending them against the pixels of the corresponding previous frame. |
| 383 // FIXME: This could be avoided if libwebp decoder had an API that used the previous required frame | 386 // Compute the correct opaque values now. |
| 384 // to do the alpha-blending by itself. | 387 // FIXME: This could be avoided if libwebp decoder had an API that used the |
| 388 // previous required frame to do the alpha-blending by itself. |
| 385 if ((m_formatFlags & ANIMATION_FLAG) && frameIndex && | 389 if ((m_formatFlags & ANIMATION_FLAG) && frameIndex && |
| 386 buffer.getAlphaBlendSource() == ImageFrame::BlendAtopPreviousFrame && | 390 buffer.getAlphaBlendSource() == ImageFrame::BlendAtopPreviousFrame && |
| 387 buffer.requiredPreviousFrameIndex() != kNotFound) { | 391 buffer.requiredPreviousFrameIndex() != kNotFound) { |
| 388 ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1]; | 392 ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1]; |
| 389 ASSERT(prevBuffer.getStatus() == ImageFrame::FrameComplete); | 393 ASSERT(prevBuffer.getStatus() == ImageFrame::FrameComplete); |
| 390 ImageFrame::DisposalMethod prevDisposalMethod = | 394 ImageFrame::DisposalMethod prevDisposalMethod = |
| 391 prevBuffer.getDisposalMethod(); | 395 prevBuffer.getDisposalMethod(); |
| 392 if (prevDisposalMethod == | 396 if (prevDisposalMethod == ImageFrame::DisposeKeep) { |
| 393 ImageFrame:: | 397 // Blend transparent pixels with pixels in previous canvas. |
| 394 DisposeKeep) { // Blend transparent pixels with pixels in previous canvas. | |
| 395 for (int y = m_decodedHeight; y < decodedHeight; ++y) { | 398 for (int y = m_decodedHeight; y < decodedHeight; ++y) { |
| 396 m_blendFunction(buffer, prevBuffer, top + y, left, width); | 399 m_blendFunction(buffer, prevBuffer, top + y, left, width); |
| 397 } | 400 } |
| 398 } else if (prevDisposalMethod == ImageFrame::DisposeOverwriteBgcolor) { | 401 } else if (prevDisposalMethod == ImageFrame::DisposeOverwriteBgcolor) { |
| 399 const IntRect& prevRect = prevBuffer.originalFrameRect(); | 402 const IntRect& prevRect = prevBuffer.originalFrameRect(); |
| 400 // We need to blend a transparent pixel with its value just after initFrame() call. That is: | 403 // We need to blend a transparent pixel with the starting value (from just |
| 401 // * Blend with fully transparent pixel if it belongs to prevRect <-- This is a no-op. | 404 // after the initFrame() call). If the pixel belongs to prevRect, the |
| 402 // * Blend with the pixel in the previous canvas otherwise <-- Needs alpha-blending. | 405 // starting value was fully transparent, so this is a no-op. Otherwise, we |
| 406 // need to blend against the pixel from the previous canvas. |
| 403 for (int y = m_decodedHeight; y < decodedHeight; ++y) { | 407 for (int y = m_decodedHeight; y < decodedHeight; ++y) { |
| 404 int canvasY = top + y; | 408 int canvasY = top + y; |
| 405 int left1, width1, left2, width2; | 409 int left1, width1, left2, width2; |
| 406 findBlendRangeAtRow(frameRect, prevRect, canvasY, left1, width1, left2, | 410 findBlendRangeAtRow(frameRect, prevRect, canvasY, left1, width1, left2, |
| 407 width2); | 411 width2); |
| 408 if (width1 > 0) | 412 if (width1 > 0) |
| 409 m_blendFunction(buffer, prevBuffer, canvasY, left1, width1); | 413 m_blendFunction(buffer, prevBuffer, canvasY, left1, width1); |
| 410 if (width2 > 0) | 414 if (width2 > 0) |
| 411 m_blendFunction(buffer, prevBuffer, canvasY, left2, width2); | 415 m_blendFunction(buffer, prevBuffer, canvasY, left2, width2); |
| 412 } | 416 } |
| (...skipping 88 matching lines...) |
| 501 | 505 |
| 502 ASSERT(m_frameBufferCache.size() > frameIndex); | 506 ASSERT(m_frameBufferCache.size() > frameIndex); |
| 503 ImageFrame& buffer = m_frameBufferCache[frameIndex]; | 507 ImageFrame& buffer = m_frameBufferCache[frameIndex]; |
| 504 ASSERT(buffer.getStatus() != ImageFrame::FrameComplete); | 508 ASSERT(buffer.getStatus() != ImageFrame::FrameComplete); |
| 505 | 509 |
| 506 if (buffer.getStatus() == ImageFrame::FrameEmpty) { | 510 if (buffer.getStatus() == ImageFrame::FrameEmpty) { |
| 507 if (!buffer.setSizeAndColorProfile(size().width(), size().height(), | 511 if (!buffer.setSizeAndColorProfile(size().width(), size().height(), |
| 508 colorProfile())) | 512 colorProfile())) |
| 509 return setFailed(); | 513 return setFailed(); |
| 510 buffer.setStatus(ImageFrame::FramePartial); | 514 buffer.setStatus(ImageFrame::FramePartial); |
| 511 // The buffer is transparent outside the decoded area while the image is loading. | 515 // The buffer is transparent outside the decoded area while the image is |
| 512 // The correct value of 'hasAlpha' for the frame will be set when it is fully decoded. | 516 // loading. The correct alpha value for the frame will be set when it is |
| 517 // fully decoded. |
| 513 buffer.setHasAlpha(true); | 518 buffer.setHasAlpha(true); |
| 514 buffer.setOriginalFrameRect(IntRect(IntPoint(), size())); | 519 buffer.setOriginalFrameRect(IntRect(IntPoint(), size())); |
| 515 } | 520 } |
| 516 | 521 |
| 517 const IntRect& frameRect = buffer.originalFrameRect(); | 522 const IntRect& frameRect = buffer.originalFrameRect(); |
| 518 if (!m_decoder) { | 523 if (!m_decoder) { |
| 519 WEBP_CSP_MODE mode = outputMode(m_formatFlags & ALPHA_FLAG); | 524 WEBP_CSP_MODE mode = outputMode(m_formatFlags & ALPHA_FLAG); |
| 520 if (!m_premultiplyAlpha) | 525 if (!m_premultiplyAlpha) |
| 521 mode = outputMode(false); | 526 mode = outputMode(false); |
| 522 #if USE(QCMSLIB) | 527 #if USE(QCMSLIB) |
| (...skipping 29 matching lines...) |
| 552 return false; | 557 return false; |
| 553 } | 558 } |
| 554 // FALLTHROUGH | 559 // FALLTHROUGH |
| 555 default: | 560 default: |
| 556 clear(); | 561 clear(); |
| 557 return setFailed(); | 562 return setFailed(); |
| 558 } | 563 } |
| 559 } | 564 } |
| 560 | 565 |
| 561 } // namespace blink | 566 } // namespace blink |
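For context on the blendChannel()/blend() calls visible around old line 70 (the helpers themselves fall in a skipped region of this diff): they composite a frame pixel atop the previous canvas with non-premultiplied source-over blending. Below is a minimal standalone sketch of that per-channel formula, assuming the standard source-over equation; the name blendChannelSketch and the per-channel division are illustrative only, and the real helper visibly takes a precomputed scale argument instead of dividing by the blended alpha for every channel.

```cpp
#include <cstdint>

// Sketch only, not the Blink implementation. All values are 8-bit,
// non-premultiplied. dstFactorA is the destination alpha already scaled by
// the source's remaining transparency: dstA * (255 - srcA) / 255.
// blendA is the composited alpha: srcA + dstFactorA.
inline uint8_t blendChannelSketch(uint8_t srcC, uint8_t srcA,
                                  uint8_t dstC, uint8_t dstFactorA,
                                  uint8_t blendA) {
  if (!blendA)
    return 0;  // Fully transparent result; the channel value does not matter.
  // Source-over: out = (srcC * srcA + dstC * dstA * (1 - srcA)) / outA.
  unsigned blended = srcC * srcA + dstC * dstFactorA;
  return static_cast<uint8_t>(blended / blendA);
}
```

As a sanity check, srcA = 255 gives dstFactorA = 0 and blendA = 255, so the source channel passes through unchanged, while srcA = 0 returns the destination channel, as expected for source-over compositing.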
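Similarly, findBlendRangeAtRow() (old line 82) is only described by its comment here; its body lies in the skipped region. As an illustration of the geometry that comment describes, and nothing more, here is a standalone sketch under assumed names (Span and findUncoveredSpans are not Blink identifiers): at a row inside |src|, the pixels not covered by |dst| form at most two horizontal spans, one to the left of |dst| and one to its right.

```cpp
#include <algorithm>

struct Span {
  int left;
  int width;  // A width of 0 marks an empty span.
};

// Sketch only. Intervals are half-open: src covers [srcX, srcMaxX) and dst
// covers [dstX, dstMaxX) on this row; rowIntersectsDst says whether the row
// lies inside dst's vertical extent at all.
inline void findUncoveredSpans(int srcX, int srcMaxX,
                               int dstX, int dstMaxX,
                               bool rowIntersectsDst,
                               Span& s1, Span& s2) {
  s1.left = s1.width = 0;
  s2.left = s2.width = 0;
  if (!rowIntersectsDst) {
    // dst does not touch this row, so the whole src interval is uncovered.
    s1.left = srcX;
    s1.width = srcMaxX - srcX;
    return;
  }
  // Part of src strictly to the left of dst.
  const int leftEnd = std::min(srcMaxX, dstX);
  if (leftEnd > srcX) {
    s1.left = srcX;
    s1.width = leftEnd - srcX;
  }
  // Part of src strictly to the right of dst.
  const int rightBegin = std::max(srcX, dstMaxX);
  if (srcMaxX > rightBegin) {
    s2.left = rightBegin;
    s2.width = srcMaxX - rightBegin;
  }
}
```

Feeding the frame rectangle and the previous frame's rectangle (plus whether canvasY falls inside the latter) into such a helper would yield the left1/width1 and left2/width2 values consumed by the blend loops in the DisposeOverwriteBgcolor branch of this diff.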