Chromium Code Reviews

| Index: Source/core/platform/image-decoders/webp/WEBPImageDecoder.cpp |
| diff --git a/Source/core/platform/image-decoders/webp/WEBPImageDecoder.cpp b/Source/core/platform/image-decoders/webp/WEBPImageDecoder.cpp |
| index 3c905a94441119725907fd792cde5bf8a778daad..1a7becbb646055025be2808eb5b137022e8c7513 100644 |
| --- a/Source/core/platform/image-decoders/webp/WEBPImageDecoder.cpp |
| +++ b/Source/core/platform/image-decoders/webp/WEBPImageDecoder.cpp |
| @@ -33,11 +33,21 @@ |
| #ifdef QCMS_WEBP_COLOR_CORRECTION |
| #include "qcms.h" |
| -#include "webp/demux.h" |
| +#endif |
| + |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| +#include "core/page/RuntimeEnabledFeatures.h" |
| +#include "webp/format_constants.h" |
| #else |
|
Noel Gordon
2013/04/29 18:41:57
41: Change it to #endif here.
Then you could you...
urvang (Google)
2013/04/30 13:14:15
Restructured as per your suggestion.
|
| #undef ICCP_FLAG |
| #define ICCP_FLAG 0 |
| -#endif |
| +#undef ALPHA_FLAG |
| +#if (WEBP_DECODER_ABI_VERSION >= 0x0163) // Alpha supported, but need to define flag. |
| +#define ALPHA_FLAG 0x00000010 |
| +#else // Versions earlier than 0.1.99 don't support alpha. |
| +#define ALPHA_FLAG 0 |
| +#endif // WEBP_DECODER_ABI_VERSION >= 0x0163 |
| +#endif // WEBP_ICC_ANIM_SUPPORT |
| // Backward emulation for earlier versions than 0.1.99. |
| #if (WEBP_DECODER_ABI_VERSION < 0x0163) |
| @@ -59,62 +69,262 @@ WEBPImageDecoder::WEBPImageDecoder(ImageSource::AlphaOption alphaOption, |
| ImageSource::GammaAndColorProfileOption gammaAndColorProfileOption) |
| : ImageDecoder(alphaOption, gammaAndColorProfileOption) |
| , m_decoder(0) |
| - , m_hasAlpha(false) |
| , m_formatFlags(0) |
| #ifdef QCMS_WEBP_COLOR_CORRECTION |
| , m_haveReadProfile(false) |
| , m_transform(0) |
| +#endif |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| , m_decodedHeight(0) |
| + , m_haveAlreadyParsedThisData(false) |
| + , m_demux(0) |
| + , m_demuxState(WEBP_DEMUX_PARSING_HEADER) |
| + , m_haveReadAnimParams(false) |
| + , m_repetitionCount(cAnimationLoopOnce) |
| #endif |
| { |
|
Noel Gordon
2013/04/29 18:41:57
One of our member variables (m_decoderBuffer) is n...
urvang (Google)
2013/04/30 13:14:15
I moved this initialization to 515, because decode...
|
| - WebPInitDecBuffer(&m_decoderBuffer); |
| } |
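For context on the exchange above: in this version of the patch, the decode buffer is no longer initialized in the constructor; it is set up lazily in decode(), right before the incremental decoder is created (see the hunk near the end of this file). A minimal sketch of that shape, based on the structure shown later in this patch:

    if (!m_decoder) {
        WebPInitDecBuffer(&m_decoderBuffer);  // initialization moved out of the constructor
        // ... colorspace, stride, and external-memory setup ...
        m_decoder = WebPINewDecoder(&m_decoderBuffer);
    }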
| WEBPImageDecoder::~WEBPImageDecoder() |
| { |
| - clear(); |
| + clearAll(); |
|
Noel Gordon
2013/04/29 18:41:57
clear() here and elsewhere.
urvang (Google)
2013/04/30 13:14:15
Done.
|
| } |
| -void WEBPImageDecoder::clear() |
| +void WEBPImageDecoder::clearAll() |
| { |
| #ifdef QCMS_WEBP_COLOR_CORRECTION |
| if (m_transform) |
| qcms_transform_release(m_transform); |
| m_transform = 0; |
| #endif |
| - WebPFreeDecBuffer(&m_decoderBuffer); |
| - if (m_decoder) |
| - WebPIDelete(m_decoder); |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| + WebPDemuxDelete(m_demux); |
| + m_demux = 0; |
| +#endif |
| + clearDecoder(); |
| +} |
| + |
| +void WEBPImageDecoder::clearDecoder() |
| +{ |
| + WebPIDelete(m_decoder); |
| m_decoder = 0; |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| + m_decodedHeight = 0; |
| +#endif |
| } |
| bool WEBPImageDecoder::isSizeAvailable() |
| { |
| - if (!ImageDecoder::isSizeAvailable()) |
| - decode(true); |
| - |
| + if (!ImageDecoder::isSizeAvailable()) { |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| + if (!updateDemuxer()) |
| + return 0; |
| +#else |
| + decode(reinterpret_cast<const uint8_t*>(m_data->data()), m_data->size(), true, 0); |
| +#endif |
| + } |
| return ImageDecoder::isSizeAvailable(); |
| } |
| -ImageFrame* WEBPImageDecoder::frameBufferAtIndex(size_t index) |
| +size_t WEBPImageDecoder::frameCount() |
| { |
| - if (index) |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| + if (!updateDemuxer()) |
| return 0; |
| - |
| +#else |
| if (m_frameBufferCache.isEmpty()) { |
| m_frameBufferCache.resize(1); |
| m_frameBufferCache[0].setPremultiplyAlpha(m_premultiplyAlpha); |
| } |
| +#endif |
| + return m_frameBufferCache.size(); |
| +} |
| - ImageFrame& frame = m_frameBufferCache[0]; |
| +ImageFrame* WEBPImageDecoder::frameBufferAtIndex(size_t index) |
| +{ |
| + if (index >= frameCount()) |
| + return 0; |
| + |
| + ImageFrame& frame = m_frameBufferCache[index]; |
| if (frame.status() != ImageFrame::FrameComplete) { |
|
Noel Gordon
2013/04/29 18:41:57
Prefer the early return in Blink. Write this as...
urvang (Google)
2013/04/30 13:14:15
Done.
|
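For reference, the early-return shape being requested above would look roughly like this (a sketch only, not necessarily the code that eventually landed):

    ImageFrame* WEBPImageDecoder::frameBufferAtIndex(size_t index)
    {
        if (index >= frameCount())
            return 0;

        ImageFrame& frame = m_frameBufferCache[index];
        if (frame.status() == ImageFrame::FrameComplete)
            return &frame;

        // ... decode the requested frame, as in the patch below ...
        return &frame;
    }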
| - PlatformInstrumentation::willDecodeImage("WEBP"); |
| - decode(false); |
| - PlatformInstrumentation::didDecodeImage(); |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| + if (RuntimeEnabledFeatures::animatedWebPEnabled()) { |
| + if (index && (m_frameBufferCache[index - 1].status() != ImageFrame::FrameComplete)) |
| + return 0; // We haven't fully decoded the previous frame yet. |
| + ASSERT(m_demux); |
| + WebPIterator fIter; |
| + if (!WebPDemuxGetFrame(m_demux, index + 1, &fIter)) |
| + return 0; |
| + if (m_formatFlags & ANIMATION_FLAG) { |
| + if (!initFrameBuffer(fIter, index)) |
|
Noel Gordon
2013/04/29 18:41:57
Combine this "if" with the one in the prior line?
urvang (Google)
2013/04/30 13:14:15
Done.
|
| + return 0; |
| + } |
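The combined condition suggested a few lines up would read, as a sketch:

    if ((m_formatFlags & ANIMATION_FLAG) && !initFrameBuffer(fIter, index))
        return 0;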
| + PlatformInstrumentation::willDecodeImage("WEBP"); |
| + decode(fIter.fragment.bytes, fIter.fragment.size, false, index); |
| + PlatformInstrumentation::didDecodeImage(); |
| + WebPDemuxReleaseIterator(&fIter); |
| + } else { |
|
Noel Gordon
2013/04/29 18:41:57
WebPDemuxReleaseIterator(&fIter);
return &frame...
urvang (Google)
2013/04/30 13:14:15
Done.
|
| +#endif |
| + ASSERT(!index); |
|
Noel Gordon
2013/04/29 18:41:57
This ASSERT is correct in that we should only deco...
urvang (Google)
2013/04/30 13:14:15
Good catch! Done.
|
| + PlatformInstrumentation::willDecodeImage("WEBP"); |
| + decode(reinterpret_cast<const uint8_t*>(m_data->data()), m_data->size(), false, index); |
| + PlatformInstrumentation::didDecodeImage(); |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| + } |
| +#endif |
| } |
| return &frame; |
| } |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| + |
| +void WEBPImageDecoder::setData(SharedBuffer* data, bool allDataReceived) |
| +{ |
| + if (failed()) |
| + return; |
| + |
| + ImageDecoder::setData(data, allDataReceived); |
| + |
| + // Mark that we have new data. |
| + if (m_demuxState != WEBP_DEMUX_DONE) |
| + m_haveAlreadyParsedThisData = false; |
| +} |
| + |
| +bool WEBPImageDecoder::updateDemuxer() |
| +{ |
| + if (!m_haveAlreadyParsedThisData) { |
|
Noel Gordon
2013/04/29 18:41:57
Prefer the early return, please.
urvang (Google)
2013/04/30 13:14:15
Done.
|
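The early-return form being asked for here would look roughly like this (sketch; the parsing body below then loses one level of nesting):

    bool WEBPImageDecoder::updateDemuxer()
    {
        if (m_haveAlreadyParsedThisData)
            return true;
        // ... demuxer parsing as below, setting m_haveAlreadyParsedThisData = true on success ...
    }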
| + WebPDemuxDelete(m_demux); |
|
Noel Gordon
2013/04/29 18:41:57
Could you move these two lines (196-197) down to w...
urvang (Google)
2013/04/30 13:14:15
Done.
|
| + const uint8_t* dataBytes = reinterpret_cast<const uint8_t*>(m_data->data()); |
| + const size_t dataSize = m_data->size(); |
| + |
| + static const size_t minSizeForDemux = RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE; |
| + if (dataSize < minSizeForDemux) |
| + return 0; // Wait for headers so that WebPDemuxPartial doesn't return null. |
|
Noel Gordon
2013/04/29 18:41:57
return 0? return false; I think.
urvang (Google)
2013/04/30 13:14:15
Done.
|
| + |
| + WebPData inputData = { dataBytes, dataSize }; |
| + m_demux = WebPDemuxPartial(&inputData, &m_demuxState); |
|
Noel Gordon
2013/04/29 18:41:57
I assume WebPDemuxPartial() will correctly reset m...
urvang (Google)
2013/04/30 13:14:15
Yes, it will.
|
| + if (!m_demux) |
| + return setFailed(); // Must be a failure as we have at least 'minSizeForDemux' bytes. |
| + if (m_demuxState >= WEBP_DEMUX_PARSED_HEADER) { |
|
Noel Gordon
2013/04/29 18:41:57
Prefer the early return, but the current statement...
urvang (Google)
2013/04/30 13:14:15
Done.
but the current statement says greater than...
Noel Gordon
2013/05/01 17:55:28
Hard to distinguish WEBP_DEMUX_PARSING_HEADER and...
|
| + if (!ImageDecoder::isSizeAvailable()) { |
| + if (!setSize(WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_WIDTH), WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_HEIGHT))) |
| + setFailed(); |
|
Noel Gordon
2013/04/29 18:41:57
return setFailed();
urvang (Google)
2013/04/30 13:14:15
Done.
|
| + m_formatFlags = WebPDemuxGetI(m_demux, WEBP_FF_FORMAT_FLAGS); |
| + } |
| + ASSERT(ImageDecoder::isSizeAvailable()); |
| + const bool hasAnimation = (m_formatFlags & ANIMATION_FLAG); |
| + const size_t newFrameCount = WebPDemuxGetI(m_demux, WEBP_FF_FRAME_COUNT); |
| + if (RuntimeEnabledFeatures::animatedWebPEnabled() && hasAnimation && !m_haveReadAnimParams && (newFrameCount >= 1)) { |
| + // As we have parsed at least one frame (even if partially), |
| + // we must already have parsed the animation properties. |
| + // This is because ANIM chunk always precedes ANMF chunks. |
| + const uint32_t loopCount = WebPDemuxGetI(m_demux, WEBP_FF_LOOP_COUNT); |
| + // Note: The following casts an 'unsigned int' to 'int'. But that is fine, because loop count is always <= 16 bits. |
| + m_repetitionCount = (!loopCount) ? cAnimationLoopInfinite : loopCount; |
| + m_haveReadAnimParams = true; |
| + } |
| + if (newFrameCount > m_frameBufferCache.size()) { |
|
Noel Gordon
2013/04/29 18:41:57
See the comment for line 168.
urvang (Google)
2013/04/30 13:14:15
Done.
|
| + m_frameBufferCache.resize(newFrameCount); |
| + for (size_t i = 0; i < newFrameCount; ++i) |
| + m_frameBufferCache[i].setPremultiplyAlpha(m_premultiplyAlpha); |
| + } |
| + } |
| + m_haveAlreadyParsedThisData = true; |
| + } |
| + return true; |
| +} |
| + |
| +// FIXME: This method is very similar to the one in GIFImageDecoder.cpp and should be refactored. |
| +bool WEBPImageDecoder::initFrameBuffer(const WebPIterator& fIter, size_t frameIndex) |
| +{ |
| + ImageFrame& buffer = m_frameBufferCache[frameIndex]; |
| + if (buffer.status() != ImageFrame::FrameEmpty) // Already initialized. |
| + return true; |
| + |
| + // Initialize the frame rect in our buffer. |
| + IntRect frameRect(fIter.x_offset, fIter.y_offset, fIter.width, fIter.height); |
| + |
| + // Make sure the frameRect doesn't extend outside the buffer. |
| + if (frameRect.maxX() > size().width()) |
| + frameRect.setWidth(size().width() - fIter.x_offset); |
| + if (frameRect.maxY() > size().height()) |
| + frameRect.setHeight(size().height() - fIter.y_offset); |
| + |
| + const int left = upperBoundScaledX(frameRect.x()); |
| + const int right = lowerBoundScaledX(frameRect.maxX(), left); |
| + const int top = upperBoundScaledY(frameRect.y()); |
| + const int bottom = lowerBoundScaledY(frameRect.maxY(), top); |
| + buffer.setOriginalFrameRect(IntRect(left, top, right - left, bottom - top)); |
| + |
| + buffer.setDisposalMethod(fIter.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND ? ImageFrame::DisposeOverwriteBgcolor : ImageFrame::DisposeKeep); |
| + buffer.setDuration(fIter.duration); |
| + buffer.setHasAlpha(m_formatFlags & ALPHA_FLAG); |
| + |
| + if (!frameIndex) { |
| + // This is the first frame, so we're not relying on any previous data. |
| + if (!buffer.setSize(scaledSize().width(), scaledSize().height())) |
| + return setFailed(); |
| + } else { |
| + // The starting state for this frame depends on the previous frame's |
| + // disposal method. |
| + const ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1]; |
| + ASSERT(prevBuffer.status() == ImageFrame::FrameComplete); |
| + const IntRect& prevRect = prevBuffer.originalFrameRect(); |
| + const ImageFrame::FrameDisposalMethod prevMethod = prevBuffer.disposalMethod(); |
| + if ((prevMethod == ImageFrame::DisposeKeep) || (prevMethod == ImageFrame::DisposeNotSpecified)) { |
| + // Preserve the last frame as the starting state for this frame. |
| + if (!buffer.copyBitmapData(prevBuffer)) |
| + return setFailed(); |
| + } else { // prevMethod == ImageFrame::DisposeOverwriteBgcolor |
| + // We want to clear the previous frame to transparent, without |
| + // affecting pixels in the image outside of the frame. |
| + // So, we copy the whole previous buffer, then clear just its frame. |
| + if (!frameIndex || prevRect.contains(IntRect(IntPoint(), scaledSize()))) { |
| + // Clearing the first frame, or a frame the size of the whole |
| + // image, results in a completely empty image. |
| + if (!buffer.setSize(scaledSize().width(), scaledSize().height())) |
| + return setFailed(); |
| + } else { |
| + // Copy the whole previous buffer, then clear just its frame. |
| + if (!buffer.copyBitmapData(prevBuffer)) |
| + return setFailed(); |
| + for (int y = prevRect.y(); y < prevRect.maxY(); ++y) { |
| + for (int x = prevRect.x(); x < prevRect.maxX(); ++x) |
| + buffer.setRGBA(x, y, 0, 0, 0, 0); |
| + } |
| + } |
| + } |
| + } |
| + // Update frame status to be partially complete. |
|
Noel Gordon
2013/04/29 18:41:57
Space before this line.
urvang (Google)
2013/04/30 13:14:15
Done.
|
| + buffer.setStatus(ImageFrame::FramePartial); |
| + return true; |
| +} |
| +void WEBPImageDecoder::clearFrameBufferCache(size_t clearBeforeFrame) |
|
Noel Gordon
2013/04/29 18:41:57
Space before this line.
urvang (Google)
2013/04/30 13:14:15
Done.
|
| +{ |
| + // We always preserve at least one frame. |
| + if (m_frameBufferCache.size() <= 1) |
| + return; |
| + |
| + // Find the last frame we need to preserve in the cache to facilitate |
| + // the construction of next frames (needed by initFrame() and |
| + // applyPostProcessing()) . This frame is either: |
| + // * The last decoded frame in cache, OR |
| + // * The first frame (if cache doesn't contain any decoded frames). |
| + const int lastFrame = std::min(clearBeforeFrame, m_frameBufferCache.size() - 1); |
| + Vector<ImageFrame>::iterator i(m_frameBufferCache.begin() + lastFrame); |
| + while ((i != m_frameBufferCache.begin()) && (i->status() != ImageFrame::FrameComplete)) |
| + --i; |
| + |
| + // Now |i| holds the last frame we need to preserve; clear prior frames. |
| + for (Vector<ImageFrame>::iterator j(m_frameBufferCache.begin()); j != i; ++j) { |
| + ASSERT(j->status() != ImageFrame::FramePartial); |
| + if (j->status() != ImageFrame::FrameEmpty) |
| + j->clearPixelData(); |
| + } |
| +} |
| + |
| +#endif // WEBP_ICC_ANIM_SUPPORT |
| + |
| #ifdef QCMS_WEBP_COLOR_CORRECTION |
| void WEBPImageDecoder::createColorTransform(const char* data, size_t size) |
| @@ -140,16 +350,11 @@ void WEBPImageDecoder::createColorTransform(const char* data, size_t size) |
| qcms_profile_release(inputProfile); |
| } |
| -void WEBPImageDecoder::readColorProfile(const uint8_t* data, size_t size) |
| +void WEBPImageDecoder::readColorProfile() |
| { |
| WebPChunkIterator chunkIterator; |
| - WebPData inputData = { data, size }; |
| - WebPDemuxState state; |
| - |
| - WebPDemuxer* demuxer = WebPDemuxPartial(&inputData, &state); |
| - if (!WebPDemuxGetChunk(demuxer, "ICCP", 1, &chunkIterator)) { |
| + if (!WebPDemuxGetChunk(m_demux, "ICCP", 1, &chunkIterator)) { |
| WebPDemuxReleaseChunkIterator(&chunkIterator); |
| - WebPDemuxDelete(demuxer); |
| return; |
| } |
| @@ -169,80 +374,116 @@ void WEBPImageDecoder::readColorProfile(const uint8_t* data, size_t size) |
| createColorTransform(profileData, profileSize); |
| WebPDemuxReleaseChunkIterator(&chunkIterator); |
| - WebPDemuxDelete(demuxer); |
| } |
| -void WEBPImageDecoder::applyColorProfile(const uint8_t* data, size_t size, ImageFrame& buffer) |
| +#endif // QCMS_WEBP_COLOR_CORRECTION |
| + |
| +#ifdef WEBP_ICC_ANIM_SUPPORT |
| +void WEBPImageDecoder::applyPostProcessing(size_t frameIndex) |
| { |
| + ImageFrame& buffer = m_frameBufferCache[frameIndex]; |
| int width; |
| + int stride; |
|
Noel Gordon
2013/04/29 18:41:57
stride is not used, what's it for?
urvang (Google)
2013/04/30 13:14:15
Good catch! Thanks.
|
| int decodedHeight; |
| - if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0)) |
| + if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, &stride)) |
| return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062 |
| if (decodedHeight <= 0) |
| return; |
| - |
| - if (!m_haveReadProfile) { |
| - readColorProfile(data, size); |
| - m_haveReadProfile = true; |
| - } |
| - |
| ASSERT(width == scaledSize().width()); |
| ASSERT(decodedHeight <= scaledSize().height()); |
| + const int left = buffer.originalFrameRect().x(); |
| + const int top = buffer.originalFrameRect().y(); |
| + |
| + // Color Profile. |
| + if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile()) { |
|
Noel Gordon
2013/04/29 18:41:57
Why all the #ifdef QCMS_WEBP_COLOR_CORRECTION's in...
urvang (Google)
2013/04/30 13:14:15
Cleaned up this code to have the whole 'if' surrou...
|
| +#ifdef QCMS_WEBP_COLOR_CORRECTION |
| + if (!m_haveReadProfile) { |
| + readColorProfile(); |
| + m_haveReadProfile = true; |
| + } |
| +#endif // QCMS_WEBP_COLOR_CORRECTION |
| + for (int y = m_decodedHeight; y < decodedHeight; ++y) { |
| + const int canvasY = top + y; |
| + uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(left, canvasY)); |
| +#ifdef QCMS_WEBP_COLOR_CORRECTION |
| + if (qcms_transform* transform = colorTransform()) |
| + qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX); |
| +#endif // QCMS_WEBP_COLOR_CORRECTION |
| + uint8_t* pixel = row; |
| + for (int x = 0; x < width; ++x, pixel += 4) { |
| + const int canvasX = left + x; |
| + buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2], pixel[3]); |
| + } |
| + } |
| + } |
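The consolidation urvang describes above, wrapping the whole color-correction branch in a single #ifdef instead of several, would look roughly like this sketch (the exact landed structure may differ):

    #ifdef QCMS_WEBP_COLOR_CORRECTION
        if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile()) {
            if (!m_haveReadProfile) {
                readColorProfile();
                m_haveReadProfile = true;
            }
            // ... per-row qcms_transform_data_type() and setRGBA() loop ...
        }
    #endif // QCMS_WEBP_COLOR_CORRECTION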
| - for (int y = m_decodedHeight; y < decodedHeight; ++y) { |
| - uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(0, y)); |
| - if (qcms_transform* transform = colorTransform()) |
| - qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX); |
| - uint8_t* pixel = row; |
| - for (int x = 0; x < width; ++x, pixel += 4) |
| - buffer.setRGBA(x, y, pixel[0], pixel[1], pixel[2], pixel[3]); |
| + // Frame disposal: |
| + // During the decoding of current frame, we may have set some pixels to be transparent (i.e. alpha < 255). |
| + // However, the value of each of these pixels should have been determined by blending it against the value |
| + // of that pixel in the previous frame. So, we correct these pixels based on disposal method of the previous |
| + // frame and the previous frame buffer. |
| + if ((m_formatFlags & ANIMATION_FLAG) && frameIndex) { |
| + ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1]; |
| + ImageFrame::FrameDisposalMethod prevMethod = prevBuffer.disposalMethod(); |
| + ASSERT(prevBuffer.status() == ImageFrame::FrameComplete); |
| + if (prevMethod == ImageFrame::DisposeKeep) { // Restore transparent pixels to pixels in previous canvas. |
| + for (int y = m_decodedHeight; y < decodedHeight; ++y) { |
| + const int canvasY = top + y; |
| + for (int x = 0; x < width; ++x) { |
| + const int canvasX = left + x; |
| + ImageFrame::PixelData& pixel = *buffer.getAddr(canvasX, canvasY); |
| + // FIXME: Use alpha-blending when alpha is between 0 and 255. |
| + // Alpha-blending is being implemented in: https://bugs.webkit.org/show_bug.cgi?id=17022 |
| + if (!((pixel >> 24) & 0xff)) { // Need to restore. |
| + const ImageFrame::PixelData prevPixel = *prevBuffer.getAddr(canvasX, canvasY); |
| + pixel = prevPixel; |
| + } |
| + } |
| + } |
| + } else if (prevMethod == ImageFrame::DisposeOverwriteBgcolor) { |
| + const IntRect& prevRect = prevBuffer.originalFrameRect(); |
| + // We need to restore transparent pixels to as they were just after initFrame() call. That is: |
| + // * Transparent if it belongs to prevRect <-- This is a no-op. |
| + // * Pixel in the previous canvas otherwise <-- Need to restore. |
| + for (int y = m_decodedHeight; y < decodedHeight; ++y) { |
| + const int canvasY = top + y; |
| + for (int x = 0; x < width; ++x) { |
| + const int canvasX = left + x; |
| + ImageFrame::PixelData& pixel = *buffer.getAddr(canvasX, canvasY); |
| + const ImageFrame::PixelData prevPixel = *prevBuffer.getAddr(canvasX, canvasY); |
| + // FIXME: Use alpha-blending when alpha is between 0 and 255. |
| + if (!((pixel >> 24) & 0xff) && !prevRect.contains(IntPoint(canvasX, canvasY))) // Need to restore. |
| + pixel = prevPixel; |
| + } |
| + } |
| + } |
| } |
| m_decodedHeight = decodedHeight; |
| } |
| +#endif // WEBP_ICC_ANIM_SUPPORT |
| -#endif // QCMS_WEBP_COLOR_CORRECTION |
| - |
| -bool WEBPImageDecoder::decode(bool onlySize) |
| +bool WEBPImageDecoder::decode(const uint8_t* dataBytes, size_t dataSize, bool onlySize, size_t frameIndex) |
| { |
| if (failed()) |
| return false; |
| - const uint8_t* dataBytes = reinterpret_cast<const uint8_t*>(m_data->data()); |
| - const size_t dataSize = m_data->size(); |
| - |
| if (!ImageDecoder::isSizeAvailable()) { |
| static const size_t imageHeaderSize = 30; |
| if (dataSize < imageHeaderSize) |
| return false; |
| int width, height; |
| -#ifdef QCMS_WEBP_COLOR_CORRECTION |
| - WebPData inputData = { dataBytes, dataSize }; |
| - WebPDemuxState state; |
| - WebPDemuxer* demuxer = WebPDemuxPartial(&inputData, &state); |
| - if (!demuxer) |
| - return setFailed(); |
| - |
| - width = WebPDemuxGetI(demuxer, WEBP_FF_CANVAS_WIDTH); |
| - height = WebPDemuxGetI(demuxer, WEBP_FF_CANVAS_HEIGHT); |
| - m_formatFlags = WebPDemuxGetI(demuxer, WEBP_FF_FORMAT_FLAGS); |
| - m_hasAlpha = !!(m_formatFlags & ALPHA_FLAG); |
| - |
| - WebPDemuxDelete(demuxer); |
| - if (state <= WEBP_DEMUX_PARSING_HEADER) |
| - return false; |
| -#elif (WEBP_DECODER_ABI_VERSION >= 0x0163) |
| +#if (WEBP_DECODER_ABI_VERSION >= 0x0163) |
| WebPBitstreamFeatures features; |
| if (WebPGetFeatures(dataBytes, dataSize, &features) != VP8_STATUS_OK) |
| return setFailed(); |
| width = features.width; |
| height = features.height; |
| - m_hasAlpha = features.has_alpha; |
| + m_formatFlags = features.has_alpha ? ALPHA_FLAG : 0; |
| #else |
| // Earlier version won't be able to display WebP files with alpha. |
| if (!WebPGetInfo(dataBytes, dataSize, &width, &height)) |
| return setFailed(); |
| - m_hasAlpha = false; |
| #endif |
| if (!setSize(width, height)) |
| return setFailed(); |
| @@ -252,48 +493,48 @@ bool WEBPImageDecoder::decode(bool onlySize) |
| if (onlySize) |
| return true; |
| - ASSERT(!m_frameBufferCache.isEmpty()); |
| - ImageFrame& buffer = m_frameBufferCache[0]; |
| + ASSERT(m_frameBufferCache.size() > frameIndex); |
| + ImageFrame& buffer = m_frameBufferCache[frameIndex]; |
| ASSERT(buffer.status() != ImageFrame::FrameComplete); |
| if (buffer.status() == ImageFrame::FrameEmpty) { |
| - if (!buffer.setSize(size().width(), size().height())) |
| + if (!buffer.setSize(scaledSize().width(), scaledSize().height())) |
| return setFailed(); |
| buffer.setStatus(ImageFrame::FramePartial); |
| - buffer.setHasAlpha(m_hasAlpha); |
| + buffer.setHasAlpha(m_formatFlags & ALPHA_FLAG); |
| buffer.setOriginalFrameRect(IntRect(IntPoint(), size())); |
| } |
| + const IntRect& frameRect = buffer.originalFrameRect(); |
| if (!m_decoder) { |
| - WEBP_CSP_MODE mode = outputMode(m_hasAlpha); |
| + WEBP_CSP_MODE mode = outputMode(m_formatFlags & ALPHA_FLAG); |
| if (!m_premultiplyAlpha) |
| mode = outputMode(false); |
| if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile()) |
| mode = MODE_RGBA; // Decode to RGBA for input to libqcms. |
| + WebPInitDecBuffer(&m_decoderBuffer); |
|
Noel Gordon
2013/04/29 18:41:57
Where is m_decoderBuffer released? Seems m_decode...
urvang (Google)
2013/04/30 13:14:15
WebPIDelete() calls WebPFreeDecBuffer() internally...
|
| m_decoderBuffer.colorspace = mode; |
| m_decoderBuffer.u.RGBA.stride = size().width() * sizeof(ImageFrame::PixelData); |
| - m_decoderBuffer.u.RGBA.size = m_decoderBuffer.u.RGBA.stride * size().height(); |
| + m_decoderBuffer.u.RGBA.size = m_decoderBuffer.u.RGBA.stride * frameRect.height(); |
| m_decoderBuffer.is_external_memory = 1; |
| m_decoder = WebPINewDecoder(&m_decoderBuffer); |
| if (!m_decoder) |
| return setFailed(); |
| } |
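On the release question above: because is_external_memory is set on the decode buffer, the RGBA rows belong to the ImageFrame rather than to libwebp, and (per the reply) WebPIDelete() already calls WebPFreeDecBuffer() for any decoder-owned state. A sketch of the ownership, under that assumption:

    m_decoderBuffer.is_external_memory = 1;  // libwebp will not allocate or free the pixel rows
    m_decoderBuffer.u.RGBA.rgba = reinterpret_cast<uint8_t*>(buffer.getAddr(frameRect.x(), frameRect.y()));  // ImageFrame storage
    // ... incremental decode ...
    WebPIDelete(m_decoder);  // frees decoder-internal state only; the ImageFrame keeps its pixels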
| - m_decoderBuffer.u.RGBA.rgba = reinterpret_cast<uint8_t*>(buffer.getAddr(0, 0)); |
| + m_decoderBuffer.u.RGBA.rgba = reinterpret_cast<uint8_t*>(buffer.getAddr(frameRect.x(), frameRect.y())); |
| switch (WebPIUpdate(m_decoder, dataBytes, dataSize)) { |
| case VP8_STATUS_OK: |
| - if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile()) |
| - applyColorProfile(dataBytes, dataSize, buffer); |
| + applyPostProcessing(frameIndex); |
| buffer.setStatus(ImageFrame::FrameComplete); |
| - clear(); |
| + clearDecoder(); |
| return true; |
| case VP8_STATUS_SUSPENDED: |
| - if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile()) |
| - applyColorProfile(dataBytes, dataSize, buffer); |
| + applyPostProcessing(frameIndex); |
| return false; |
| default: |
| - clear(); |
| + clearAll(); |
| return setFailed(); |
| } |
| } |