Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/image-decoders/webp/WEBPImageDecoder.cpp

Issue 2775063002: Move WEBPImageDecoder to SkCodec
Patch Set: Pull in changes from the gif decoder (created 3 years, 4 months ago)
Index: third_party/WebKit/Source/platform/image-decoders/webp/WEBPImageDecoder.cpp
diff --git a/third_party/WebKit/Source/platform/image-decoders/webp/WEBPImageDecoder.cpp b/third_party/WebKit/Source/platform/image-decoders/webp/WEBPImageDecoder.cpp
index 1d4fad0d7b04f22945c55797e81fc03dff5a05e2..5902e38b47db0fa882ea0f317fcaafa7dbb66e74 100644
--- a/third_party/WebKit/Source/platform/image-decoders/webp/WEBPImageDecoder.cpp
+++ b/third_party/WebKit/Source/platform/image-decoders/webp/WEBPImageDecoder.cpp
@@ -28,94 +28,11 @@
#include "platform/image-decoders/webp/WEBPImageDecoder.h"
-#include "build/build_config.h"
-#include "third_party/skia/include/core/SkData.h"
-
-#if defined(ARCH_CPU_BIG_ENDIAN)
-#error Blink assumes a little-endian target.
-#endif
-
-#if SK_B32_SHIFT // Output little-endian RGBA pixels (Android).
-inline WEBP_CSP_MODE outputMode(bool hasAlpha) {
- return hasAlpha ? MODE_rgbA : MODE_RGBA;
-}
-#else // Output little-endian BGRA pixels.
-inline WEBP_CSP_MODE outputMode(bool hasAlpha) {
- return hasAlpha ? MODE_bgrA : MODE_BGRA;
-}
-#endif
-
-namespace {
-
-// Returns two point ranges (<left, width> pairs) at row |canvasY| which belong
-// to |src| but not |dst|. A range is empty if its width is 0.
-inline void findBlendRangeAtRow(const blink::IntRect& src,
- const blink::IntRect& dst,
- int canvasY,
- int& left1,
- int& width1,
- int& left2,
- int& width2) {
- SECURITY_DCHECK(canvasY >= src.Y() && canvasY < src.MaxY());
- left1 = -1;
- width1 = 0;
- left2 = -1;
- width2 = 0;
-
- if (canvasY < dst.Y() || canvasY >= dst.MaxY() || src.X() >= dst.MaxX() ||
- src.MaxX() <= dst.X()) {
- left1 = src.X();
- width1 = src.Width();
- return;
- }
-
- if (src.X() < dst.X()) {
- left1 = src.X();
- width1 = dst.X() - src.X();
- }
-
- if (src.MaxX() > dst.MaxX()) {
- left2 = dst.MaxX();
- width2 = src.MaxX() - dst.MaxX();
- }
-}
-
-// alphaBlendPremultiplied and alphaBlendNonPremultiplied are separate methods,
-// even though they only differ by one line. This is done so that the compiler
-// can inline BlendSrcOverDstPremultiplied() and BlendSrcOverDstRaw() calls.
-// For GIF images, this optimization reduces decoding time by 15% for 3MB
-// images.
-void alphaBlendPremultiplied(blink::ImageFrame& src,
- blink::ImageFrame& dst,
- int canvasY,
- int left,
- int width) {
- for (int x = 0; x < width; ++x) {
- int canvasX = left + x;
- blink::ImageFrame::PixelData* pixel = src.GetAddr(canvasX, canvasY);
- if (SkGetPackedA32(*pixel) != 0xff) {
- blink::ImageFrame::PixelData prevPixel = *dst.GetAddr(canvasX, canvasY);
- blink::ImageFrame::BlendSrcOverDstPremultiplied(pixel, prevPixel);
- }
- }
-}
-
-void alphaBlendNonPremultiplied(blink::ImageFrame& src,
- blink::ImageFrame& dst,
- int canvasY,
- int left,
- int width) {
- for (int x = 0; x < width; ++x) {
- int canvasX = left + x;
- blink::ImageFrame::PixelData* pixel = src.GetAddr(canvasX, canvasY);
- if (SkGetPackedA32(*pixel) != 0xff) {
- blink::ImageFrame::PixelData prevPixel = *dst.GetAddr(canvasX, canvasY);
- blink::ImageFrame::BlendSrcOverDstRaw(pixel, prevPixel);
- }
- }
-}
-
-} // namespace
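
For reference, the findBlendRangeAtRow() helper deleted above computes, for a single canvas row, the horizontal span(s) of |src| that are not covered by |dst|. A minimal standalone sketch of the same interval arithmetic, using plain structs instead of blink::IntRect (illustration only, not part of this patch):

#include <cassert>

struct Rect { int x, y, width, height; };  // hypothetical stand-in for IntRect

// Same idea as findBlendRangeAtRow: at row |y|, return up to two <left, width>
// ranges covered by |src| but not by |dst|. A range is empty if its width is 0.
static void BlendRangesAtRow(const Rect& src, const Rect& dst, int y,
                             int* left1, int* width1, int* left2, int* width2) {
  *left1 = *left2 = -1;
  *width1 = *width2 = 0;
  const int src_max_x = src.x + src.width;
  const int dst_max_x = dst.x + dst.width;
  // Row not covered by |dst| at all, or no horizontal overlap: one full range.
  if (y < dst.y || y >= dst.y + dst.height || src.x >= dst_max_x ||
      src_max_x <= dst.x) {
    *left1 = src.x;
    *width1 = src.width;
    return;
  }
  if (src.x < dst.x) {          // part of |src| to the left of |dst|
    *left1 = src.x;
    *width1 = dst.x - src.x;
  }
  if (src_max_x > dst_max_x) {  // part of |src| to the right of |dst|
    *left2 = dst_max_x;
    *width2 = src_max_x - dst_max_x;
  }
}

int main() {
  // src spans x=[0,10), dst spans x=[3,7): expect ranges <0,3> and <7,3>.
  int l1, w1, l2, w2;
  BlendRangesAtRow({0, 0, 10, 5}, {3, 0, 4, 5}, 2, &l1, &w1, &l2, &w2);
  assert(l1 == 0 && w1 == 3 && l2 == 7 && w2 == 3);
  return 0;
}
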
+#include <limits>
+#include "platform/image-decoders/SegmentStream.h"
+#include "platform/wtf/NotFound.h"
+#include "platform/wtf/PtrUtil.h"
+#include "third_party/skia/include/core/SkImageInfo.h"
namespace blink {
@@ -123,406 +40,281 @@ WEBPImageDecoder::WEBPImageDecoder(AlphaOption alpha_option,
const ColorBehavior& color_behavior,
size_t max_decoded_bytes)
: ImageDecoder(alpha_option, color_behavior, max_decoded_bytes),
- decoder_(0),
- format_flags_(0),
- frame_background_has_alpha_(false),
- demux_(0),
- demux_state_(WEBP_DEMUX_PARSING_HEADER),
- have_already_parsed_this_data_(false),
- repetition_count_(kAnimationLoopOnce),
- decoded_height_(0) {
- blend_function_ = (alpha_option == kAlphaPremultiplied)
- ? alphaBlendPremultiplied
- : alphaBlendNonPremultiplied;
-}
+ codec_(),
+ segment_stream_(nullptr) {}
-WEBPImageDecoder::~WEBPImageDecoder() {
- Clear();
-}
+WEBPImageDecoder::~WEBPImageDecoder() = default;
-void WEBPImageDecoder::Clear() {
- WebPDemuxDelete(demux_);
- demux_ = 0;
- consolidated_data_.reset();
- ClearDecoder();
-}
+void WEBPImageDecoder::OnSetData(SegmentReader* data) {
+ if (!data) {
+ if (segment_stream_)
+ segment_stream_->SetReader(nullptr);
+ return;
+ }
-void WEBPImageDecoder::ClearDecoder() {
- WebPIDelete(decoder_);
- decoder_ = 0;
- decoded_height_ = 0;
- frame_background_has_alpha_ = false;
-}
+ std::unique_ptr<SegmentStream> segment_stream;
+ if (!segment_stream_) {
+ segment_stream = base::MakeUnique<SegmentStream>();
+ segment_stream_ = segment_stream.get();
+ }
-void WEBPImageDecoder::OnSetData(SegmentReader*) {
- have_already_parsed_this_data_ = false;
+ segment_stream_->SetReader(std::move(data));
+
+ if (!codec_) {
+ SkCodec::Result codec_creation_result;
+ codec_ = SkCodec::MakeFromStream(std::move(segment_stream),
+ &codec_creation_result, nullptr);
+ switch (codec_creation_result) {
+ case SkCodec::kSuccess: {
+ // SkCodec::MakeFromStream will read enough of the image to get the
+ // image size.
+ SkImageInfo image_info = codec_->getInfo();
+ SetSize(image_info.width(), image_info.height());
+ return;
+ }
+ case SkCodec::kIncompleteInput:
+ // Ownership of |segment_stream| was passed into MakeFromStream, which
+ // deletes the stream when codec creation fails. Clear |segment_stream_|
+ // so it does not point at reclaimed memory.
+ segment_stream_ = nullptr;
+ return;
+ default:
+ SetFailed();
+ return;
+ }
+ }
}
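
Aside: the codec creation step above follows Skia's usual stream-based factory pattern. A minimal sketch of that pattern in isolation, reading only the image dimensions (it uses SkFILEStream for simplicity; the patch instead hands SkCodec a SegmentStream wrapping Blink's SegmentReader):

#include <memory>
#include "third_party/skia/include/codec/SkCodec.h"
#include "third_party/skia/include/core/SkStream.h"

// Sketch: create a codec from a stream and read the image dimensions.
// SkCodec::MakeFromStream takes ownership of the stream, even on failure.
static bool ReadImageSize(const char* path, int* width, int* height) {
  std::unique_ptr<SkStream> stream = SkFILEStream::Make(path);
  if (!stream)
    return false;

  SkCodec::Result result;
  std::unique_ptr<SkCodec> codec =
      SkCodec::MakeFromStream(std::move(stream), &result);
  if (!codec || result != SkCodec::kSuccess)
    return false;

  SkImageInfo info = codec->getInfo();
  *width = info.width();
  *height = info.height();
  return true;
}
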
int WEBPImageDecoder::RepetitionCount() const {
- return Failed() ? kAnimationLoopOnce : repetition_count_;
+ if (!codec_ || segment_stream_->IsCleared())
+ return repetition_count_;
+
+ DCHECK(!Failed());
+
+ // SkCodec will parse forward in the file if the repetition count has not
+ // been seen yet.
+ int repetition_count = codec_->getRepetitionCount();
+
+ switch (repetition_count) {
+ case 0: {
+ // SkCodec returns 0 for both still images and animated images which
+ // only play once.
+ if (IsAllDataReceived() && codec_->getFrameCount() == 1) {
+ repetition_count_ = kAnimationNone;
+ break;
+ }
+
+ repetition_count_ = kAnimationLoopOnce;
+ break;
+ }
+ case SkCodec::kRepetitionCountInfinite:
+ repetition_count_ = kAnimationLoopInfinite;
+ break;
+ default:
+ repetition_count_ = repetition_count;
+ break;
+ }
+
+ return repetition_count_;
}
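
The mapping above, restated as a standalone helper (a sketch; Blink's kAnimationNone / kAnimationLoopOnce / kAnimationLoopInfinite constants are assumed to be in scope from the image-decoder headers):

#include "third_party/skia/include/codec/SkCodec.h"

// Sketch of the getRepetitionCount() mapping. SkCodec reports the number of
// repeats after the first play, or kRepetitionCountInfinite for endless loops;
// 0 covers both still images and animations that play exactly once.
static int ToBlinkRepetitionCount(SkCodec& codec, bool all_data_received) {
  const int sk_count = codec.getRepetitionCount();
  if (sk_count == SkCodec::kRepetitionCountInfinite)
    return kAnimationLoopInfinite;
  if (sk_count == 0) {
    // Only a fully parsed single-frame image is reported as "not animated".
    if (all_data_received && codec.getFrameCount() == 1)
      return kAnimationNone;
    return kAnimationLoopOnce;
  }
  return sk_count;
}
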
bool WEBPImageDecoder::FrameIsReceivedAtIndex(size_t index) const {
- if (!demux_ || demux_state_ <= WEBP_DEMUX_PARSING_HEADER)
+ SkCodec::FrameInfo frame_info;
+ if (!codec_ || !codec_->getFrameInfo(index, &frame_info))
return false;
- if (!(format_flags_ & ANIMATION_FLAG))
- return ImageDecoder::FrameIsReceivedAtIndex(index);
- bool frame_is_received_at_index = index < frame_buffer_cache_.size();
- return frame_is_received_at_index;
+ return frame_info.fFullyReceived;
}
float WEBPImageDecoder::FrameDurationAtIndex(size_t index) const {
- return index < frame_buffer_cache_.size()
- ? frame_buffer_cache_[index].Duration()
- : 0;
+ if (index < frame_buffer_cache_.size())
+ return frame_buffer_cache_[index].Duration();
+ return 0;
}
-bool WEBPImageDecoder::UpdateDemuxer() {
- if (Failed())
- return false;
-
- if (have_already_parsed_this_data_)
- return true;
-
- have_already_parsed_this_data_ = true;
-
- const unsigned kWebpHeaderSize = 30;
- if (data_->size() < kWebpHeaderSize)
- return false; // Await VP8X header so WebPDemuxPartial succeeds.
-
- WebPDemuxDelete(demux_);
- consolidated_data_ = data_->GetAsSkData();
- WebPData input_data = {
- reinterpret_cast<const uint8_t*>(consolidated_data_->data()),
- consolidated_data_->size()};
- demux_ = WebPDemuxPartial(&input_data, &demux_state_);
- if (!demux_ || (IsAllDataReceived() && demux_state_ != WEBP_DEMUX_DONE)) {
- if (!demux_)
- consolidated_data_.reset();
- return SetFailed();
- }
-
- DCHECK_GT(demux_state_, WEBP_DEMUX_PARSING_HEADER);
- if (!WebPDemuxGetI(demux_, WEBP_FF_FRAME_COUNT))
- return false; // Wait until the encoded image frame data arrives.
-
- if (!IsDecodedSizeAvailable()) {
- int width = WebPDemuxGetI(demux_, WEBP_FF_CANVAS_WIDTH);
- int height = WebPDemuxGetI(demux_, WEBP_FF_CANVAS_HEIGHT);
- if (!SetSize(width, height))
- return SetFailed();
-
- format_flags_ = WebPDemuxGetI(demux_, WEBP_FF_FORMAT_FLAGS);
- if (!(format_flags_ & ANIMATION_FLAG)) {
- repetition_count_ = kAnimationNone;
- } else {
- // Since we have parsed at least one frame, even if partially,
- // the global animation (ANIM) properties have been read since
- // an ANIM chunk must precede the ANMF frame chunks.
- repetition_count_ = WebPDemuxGetI(demux_, WEBP_FF_LOOP_COUNT);
- // Repetition count is always <= 16 bits.
- DCHECK_EQ(repetition_count_, repetition_count_ & 0xffff);
- if (!repetition_count_)
- repetition_count_ = kAnimationLoopInfinite;
- // FIXME: Implement ICC profile support for animated images.
- format_flags_ &= ~ICCP_FLAG;
- }
+bool WEBPImageDecoder::SetFailed() {
+ segment_stream_ = nullptr;
+ codec_.reset();
+ return ImageDecoder::SetFailed();
+}
- if ((format_flags_ & ICCP_FLAG) && !IgnoresColorSpace())
- ReadColorProfile();
+size_t WEBPImageDecoder::ClearCacheExceptFrame(size_t index) {
+ // SkCodec reports the earliest frame that can satisfy a dependency, but that
+ // frame may already have been evicted while a later frame (which would serve
+ // equally well as the required frame) is still cached. Prefer to keep such a
+ // cached frame.
+ if (frame_buffer_cache_.size() <= 1)
+ return 0;
+
+ size_t index2 = kNotFound;
+ if (index < frame_buffer_cache_.size()) {
+ const ImageFrame& frame = frame_buffer_cache_[index];
+ if (frame.RequiredPreviousFrameIndex() != kNotFound &&
+ !FrameStatusSufficientForSuccessors(index))
+ index2 = GetViableReferenceFrameIndex(index);
}
- DCHECK(IsDecodedSizeAvailable());
+ return ClearCacheExceptTwoFrames(index, index2);
+}
- size_t frame_count = WebPDemuxGetI(demux_, WEBP_FF_FRAME_COUNT);
- UpdateAggressivePurging(frame_count);
+size_t WEBPImageDecoder::DecodeFrameCount() {
+ if (!codec_ || segment_stream_->IsCleared())
+ return frame_buffer_cache_.size();
- return true;
+ return codec_->getFrameCount();
}
-void WEBPImageDecoder::OnInitFrameBuffer(size_t frame_index) {
- // ImageDecoder::InitFrameBuffer does a DCHECK if |frame_index| exists.
- ImageFrame& buffer = frame_buffer_cache_[frame_index];
-
- const size_t required_previous_frame_index =
- buffer.RequiredPreviousFrameIndex();
- if (required_previous_frame_index == kNotFound) {
- frame_background_has_alpha_ =
- !buffer.OriginalFrameRect().Contains(IntRect(IntPoint(), Size()));
+void WEBPImageDecoder::InitializeNewFrame(size_t index) {
+ DCHECK(codec_);
+
+ ImageFrame& frame = frame_buffer_cache_[index];
+ // SkCodec does not report which portion of the image a given frame updates,
+ // so rather than filling in the true frame rect we record the image's full
+ // size. The original frame rect is not used downstream anyway.
+ IntSize full_image_size = Size();
+ frame.SetOriginalFrameRect(IntRect(IntPoint(), full_image_size));
+
+ SkCodec::FrameInfo frame_info;
+ bool frame_info_received = codec_->getFrameInfo(index, &frame_info);
+ DCHECK(frame_info_received);
+ frame.SetDuration(frame_info.fDuration);
+ size_t required_previous_frame_index;
+ if (frame_info.fRequiredFrame == SkCodec::kNone) {
+ required_previous_frame_index = WTF::kNotFound;
} else {
- const ImageFrame& prev_buffer =
- frame_buffer_cache_[required_previous_frame_index];
- frame_background_has_alpha_ =
- prev_buffer.HasAlpha() || (prev_buffer.GetDisposalMethod() ==
- ImageFrame::kDisposeOverwriteBgcolor);
+ required_previous_frame_index =
+ static_cast<size_t>(frame_info.fRequiredFrame);
}
+ frame.SetRequiredPreviousFrameIndex(required_previous_frame_index);
- // The buffer is transparent outside the decoded area while the image is
- // loading. The correct alpha value for the frame will be set when it is fully
- // decoded.
- buffer.SetHasAlpha(true);
-}
-
-bool WEBPImageDecoder::CanReusePreviousFrameBuffer(size_t frame_index) const {
- DCHECK(frame_index < frame_buffer_cache_.size());
- return frame_buffer_cache_[frame_index].GetAlphaBlendSource() !=
- ImageFrame::kBlendAtopPreviousFrame;
-}
-
-void WEBPImageDecoder::ClearFrameBuffer(size_t frame_index) {
- if (demux_ && demux_state_ >= WEBP_DEMUX_PARSED_HEADER &&
- frame_buffer_cache_[frame_index].GetStatus() ==
- ImageFrame::kFramePartial) {
- // Clear the decoder state so that this partial frame can be decoded again
- // when requested.
- ClearDecoder();
+ ImageFrame::DisposalMethod disposal_method = ImageFrame::kDisposeNotSpecified;
+ switch (frame_info.fDisposalMethod) {
+ case SkCodecAnimation::DisposalMethod::kKeep:
+ disposal_method = ImageFrame::kDisposeKeep;
+ break;
+ case SkCodecAnimation::DisposalMethod::kRestoreBGColor:
+ disposal_method = ImageFrame::kDisposeOverwriteBgcolor;
+ break;
+ default:
+ break;
}
- ImageDecoder::ClearFrameBuffer(frame_index);
+ frame.SetDisposalMethod(disposal_method);
}
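
For context, the per-frame metadata consumed by InitializeNewFrame() comes from SkCodec's frame query API. A minimal sketch that walks that API on its own (illustration only):

#include <cstdio>
#include "third_party/skia/include/codec/SkCodec.h"

// Sketch: walk the frame metadata SkCodec has parsed so far. These are the
// same FrameInfo fields InitializeNewFrame() maps onto ImageFrame.
static void DumpFrameMetadata(SkCodec& codec) {
  const int frame_count = codec.getFrameCount();
  for (int i = 0; i < frame_count; ++i) {
    SkCodec::FrameInfo info;
    if (!codec.getFrameInfo(i, &info))
      continue;  // metadata for this frame is not available yet
    std::printf("frame %d: %d ms, requires frame %d, %s\n", i, info.fDuration,
                info.fRequiredFrame,
                info.fFullyReceived ? "fully received" : "partial");
  }
}
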
-void WEBPImageDecoder::ReadColorProfile() {
- WebPChunkIterator chunk_iterator;
- if (!WebPDemuxGetChunk(demux_, "ICCP", 1, &chunk_iterator)) {
- WebPDemuxReleaseChunkIterator(&chunk_iterator);
+void WEBPImageDecoder::Decode(size_t index) {
+ if (!codec_ || segment_stream_->IsCleared())
return;
- }
- const char* profile_data =
- reinterpret_cast<const char*>(chunk_iterator.chunk.bytes);
- size_t profile_size = chunk_iterator.chunk.size;
+ DCHECK(!Failed());
- SetEmbeddedColorProfile(profile_data, profile_size);
+ DCHECK_LT(index, frame_buffer_cache_.size());
- WebPDemuxReleaseChunkIterator(&chunk_iterator);
-}
+ UpdateAggressivePurging(index);
+ SkImageInfo image_info = codec_->getInfo()
+ .makeColorType(kN32_SkColorType)
+ .makeColorSpace(ColorSpaceForSkImages());
-void WEBPImageDecoder::ApplyPostProcessing(size_t frame_index) {
- ImageFrame& buffer = frame_buffer_cache_[frame_index];
- int width;
- int decoded_height;
- if (!WebPIDecGetRGB(decoder_, &decoded_height, &width, 0, 0))
- return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
- if (decoded_height <= 0)
- return;
+ SkCodec::Options options;
+ options.fFrameIndex = index;
+ options.fPriorFrame = SkCodec::kNone;
+ options.fZeroInitialized = SkCodec::kNo_ZeroInitialized;
- const IntRect& frame_rect = buffer.OriginalFrameRect();
- SECURITY_DCHECK(width == frame_rect.Width());
- SECURITY_DCHECK(decoded_height <= frame_rect.Height());
- const int left = frame_rect.X();
- const int top = frame_rect.Y();
-
- // TODO (msarett):
- // Here we apply the color space transformation to the dst space.
- // It does not really make sense to transform to a gamma-encoded
- // space and then immediately after, perform a linear premultiply
- // and linear blending. Can we find a way to perform the
- // premultiplication and blending in a linear space?
- SkColorSpaceXform* xform = ColorTransform();
- if (xform) {
- const SkColorSpaceXform::ColorFormat kSrcFormat =
- SkColorSpaceXform::kBGRA_8888_ColorFormat;
- const SkColorSpaceXform::ColorFormat kDstFormat =
- SkColorSpaceXform::kRGBA_8888_ColorFormat;
- for (int y = decoded_height_; y < decoded_height; ++y) {
- const int canvas_y = top + y;
- uint8_t* row = reinterpret_cast<uint8_t*>(buffer.GetAddr(left, canvas_y));
- xform->apply(kDstFormat, row, kSrcFormat, row, width,
- kUnpremul_SkAlphaType);
-
- uint8_t* pixel = row;
- for (int x = 0; x < width; ++x, pixel += 4) {
- const int canvas_x = left + x;
- buffer.SetRGBA(canvas_x, canvas_y, pixel[0], pixel[1], pixel[2],
- pixel[3]);
+ ImageFrame& frame = frame_buffer_cache_[index];
+ if (frame.GetStatus() == ImageFrame::kFrameEmpty) {
+ size_t required_previous_frame_index = frame.RequiredPreviousFrameIndex();
+ if (required_previous_frame_index == kNotFound) {
+ frame.AllocatePixelData(Size().Width(), Size().Height(),
+ ColorSpaceForSkImages());
+ frame.ZeroFillPixelData();
+ } else {
+ size_t previous_frame_index = GetViableReferenceFrameIndex(index);
+ if (previous_frame_index == kNotFound) {
+ previous_frame_index = required_previous_frame_index;
+ Decode(previous_frame_index);
}
- }
- }
- // During the decoding of the current frame, we may have set some pixels to be
- // transparent (i.e. alpha < 255). If the alpha blend source was
- // 'BlendAtopPreviousFrame', the values of these pixels should be determined
- // by blending them against the pixels of the corresponding previous frame.
- // Compute the correct opaque values now.
- // FIXME: This could be avoided if libwebp decoder had an API that used the
- // previous required frame to do the alpha-blending by itself.
- if ((format_flags_ & ANIMATION_FLAG) && frame_index &&
- buffer.GetAlphaBlendSource() == ImageFrame::kBlendAtopPreviousFrame &&
- buffer.RequiredPreviousFrameIndex() != kNotFound) {
- ImageFrame& prev_buffer = frame_buffer_cache_[frame_index - 1];
- DCHECK_EQ(prev_buffer.GetStatus(), ImageFrame::kFrameComplete);
- ImageFrame::DisposalMethod prev_disposal_method =
- prev_buffer.GetDisposalMethod();
- if (prev_disposal_method == ImageFrame::kDisposeKeep) {
- // Blend transparent pixels with pixels in previous canvas.
- for (int y = decoded_height_; y < decoded_height; ++y) {
- blend_function_(buffer, prev_buffer, top + y, left, width);
- }
- } else if (prev_disposal_method == ImageFrame::kDisposeOverwriteBgcolor) {
- const IntRect& prev_rect = prev_buffer.OriginalFrameRect();
- // We need to blend a transparent pixel with the starting value (from just
- // after the InitFrame() call). If the pixel belongs to prev_rect, the
- // starting value was fully transparent, so this is a no-op. Otherwise, we
- // need to blend against the pixel from the previous canvas.
- for (int y = decoded_height_; y < decoded_height; ++y) {
- int canvas_y = top + y;
- int left1, width1, left2, width2;
- findBlendRangeAtRow(frame_rect, prev_rect, canvas_y, left1, width1,
- left2, width2);
- if (width1 > 0)
- blend_function_(buffer, prev_buffer, canvas_y, left1, width1);
- if (width2 > 0)
- blend_function_(buffer, prev_buffer, canvas_y, left2, width2);
+ // Try to reuse |previous_frame| as the starting state to avoid copying.
+ // If CanReusePreviousFrameBuffer returns false, |previous_frame| is still
+ // needed to decode this or later frames, so copy its data instead.
+ ImageFrame& previous_frame = frame_buffer_cache_[previous_frame_index];
+ if (!frame.TakeBitmapDataIfWritable(&previous_frame) &&
+ !frame.CopyBitmapData(previous_frame)) {
+ SetFailed();
+ return;
}
+ options.fPriorFrame = previous_frame_index;
}
}
- decoded_height_ = decoded_height;
- buffer.SetPixelsChanged(true);
-}
-
-size_t WEBPImageDecoder::DecodeFrameCount() {
- // If UpdateDemuxer() fails, return the existing number of frames. This way
- // if we get halfway through the image before decoding fails, we won't
- // suddenly start reporting that the image has zero frames.
- return UpdateDemuxer() ? WebPDemuxGetI(demux_, WEBP_FF_FRAME_COUNT)
- : frame_buffer_cache_.size();
-}
-
-void WEBPImageDecoder::InitializeNewFrame(size_t index) {
- if (!(format_flags_ & ANIMATION_FLAG)) {
- DCHECK(!index);
- return;
+ if (frame.GetStatus() == ImageFrame::kFrameAllocated) {
+ SkCodec::Result start_incremental_decode_result =
+ codec_->startIncrementalDecode(image_info, frame.Bitmap().getPixels(),
+ frame.Bitmap().rowBytes(), &options);
+ switch (start_incremental_decode_result) {
+ case SkCodec::kSuccess:
+ break;
+ case SkCodec::kIncompleteInput:
+ return;
+ default:
+ SetFailed();
+ return;
+ }
+ frame.SetStatus(ImageFrame::kFramePartial);
}
- WebPIterator animated_frame;
- WebPDemuxGetFrame(demux_, index + 1, &animated_frame);
- DCHECK_EQ(animated_frame.complete, 1);
- ImageFrame* buffer = &frame_buffer_cache_[index];
- IntRect frame_rect(animated_frame.x_offset, animated_frame.y_offset,
- animated_frame.width, animated_frame.height);
- buffer->SetOriginalFrameRect(
- Intersection(frame_rect, IntRect(IntPoint(), Size())));
- buffer->SetDuration(animated_frame.duration);
- buffer->SetDisposalMethod(animated_frame.dispose_method ==
- WEBP_MUX_DISPOSE_BACKGROUND
- ? ImageFrame::kDisposeOverwriteBgcolor
- : ImageFrame::kDisposeKeep);
- buffer->SetAlphaBlendSource(animated_frame.blend_method == WEBP_MUX_BLEND
- ? ImageFrame::kBlendAtopPreviousFrame
- : ImageFrame::kBlendAtopBgcolor);
- buffer->SetRequiredPreviousFrameIndex(
- FindRequiredPreviousFrame(index, !animated_frame.has_alpha));
- WebPDemuxReleaseIterator(&animated_frame);
-}
-void WEBPImageDecoder::Decode(size_t index) {
- if (Failed())
- return;
-
- Vector<size_t> frames_to_decode = FindFramesToDecode(index);
-
- DCHECK(demux_);
- for (auto i = frames_to_decode.rbegin(); i != frames_to_decode.rend(); ++i) {
- if ((format_flags_ & ANIMATION_FLAG) && !InitFrameBuffer(*i)) {
- SetFailed();
- return;
+ SkCodec::Result incremental_decode_result = codec_->incrementalDecode();
+ switch (incremental_decode_result) {
+ case SkCodec::kSuccess: {
+ SkCodec::FrameInfo frame_info;
+ bool frame_info_received = codec_->getFrameInfo(index, &frame_info);
+ DCHECK(frame_info_received);
+ frame.SetHasAlpha(!SkAlphaTypeIsOpaque(frame_info.fAlphaType));
+ frame.SetPixelsChanged(true);
+ frame.SetStatus(ImageFrame::kFrameComplete);
+ PostDecodeProcessing(index);
+ break;
}
-
- WebPIterator webp_frame;
- if (!WebPDemuxGetFrame(demux_, *i + 1, &webp_frame)) {
+ case SkCodec::kIncompleteInput:
+ frame.SetPixelsChanged(true);
+ if (FrameIsReceivedAtIndex(index) || IsAllDataReceived()) {
+ SetFailed();
+ }
+ break;
+ default:
SetFailed();
- } else {
- DecodeSingleFrame(webp_frame.fragment.bytes, webp_frame.fragment.size,
- *i);
- WebPDemuxReleaseIterator(&webp_frame);
- }
- if (Failed())
return;
-
- // If this returns false, we need more data to continue decoding.
- if (!PostDecodeProcessing(*i))
- break;
}
-
- // It is also a fatal error if all data is received and we have decoded all
- // frames available but the file is truncated.
- if (index >= frame_buffer_cache_.size() - 1 && IsAllDataReceived() &&
- demux_ && demux_state_ != WEBP_DEMUX_DONE)
- SetFailed();
}
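
The decode path above follows SkCodec's incremental-decode contract: start a decode into caller-owned pixels, then call incrementalDecode() again whenever more data arrives, until it stops returning kIncompleteInput. A minimal sketch of that contract outside Blink (illustration only; single frame, N32 pixels):

#include <cstdint>
#include <vector>
#include "third_party/skia/include/codec/SkCodec.h"
#include "third_party/skia/include/core/SkImageInfo.h"

// Sketch: decode frame |frame_index| into |pixels|. Returns true when the
// frame is complete; false on error or when more encoded data is needed (in
// the real decoder, Decode() simply returns and resumes incrementalDecode()
// on a later request once the SegmentStream has more data).
static bool DecodeFrameIncrementally(SkCodec& codec, int frame_index,
                                     std::vector<uint32_t>* pixels) {
  SkImageInfo info = codec.getInfo().makeColorType(kN32_SkColorType);
  pixels->resize(static_cast<size_t>(info.width()) * info.height());

  SkCodec::Options options;
  options.fFrameIndex = frame_index;

  if (codec.startIncrementalDecode(info, pixels->data(), info.minRowBytes(),
                                   &options) != SkCodec::kSuccess) {
    return false;
  }

  SkCodec::Result result = codec.incrementalDecode();
  // kIncompleteInput means "call incrementalDecode() again once more data is
  // available"; anything other than kSuccess or kIncompleteInput is fatal.
  return result == SkCodec::kSuccess;
}
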
-bool WEBPImageDecoder::DecodeSingleFrame(const uint8_t* data_bytes,
- size_t data_size,
- size_t frame_index) {
- if (Failed())
- return false;
-
- DCHECK(IsDecodedSizeAvailable());
-
- DCHECK_GT(frame_buffer_cache_.size(), frame_index);
- ImageFrame& buffer = frame_buffer_cache_[frame_index];
- DCHECK_NE(buffer.GetStatus(), ImageFrame::kFrameComplete);
-
- if (buffer.GetStatus() == ImageFrame::kFrameEmpty) {
- if (!buffer.AllocatePixelData(Size().Width(), Size().Height(),
- ColorSpaceForSkImages()))
- return SetFailed();
- buffer.ZeroFillPixelData();
- buffer.SetStatus(ImageFrame::kFramePartial);
- // The buffer is transparent outside the decoded area while the image is
- // loading. The correct alpha value for the frame will be set when it is
- // fully decoded.
- buffer.SetHasAlpha(true);
- buffer.SetOriginalFrameRect(IntRect(IntPoint(), Size()));
- }
-
- const IntRect& frame_rect = buffer.OriginalFrameRect();
- if (!decoder_) {
- WEBP_CSP_MODE mode = outputMode(format_flags_ & ALPHA_FLAG);
- if (!premultiply_alpha_)
- mode = outputMode(false);
- if (ColorTransform()) {
- // Swizzling between RGBA and BGRA is zero cost in a color transform.
- // So when we have a color transform, we should decode to whatever is
- // easiest for libwebp, and then let the color transform swizzle if
- // necessary.
- // Lossy webp is encoded as YUV (so RGBA and BGRA are the same cost).
- // Lossless webp is encoded as BGRA. This means decoding to BGRA is
- // either faster or the same cost as RGBA.
- mode = MODE_BGRA;
+size_t WEBPImageDecoder::GetViableReferenceFrameIndex(
+ size_t dependent_index) const {
+ DCHECK_LT(dependent_index, frame_buffer_cache_.size());
+
+ size_t required_previous_frame_index =
+ frame_buffer_cache_[dependent_index].RequiredPreviousFrameIndex();
+
+ // Any frame in the range [|required_previous_frame_index|, |dependent_index|)
+ // can be provided as the prior frame to SkCodec.
+ //
+ // SkCodec sets SkCodec::FrameInfo::fRequiredFrame to the earliest frame that
+ // can be used. Several consecutive frames may each overwrite the same
+ // subregion; since that subregion is about to be overwritten again, any
+ // frame in that chain can be provided as the starting state.
+ DCHECK_NE(required_previous_frame_index, kNotFound);
+ // Loop backwards because the frames most likely to be in cache are the most
+ // recent.
+ for (size_t i = dependent_index - 1; i != required_previous_frame_index;
+ i--) {
+ const ImageFrame& frame = frame_buffer_cache_[i];
+
+ if (frame.GetStatus() == ImageFrame::kFrameComplete) {
+ return i;
}
- WebPInitDecBuffer(&decoder_buffer_);
- decoder_buffer_.colorspace = mode;
- decoder_buffer_.u.RGBA.stride =
- Size().Width() * sizeof(ImageFrame::PixelData);
- decoder_buffer_.u.RGBA.size =
- decoder_buffer_.u.RGBA.stride * frame_rect.Height();
- decoder_buffer_.is_external_memory = 1;
- decoder_ = WebPINewDecoder(&decoder_buffer_);
- if (!decoder_)
- return SetFailed();
}
- decoder_buffer_.u.RGBA.rgba = reinterpret_cast<uint8_t*>(
- buffer.GetAddr(frame_rect.X(), frame_rect.Y()));
-
- switch (WebPIUpdate(decoder_, data_bytes, data_size)) {
- case VP8_STATUS_OK:
- ApplyPostProcessing(frame_index);
- buffer.SetHasAlpha((format_flags_ & ALPHA_FLAG) ||
- frame_background_has_alpha_);
- buffer.SetStatus(ImageFrame::kFrameComplete);
- ClearDecoder();
- return true;
- case VP8_STATUS_SUSPENDED:
- if (!IsAllDataReceived() && !FrameIsReceivedAtIndex(frame_index)) {
- ApplyPostProcessing(frame_index);
- return false;
- }
- // FALLTHROUGH
- default:
- Clear();
- return SetFailed();
- }
+ return kNotFound;
}
} // namespace blink
