// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// AnimDecoder implementation.
//

#ifdef HAVE_CONFIG_H
#include "../webp/config.h"
#endif

#include <assert.h>
#include <string.h>

#include "../utils/utils.h"
#include "../webp/decode.h"
#include "../webp/demux.h"

#define NUM_CHANNELS 4

typedef void (*BlendRowFunc)(uint32_t* const, const uint32_t* const, int);
static void BlendPixelRowNonPremult(uint32_t* const src,
                                    const uint32_t* const dst, int num_pixels);
static void BlendPixelRowPremult(uint32_t* const src, const uint32_t* const dst,
                                 int num_pixels);

struct WebPAnimDecoder {
  WebPDemuxer* demux_;             // Demuxer created from given WebP bitstream.
  WebPDecoderConfig config_;       // Decoder config.
  // Note: we use a pointer to a function blending multiple pixels at a time to
  // allow possible inlining of per-pixel blending function.
  BlendRowFunc blend_func_;        // Pointer to the chosen blend row function.
  WebPAnimInfo info_;              // Global info about the animation.
  uint8_t* curr_frame_;            // Current canvas (not disposed).
  uint8_t* prev_frame_disposed_;   // Previous canvas (properly disposed).
  int prev_frame_timestamp_;       // Previous frame timestamp (milliseconds).
  WebPIterator prev_iter_;         // Iterator object for previous frame.
  int prev_frame_was_keyframe_;    // True if previous frame was a keyframe.
  int next_frame_;                 // Index of the next frame to be decoded
                                   // (starting from 1).
};
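
// State kept across successive WebPAnimDecoderGetNext() calls (summary added
// for clarity): 'curr_frame_' always holds the fully reconstructed canvas of
// the most recently decoded frame, while 'prev_frame_disposed_' holds that
// same canvas after the frame's dispose method has been applied, i.e. exactly
// the background onto which the next frame will be composited.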

static void DefaultDecoderOptions(WebPAnimDecoderOptions* const dec_options) {
  dec_options->color_mode = MODE_RGBA;
  dec_options->use_threads = 0;
}

int WebPAnimDecoderOptionsInitInternal(WebPAnimDecoderOptions* dec_options,
                                       int abi_version) {
  if (dec_options == NULL ||
      WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_DEMUX_ABI_VERSION)) {
    return 0;
  }
  DefaultDecoderOptions(dec_options);
  return 1;
}

static int ApplyDecoderOptions(const WebPAnimDecoderOptions* const dec_options,
                               WebPAnimDecoder* const dec) {
  WEBP_CSP_MODE mode;
  WebPDecoderConfig* config = &dec->config_;
  assert(dec_options != NULL);

  mode = dec_options->color_mode;
  if (mode != MODE_RGBA && mode != MODE_BGRA &&
      mode != MODE_rgbA && mode != MODE_bgrA) {
    return 0;
  }
  dec->blend_func_ = (mode == MODE_RGBA || mode == MODE_BGRA)
                         ? &BlendPixelRowNonPremult
                         : &BlendPixelRowPremult;
  WebPInitDecoderConfig(config);
  config->output.colorspace = mode;
  config->output.is_external_memory = 1;
  config->options.use_threads = dec_options->use_threads;
  // Note: config->output.u.RGBA is set at the time of decoding each frame.
  return 1;
}

WebPAnimDecoder* WebPAnimDecoderNewInternal(
    const WebPData* webp_data, const WebPAnimDecoderOptions* dec_options,
    int abi_version) {
  WebPAnimDecoderOptions options;
  WebPAnimDecoder* dec = NULL;
  if (webp_data == NULL ||
      WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_DEMUX_ABI_VERSION)) {
    return NULL;
  }

  // Note: calloc() so that the pointer members are initialized to NULL.
  dec = (WebPAnimDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
  if (dec == NULL) goto Error;

  if (dec_options != NULL) {
    options = *dec_options;
  } else {
    DefaultDecoderOptions(&options);
  }
  if (!ApplyDecoderOptions(&options, dec)) goto Error;

  dec->demux_ = WebPDemux(webp_data);
  if (dec->demux_ == NULL) goto Error;

  dec->info_.canvas_width = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_WIDTH);
  dec->info_.canvas_height = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_HEIGHT);
  dec->info_.loop_count = WebPDemuxGetI(dec->demux_, WEBP_FF_LOOP_COUNT);
  dec->info_.bgcolor = WebPDemuxGetI(dec->demux_, WEBP_FF_BACKGROUND_COLOR);
  dec->info_.frame_count = WebPDemuxGetI(dec->demux_, WEBP_FF_FRAME_COUNT);

  {
    const int canvas_bytes =
        dec->info_.canvas_width * NUM_CHANNELS * dec->info_.canvas_height;
    // Note: calloc() because we fill frame with zeroes as well.
    dec->curr_frame_ = WebPSafeCalloc(1ULL, canvas_bytes);
    if (dec->curr_frame_ == NULL) goto Error;
    dec->prev_frame_disposed_ = WebPSafeCalloc(1ULL, canvas_bytes);
    if (dec->prev_frame_disposed_ == NULL) goto Error;
  }

  WebPAnimDecoderReset(dec);

  return dec;

 Error:
  WebPAnimDecoderDelete(dec);
  return NULL;
}

int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec, WebPAnimInfo* info) {
  if (dec == NULL || info == NULL) return 0;
  *info = dec->info_;
  return 1;
}

// Returns true if the frame covers the full canvas.
static int IsFullFrame(int width, int height, int canvas_width,
                       int canvas_height) {
  return (width == canvas_width && height == canvas_height);
}

// Clear the canvas to transparent.
static void ZeroFillCanvas(uint8_t* buf, uint32_t canvas_width,
                           uint32_t canvas_height) {
  memset(buf, 0, canvas_width * NUM_CHANNELS * canvas_height);
}

// Clear given frame rectangle to transparent.
static void ZeroFillFrameRect(uint8_t* buf, int buf_stride, int x_offset,
                              int y_offset, int width, int height) {
  int j;
  assert(width * NUM_CHANNELS <= buf_stride);
  buf += y_offset * buf_stride + x_offset * NUM_CHANNELS;
  for (j = 0; j < height; ++j) {
    memset(buf, 0, width * NUM_CHANNELS);
    buf += buf_stride;
  }
}

// Copy width * height pixels from 'src' to 'dst'.
static void CopyCanvas(const uint8_t* src, uint8_t* dst,
                       uint32_t width, uint32_t height) {
  assert(src != NULL && dst != NULL);
  memcpy(dst, src, width * NUM_CHANNELS * height);
}
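
// Note on key-frame detection (summary of the conditions checked in
// IsKeyFrame() below, added for clarity): a frame fully determines the canvas
// if it is the very first frame, or if it covers the whole canvas and is
// either opaque or uses WEBP_MUX_NO_BLEND, or if the previous frame was
// disposed to background and either covered the whole canvas or was itself a
// key-frame.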
// Returns true if the current frame is a key-frame.
static int IsKeyFrame(const WebPIterator* const curr,
                      const WebPIterator* const prev,
                      int prev_frame_was_key_frame,
                      int canvas_width, int canvas_height) {
  if (curr->frame_num == 1) {
    return 1;
  } else if ((!curr->has_alpha || curr->blend_method == WEBP_MUX_NO_BLEND) &&
             IsFullFrame(curr->width, curr->height,
                         canvas_width, canvas_height)) {
    return 1;
  } else {
    return (prev->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) &&
           (IsFullFrame(prev->width, prev->height, canvas_width,
                        canvas_height) ||
            prev_frame_was_key_frame);
  }
}
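
// The two helpers below implement the standard "over" compositing operator on
// straight (non-premultiplied) alpha. With alpha values expressed as fractions
// of 255, the exact formulas are (explanatory sketch, not part of the original
// code):
//   blend_a = src_a + dst_a * (1 - src_a)
//   blend_c = (src_c * src_a + dst_c * dst_a * (1 - src_a)) / blend_a
// BlendPixelNonPremult() approximates dst_a * (1 - src_a) as
// (dst_a * (256 - src_a)) >> 8 and replaces the division by blend_a with a
// multiplication by the fixed-point reciprocal scale = (1 << 24) / blend_a;
// that is why BlendChannelNonPremult() shifts the scaled sum right by 24.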

// Blend a single channel of 'src' over 'dst', given their alpha channel
// values. 'src' and 'dst' are assumed to be NOT pre-multiplied by alpha.
static uint8_t BlendChannelNonPremult(uint32_t src, uint8_t src_a,
                                      uint32_t dst, uint8_t dst_a,
                                      uint32_t scale, int shift) {
  const uint8_t src_channel = (src >> shift) & 0xff;
  const uint8_t dst_channel = (dst >> shift) & 0xff;
  const uint32_t blend_unscaled = src_channel * src_a + dst_channel * dst_a;
  assert(blend_unscaled < (1ULL << 32) / scale);
  return (blend_unscaled * scale) >> 24;
}

// Blend 'src' over 'dst' assuming they are NOT pre-multiplied by alpha.
static uint32_t BlendPixelNonPremult(uint32_t src, uint32_t dst) {
  const uint8_t src_a = (src >> 24) & 0xff;

  if (src_a == 0) {
    return dst;
  } else {
    const uint8_t dst_a = (dst >> 24) & 0xff;
    // This is the approximate integer arithmetic for the actual formula:
    // dst_factor_a = (dst_a * (255 - src_a)) / 255.
    const uint8_t dst_factor_a = (dst_a * (256 - src_a)) >> 8;
    const uint8_t blend_a = src_a + dst_factor_a;
    const uint32_t scale = (1UL << 24) / blend_a;

    const uint8_t blend_r =
        BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 0);
    const uint8_t blend_g =
        BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 8);
    const uint8_t blend_b =
        BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 16);
    assert(src_a + dst_factor_a < 256);

    return (blend_r << 0) |
           (blend_g << 8) |
           (blend_b << 16) |
           ((uint32_t)blend_a << 24);
  }
}

// Blend 'num_pixels' in 'src' over 'dst' assuming they are NOT pre-multiplied
// by alpha.
static void BlendPixelRowNonPremult(uint32_t* const src,
                                    const uint32_t* const dst, int num_pixels) {
  int i;
  for (i = 0; i < num_pixels; ++i) {
    const uint8_t src_alpha = (src[i] >> 24) & 0xff;
    if (src_alpha != 0xff) {
      src[i] = BlendPixelNonPremult(src[i], dst[i]);
    }
  }
}
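
// ChannelwiseMultiply() below scales all four 8-bit channels by an 8-bit
// factor using only two 32-bit multiplications: the R/B pair and the A/G pair
// are isolated with the 0x00FF00FF mask, multiplied, and recombined; the
// implicit ">> 8" of the A/G pair comes from keeping only the high byte of
// each 16-bit lane. Worked example (illustrative values, added for clarity):
//   ChannelwiseMultiply(0x80402010, 128) == 0x40201008, i.e. every channel
//   is halved.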

// Individually multiply each channel in 'pix' by 'scale'.
static WEBP_INLINE uint32_t ChannelwiseMultiply(uint32_t pix, uint32_t scale) {
  uint32_t mask = 0x00FF00FF;
  uint32_t rb = ((pix & mask) * scale) >> 8;
  uint32_t ag = ((pix >> 8) & mask) * scale;
  return (rb & mask) | (ag & ~mask);
}
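
// With pre-multiplied alpha the "over" operator needs no per-pixel division:
// every output channel (alpha included) is simply
//   out = src + dst * (1 - src_a),
// which BlendPixelPremult() below evaluates for the whole pixel with a single
// ChannelwiseMultiply(dst, 256 - src_a) call. (Explanatory note; the
// (256 - src_a) >> 8 form is the same integer approximation of
// (255 - src_a) / 255 used in the non-premultiplied path.)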

// Blend 'src' over 'dst' assuming they are pre-multiplied by alpha.
static uint32_t BlendPixelPremult(uint32_t src, uint32_t dst) {
  const uint8_t src_a = (src >> 24) & 0xff;
  return src + ChannelwiseMultiply(dst, 256 - src_a);
}

// Blend 'num_pixels' in 'src' over 'dst' assuming they are pre-multiplied by
// alpha.
static void BlendPixelRowPremult(uint32_t* const src, const uint32_t* const dst,
                                 int num_pixels) {
  int i;
  for (i = 0; i < num_pixels; ++i) {
    const uint8_t src_alpha = (src[i] >> 24) & 0xff;
    if (src_alpha != 0xff) {
      src[i] = BlendPixelPremult(src[i], dst[i]);
    }
  }
}
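
// Illustration of the two ranges computed by FindBlendRangeAtRow() below
// (hypothetical values, added for clarity): if the current frame spans
// columns [10, 90) on this row and the previous frame's rectangle spans
// columns [30, 60), the parts of the row that were NOT cleared by the
// previous frame's dispose-to-background are <left, width> = <10, 20> and
// <60, 30>; only those pixels still hold previous-canvas values and hence
// need alpha-blending.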

// Returns two ranges (<left, width> pairs) at row 'canvas_y' that belong to
// 'src' but not 'dst'. A range is empty if the corresponding width is 0.
static void FindBlendRangeAtRow(const WebPIterator* const src,
                                const WebPIterator* const dst, int canvas_y,
                                int* const left1, int* const width1,
                                int* const left2, int* const width2) {
  const int src_max_x = src->x_offset + src->width;
  const int dst_max_x = dst->x_offset + dst->width;
  const int dst_max_y = dst->y_offset + dst->height;
  assert(canvas_y >= src->y_offset && canvas_y < (src->y_offset + src->height));
  *left1 = -1;
  *width1 = 0;
  *left2 = -1;
  *width2 = 0;

  if (canvas_y < dst->y_offset || canvas_y >= dst_max_y ||
      src->x_offset >= dst_max_x || src_max_x <= dst->x_offset) {
    *left1 = src->x_offset;
    *width1 = src->width;
    return;
  }

  if (src->x_offset < dst->x_offset) {
    *left1 = src->x_offset;
    *width1 = dst->x_offset - src->x_offset;
  }

  if (src_max_x > dst_max_x) {
    *left2 = dst_max_x;
    *width2 = src_max_x - dst_max_x;
  }
}

int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
                           uint8_t** buf_ptr, int* timestamp_ptr) {
  WebPIterator iter;
  uint32_t width;
  uint32_t height;
  int is_key_frame;
  int timestamp;
  BlendRowFunc blend_row;

  if (dec == NULL || buf_ptr == NULL || timestamp_ptr == NULL) return 0;
  if (!WebPAnimDecoderHasMoreFrames(dec)) return 0;

  width = dec->info_.canvas_width;
  height = dec->info_.canvas_height;
  blend_row = dec->blend_func_;

  // Get compressed frame.
  if (!WebPDemuxGetFrame(dec->demux_, dec->next_frame_, &iter)) {
    return 0;
  }
  timestamp = dec->prev_frame_timestamp_ + iter.duration;

  // Initialize.
  is_key_frame = IsKeyFrame(&iter, &dec->prev_iter_,
                            dec->prev_frame_was_keyframe_, width, height);
  if (is_key_frame) {
    ZeroFillCanvas(dec->curr_frame_, width, height);
  } else {
    CopyCanvas(dec->prev_frame_disposed_, dec->curr_frame_, width, height);
  }

  // Decode.
  {
    const uint8_t* in = iter.fragment.bytes;
    const size_t in_size = iter.fragment.size;
    const size_t out_offset =
        (iter.y_offset * width + iter.x_offset) * NUM_CHANNELS;
    WebPDecoderConfig* const config = &dec->config_;
    WebPRGBABuffer* const buf = &config->output.u.RGBA;
    buf->stride = NUM_CHANNELS * width;
    buf->size = buf->stride * iter.height;
    buf->rgba = dec->curr_frame_ + out_offset;

    if (WebPDecode(in, in_size, config) != VP8_STATUS_OK) {
      goto Error;
    }
  }

  // During the decoding of the current frame, we may have set some pixels to
  // be transparent (i.e. alpha < 255). However, the value of each of these
  // pixels should have been determined by blending it against the value of
  // that pixel in the previous frame if the blend method is WEBP_MUX_BLEND.
  if (iter.frame_num > 1 && iter.blend_method == WEBP_MUX_BLEND &&
      !is_key_frame) {
    if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_NONE) {
      int y;
      // Blend transparent pixels with pixels in previous canvas.
      for (y = 0; y < iter.height; ++y) {
        const size_t offset =
            (iter.y_offset + y) * width + iter.x_offset;
        blend_row((uint32_t*)dec->curr_frame_ + offset,
                  (uint32_t*)dec->prev_frame_disposed_ + offset, iter.width);
      }
    } else {
      int y;
      assert(dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND);
      // We need to blend a transparent pixel with its value just after
      // initialization. That is, blend it with:
      // * Fully transparent pixel if it belongs to prevRect <-- No-op.
      // * The pixel in the previous canvas otherwise <-- Need alpha-blending.
      for (y = 0; y < iter.height; ++y) {
        const int canvas_y = iter.y_offset + y;
        int left1, width1, left2, width2;
        FindBlendRangeAtRow(&iter, &dec->prev_iter_, canvas_y, &left1, &width1,
                            &left2, &width2);
        if (width1 > 0) {
          const size_t offset1 = canvas_y * width + left1;
          blend_row((uint32_t*)dec->curr_frame_ + offset1,
                    (uint32_t*)dec->prev_frame_disposed_ + offset1, width1);
        }
        if (width2 > 0) {
          const size_t offset2 = canvas_y * width + left2;
          blend_row((uint32_t*)dec->curr_frame_ + offset2,
                    (uint32_t*)dec->prev_frame_disposed_ + offset2, width2);
        }
      }
    }
  }

  // Update info of the previous frame and dispose it for the next iteration.
  dec->prev_frame_timestamp_ = timestamp;
  dec->prev_iter_ = iter;
  dec->prev_frame_was_keyframe_ = is_key_frame;
  CopyCanvas(dec->curr_frame_, dec->prev_frame_disposed_, width, height);
  if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
    ZeroFillFrameRect(dec->prev_frame_disposed_, width * NUM_CHANNELS,
                      dec->prev_iter_.x_offset, dec->prev_iter_.y_offset,
                      dec->prev_iter_.width, dec->prev_iter_.height);
  }
  ++dec->next_frame_;

  // All OK, fill in the values.
  *buf_ptr = dec->curr_frame_;
  *timestamp_ptr = timestamp;
  return 1;

 Error:
  WebPDemuxReleaseIterator(&iter);
  return 0;
}

int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec) {
  if (dec == NULL) return 0;
  return (dec->next_frame_ <= (int)dec->info_.frame_count);
}

void WebPAnimDecoderReset(WebPAnimDecoder* dec) {
  if (dec != NULL) {
    dec->prev_frame_timestamp_ = 0;
    memset(&dec->prev_iter_, 0, sizeof(dec->prev_iter_));
    dec->prev_frame_was_keyframe_ = 0;
    dec->next_frame_ = 1;
  }
}

const WebPDemuxer* WebPAnimDecoderGetDemuxer(const WebPAnimDecoder* dec) {
  if (dec == NULL) return NULL;
  return dec->demux_;
}

void WebPAnimDecoderDelete(WebPAnimDecoder* dec) {
  if (dec != NULL) {
    WebPDemuxDelete(dec->demux_);
    WebPSafeFree(dec->curr_frame_);
    WebPSafeFree(dec->prev_frame_disposed_);
    WebPSafeFree(dec);
  }
}
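
// Typical usage of the API implemented in this file (an illustrative sketch,
// mirroring the public header; 'webp_data' is assumed to already hold a whole
// animated WebP file in memory and error handling is omitted):
//
//   WebPAnimDecoderOptions dec_options;
//   WebPAnimDecoderOptionsInit(&dec_options);
//   dec_options.color_mode = MODE_rgbA;   // Pre-multiplied output.
//   WebPAnimDecoder* const dec = WebPAnimDecoderNew(&webp_data, &dec_options);
//   WebPAnimInfo anim_info;
//   WebPAnimDecoderGetInfo(dec, &anim_info);
//   while (WebPAnimDecoderHasMoreFrames(dec)) {
//     uint8_t* frame_rgba;
//     int timestamp_ms;
//     WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp_ms);
//     // 'frame_rgba' points to the decoder-owned canvas of
//     // canvas_width * 4 * canvas_height bytes and remains valid until the
//     // next GetNext()/Reset()/Delete() call on 'dec'.
//   }
//   WebPAnimDecoderDelete(dec);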