OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2011 The WebM project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 #include <assert.h> |
| 11 #include <stdlib.h> |
| 12 #include "vpx_config.h" |
| 13 #include "vp9/encoder/vp9_lookahead.h" |
| 14 #include "vp9/common/vp9_extend.h" |
| 15 |
| 16 #define MAX_LAG_BUFFERS 25 |
| 17 |
/* Circular queue of buffered source frames.
 *
 * Frames are written at write_idx by vp9_lookahead_push() and consumed
 * from read_idx by vp9_lookahead_pop(); both indices wrap modulo max_sz.
 */
struct lookahead_ctx {
  unsigned int max_sz;         /* Absolute size of the queue */
  unsigned int sz;             /* Number of buffers currently in the queue */
  unsigned int read_idx;       /* Read index */
  unsigned int write_idx;      /* Write index */
  struct lookahead_entry *buf; /* Buffer list */
};
| 25 |
| 26 |
| 27 /* Return the buffer at the given absolute index and increment the index */ |
| 28 static struct lookahead_entry * |
| 29 pop(struct lookahead_ctx *ctx, |
| 30 unsigned int *idx) { |
| 31 unsigned int index = *idx; |
| 32 struct lookahead_entry *buf = ctx->buf + index; |
| 33 |
| 34 assert(index < ctx->max_sz); |
| 35 if (++index >= ctx->max_sz) |
| 36 index -= ctx->max_sz; |
| 37 *idx = index; |
| 38 return buf; |
| 39 } |
| 40 |
| 41 |
| 42 void |
| 43 vp9_lookahead_destroy(struct lookahead_ctx *ctx) { |
| 44 if (ctx) { |
| 45 if (ctx->buf) { |
| 46 unsigned int i; |
| 47 |
| 48 for (i = 0; i < ctx->max_sz; i++) |
| 49 vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img); |
| 50 free(ctx->buf); |
| 51 } |
| 52 free(ctx); |
| 53 } |
| 54 } |
| 55 |
| 56 |
| 57 struct lookahead_ctx * |
| 58 vp9_lookahead_init(unsigned int width, |
| 59 unsigned int height, |
| 60 unsigned int depth) { |
| 61 struct lookahead_ctx *ctx = NULL; |
| 62 |
| 63 /* Clamp the lookahead queue depth */ |
| 64 if (depth < 1) |
| 65 depth = 1; |
| 66 else if (depth > MAX_LAG_BUFFERS) |
| 67 depth = MAX_LAG_BUFFERS; |
| 68 |
| 69 /* Align the buffer dimensions */ |
| 70 width = (width + 15) &~15; |
| 71 height = (height + 15) &~15; |
| 72 |
| 73 /* Allocate the lookahead structures */ |
| 74 ctx = calloc(1, sizeof(*ctx)); |
| 75 if (ctx) { |
| 76 unsigned int i; |
| 77 ctx->max_sz = depth; |
| 78 ctx->buf = calloc(depth, sizeof(*ctx->buf)); |
| 79 if (!ctx->buf) |
| 80 goto bail; |
| 81 for (i = 0; i < depth; i++) |
| 82 if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img, |
| 83 width, height, VP9BORDERINPIXELS)) |
| 84 goto bail; |
| 85 } |
| 86 return ctx; |
| 87 bail: |
| 88 vp9_lookahead_destroy(ctx); |
| 89 return NULL; |
| 90 } |
| 91 |
| 92 |
/* Add a source frame to the lookahead queue.
 *
 * Copies |src| into the next free queue slot and records the frame's
 * timestamps and flags on that entry.  Returns 1 if the queue is full,
 * 0 on success.
 *
 * When the queue depth is 1, an active map is supplied, and |flags| is
 * zero (not a key, golden, or altref frame), only the active regions of
 * the frame are copied.  |active_map| is read as a row-major map with one
 * byte per 16x16 macroblock; non-zero means active.
 */
int vp9_lookahead_push(struct lookahead_ctx *ctx,
                       YV12_BUFFER_CONFIG *src,
                       int64_t ts_start,
                       int64_t ts_end,
                       unsigned int flags,
                       unsigned char *active_map) {
  struct lookahead_entry *buf;
  int row, col, active_end;
  /* Frame size in whole 16x16 macroblocks, rounding up. */
  int mb_rows = (src->y_height + 15) >> 4;
  int mb_cols = (src->y_width + 15) >> 4;

  if (ctx->sz + 1 > ctx->max_sz)
    return 1;  /* queue full */
  ctx->sz++;
  buf = pop(ctx, &ctx->write_idx);

  // Only do this partial copy if the following conditions are all met:
  // 1. Lookahead queue has a size of 1.
  // 2. Active map is provided.
  // 3. This is not a key frame, golden nor altref frame.
  if (ctx->max_sz == 1 && active_map && !flags) {
    for (row = 0; row < mb_rows; ++row) {
      col = 0;

      while (1) {
        // Find the first active macroblock in this row.
        for (; col < mb_cols; ++col) {
          if (active_map[col])
            break;
        }

        // No more active macroblock in this row.
        if (col == mb_cols)
          break;

        // Find the end of active region in this row.
        active_end = col;

        for (; active_end < mb_cols; ++active_end) {
          if (!active_map[active_end])
            break;
        }

        // Only copy this active region (one macroblock row tall, in
        // pixel units: top, left, height, width).
        vp9_copy_and_extend_frame_with_rect(src, &buf->img,
                                            row << 4,
                                            col << 4, 16,
                                            (active_end - col) << 4);

        // Start again from the end of this active region.
        col = active_end;
      }

      // Advance to the next row of the macroblock active map.
      active_map += mb_cols;
    }
  } else {
    vp9_copy_and_extend_frame(src, &buf->img);
  }
  buf->ts_start = ts_start;
  buf->ts_end = ts_end;
  buf->flags = flags;
  return 0;
}
| 157 |
| 158 |
| 159 struct lookahead_entry * |
| 160 vp9_lookahead_pop(struct lookahead_ctx *ctx, |
| 161 int drain) { |
| 162 struct lookahead_entry *buf = NULL; |
| 163 |
| 164 if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) { |
| 165 buf = pop(ctx, &ctx->read_idx); |
| 166 ctx->sz--; |
| 167 } |
| 168 return buf; |
| 169 } |
| 170 |
| 171 |
| 172 struct lookahead_entry * |
| 173 vp9_lookahead_peek(struct lookahead_ctx *ctx, |
| 174 int index) { |
| 175 struct lookahead_entry *buf = NULL; |
| 176 |
| 177 assert(index < ctx->max_sz); |
| 178 if (index < (int)ctx->sz) { |
| 179 index += ctx->read_idx; |
| 180 if (index >= (int)ctx->max_sz) |
| 181 index -= ctx->max_sz; |
| 182 buf = ctx->buf + index; |
| 183 } |
| 184 return buf; |
| 185 } |
| 186 |
| 187 |
| 188 unsigned int |
| 189 vp9_lookahead_depth(struct lookahead_ctx *ctx) { |
| 190 return ctx->sz; |
| 191 } |
OLD | NEW |