| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "./vpx_config.h" | 11 #include "./vpx_config.h" |
| 12 #include "vp9/common/vp9_loopfilter.h" | 12 #include "vp9/common/vp9_loopfilter.h" |
| 13 #include "vp9/common/vp9_onyxc_int.h" | 13 #include "vp9/common/vp9_onyxc_int.h" |
| 14 #include "vp9/common/vp9_reconinter.h" | 14 #include "vp9/common/vp9_reconinter.h" |
| 15 #include "vpx_mem/vpx_mem.h" | 15 #include "vpx_mem/vpx_mem.h" |
| 16 | 16 |
| 17 #include "vp9/common/vp9_seg_common.h" | 17 #include "vp9/common/vp9_seg_common.h" |
| 18 | 18 |
| 19 // 64 bit masks for left transform size. Each 1 represents a position where | 19 // 64 bit masks for left transform size. Each 1 represents a position where |
| 20 // we should apply a loop filter across the left border of an 8x8 block | 20 // we should apply a loop filter across the left border of an 8x8 block |
| 21 // boundary. | 21 // boundary. |
| 22 // | 22 // |
| 23 // In the case of TX_16X16-> ( in low order byte first we end up with | 23 // In the case of TX_16X16 -> ( low order byte first ) we end up with |
| 24 // a mask that looks like this | 24 // a mask that looks like this: |
| 25 // | 25 // |
| 26 // 10101010 | 26 // 10101010 |
| 27 // 10101010 | 27 // 10101010 |
| 28 // 10101010 | 28 // 10101010 |
| 29 // 10101010 | 29 // 10101010 |
| 30 // 10101010 | 30 // 10101010 |
| 31 // 10101010 | 31 // 10101010 |
| 32 // 10101010 | 32 // 10101010 |
| 33 // 10101010 | 33 // 10101010 |
| 34 // | 34 // |
| 35 // A loopfilter should be applied to every other 8x8 horizontally. | 35 // A loopfilter should be applied to every other 8x8 horizontally. |
| 36 static const uint64_t left_64x64_txform_mask[TX_SIZES]= { | 36 static const uint64_t left_64x64_txform_mask[TX_SIZES]= { |
| 37 0xffffffffffffffff, // TX_4X4 | 37 0xffffffffffffffff, // TX_4X4 |
| 38 0xffffffffffffffff, // TX_8x8 | 38 0xffffffffffffffff, // TX_8x8 |
| 39 0x5555555555555555, // TX_16x16 | 39 0x5555555555555555, // TX_16x16 |
| 40 0x1111111111111111, // TX_32x32 | 40 0x1111111111111111, // TX_32x32 |
| 41 }; | 41 }; |
| 42 | 42 |
| 43 // 64 bit masks for above transform size. Each 1 represents a position where | 43 // 64 bit masks for above transform size. Each 1 represents a position where |
| 44 // we should apply a loop filter across the top border of an 8x8 block | 44 // we should apply a loop filter across the top border of an 8x8 block |
| 45 // boundary. | 45 // boundary. |
| 46 // | 46 // |
| 47 // In the case of TX_32x32 -> ( in low order byte first we end up with | 47 // In the case of TX_32x32 -> ( low order byte first ) we end up with |
| 48 // a mask that looks like this | 48 // a mask that looks like this: |
| 49 // | 49 // |
| 50 // 11111111 | 50 // 11111111 |
| 51 // 00000000 | 51 // 00000000 |
| 52 // 00000000 | 52 // 00000000 |
| 53 // 00000000 | 53 // 00000000 |
| 54 // 11111111 | 54 // 11111111 |
| 55 // 00000000 | 55 // 00000000 |
| 56 // 00000000 | 56 // 00000000 |
| 57 // 00000000 | 57 // 00000000 |
| 58 // | 58 // |
| 59 // A loopfilter should be applied to every other 4 the row vertically. | 59 // A loopfilter should be applied to every 4th row vertically. |
| 60 static const uint64_t above_64x64_txform_mask[TX_SIZES]= { | 60 static const uint64_t above_64x64_txform_mask[TX_SIZES]= { |
| 61 0xffffffffffffffff, // TX_4X4 | 61 0xffffffffffffffff, // TX_4X4 |
| 62 0xffffffffffffffff, // TX_8x8 | 62 0xffffffffffffffff, // TX_8x8 |
| 63 0x00ff00ff00ff00ff, // TX_16x16 | 63 0x00ff00ff00ff00ff, // TX_16x16 |
| 64 0x000000ff000000ff, // TX_32x32 | 64 0x000000ff000000ff, // TX_32x32 |
| 65 }; | 65 }; |
| 66 | 66 |
| 67 // 64 bit masks for prediction sizes (left). Each 1 represents a position | 67 // 64 bit masks for prediction sizes (left). Each 1 represents a position |
| 68 // where left border of an 8x8 block. These are aligned to the right most | 68 // where we should apply a loop filter across the left border of an 8x8 |
| 69 // appropriate bit, and then shifted into place. | 69 // block. These are aligned to the rightmost appropriate bit, and then shifted into place. |
| 70 // | 70 // |
| 71 // In the case of TX_16x32 -> ( low order byte first ) we end up with | 71 // In the case of BLOCK_16X32 -> ( low order byte first ) we end up with |
| 72 // a mask that looks like this : | 72 // a mask that looks like this: |
| 73 // | 73 // |
| 74 // 10000000 | 74 // 10000000 |
| 75 // 10000000 | 75 // 10000000 |
| 76 // 10000000 | 76 // 10000000 |
| 77 // 10000000 | 77 // 10000000 |
| 78 // 00000000 | 78 // 00000000 |
| 79 // 00000000 | 79 // 00000000 |
| 80 // 00000000 | 80 // 00000000 |
| 81 // 00000000 | 81 // 00000000 |
| 82 static const uint64_t left_prediction_mask[BLOCK_SIZES] = { | 82 static const uint64_t left_prediction_mask[BLOCK_SIZES] = { |
| 83 0x0000000000000001, // BLOCK_4X4, | 83 0x0000000000000001, // BLOCK_4X4, |
| 84 0x0000000000000001, // BLOCK_4X8, | 84 0x0000000000000001, // BLOCK_4X8, |
| 85 0x0000000000000001, // BLOCK_8X4, | 85 0x0000000000000001, // BLOCK_8X4, |
| 86 0x0000000000000001, // BLOCK_8X8, | 86 0x0000000000000001, // BLOCK_8X8, |
| 87 0x0000000000000101, // BLOCK_8X16, | 87 0x0000000000000101, // BLOCK_8X16, |
| 88 0x0000000000000001, // BLOCK_16X8, | 88 0x0000000000000001, // BLOCK_16X8, |
| 89 0x0000000000000101, // BLOCK_16X16, | 89 0x0000000000000101, // BLOCK_16X16, |
| 90 0x0000000001010101, // BLOCK_16X32, | 90 0x0000000001010101, // BLOCK_16X32, |
| 91 0x0000000000000101, // BLOCK_32X16, | 91 0x0000000000000101, // BLOCK_32X16, |
| 92 0x0000000001010101, // BLOCK_32X32, | 92 0x0000000001010101, // BLOCK_32X32, |
| 93 0x0101010101010101, // BLOCK_32X64, | 93 0x0101010101010101, // BLOCK_32X64, |
| 94 0x0000000001010101, // BLOCK_64X32, | 94 0x0000000001010101, // BLOCK_64X32, |
| 95 0x0101010101010101, // BLOCK_64X64 | 95 0x0101010101010101, // BLOCK_64X64 |
| 96 }; | 96 }; |
| 97 | 97 |
| 98 // 64 bit mask to shift and set for each prediction size. | 98 // 64 bit mask to shift and set for each prediction size. |
| 99 static const uint64_t above_prediction_mask[BLOCK_SIZES] = { | 99 static const uint64_t above_prediction_mask[BLOCK_SIZES] = { |
| 100 0x0000000000000001, // BLOCK_4X4 | 100 0x0000000000000001, // BLOCK_4X4 |
| 101 0x0000000000000001, // BLOCK_4X8 | 101 0x0000000000000001, // BLOCK_4X8 |
| 102 0x0000000000000001, // BLOCK_8X4 | 102 0x0000000000000001, // BLOCK_8X4 |
| 103 0x0000000000000001, // BLOCK_8X8 | 103 0x0000000000000001, // BLOCK_8X8 |
| 104 0x0000000000000001, // BLOCK_8X16, | 104 0x0000000000000001, // BLOCK_8X16, |
| 105 0x0000000000000003, // BLOCK_16X8 | 105 0x0000000000000003, // BLOCK_16X8 |
| 106 0x0000000000000003, // BLOCK_16X16 | 106 0x0000000000000003, // BLOCK_16X16 |
| 107 0x0000000000000003, // BLOCK_16X32, | 107 0x0000000000000003, // BLOCK_16X32, |
| 108 0x000000000000000f, // BLOCK_32X16, | 108 0x000000000000000f, // BLOCK_32X16, |
| 109 0x000000000000000f, // BLOCK_32X32, | 109 0x000000000000000f, // BLOCK_32X32, |
| 110 0x000000000000000f, // BLOCK_32X64, | 110 0x000000000000000f, // BLOCK_32X64, |
| 111 0x00000000000000ff, // BLOCK_64X32, | 111 0x00000000000000ff, // BLOCK_64X32, |
| 112 0x00000000000000ff, // BLOCK_64X64 | 112 0x00000000000000ff, // BLOCK_64X64 |
| 113 }; | 113 }; |
| 114 // 64 bit mask to shift and set for each prediction size. A bit is set for | 114 // 64 bit mask to shift and set for each prediction size. A bit is set for |
| 115 // each 8x8 block that would be in the left most block of the given block | 115 // each 8x8 block that would be in the top left most block of the given block |
| 116 // size in the 64x64 block. | 116 // size in the 64x64 block. |
| 117 static const uint64_t size_mask[BLOCK_SIZES] = { | 117 static const uint64_t size_mask[BLOCK_SIZES] = { |
| 118 0x0000000000000001, // BLOCK_4X4 | 118 0x0000000000000001, // BLOCK_4X4 |
| 119 0x0000000000000001, // BLOCK_4X8 | 119 0x0000000000000001, // BLOCK_4X8 |
| 120 0x0000000000000001, // BLOCK_8X4 | 120 0x0000000000000001, // BLOCK_8X4 |
| 121 0x0000000000000001, // BLOCK_8X8 | 121 0x0000000000000001, // BLOCK_8X8 |
| 122 0x0000000000000101, // BLOCK_8X16, | 122 0x0000000000000101, // BLOCK_8X16, |
| 123 0x0000000000000003, // BLOCK_16X8 | 123 0x0000000000000003, // BLOCK_16X8 |
| 124 0x0000000000000303, // BLOCK_16X16 | 124 0x0000000000000303, // BLOCK_16X16 |
| 125 0x0000000003030303, // BLOCK_16X32, | 125 0x0000000003030303, // BLOCK_16X32, |
| 126 0x0000000000000f0f, // BLOCK_32X16, | 126 0x0000000000000f0f, // BLOCK_32X16, |
| 127 0x000000000f0f0f0f, // BLOCK_32X32, | 127 0x000000000f0f0f0f, // BLOCK_32X32, |
| 128 0x0f0f0f0f0f0f0f0f, // BLOCK_32X64, | 128 0x0f0f0f0f0f0f0f0f, // BLOCK_32X64, |
| 129 0x00000000ffffffff, // BLOCK_64X32, | 129 0x00000000ffffffff, // BLOCK_64X32, |
| 130 0xffffffffffffffff, // BLOCK_64X64 | 130 0xffffffffffffffff, // BLOCK_64X64 |
| 131 }; | 131 }; |
| 132 | 132 |
| 133 // These are used for masking the left and above borders. | 133 // These are used for masking the left and above borders. |
| 134 static const uint64_t left_border = 0x1111111111111111; | 134 static const uint64_t left_border = 0x1111111111111111; |
| 135 static const uint64_t above_border = 0x000000ff000000ff; | 135 static const uint64_t above_border = 0x000000ff000000ff; |
| 136 | 136 |
| 137 // 16 bit masks for uv transform sizes. | 137 // 16 bit masks for uv transform sizes. |
| 138 static const uint16_t left_64x64_txform_mask_uv[TX_SIZES]= { | 138 static const uint16_t left_64x64_txform_mask_uv[TX_SIZES]= { |
| 139 0xffff, // TX_4X4 | 139 0xffff, // TX_4X4 |
| 140 0xffff, // TX_8x8 | 140 0xffff, // TX_8x8 |
| 141 0x5555, // TX_16x16 | 141 0x5555, // TX_16x16 |
| 142 0x1111, // TX_32x32 | 142 0x1111, // TX_32x32 |
| 143 }; | 143 }; |
| 144 | 144 |
| 145 static const uint16_t above_64x64_txform_mask_uv[TX_SIZES]= { | 145 static const uint16_t above_64x64_txform_mask_uv[TX_SIZES]= { |
| 146 0xffff, // TX_4X4 | 146 0xffff, // TX_4X4 |
| 147 0xffff, // TX_8x8 | 147 0xffff, // TX_8x8 |
| 148 0x0f0f, // TX_16x16 | 148 0x0f0f, // TX_16x16 |
| 149 0x000f, // TX_32x32 | 149 0x000f, // TX_32x32 |
| 150 }; | 150 }; |
| 151 | 151 |
| 152 // 16 bit left mask to shift and set for each uv prediction size. | 152 // 16 bit left mask to shift and set for each uv prediction size. |
| 153 static const uint16_t left_prediction_mask_uv[BLOCK_SIZES] = { | 153 static const uint16_t left_prediction_mask_uv[BLOCK_SIZES] = { |
| 154 0x0001, // BLOCK_4X4, | 154 0x0001, // BLOCK_4X4, |
| 155 0x0001, // BLOCK_4X8, | 155 0x0001, // BLOCK_4X8, |
| 156 0x0001, // BLOCK_8X4, | 156 0x0001, // BLOCK_8X4, |
| 157 0x0001, // BLOCK_8X8, | 157 0x0001, // BLOCK_8X8, |
| 158 0x0001, // BLOCK_8X16, | 158 0x0001, // BLOCK_8X16, |
| 159 0x0001, // BLOCK_16X8, | 159 0x0001, // BLOCK_16X8, |
| 160 0x0001, // BLOCK_16X16, | 160 0x0001, // BLOCK_16X16, |
| 161 0x0011, // BLOCK_16X32, | 161 0x0011, // BLOCK_16X32, |
| 162 0x0001, // BLOCK_32X16, | 162 0x0001, // BLOCK_32X16, |
| 163 0x0011, // BLOCK_32X32, | 163 0x0011, // BLOCK_32X32, |
| 164 0x1111, // BLOCK_32X64 | 164 0x1111, // BLOCK_32X64 |
| 165 0x0011, // BLOCK_64X32, | 165 0x0011, // BLOCK_64X32, |
| 166 0x1111, // BLOCK_64X64 | 166 0x1111, // BLOCK_64X64 |
| 167 }; | 167 }; |
| 168 // 16 bit above mask to shift and set for uv each prediction size. | 168 // 16 bit above mask to shift and set for each uv prediction size. |
| 169 static const uint16_t above_prediction_mask_uv[BLOCK_SIZES] = { | 169 static const uint16_t above_prediction_mask_uv[BLOCK_SIZES] = { |
| 170 0x0001, // BLOCK_4X4 | 170 0x0001, // BLOCK_4X4 |
| 171 0x0001, // BLOCK_4X8 | 171 0x0001, // BLOCK_4X8 |
| 172 0x0001, // BLOCK_8X4 | 172 0x0001, // BLOCK_8X4 |
| 173 0x0001, // BLOCK_8X8 | 173 0x0001, // BLOCK_8X8 |
| 174 0x0001, // BLOCK_8X16, | 174 0x0001, // BLOCK_8X16, |
| 175 0x0001, // BLOCK_16X8 | 175 0x0001, // BLOCK_16X8 |
| 176 0x0001, // BLOCK_16X16 | 176 0x0001, // BLOCK_16X16 |
| 177 0x0001, // BLOCK_16X32, | 177 0x0001, // BLOCK_16X32, |
| 178 0x0003, // BLOCK_32X16, | 178 0x0003, // BLOCK_32X16, |
| 179 0x0003, // BLOCK_32X32, | 179 0x0003, // BLOCK_32X32, |
| 180 0x0003, // BLOCK_32X64, | 180 0x0003, // BLOCK_32X64, |
| 181 0x000f, // BLOCK_64X32, | 181 0x000f, // BLOCK_64X32, |
| 182 0x000f, // BLOCK_64X64 | 182 0x000f, // BLOCK_64X64 |
| 183 }; | 183 }; |
| 184 | 184 |
| 185 // 64 bit mask to shift and set for each uv prediction size | 185 // 16 bit mask to shift and set for each uv prediction size. |
| 186 static const uint16_t size_mask_uv[BLOCK_SIZES] = { | 186 static const uint16_t size_mask_uv[BLOCK_SIZES] = { |
| 187 0x0001, // BLOCK_4X4 | 187 0x0001, // BLOCK_4X4 |
| 188 0x0001, // BLOCK_4X8 | 188 0x0001, // BLOCK_4X8 |
| 189 0x0001, // BLOCK_8X4 | 189 0x0001, // BLOCK_8X4 |
| 190 0x0001, // BLOCK_8X8 | 190 0x0001, // BLOCK_8X8 |
| 191 0x0001, // BLOCK_8X16, | 191 0x0001, // BLOCK_8X16, |
| 192 0x0001, // BLOCK_16X8 | 192 0x0001, // BLOCK_16X8 |
| 193 0x0001, // BLOCK_16X16 | 193 0x0001, // BLOCK_16X16 |
| 194 0x0011, // BLOCK_16X32, | 194 0x0011, // BLOCK_16X32, |
| 195 0x0003, // BLOCK_32X16, | 195 0x0003, // BLOCK_32X16, |
| 196 0x0033, // BLOCK_32X32, | 196 0x0033, // BLOCK_32X32, |
| 197 0x3333, // BLOCK_32X64, | 197 0x3333, // BLOCK_32X64, |
| 198 0x00ff, // BLOCK_64X32, | 198 0x00ff, // BLOCK_64X32, |
| 199 0xffff, // BLOCK_64X64 | 199 0xffff, // BLOCK_64X64 |
| 200 }; | 200 }; |
| 201 static const uint16_t left_border_uv = 0x1111; | 201 static const uint16_t left_border_uv = 0x1111; |
| 202 static const uint16_t above_border_uv = 0x000f; | 202 static const uint16_t above_border_uv = 0x000f; |
| 203 | 203 |
| 204 static const int mode_lf_lut[MB_MODE_COUNT] = { | 204 static const int mode_lf_lut[MB_MODE_COUNT] = { |
| 205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // INTRA_MODES | 205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // INTRA_MODES |
| 206 1, 1, 0, 1 // INTER_MODES (ZEROMV == 0) | 206 1, 1, 0, 1 // INTER_MODES (ZEROMV == 0) |
| 207 }; | 207 }; |
| 208 | 208 |
| 209 static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) { | 209 static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) { |
| 210 int lvl; | 210 int lvl; |
| 211 | 211 |
| 212 // For each possible value for the loop filter fill out limits | 212 // For each possible value for the loop filter fill out limits |
| 213 for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) { | 213 for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) { |
| 214 // Set loop filter paramaeters that control sharpness. | 214 // Set loop filter parameters that control sharpness. |
| 215 int block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4)); | 215 int block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4)); |
| 216 | 216 |
| 217 if (sharpness_lvl > 0) { | 217 if (sharpness_lvl > 0) { |
| 218 if (block_inside_limit > (9 - sharpness_lvl)) | 218 if (block_inside_limit > (9 - sharpness_lvl)) |
| 219 block_inside_limit = (9 - sharpness_lvl); | 219 block_inside_limit = (9 - sharpness_lvl); |
| 220 } | 220 } |
| 221 | 221 |
| 222 if (block_inside_limit < 1) | 222 if (block_inside_limit < 1) |
| 223 block_inside_limit = 1; | 223 block_inside_limit = 1; |
| 224 | 224 |
| (...skipping 18 matching lines...) |
| 243 update_sharpness(lfi, lf->sharpness_level); | 243 update_sharpness(lfi, lf->sharpness_level); |
| 244 lf->last_sharpness_level = lf->sharpness_level; | 244 lf->last_sharpness_level = lf->sharpness_level; |
| 245 | 245 |
| 246 // init hev threshold const vectors | 246 // init hev threshold const vectors |
| 247 for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) | 247 for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) |
| 248 vpx_memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH); | 248 vpx_memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH); |
| 249 } | 249 } |
| 250 | 250 |
| 251 void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) { | 251 void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) { |
| 252 int seg_id; | 252 int seg_id; |
| 253 // n_shift is the a multiplier for lf_deltas | 253 // n_shift is the multiplier for lf_deltas |
| 254 // the multiplier is 1 for when filter_lvl is between 0 and 31; | 254 // the multiplier is 1 when filter_lvl is between 0 and 31, |
| 255 // 2 when filter_lvl is between 32 and 63 | 255 // and 2 when filter_lvl is between 32 and 63. |
| 256 const int scale = 1 << (default_filt_lvl >> 5); | 256 const int scale = 1 << (default_filt_lvl >> 5); |
| 257 loop_filter_info_n *const lfi = &cm->lf_info; | 257 loop_filter_info_n *const lfi = &cm->lf_info; |
| 258 struct loopfilter *const lf = &cm->lf; | 258 struct loopfilter *const lf = &cm->lf; |
| 259 const struct segmentation *const seg = &cm->seg; | 259 const struct segmentation *const seg = &cm->seg; |
| 260 | 260 |
| 261 // update limits if sharpness has changed | 261 // update limits if sharpness has changed |
| 262 if (lf->last_sharpness_level != lf->sharpness_level) { | 262 if (lf->last_sharpness_level != lf->sharpness_level) { |
| 263 update_sharpness(lfi, lf->sharpness_level); | 263 update_sharpness(lfi, lf->sharpness_level); |
| (...skipping 45 matching lines...) |
| 309 unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff; | 309 unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff; |
| 310 unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff; | 310 unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff; |
| 311 unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff; | 311 unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff; |
| 312 unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff; | 312 unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff; |
| 313 unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff; | 313 unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff; |
| 314 unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff; | 314 unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff; |
| 315 unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff; | 315 unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff; |
| 316 unsigned int mask; | 316 unsigned int mask; |
| 317 | 317 |
| 318 for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 | | 318 for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 | |
| 319 mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1; | 319 mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1; |
| 320 mask; mask >>= 1) { | 320 mask; mask >>= 1) { |
| 321 const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl; | 321 const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl; |
| 322 const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward); | 322 const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward); |
| 323 | 323 |
| 324 // TODO(yunqingwang): count in loopfilter functions should be removed. | 324 // TODO(yunqingwang): count in loopfilter functions should be removed. |
| 325 if (mask & 1) { | 325 if (mask & 1) { |
| 326 if ((mask_16x16_0 | mask_16x16_1) & 1) { | 326 if ((mask_16x16_0 | mask_16x16_1) & 1) { |
| 327 if ((mask_16x16_0 & mask_16x16_1) & 1) { | 327 if ((mask_16x16_0 & mask_16x16_1) & 1) { |
| 328 vp9_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim, | 328 vp9_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim, |
| 329 lfi0->hev_thr); | 329 lfi0->hev_thr); |
| 330 } else if (mask_16x16_0 & 1) { | 330 } else if (mask_16x16_0 & 1) { |
| (...skipping 151 matching lines...) |
| 482 s += 8 * count; | 482 s += 8 * count; |
| 483 lfl += count; | 483 lfl += count; |
| 484 mask_16x16 >>= count; | 484 mask_16x16 >>= count; |
| 485 mask_8x8 >>= count; | 485 mask_8x8 >>= count; |
| 486 mask_4x4 >>= count; | 486 mask_4x4 >>= count; |
| 487 mask_4x4_int >>= count; | 487 mask_4x4_int >>= count; |
| 488 } | 488 } |
| 489 } | 489 } |
| 490 | 490 |
| 491 // This function ors into the current lfm structure, where to do loop | 491 // This function ors into the current lfm structure where to do loop |
| 492 // filters for the specific mi we are looking at. It uses information | 492 // filters for the specific mi we are looking at. It uses information |
| 493 // including the block_size_type (32x16, 32x32, etc), the transform size, | 493 // including the block_size_type (32x16, 32x32, etc.), the transform size, |
| 494 // whether there were any coefficients encoded, and the loop filter strength | 494 // whether there were any coefficients encoded, and the loop filter strength |
| 495 // block we are currently looking at. Shift is used to position the | 495 // of the block we are currently looking at. Shift is used to position the |
| 496 // 1's we produce. | 496 // 1's we produce. |
| 497 // TODO(JBB) Need another function for different resolution color.. | 497 // TODO(JBB) Need another function for different resolution color.. |
| 498 static void build_masks(const loop_filter_info_n *const lfi_n, | 498 static void build_masks(const loop_filter_info_n *const lfi_n, |
| 499 const MODE_INFO *mi, const int shift_y, | 499 const MODE_INFO *mi, const int shift_y, |
| 500 const int shift_uv, | 500 const int shift_uv, |
| 501 LOOP_FILTER_MASK *lfm) { | 501 LOOP_FILTER_MASK *lfm) { |
| 502 const MB_MODE_INFO *mbmi = &mi->mbmi; | 502 const MB_MODE_INFO *mbmi = &mi->mbmi; |
| 503 const BLOCK_SIZE block_size = mbmi->sb_type; | 503 const BLOCK_SIZE block_size = mbmi->sb_type; |
| (...skipping 15 matching lines...) |
| 519 const int w = num_8x8_blocks_wide_lookup[block_size]; | 519 const int w = num_8x8_blocks_wide_lookup[block_size]; |
| 520 const int h = num_8x8_blocks_high_lookup[block_size]; | 520 const int h = num_8x8_blocks_high_lookup[block_size]; |
| 521 int index = shift_y; | 521 int index = shift_y; |
| 522 for (i = 0; i < h; i++) { | 522 for (i = 0; i < h; i++) { |
| 523 vpx_memset(&lfm->lfl_y[index], filter_level, w); | 523 vpx_memset(&lfm->lfl_y[index], filter_level, w); |
| 524 index += 8; | 524 index += 8; |
| 525 } | 525 } |
| 526 } | 526 } |
| 527 | 527 |
| 528 // These set 1 in the current block size for the block size edges. | 528 // These set 1 in the current block size for the block size edges. |
| 529 // For instance if the block size is 32x16, we'll set : | 529 // For instance if the block size is 32x16, we'll set: |
| 530 // above = 1111 | 530 // above = 1111 |
| 531 // 0000 | 531 // 0000 |
| 532 // and | 532 // and |
| 533 // left = 1000 | 533 // left = 1000 |
| 534 // = 1000 | 534 // = 1000 |
| 535 // NOTE : In this example the low bit is left most ( 1000 ) is stored as | 535 // NOTE: In this example the low bit is leftmost, so ( 1000 ) is stored as |
| 536 // 1, not 8... | 536 // 1, not 8. |
| 537 // | 537 // |
| 538 // U and v set things on a 16 bit scale. | 538 // U and V set things on a 16 bit scale. |
| 539 // | 539 // |
| 540 *above_y |= above_prediction_mask[block_size] << shift_y; | 540 *above_y |= above_prediction_mask[block_size] << shift_y; |
| 541 *above_uv |= above_prediction_mask_uv[block_size] << shift_uv; | 541 *above_uv |= above_prediction_mask_uv[block_size] << shift_uv; |
| 542 *left_y |= left_prediction_mask[block_size] << shift_y; | 542 *left_y |= left_prediction_mask[block_size] << shift_y; |
| 543 *left_uv |= left_prediction_mask_uv[block_size] << shift_uv; | 543 *left_uv |= left_prediction_mask_uv[block_size] << shift_uv; |
| 544 | 544 |
| 545 // If the block has no coefficients and is not intra we skip applying | 545 // If the block has no coefficients and is not intra we skip applying |
| 546 // the loop filter on block edges. | 546 // the loop filter on block edges. |
| 547 if (mbmi->skip && is_inter_block(mbmi)) | 547 if (mbmi->skip && is_inter_block(mbmi)) |
| 548 return; | 548 return; |
| 549 | 549 |
| 550 // Here we are adding a mask for the transform size. The transform | 550 // Here we are adding a mask for the transform size. The transform |
| 551 // size mask is set to be correct for a 64x64 prediction block size. We | 551 // size mask is set to be correct for a 64x64 prediction block size. We |
| 552 // mask to match the size of the block we are working on and then shift it | 552 // mask to match the size of the block we are working on and then shift it |
| 553 // into place.. | 553 // into place. |
| 554 *above_y |= (size_mask[block_size] & | 554 *above_y |= (size_mask[block_size] & |
| 555 above_64x64_txform_mask[tx_size_y]) << shift_y; | 555 above_64x64_txform_mask[tx_size_y]) << shift_y; |
| 556 *above_uv |= (size_mask_uv[block_size] & | 556 *above_uv |= (size_mask_uv[block_size] & |
| 557 above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; | 557 above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; |
| 558 | 558 |
| 559 *left_y |= (size_mask[block_size] & | 559 *left_y |= (size_mask[block_size] & |
| 560 left_64x64_txform_mask[tx_size_y]) << shift_y; | 560 left_64x64_txform_mask[tx_size_y]) << shift_y; |
| 561 *left_uv |= (size_mask_uv[block_size] & | 561 *left_uv |= (size_mask_uv[block_size] & |
| 562 left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; | 562 left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; |
| 563 | 563 |
| 564 // Here we are trying to determine what to do with the internal 4x4 block | 564 // Here we are trying to determine what to do with the internal 4x4 block |
| 565 // boundaries. These differ from the 4x4 boundaries on the outside edge of | 565 // boundaries. These differ from the 4x4 boundaries on the outside edge of |
| 566 // an 8x8 in that the internal ones can be skipped and don't depend on | 566 // an 8x8 in that the internal ones can be skipped and don't depend on |
| 567 // the prediction block size. | 567 // the prediction block size. |
| 568 if (tx_size_y == TX_4X4) | 568 if (tx_size_y == TX_4X4) |
| 569 *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffff) << shift_y; | 569 *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffff) << shift_y; |
| 570 | 570 |
| 571 if (tx_size_uv == TX_4X4) | 571 if (tx_size_uv == TX_4X4) |
| 572 *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv; | 572 *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv; |
| 573 } | 573 } |
| 574 | 574 |
| 575 // This function does the same thing as the one above with the exception that | 575 // This function does the same thing as the one above with the exception that |
| 576 // it only affects the y masks. It exists because for blocks < 16x16 in size, | 576 // it only affects the y masks. It exists because for blocks < 16x16 in size, |
| 577 // we only update u and v masks on the first block. | 577 // we only update u and v masks on the first block. |
| 578 static void build_y_mask(const loop_filter_info_n *const lfi_n, | 578 static void build_y_mask(const loop_filter_info_n *const lfi_n, |
| 579 const MODE_INFO *mi, const int shift_y, | 579 const MODE_INFO *mi, const int shift_y, |
| 580 LOOP_FILTER_MASK *lfm) { | 580 LOOP_FILTER_MASK *lfm) { |
| 581 const MB_MODE_INFO *mbmi = &mi->mbmi; | 581 const MB_MODE_INFO *mbmi = &mi->mbmi; |
| 582 const BLOCK_SIZE block_size = mbmi->sb_type; | 582 const BLOCK_SIZE block_size = mbmi->sb_type; |
| 583 const TX_SIZE tx_size_y = mbmi->tx_size; | 583 const TX_SIZE tx_size_y = mbmi->tx_size; |
| 584 const int filter_level = get_filter_level(lfi_n, mbmi); | 584 const int filter_level = get_filter_level(lfi_n, mbmi); |
| 585 uint64_t *const left_y = &lfm->left_y[tx_size_y]; | 585 uint64_t *const left_y = &lfm->left_y[tx_size_y]; |
| 586 uint64_t *const above_y = &lfm->above_y[tx_size_y]; | 586 uint64_t *const above_y = &lfm->above_y[tx_size_y]; |
| (...skipping 33 matching lines...) |
| 620 // TODO(JBB): This function only works for yv12. | 620 // TODO(JBB): This function only works for yv12. |
| 621 void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, | 621 void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, |
| 622 MODE_INFO **mi, const int mode_info_stride, | 622 MODE_INFO **mi, const int mode_info_stride, |
| 623 LOOP_FILTER_MASK *lfm) { | 623 LOOP_FILTER_MASK *lfm) { |
| 624 int idx_32, idx_16, idx_8; | 624 int idx_32, idx_16, idx_8; |
| 625 const loop_filter_info_n *const lfi_n = &cm->lf_info; | 625 const loop_filter_info_n *const lfi_n = &cm->lf_info; |
| 626 MODE_INFO **mip = mi; | 626 MODE_INFO **mip = mi; |
| 627 MODE_INFO **mip2 = mi; | 627 MODE_INFO **mip2 = mi; |
| 628 | 628 |
| 629 // These are offsets to the next mi in the 64x64 block. It is what gets | 629 // These are offsets to the next mi in the 64x64 block. It is what gets |
| 630 // added to the mi ptr as we go through each loop. It helps us to avoids | 630 // added to the mi ptr as we go through each loop. It helps us to avoid |
| 631 // setting up special row and column counters for each index. The last step | 631 // setting up special row and column counters for each index. The last step |
| 632 // brings us out back to the starting position. | 632 // brings us back to the starting position. |
| 633 const int offset_32[] = {4, (mode_info_stride << 2) - 4, 4, | 633 const int offset_32[] = {4, (mode_info_stride << 2) - 4, 4, |
| 634 -(mode_info_stride << 2) - 4}; | 634 -(mode_info_stride << 2) - 4}; |
| 635 const int offset_16[] = {2, (mode_info_stride << 1) - 2, 2, | 635 const int offset_16[] = {2, (mode_info_stride << 1) - 2, 2, |
| 636 -(mode_info_stride << 1) - 2}; | 636 -(mode_info_stride << 1) - 2}; |
| 637 const int offset[] = {1, mode_info_stride - 1, 1, -mode_info_stride - 1}; | 637 const int offset[] = {1, mode_info_stride - 1, 1, -mode_info_stride - 1}; |
| 638 | 638 |
| 639 // Following variables represent shifts to position the current block | 639 // Following variables represent shifts to position the current block |
| 640 // mask over the appropriate block. A shift of 36 to the left will move | 640 // mask over the appropriate block. A shift of 36 to the left will move |
| 641 // the bits for the final 32 by 32 block in the 64x64 up 4 rows and left | 641 // the bits for the final 32 by 32 block in the 64x64 down 4 rows and |
| 642 // 4 rows to the appropriate spot. | 642 // right 4 columns to the appropriate spot. |
| 643 const int shift_32_y[] = {0, 4, 32, 36}; | 643 const int shift_32_y[] = {0, 4, 32, 36}; |
| 644 const int shift_16_y[] = {0, 2, 16, 18}; | 644 const int shift_16_y[] = {0, 2, 16, 18}; |
| 645 const int shift_8_y[] = {0, 1, 8, 9}; | 645 const int shift_8_y[] = {0, 1, 8, 9}; |
| 646 const int shift_32_uv[] = {0, 2, 8, 10}; | 646 const int shift_32_uv[] = {0, 2, 8, 10}; |
| 647 const int shift_16_uv[] = {0, 1, 4, 5}; | 647 const int shift_16_uv[] = {0, 1, 4, 5}; |
| 648 int i; | 648 int i; |
| 649 const int max_rows = (mi_row + MI_BLOCK_SIZE > cm->mi_rows ? | 649 const int max_rows = (mi_row + MI_BLOCK_SIZE > cm->mi_rows ? |
| 650 cm->mi_rows - mi_row : MI_BLOCK_SIZE); | 650 cm->mi_rows - mi_row : MI_BLOCK_SIZE); |
| 651 const int max_cols = (mi_col + MI_BLOCK_SIZE > cm->mi_cols ? | 651 const int max_cols = (mi_col + MI_BLOCK_SIZE > cm->mi_cols ? |
| 652 cm->mi_cols - mi_col : MI_BLOCK_SIZE); | 652 cm->mi_cols - mi_col : MI_BLOCK_SIZE); |
| 653 | 653 |
| 654 vp9_zero(*lfm); | 654 vp9_zero(*lfm); |
| 655 assert(mip[0] != NULL); |
| 655 | 656 |
| 656 // TODO(jimbankoski): Try moving most of the following code into decode | 657 // TODO(jimbankoski): Try moving most of the following code into decode |
| 657 // loop and storing lfm in the mbmi structure so that we don't have to go | 658 // loop and storing lfm in the mbmi structure so that we don't have to go |
| 658 // through the recursive loop structure multiple times. | 659 // through the recursive loop structure multiple times. |
| 659 switch (mip[0]->mbmi.sb_type) { | 660 switch (mip[0]->mbmi.sb_type) { |
| 660 case BLOCK_64X64: | 661 case BLOCK_64X64: |
| 661 build_masks(lfi_n, mip[0] , 0, 0, lfm); | 662 build_masks(lfi_n, mip[0] , 0, 0, lfm); |
| 662 break; | 663 break; |
| 663 case BLOCK_64X32: | 664 case BLOCK_64X32: |
| 664 build_masks(lfi_n, mip[0], 0, 0, lfm); | 665 build_masks(lfi_n, mip[0], 0, 0, lfm); |
| (...skipping 95 matching lines...) |
| 760 break; | 761 break; |
| 761 } | 762 } |
| 762 // The largest loopfilter we have is 16x16 so we use the 16x16 mask | 763 // The largest loopfilter we have is 16x16 so we use the 16x16 mask |
| 763 // for 32x32 transforms also also. | 764 // for 32x32 transforms also. |
| 764 lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32]; | 765 lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32]; |
| 765 lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32]; | 766 lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32]; |
| 766 lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32]; | 767 lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32]; |
| 767 lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32]; | 768 lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32]; |
| 768 | 769 |
| 769 // We do at least 8 tap filter on every 32x32 even if the transform size | 770 // We do at least an 8 tap filter on every 32x32 even if the transform size |
| 770 // is 4x4. So if the 4x4 is set on a border pixel add it to the 8x8 and | 771 // is 4x4. So if the 4x4 is set on a border pixel add it to the 8x8 and |
| 771 // remove it from the 4x4. | 772 // remove it from the 4x4. |
| 772 lfm->left_y[TX_8X8] |= lfm->left_y[TX_4X4] & left_border; | 773 lfm->left_y[TX_8X8] |= lfm->left_y[TX_4X4] & left_border; |
| 773 lfm->left_y[TX_4X4] &= ~left_border; | 774 lfm->left_y[TX_4X4] &= ~left_border; |
| 774 lfm->above_y[TX_8X8] |= lfm->above_y[TX_4X4] & above_border; | 775 lfm->above_y[TX_8X8] |= lfm->above_y[TX_4X4] & above_border; |
| 775 lfm->above_y[TX_4X4] &= ~above_border; | 776 lfm->above_y[TX_4X4] &= ~above_border; |
| 776 lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_4X4] & left_border_uv; | 777 lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_4X4] & left_border_uv; |
| 777 lfm->left_uv[TX_4X4] &= ~left_border_uv; | 778 lfm->left_uv[TX_4X4] &= ~left_border_uv; |
| 778 lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_4X4] & above_border_uv; | 779 lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_4X4] & above_border_uv; |
| 779 lfm->above_uv[TX_4X4] &= ~above_border_uv; | 780 lfm->above_uv[TX_4X4] &= ~above_border_uv; |
| 780 | 781 |
| 781 // We do some special edge handling. | 782 // We do some special edge handling. |
| 782 if (mi_row + MI_BLOCK_SIZE > cm->mi_rows) { | 783 if (mi_row + MI_BLOCK_SIZE > cm->mi_rows) { |
| 783 const uint64_t rows = cm->mi_rows - mi_row; | 784 const uint64_t rows = cm->mi_rows - mi_row; |
| 784 | 785 |
| 785 // Each pixel inside the border gets a 1, | 786 // Each pixel inside the border gets a 1, |
| 786 const uint64_t mask_y = (((uint64_t) 1 << (rows << 3)) - 1); | 787 const uint64_t mask_y = (((uint64_t) 1 << (rows << 3)) - 1); |
| 787 const uint16_t mask_uv = (((uint16_t) 1 << (((rows + 1) >> 1) << 2)) - 1); | 788 const uint16_t mask_uv = (((uint16_t) 1 << (((rows + 1) >> 1) << 2)) - 1); |
| 788 | 789 |
| 789 // Remove values completely outside our border. | 790 // Remove values completely outside our border. |
| 790 for (i = 0; i < TX_32X32; i++) { | 791 for (i = 0; i < TX_32X32; i++) { |
| 791 lfm->left_y[i] &= mask_y; | 792 lfm->left_y[i] &= mask_y; |
| 792 lfm->above_y[i] &= mask_y; | 793 lfm->above_y[i] &= mask_y; |
| 793 lfm->left_uv[i] &= mask_uv; | 794 lfm->left_uv[i] &= mask_uv; |
| 794 lfm->above_uv[i] &= mask_uv; | 795 lfm->above_uv[i] &= mask_uv; |
| 795 } | 796 } |
| 796 lfm->int_4x4_y &= mask_y; | 797 lfm->int_4x4_y &= mask_y; |
| 797 lfm->int_4x4_uv &= mask_uv; | 798 lfm->int_4x4_uv &= mask_uv; |
| 798 | 799 |
| 799 // We don't apply a wide loop filter on the last uv block row. If set | 800 // We don't apply a wide loop filter on the last uv block row. If set, |
| 800 // apply the shorter one instead. | 801 // apply the shorter one instead. |
| 801 if (rows == 1) { | 802 if (rows == 1) { |
| 802 lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16]; | 803 lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16]; |
| 803 lfm->above_uv[TX_16X16] = 0; | 804 lfm->above_uv[TX_16X16] = 0; |
| 804 } | 805 } |
| 805 if (rows == 5) { | 806 if (rows == 5) { |
| 806 lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16] & 0xff00; | 807 lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16] & 0xff00; |
| 807 lfm->above_uv[TX_16X16] &= ~(lfm->above_uv[TX_16X16] & 0xff00); | 808 lfm->above_uv[TX_16X16] &= ~(lfm->above_uv[TX_16X16] & 0xff00); |
| 808 } | 809 } |
| 809 } | 810 } |
| (...skipping 13 matching lines...) |
| 823 // Remove the bits outside the image edge. | 824 // Remove the bits outside the image edge. |
| 824 for (i = 0; i < TX_32X32; i++) { | 825 for (i = 0; i < TX_32X32; i++) { |
| 825 lfm->left_y[i] &= mask_y; | 826 lfm->left_y[i] &= mask_y; |
| 826 lfm->above_y[i] &= mask_y; | 827 lfm->above_y[i] &= mask_y; |
| 827 lfm->left_uv[i] &= mask_uv; | 828 lfm->left_uv[i] &= mask_uv; |
| 828 lfm->above_uv[i] &= mask_uv; | 829 lfm->above_uv[i] &= mask_uv; |
| 829 } | 830 } |
| 830 lfm->int_4x4_y &= mask_y; | 831 lfm->int_4x4_y &= mask_y; |
| 831 lfm->int_4x4_uv &= mask_uv_int; | 832 lfm->int_4x4_uv &= mask_uv_int; |
| 832 | 833 |
| 833 // We don't apply a wide loop filter on the last uv column. If set | 834 // We don't apply a wide loop filter on the last uv column. If set, |
| 834 // apply the shorter one instead. | 835 // apply the shorter one instead. |
| 835 if (columns == 1) { | 836 if (columns == 1) { |
| 836 lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_16X16]; | 837 lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_16X16]; |
| 837 lfm->left_uv[TX_16X16] = 0; | 838 lfm->left_uv[TX_16X16] = 0; |
| 838 } | 839 } |
| 839 if (columns == 5) { | 840 if (columns == 5) { |
| 840 lfm->left_uv[TX_8X8] |= (lfm->left_uv[TX_16X16] & 0xcccc); | 841 lfm->left_uv[TX_8X8] |= (lfm->left_uv[TX_16X16] & 0xcccc); |
| 841 lfm->left_uv[TX_16X16] &= ~(lfm->left_uv[TX_16X16] & 0xcccc); | 842 lfm->left_uv[TX_16X16] &= ~(lfm->left_uv[TX_16X16] & 0xcccc); |
| 842 } | 843 } |
| 843 } | 844 } |
| 844 // We don't a loop filter on the first column in the image. Mask that out. | 845 // We don't apply a loop filter on the first column in the image, mask that |
| 846 // out. |
| 845 if (mi_col == 0) { | 847 if (mi_col == 0) { |
| 846 for (i = 0; i < TX_32X32; i++) { | 848 for (i = 0; i < TX_32X32; i++) { |
| 847 lfm->left_y[i] &= 0xfefefefefefefefe; | 849 lfm->left_y[i] &= 0xfefefefefefefefe; |
| 848 lfm->left_uv[i] &= 0xeeee; | 850 lfm->left_uv[i] &= 0xeeee; |
| 849 } | 851 } |
| 850 } | 852 } |
| 851 | 853 |
| 852 // Assert if we try to apply 2 different loop filters at the same position. | 854 // Assert if we try to apply 2 different loop filters at the same position. |
| 853 assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_8X8])); | 855 assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_8X8])); |
| 854 assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4])); | 856 assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4])); |
| (...skipping 390 matching lines...) |
| 1245 y_only); | 1247 y_only); |
| 1246 } | 1248 } |
| 1247 | 1249 |
| 1248 int vp9_loop_filter_worker(void *arg1, void *arg2) { | 1250 int vp9_loop_filter_worker(void *arg1, void *arg2) { |
| 1249 LFWorkerData *const lf_data = (LFWorkerData*)arg1; | 1251 LFWorkerData *const lf_data = (LFWorkerData*)arg1; |
| 1250 (void)arg2; | 1252 (void)arg2; |
| 1251 vp9_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, | 1253 vp9_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
| 1252 lf_data->start, lf_data->stop, lf_data->y_only); | 1254 lf_data->start, lf_data->stop, lf_data->y_only); |
| 1253 return 1; | 1255 return 1; |
| 1254 } | 1256 } |
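Two short standalone sketches follow (illustration only, not part of the change under review). The first decodes a 64-bit mask into the 8x8 grid of 8x8 blocks that covers a 64x64 superblock: bit (8 * row + col) corresponds to the block at (row, col), which is what the comments above mean by "low order byte first". Printing left_64x64_txform_mask[TX_16X16] and above_64x64_txform_mask[TX_32X32] reproduces the 10101010 and rows-0-and-4 diagrams from the comments.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustration only: decode a 64-bit loopfilter mask into the 8x8 grid of
 * 8x8 blocks covering a 64x64 superblock. Bit (8 * row + col) is the block
 * at (row, col), so the low-order byte is the top row of the diagram. */
static void print_mask(uint64_t mask) {
  int row, col;
  for (row = 0; row < 8; ++row) {
    for (col = 0; col < 8; ++col)
      putchar(((mask >> (row * 8 + col)) & 1) ? '1' : '0');
    putchar('\n');
  }
}

int main(void) {
  print_mask(0x5555555555555555ULL);  /* TX_16X16 left edges: 10101010 rows */
  putchar('\n');
  print_mask(0x000000ff000000ffULL);  /* TX_32X32 above edges: rows 0 and 4 */
  return 0;
}
```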
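The second sketches the placement arithmetic performed by build_masks(): the block's prediction-edge mask is ORed in directly, while the 64x64-wide transform mask is first cropped to the block's footprint with size_mask, and both are shifted by the bit index of the block's top-left 8x8 unit (8 * row + col, consistent with shift_32_y[] = {0, 4, 32, 36}). The helper name and the copied constants are chosen for this example; they are not identifiers from the file.

```c
#include <stdint.h>

/* Constants copied from the tables above for the one case exercised here. */
#define LEFT_PRED_MASK_32X16 0x0000000000000101ULL /* left_prediction_mask[BLOCK_32X16] */
#define SIZE_MASK_32X16      0x0000000000000f0fULL /* size_mask[BLOCK_32X16] */
#define LEFT_TX_MASK_16X16   0x5555555555555555ULL /* left_64x64_txform_mask[TX_16X16] */

/* Hypothetical helper mirroring the |= updates in build_masks(): OR in the
 * block's own left prediction edge, plus the transform edges cropped to the
 * block's footprint, both shifted into position inside the 64x64. */
static uint64_t place_left_y(uint64_t pred, uint64_t size, uint64_t tx,
                             int row8, int col8) {
  const int shift_y = 8 * row8 + col8;
  return (pred << shift_y) | ((size & tx) << shift_y);
}

int main(void) {
  /* A 32x16 block with 16x16 transforms at 8x8-unit offset (4, 0):
   * (0x0101 | (0x0f0f & 0x55...55)) << 32 == 0x0000050500000000. */
  const uint64_t left_y = place_left_y(LEFT_PRED_MASK_32X16, SIZE_MASK_32X16,
                                       LEFT_TX_MASK_16X16, 4, 0);
  return left_y == 0x0000050500000000ULL ? 0 : 1;
}
```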