OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2013 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2013 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "./vp9_rtcd.h" | 11 #include "./vp9_rtcd.h" |
12 #include "vp9/common/vp9_filter.h" | 12 #include "vp9/common/vp9_filter.h" |
13 #include "vp9/common/vp9_scale.h" | 13 #include "vp9/common/vp9_scale.h" |
14 | 14 |
15 static INLINE int scaled_x(int val, const struct scale_factors *scale) { | 15 static INLINE int scaled_x(int val, const struct scale_factors_common *sfc) { |
16 return val * scale->x_scale_fp >> REF_SCALE_SHIFT; | 16 return val * sfc->x_scale_fp >> REF_SCALE_SHIFT; |
17 } | 17 } |
18 | 18 |
19 static INLINE int scaled_y(int val, const struct scale_factors *scale) { | 19 static INLINE int scaled_y(int val, const struct scale_factors_common *sfc) { |
20 return val * scale->y_scale_fp >> REF_SCALE_SHIFT; | 20 return val * sfc->y_scale_fp >> REF_SCALE_SHIFT; |
21 } | 21 } |
22 | 22 |
23 static int unscaled_value(int val, const struct scale_factors *scale) { | 23 static int unscaled_value(int val, const struct scale_factors_common *sfc) { |
24 (void) scale; | 24 (void) sfc; |
25 return val; | 25 return val; |
26 } | 26 } |
27 | 27 |
28 static MV32 scaled_mv(const MV *mv, const struct scale_factors *scale) { | 28 static MV32 scaled_mv(const MV *mv, const struct scale_factors *scale) { |
29 const MV32 res = { | 29 const MV32 res = { |
30 scaled_y(mv->row, scale) + scale->y_offset_q4, | 30 scaled_y(mv->row, scale->sfc) + scale->y_offset_q4, |
31 scaled_x(mv->col, scale) + scale->x_offset_q4 | 31 scaled_x(mv->col, scale->sfc) + scale->x_offset_q4 |
32 }; | 32 }; |
33 return res; | 33 return res; |
34 } | 34 } |
35 | 35 |
36 static MV32 unscaled_mv(const MV *mv, const struct scale_factors *scale) { | 36 static MV32 unscaled_mv(const MV *mv, const struct scale_factors *scale) { |
37 const MV32 res = { | 37 const MV32 res = { |
38 mv->row, | 38 mv->row, |
39 mv->col | 39 mv->col |
40 }; | 40 }; |
41 return res; | 41 return res; |
42 } | 42 } |
43 | 43 |
44 static void set_offsets_with_scaling(struct scale_factors *scale, | 44 static void set_offsets_with_scaling(struct scale_factors *scale, |
45 int row, int col) { | 45 int row, int col) { |
46 scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale) & SUBPEL_MASK; | 46 scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK; |
47 scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale) & SUBPEL_MASK; | 47 scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK; |
48 } | 48 } |
49 | 49 |
50 static void set_offsets_without_scaling(struct scale_factors *scale, | 50 static void set_offsets_without_scaling(struct scale_factors *scale, |
51 int row, int col) { | 51 int row, int col) { |
52 scale->x_offset_q4 = 0; | 52 scale->x_offset_q4 = 0; |
53 scale->y_offset_q4 = 0; | 53 scale->y_offset_q4 = 0; |
54 } | 54 } |
55 | 55 |
56 static int get_fixed_point_scale_factor(int other_size, int this_size) { | 56 static int get_fixed_point_scale_factor(int other_size, int this_size) { |
57 // Calculate scaling factor once for each reference frame | 57 // Calculate scaling factor once for each reference frame |
58 // and use fixed point scaling factors in decoding and encoding routines. | 58 // and use fixed point scaling factors in decoding and encoding routines. |
59 // Hardware implementations can calculate the scale factor in the device | 59 // Hardware implementations can calculate the scale factor in the device |
60 // driver and use multiplication and shifting in hardware instead of division. | 60 // driver and use multiplication and shifting in hardware instead of division. |
61 return (other_size << REF_SCALE_SHIFT) / this_size; | 61 return (other_size << REF_SCALE_SHIFT) / this_size; |
62 } | 62 } |
63 | 63 |
64 static int check_scale_factors(int other_w, int other_h, | 64 static int check_scale_factors(int other_w, int other_h, |
65 int this_w, int this_h) { | 65 int this_w, int this_h) { |
66 return 2 * this_w >= other_w && | 66 return 2 * this_w >= other_w && |
67 2 * this_h >= other_h && | 67 2 * this_h >= other_h && |
68 this_w <= 16 * other_w && | 68 this_w <= 16 * other_w && |
69 this_h <= 16 * other_h; | 69 this_h <= 16 * other_h; |
70 } | 70 } |
71 | 71 |
72 void vp9_setup_scale_factors_for_frame(struct scale_factors *scale, | 72 void vp9_setup_scale_factors_for_frame(struct scale_factors *scale, |
| 73 struct scale_factors_common *scale_comm, |
73 int other_w, int other_h, | 74 int other_w, int other_h, |
74 int this_w, int this_h) { | 75 int this_w, int this_h) { |
75 if (!check_scale_factors(other_w, other_h, this_w, this_h)) { | 76 if (!check_scale_factors(other_w, other_h, this_w, this_h)) { |
76 scale->x_scale_fp = REF_INVALID_SCALE; | 77 scale_comm->x_scale_fp = REF_INVALID_SCALE; |
77 scale->y_scale_fp = REF_INVALID_SCALE; | 78 scale_comm->y_scale_fp = REF_INVALID_SCALE; |
78 return; | 79 return; |
79 } | 80 } |
80 | 81 |
81 scale->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w); | 82 scale_comm->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w); |
82 scale->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h); | 83 scale_comm->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h); |
83 scale->x_step_q4 = scaled_x(16, scale); | 84 scale_comm->x_step_q4 = scaled_x(16, scale_comm); |
84 scale->y_step_q4 = scaled_y(16, scale); | 85 scale_comm->y_step_q4 = scaled_y(16, scale_comm); |
85 scale->x_offset_q4 = 0; // calculated per block | |
86 scale->y_offset_q4 = 0; // calculated per block | |
87 | 86 |
88 if (vp9_is_scaled(scale)) { | 87 if (vp9_is_scaled(scale_comm)) { |
89 scale->scale_value_x = scaled_x; | 88 scale_comm->scale_value_x = scaled_x; |
90 scale->scale_value_y = scaled_y; | 89 scale_comm->scale_value_y = scaled_y; |
91 scale->set_scaled_offsets = set_offsets_with_scaling; | 90 scale_comm->set_scaled_offsets = set_offsets_with_scaling; |
92 scale->scale_mv = scaled_mv; | 91 scale_comm->scale_mv = scaled_mv; |
93 } else { | 92 } else { |
94 scale->scale_value_x = unscaled_value; | 93 scale_comm->scale_value_x = unscaled_value; |
95 scale->scale_value_y = unscaled_value; | 94 scale_comm->scale_value_y = unscaled_value; |
96 scale->set_scaled_offsets = set_offsets_without_scaling; | 95 scale_comm->set_scaled_offsets = set_offsets_without_scaling; |
97 scale->scale_mv = unscaled_mv; | 96 scale_comm->scale_mv = unscaled_mv; |
98 } | 97 } |
99 | 98 |
100 // TODO(agrange): Investigate the best choice of functions to use here | 99 // TODO(agrange): Investigate the best choice of functions to use here |
101 // for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what | 100 // for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what |
102 // to do at full-pel offsets. The current selection, where the filter is | 101 // to do at full-pel offsets. The current selection, where the filter is |
103 // applied in one direction only, and not at all for 0,0, seems to give the | 102 // applied in one direction only, and not at all for 0,0, seems to give the |
104 // best quality, but it may be worth trying an additional mode that does | 103 // best quality, but it may be worth trying an additional mode that does |
105 // do the filtering on full-pel. | 104 // do the filtering on full-pel. |
106 if (scale->x_step_q4 == 16) { | 105 if (scale_comm->x_step_q4 == 16) { |
107 if (scale->y_step_q4 == 16) { | 106 if (scale_comm->y_step_q4 == 16) { |
108 // No scaling in either direction. | 107 // No scaling in either direction. |
109 scale->predict[0][0][0] = vp9_convolve_copy; | 108 scale_comm->predict[0][0][0] = vp9_convolve_copy; |
110 scale->predict[0][0][1] = vp9_convolve_avg; | 109 scale_comm->predict[0][0][1] = vp9_convolve_avg; |
111 scale->predict[0][1][0] = vp9_convolve8_vert; | 110 scale_comm->predict[0][1][0] = vp9_convolve8_vert; |
112 scale->predict[0][1][1] = vp9_convolve8_avg_vert; | 111 scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert; |
113 scale->predict[1][0][0] = vp9_convolve8_horiz; | 112 scale_comm->predict[1][0][0] = vp9_convolve8_horiz; |
114 scale->predict[1][0][1] = vp9_convolve8_avg_horiz; | 113 scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz; |
115 } else { | 114 } else { |
116 // No scaling in the x direction. Must always scale in the y direction. | 115 // No scaling in the x direction. Must always scale in the y direction. |
117 scale->predict[0][0][0] = vp9_convolve8_vert; | 116 scale_comm->predict[0][0][0] = vp9_convolve8_vert; |
118 scale->predict[0][0][1] = vp9_convolve8_avg_vert; | 117 scale_comm->predict[0][0][1] = vp9_convolve8_avg_vert; |
119 scale->predict[0][1][0] = vp9_convolve8_vert; | 118 scale_comm->predict[0][1][0] = vp9_convolve8_vert; |
120 scale->predict[0][1][1] = vp9_convolve8_avg_vert; | 119 scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert; |
121 scale->predict[1][0][0] = vp9_convolve8; | 120 scale_comm->predict[1][0][0] = vp9_convolve8; |
122 scale->predict[1][0][1] = vp9_convolve8_avg; | 121 scale_comm->predict[1][0][1] = vp9_convolve8_avg; |
123 } | 122 } |
124 } else { | 123 } else { |
125 if (scale->y_step_q4 == 16) { | 124 if (scale_comm->y_step_q4 == 16) { |
126 // No scaling in the y direction. Must always scale in the x direction. | 125 // No scaling in the y direction. Must always scale in the x direction. |
127 scale->predict[0][0][0] = vp9_convolve8_horiz; | 126 scale_comm->predict[0][0][0] = vp9_convolve8_horiz; |
128 scale->predict[0][0][1] = vp9_convolve8_avg_horiz; | 127 scale_comm->predict[0][0][1] = vp9_convolve8_avg_horiz; |
129 scale->predict[0][1][0] = vp9_convolve8; | 128 scale_comm->predict[0][1][0] = vp9_convolve8; |
130 scale->predict[0][1][1] = vp9_convolve8_avg; | 129 scale_comm->predict[0][1][1] = vp9_convolve8_avg; |
131 scale->predict[1][0][0] = vp9_convolve8_horiz; | 130 scale_comm->predict[1][0][0] = vp9_convolve8_horiz; |
132 scale->predict[1][0][1] = vp9_convolve8_avg_horiz; | 131 scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz; |
133 } else { | 132 } else { |
134 // Must always scale in both directions. | 133 // Must always scale in both directions. |
135 scale->predict[0][0][0] = vp9_convolve8; | 134 scale_comm->predict[0][0][0] = vp9_convolve8; |
136 scale->predict[0][0][1] = vp9_convolve8_avg; | 135 scale_comm->predict[0][0][1] = vp9_convolve8_avg; |
137 scale->predict[0][1][0] = vp9_convolve8; | 136 scale_comm->predict[0][1][0] = vp9_convolve8; |
138 scale->predict[0][1][1] = vp9_convolve8_avg; | 137 scale_comm->predict[0][1][1] = vp9_convolve8_avg; |
139 scale->predict[1][0][0] = vp9_convolve8; | 138 scale_comm->predict[1][0][0] = vp9_convolve8; |
140 scale->predict[1][0][1] = vp9_convolve8_avg; | 139 scale_comm->predict[1][0][1] = vp9_convolve8_avg; |
141 } | 140 } |
142 } | 141 } |
143 // 2D subpel motion always gets filtered in both directions | 142 // 2D subpel motion always gets filtered in both directions |
144 scale->predict[1][1][0] = vp9_convolve8; | 143 scale_comm->predict[1][1][0] = vp9_convolve8; |
145 scale->predict[1][1][1] = vp9_convolve8_avg; | 144 scale_comm->predict[1][1][1] = vp9_convolve8_avg; |
| 145 |
| 146 scale->sfc = scale_comm; |
| 147 scale->x_offset_q4 = 0; // calculated per block |
| 148 scale->y_offset_q4 = 0; // calculated per block |
146 } | 149 } |
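
This change splits the old monolithic scale_factors into two pieces: the read-only, per-reference-frame state (the fixed-point scale factors, the 1/16-pel step sizes, and the scale_value_*/scale_mv/set_scaled_offsets/predict function pointers) moves into scale_factors_common, which can be shared, while the small mutable per-block state (x_offset_q4/y_offset_q4) stays in scale_factors, which now carries an sfc back-pointer set at the end of vp9_setup_scale_factors_for_frame(). Note that the predict table is indexed as predict[has_subpel_x][has_subpel_y][is_avg], which is why full-pel, non-averaging motion in the unscaled case lands on vp9_convolve_copy. A minimal caller-side sketch of the new API follows; the frame sizes, block position, and motion vector are invented for illustration, and the surrounding vp9 headers (vp9_scale.h, vp9_mv.h) are assumed:

  /* Hypothetical usage sketch -- not actual libvpx caller code. */
  struct scale_factors_common sfc;  /* shared: factors, steps, fn pointers */
  struct scale_factors sf;          /* per-block: x/y subpel offsets */

  /* Reference frame 1920x1080, current frame 960x540 (2:1 per axis). */
  vp9_setup_scale_factors_for_frame(&sf, &sfc, 1920, 1080, 960, 540);

  if (sfc.x_scale_fp != REF_INVALID_SCALE) {
    const int block_row = 64, block_col = 128;  /* pixel position (made up) */
    const MV mv = { 34, -51 };                  /* subpel motion vector */
    MV32 scaled;

    /* Per-block subpel offsets, then map the MV into the scaled frame. */
    sfc.set_scaled_offsets(&sf, block_row, block_col);
    scaled = sfc.scale_mv(&mv, &sf);
    /* scaled.row / scaled.col now drive the convolution chosen from
     * sfc.predict[has_subpel_x][has_subpel_y][is_avg]. */
  }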
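
For a concrete sense of the fixed-point arithmetic: REF_SCALE_SHIFT is 14 in vp9_scale.h, so the factors computed by get_fixed_point_scale_factor() are Q14 values and 16384 means "no scaling". Because check_scale_factors() requires the reference to be at most 2x larger and at most 16x smaller than the current frame in each dimension, valid factors fall in [1024, 32768]. A self-contained check of the 2:1 numbers used above (standalone C, with kRefScaleShift standing in for REF_SCALE_SHIFT):

  #include <assert.h>

  int main(void) {
    enum { kRefScaleShift = 14 };  /* mirrors REF_SCALE_SHIFT */
    /* 1920-wide reference, 960-wide current frame: a 2:1 downscale. */
    const int x_scale_fp = (1920 << kRefScaleShift) / 960;
    assert(x_scale_fp == 32768);  /* 2.0 in Q14 */
    /* x_step_q4 = scaled_x(16, sfc): each output pixel advances 32/16 = 2
     * reference pixels, so the predictor reads every other column. */
    assert(((16 * x_scale_fp) >> kRefScaleShift) == 32);
    return 0;
  }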