| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 The LibYuv Project Authors. All rights reserved. | 2 * Copyright 2011 The LibYuv Project Authors. All rights reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 127 "vqadd.s16 q8, q0, q13 \n" /* B */ \ | 127 "vqadd.s16 q8, q0, q13 \n" /* B */ \ |
| 128 "vqadd.s16 q9, q0, q14 \n" /* R */ \ | 128 "vqadd.s16 q9, q0, q14 \n" /* R */ \ |
| 129 "vqadd.s16 q0, q0, q4 \n" /* G */ \ | 129 "vqadd.s16 q0, q0, q4 \n" /* G */ \ |
| 130 "vqadd.s16 q8, q8, q1 \n" /* B */ \ | 130 "vqadd.s16 q8, q8, q1 \n" /* B */ \ |
| 131 "vqadd.s16 q9, q9, q10 \n" /* R */ \ | 131 "vqadd.s16 q9, q9, q10 \n" /* R */ \ |
| 132 "vqsub.s16 q0, q0, q3 \n" /* G */ \ | 132 "vqsub.s16 q0, q0, q3 \n" /* G */ \ |
| 133 "vqshrun.s16 d20, q8, #6 \n" /* B */ \ | 133 "vqshrun.s16 d20, q8, #6 \n" /* B */ \ |
| 134 "vqshrun.s16 d22, q9, #6 \n" /* R */ \ | 134 "vqshrun.s16 d22, q9, #6 \n" /* R */ \ |
| 135 "vqshrun.s16 d21, q0, #6 \n" /* G */ | 135 "vqshrun.s16 d21, q0, #6 \n" /* G */ |
| 136 | 136 |
| 137 // YUV to RGB conversion constants. | 137 |
| 138 // BT.601 YUV to RGB reference |
| 139 // R = (Y - 16) * 1.164 - V * -1.596 |
| 140 // G = (Y - 16) * 1.164 - U * 0.391 - V * 0.813 |
| 141 // B = (Y - 16) * 1.164 - U * -2.018 |
| 142 |
| 138 // Y contribution to R,G,B. Scale and bias. | 143 // Y contribution to R,G,B. Scale and bias. |
| 144 // TODO(fbarchard): Consider moving constants into a common header. |
| 139 #define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */ | 145 #define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */ |
| 140 #define YGB 1160 /* 1.164 * 64 * 16 - adjusted for even error distribution */ | 146 #define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */ |
| 141 | 147 |
| 142 // U and V contributions to R,G,B. | 148 // U and V contributions to R,G,B. |
| 143 #define UB -128 /* -min(128, round(2.018 * 64)) */ | 149 #define UB -128 /* max(-128, round(-2.018 * 64)) */ |
| 144 #define UG 25 /* -round(-0.391 * 64) */ | 150 #define UG 25 /* round(0.391 * 64) */ |
| 145 #define VG 52 /* -round(-0.813 * 64) */ | 151 #define VG 52 /* round(0.813 * 64) */ |
| 146 #define VR -102 /* -round(1.596 * 64) */ | 152 #define VR -102 /* round(-1.596 * 64) */ |
| 147 | 153 |
| 148 // Bias values to subtract 16 from Y and 128 from U and V. | 154 // Bias values to subtract 16 from Y and 128 from U and V. |
| 149 #define BB (UB * 128 - YGB) | 155 #define BB (UB * 128 + YGB) |
| 150 #define BG (UG * 128 + VG * 128 - YGB) | 156 #define BG (UG * 128 + VG * 128 + YGB) |
| 151 #define BR (VR * 128 - YGB) | 157 #define BR (VR * 128 + YGB) |
| 152 | 158 |
| 153 YuvConstantsNEON SIMD_ALIGNED(kYuvConstantsNEON) = { | 159 YuvConstantsNEON SIMD_ALIGNED(kYuvConstantsNEON) = { |
| 154 { 128, 128, 128, 128, 102, 102, 102, 102, 0, 0, 0, 0, 0, 0, 0, 0 }, | 160 { -UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0 }, |
| 155 { 25, 25, 25, 25, 52, 52, 52, 52, 0, 0, 0, 0, 0, 0, 0, 0 }, | 161 { UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0 }, |
| 156 { BB, BG, BR, 0, 0, 0, 0, 0 }, | 162 { BB, BG, BR, 0, 0, 0, 0, 0 }, |
| 157 { 0x0101 * YG, 0, 0, 0 } | 163 { 0x0101 * YG, 0, 0, 0 } |
| 158 }; | 164 }; |
| 159 | 165 |
| 160 static uvec8 kUVToRB = { 128, 128, 128, 128, 102, 102, 102, 102, | 166 // TODO(fbarchard): replace these with structure. |
| 161 0, 0, 0, 0, 0, 0, 0, 0 }; | 167 static uvec8 kUVToRB = { -UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, |
| 162 static uvec8 kUVToG = { 25, 25, 25, 25, 52, 52, 52, 52, | 168 0, 0, 0, 0, 0, 0, 0, 0 }; |
| 169 static uvec8 kUVToG = { UG, UG, UG, UG, VG, VG, VG, VG, |
| 163 0, 0, 0, 0, 0, 0, 0, 0 }; | 170 0, 0, 0, 0, 0, 0, 0, 0 }; |
| 164 static vec16 kUVBiasBGR = { BB, BG, BR, 0, 0, 0, 0, 0 }; | 171 static vec16 kUVBiasBGR = { BB, BG, BR, 0, 0, 0, 0, 0 }; |
| 165 static vec32 kYToRgb = { 0x0101 * YG, 0, 0, 0 }; | 172 static vec32 kYToRgb = { 0x0101 * YG, 0, 0, 0 }; |
| 166 | 173 |
| 167 #undef YG | 174 #undef YG |
| 168 #undef YGB | 175 #undef YGB |
| 169 #undef UB | 176 #undef UB |
| 170 #undef UG | 177 #undef UG |
| 171 #undef VG | 178 #undef VG |
| 172 #undef VR | 179 #undef VR |
| (...skipping 2830 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3003 "r"(6) // %5 | 3010 "r"(6) // %5 |
| 3004 : "cc", "memory", "q0", "q1" // Clobber List | 3011 : "cc", "memory", "q0", "q1" // Clobber List |
| 3005 ); | 3012 ); |
| 3006 } | 3013 } |
| 3007 #endif // defined(__ARM_NEON__) && !defined(__aarch64__) | 3014 #endif // defined(__ARM_NEON__) && !defined(__aarch64__) |
| 3008 | 3015 |
| 3009 #ifdef __cplusplus | 3016 #ifdef __cplusplus |
| 3010 } // extern "C" | 3017 } // extern "C" |
| 3011 } // namespace libyuv | 3018 } // namespace libyuv |
| 3012 #endif | 3019 #endif |
| OLD | NEW |