| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 The LibYuv Project Authors. All rights reserved. | 2 * Copyright 2011 The LibYuv Project Authors. All rights reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 1001 matching lines...) |
| 1012 { BB, BG, BR, 0, 0, 0, 0, 0 }, | 1012 { BB, BG, BR, 0, 0, 0, 0, 0 }, |
| 1013 { 0x0101 * YG, 0, 0, 0 } | 1013 { 0x0101 * YG, 0, 0, 0 } |
| 1014 }; | 1014 }; |
| 1015 const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = { | 1015 const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = { |
| 1016 { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, | 1016 { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, |
| 1017 { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, | 1017 { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, |
| 1018 { BR, BG, BB, 0, 0, 0, 0, 0 }, | 1018 { BR, BG, BB, 0, 0, 0, 0, 0 }, |
| 1019 { 0x0101 * YG, 0, 0, 0 } | 1019 { 0x0101 * YG, 0, 0, 0 } |
| 1020 }; | 1020 }; |
| 1021 #else | 1021 #else |
| 1022 const struct YuvConstants SIMD_ALIGNED32(kYuvI601Constants) = { | 1022 const struct YuvConstants SIMD_ALIGNED(kYuvI601Constants) = { |
| 1023 { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, | 1023 { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, |
| 1024 UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, | 1024 UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, |
| 1025 { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, | 1025 { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, |
| 1026 UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, | 1026 UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, |
| 1027 { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, | 1027 { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, |
| 1028 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, | 1028 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, |
| 1029 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, | 1029 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, |
| 1030 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, | 1030 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, |
| 1031 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, | 1031 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, |
| 1032 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } | 1032 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } |
| 1033 }; | 1033 }; |
| 1034 const struct YuvConstants SIMD_ALIGNED32(kYvuI601Constants) = { | 1034 const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = { |
| 1035 { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, | 1035 { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, |
| 1036 VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, | 1036 VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, |
| 1037 { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, | 1037 { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, |
| 1038 VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, | 1038 VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, |
| 1039 { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, | 1039 { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, |
| 1040 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, | 1040 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, |
| 1041 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, | 1041 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, |
| 1042 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, | 1042 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, |
| 1043 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, | 1043 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, |
| 1044 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } | 1044 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } |
| (...skipping 54 matching lines...) |
| 1099 { BB, BG, BR, 0, 0, 0, 0, 0 }, | 1099 { BB, BG, BR, 0, 0, 0, 0, 0 }, |
| 1100 { 0x0101 * YG, 0, 0, 0 } | 1100 { 0x0101 * YG, 0, 0, 0 } |
| 1101 }; | 1101 }; |
| 1102 const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = { | 1102 const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = { |
| 1103 { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, | 1103 { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, |
| 1104 { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, | 1104 { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, |
| 1105 { BR, BG, BB, 0, 0, 0, 0, 0 }, | 1105 { BR, BG, BB, 0, 0, 0, 0, 0 }, |
| 1106 { 0x0101 * YG, 0, 0, 0 } | 1106 { 0x0101 * YG, 0, 0, 0 } |
| 1107 }; | 1107 }; |
| 1108 #else | 1108 #else |
| 1109 const struct YuvConstants SIMD_ALIGNED32(kYuvJPEGConstants) = { | 1109 const struct YuvConstants SIMD_ALIGNED(kYuvJPEGConstants) = { |
| 1110 { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, | 1110 { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, |
| 1111 UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, | 1111 UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, |
| 1112 { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, | 1112 { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, |
| 1113 UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, | 1113 UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, |
| 1114 { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, | 1114 { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, |
| 1115 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, | 1115 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, |
| 1116 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, | 1116 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, |
| 1117 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, | 1117 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, |
| 1118 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, | 1118 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, |
| 1119 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } | 1119 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } |
| 1120 }; | 1120 }; |
| 1121 const struct YuvConstants SIMD_ALIGNED32(kYvuJPEGConstants) = { | 1121 const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = { |
| 1122 { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, | 1122 { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, |
| 1123 VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, | 1123 VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, |
| 1124 { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, | 1124 { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, |
| 1125 VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, | 1125 VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, |
| 1126 { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, | 1126 { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, |
| 1127 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, | 1127 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, |
| 1128 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, | 1128 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, |
| 1129 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, | 1129 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, |
| 1130 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, | 1130 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, |
| 1131 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } | 1131 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } |
| (...skipping 55 matching lines...) |
| 1187 { BB, BG, BR, 0, 0, 0, 0, 0 }, | 1187 { BB, BG, BR, 0, 0, 0, 0, 0 }, |
| 1188 { 0x0101 * YG, 0, 0, 0 } | 1188 { 0x0101 * YG, 0, 0, 0 } |
| 1189 }; | 1189 }; |
| 1190 const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = { | 1190 const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = { |
| 1191 { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, | 1191 { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, |
| 1192 { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, | 1192 { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, |
| 1193 { BR, BG, BB, 0, 0, 0, 0, 0 }, | 1193 { BR, BG, BB, 0, 0, 0, 0, 0 }, |
| 1194 { 0x0101 * YG, 0, 0, 0 } | 1194 { 0x0101 * YG, 0, 0, 0 } |
| 1195 }; | 1195 }; |
| 1196 #else | 1196 #else |
| 1197 const struct YuvConstants SIMD_ALIGNED32(kYuvH709Constants) = { | 1197 const struct YuvConstants SIMD_ALIGNED(kYuvH709Constants) = { |
| 1198 { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, | 1198 { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, |
| 1199 UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, | 1199 UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, |
| 1200 { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, | 1200 { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, |
| 1201 UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, | 1201 UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, |
| 1202 { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, | 1202 { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, |
| 1203 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, | 1203 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, |
| 1204 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, | 1204 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, |
| 1205 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, | 1205 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, |
| 1206 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, | 1206 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, |
| 1207 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } | 1207 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } |
| 1208 }; | 1208 }; |
| 1209 const struct YuvConstants SIMD_ALIGNED32(kYvuH709Constants) = { | 1209 const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = { |
| 1210 { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, | 1210 { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, |
| 1211 VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, | 1211 VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, |
| 1212 { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, | 1212 { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, |
| 1213 VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, | 1213 VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, |
| 1214 { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, | 1214 { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, |
| 1215 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, | 1215 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, |
| 1216 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, | 1216 { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, |
| 1217 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, | 1217 { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, |
| 1218 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, | 1218 { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, |
| 1219 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } | 1219 { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } |
| (...skipping 1277 matching lines...) |
| 2497 } | 2497 } |
| 2498 #endif | 2498 #endif |
| 2499 | 2499 |
| 2500 #if defined(HAS_I422TORGB565ROW_AVX2) | 2500 #if defined(HAS_I422TORGB565ROW_AVX2) |
| 2501 void I422ToRGB565Row_AVX2(const uint8* src_y, | 2501 void I422ToRGB565Row_AVX2(const uint8* src_y, |
| 2502 const uint8* src_u, | 2502 const uint8* src_u, |
| 2503 const uint8* src_v, | 2503 const uint8* src_v, |
| 2504 uint8* dst_rgb565, | 2504 uint8* dst_rgb565, |
| 2505 const struct YuvConstants* yuvconstants, | 2505 const struct YuvConstants* yuvconstants, |
| 2506 int width) { | 2506 int width) { |
| 2507 SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); | 2507 SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); |
| 2508 while (width > 0) { | 2508 while (width > 0) { |
| 2509 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; | 2509 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; |
| 2510 I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); | 2510 I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); |
| 2511 #if defined(HAS_ARGBTORGB565ROW_AVX2) | 2511 #if defined(HAS_ARGBTORGB565ROW_AVX2) |
| 2512 ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); | 2512 ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); |
| 2513 #else | 2513 #else |
| 2514 ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); | 2514 ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); |
| 2515 #endif | 2515 #endif |
| 2516 src_y += twidth; | 2516 src_y += twidth; |
| 2517 src_u += twidth / 2; | 2517 src_u += twidth / 2; |
| 2518 src_v += twidth / 2; | 2518 src_v += twidth / 2; |
| 2519 dst_rgb565 += twidth * 2; | 2519 dst_rgb565 += twidth * 2; |
| 2520 width -= twidth; | 2520 width -= twidth; |
| 2521 } | 2521 } |
| 2522 } | 2522 } |
| 2523 #endif | 2523 #endif |
| 2524 | 2524 |
| 2525 #if defined(HAS_I422TOARGB1555ROW_AVX2) | 2525 #if defined(HAS_I422TOARGB1555ROW_AVX2) |
| 2526 void I422ToARGB1555Row_AVX2(const uint8* src_y, | 2526 void I422ToARGB1555Row_AVX2(const uint8* src_y, |
| 2527 const uint8* src_u, | 2527 const uint8* src_u, |
| 2528 const uint8* src_v, | 2528 const uint8* src_v, |
| 2529 uint8* dst_argb1555, | 2529 uint8* dst_argb1555, |
| 2530 const struct YuvConstants* yuvconstants, | 2530 const struct YuvConstants* yuvconstants, |
| 2531 int width) { | 2531 int width) { |
| 2532 // Row buffer for intermediate ARGB pixels. | 2532 // Row buffer for intermediate ARGB pixels. |
| 2533 SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); | 2533 SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); |
| 2534 while (width > 0) { | 2534 while (width > 0) { |
| 2535 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; | 2535 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; |
| 2536 I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); | 2536 I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); |
| 2537 #if defined(HAS_ARGBTOARGB1555ROW_AVX2) | 2537 #if defined(HAS_ARGBTOARGB1555ROW_AVX2) |
| 2538 ARGBToARGB1555Row_AVX2(row, dst_argb1555, twidth); | 2538 ARGBToARGB1555Row_AVX2(row, dst_argb1555, twidth); |
| 2539 #else | 2539 #else |
| 2540 ARGBToARGB1555Row_SSE2(row, dst_argb1555, twidth); | 2540 ARGBToARGB1555Row_SSE2(row, dst_argb1555, twidth); |
| 2541 #endif | 2541 #endif |
| 2542 src_y += twidth; | 2542 src_y += twidth; |
| 2543 src_u += twidth / 2; | 2543 src_u += twidth / 2; |
| 2544 src_v += twidth / 2; | 2544 src_v += twidth / 2; |
| 2545 dst_argb1555 += twidth * 2; | 2545 dst_argb1555 += twidth * 2; |
| 2546 width -= twidth; | 2546 width -= twidth; |
| 2547 } | 2547 } |
| 2548 } | 2548 } |
| 2549 #endif | 2549 #endif |
| 2550 | 2550 |
| 2551 #if defined(HAS_I422TOARGB4444ROW_AVX2) | 2551 #if defined(HAS_I422TOARGB4444ROW_AVX2) |
| 2552 void I422ToARGB4444Row_AVX2(const uint8* src_y, | 2552 void I422ToARGB4444Row_AVX2(const uint8* src_y, |
| 2553 const uint8* src_u, | 2553 const uint8* src_u, |
| 2554 const uint8* src_v, | 2554 const uint8* src_v, |
| 2555 uint8* dst_argb4444, | 2555 uint8* dst_argb4444, |
| 2556 const struct YuvConstants* yuvconstants, | 2556 const struct YuvConstants* yuvconstants, |
| 2557 int width) { | 2557 int width) { |
| 2558 // Row buffer for intermediate ARGB pixels. | 2558 // Row buffer for intermediate ARGB pixels. |
| 2559 SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); | 2559 SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); |
| 2560 while (width > 0) { | 2560 while (width > 0) { |
| 2561 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; | 2561 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; |
| 2562 I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); | 2562 I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); |
| 2563 #if defined(HAS_ARGBTOARGB4444ROW_AVX2) | 2563 #if defined(HAS_ARGBTOARGB4444ROW_AVX2) |
| 2564 ARGBToARGB4444Row_AVX2(row, dst_argb4444, twidth); | 2564 ARGBToARGB4444Row_AVX2(row, dst_argb4444, twidth); |
| 2565 #else | 2565 #else |
| 2566 ARGBToARGB4444Row_SSE2(row, dst_argb4444, twidth); | 2566 ARGBToARGB4444Row_SSE2(row, dst_argb4444, twidth); |
| 2567 #endif | 2567 #endif |
| 2568 src_y += twidth; | 2568 src_y += twidth; |
| 2569 src_u += twidth / 2; | 2569 src_u += twidth / 2; |
| 2570 src_v += twidth / 2; | 2570 src_v += twidth / 2; |
| 2571 dst_argb4444 += twidth * 2; | 2571 dst_argb4444 += twidth * 2; |
| 2572 width -= twidth; | 2572 width -= twidth; |
| 2573 } | 2573 } |
| 2574 } | 2574 } |
| 2575 #endif | 2575 #endif |
| 2576 | 2576 |
| 2577 #if defined(HAS_I422TORGB24ROW_AVX2) | 2577 #if defined(HAS_I422TORGB24ROW_AVX2) |
| 2578 void I422ToRGB24Row_AVX2(const uint8* src_y, | 2578 void I422ToRGB24Row_AVX2(const uint8* src_y, |
| 2579 const uint8* src_u, | 2579 const uint8* src_u, |
| 2580 const uint8* src_v, | 2580 const uint8* src_v, |
| 2581 uint8* dst_rgb24, | 2581 uint8* dst_rgb24, |
| 2582 const struct YuvConstants* yuvconstants, | 2582 const struct YuvConstants* yuvconstants, |
| 2583 int width) { | 2583 int width) { |
| 2584 // Row buffer for intermediate ARGB pixels. | 2584 // Row buffer for intermediate ARGB pixels. |
| 2585 SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); | 2585 SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); |
| 2586 while (width > 0) { | 2586 while (width > 0) { |
| 2587 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; | 2587 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; |
| 2588 I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); | 2588 I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); |
| 2589 // TODO(fbarchard): ARGBToRGB24Row_AVX2 | 2589 // TODO(fbarchard): ARGBToRGB24Row_AVX2 |
| 2590 ARGBToRGB24Row_SSSE3(row, dst_rgb24, twidth); | 2590 ARGBToRGB24Row_SSSE3(row, dst_rgb24, twidth); |
| 2591 src_y += twidth; | 2591 src_y += twidth; |
| 2592 src_u += twidth / 2; | 2592 src_u += twidth / 2; |
| 2593 src_v += twidth / 2; | 2593 src_v += twidth / 2; |
| 2594 dst_rgb24 += twidth * 3; | 2594 dst_rgb24 += twidth * 3; |
| 2595 width -= twidth; | 2595 width -= twidth; |
| 2596 } | 2596 } |
| 2597 } | 2597 } |
| 2598 #endif | 2598 #endif |
| 2599 | 2599 |
| 2600 #if defined(HAS_NV12TORGB565ROW_AVX2) | 2600 #if defined(HAS_NV12TORGB565ROW_AVX2) |
| 2601 void NV12ToRGB565Row_AVX2(const uint8* src_y, | 2601 void NV12ToRGB565Row_AVX2(const uint8* src_y, |
| 2602 const uint8* src_uv, | 2602 const uint8* src_uv, |
| 2603 uint8* dst_rgb565, | 2603 uint8* dst_rgb565, |
| 2604 const struct YuvConstants* yuvconstants, | 2604 const struct YuvConstants* yuvconstants, |
| 2605 int width) { | 2605 int width) { |
| 2606 // Row buffer for intermediate ARGB pixels. | 2606 // Row buffer for intermediate ARGB pixels. |
| 2607 SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); | 2607 SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); |
| 2608 while (width > 0) { | 2608 while (width > 0) { |
| 2609 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; | 2609 int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; |
| 2610 NV12ToARGBRow_AVX2(src_y, src_uv, row, yuvconstants, twidth); | 2610 NV12ToARGBRow_AVX2(src_y, src_uv, row, yuvconstants, twidth); |
| 2611 #if defined(HAS_ARGBTORGB565ROW_AVX2) | 2611 #if defined(HAS_ARGBTORGB565ROW_AVX2) |
| 2612 ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); | 2612 ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); |
| 2613 #else | 2613 #else |
| 2614 ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); | 2614 ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); |
| 2615 #endif | 2615 #endif |
| 2616 src_y += twidth; | 2616 src_y += twidth; |
| 2617 src_uv += twidth; | 2617 src_uv += twidth; |
| 2618 dst_rgb565 += twidth * 2; | 2618 dst_rgb565 += twidth * 2; |
| 2619 width -= twidth; | 2619 width -= twidth; |
| 2620 } | 2620 } |
| 2621 } | 2621 } |
| 2622 #endif | 2622 #endif |
| 2623 | 2623 |
| 2624 #ifdef __cplusplus | 2624 #ifdef __cplusplus |
| 2625 } // extern "C" | 2625 } // extern "C" |
| 2626 } // namespace libyuv | 2626 } // namespace libyuv |
| 2627 #endif | 2627 #endif |
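
For context on the two macros swapped throughout this diff: `SIMD_ALIGNED` and `SIMD_ALIGNED32` are variable-alignment wrappers from libyuv's `include/libyuv/row.h`, applied both to the `YuvConstants` tables and to the temporary `row[MAXTWIDTH * 4]` buffers in the AVX2 helpers. Below is a minimal sketch of how such macros are commonly defined; the specific alignment values (16 and 32 bytes) and the exact preprocessor guards are assumptions for illustration, not taken from this change.

```c
/* Sketch of typical SIMD alignment macros (alignment values are assumptions). */
#if defined(_MSC_VER) && !defined(__clang__)
#define SIMD_ALIGNED(var) __declspec(align(16)) var     /* 16-byte aligned (SSE2-width loads) */
#define SIMD_ALIGNED32(var) __declspec(align(32)) var   /* 32-byte aligned (AVX2-width loads) */
#elif defined(__GNUC__) || defined(__clang__)
#define SIMD_ALIGNED(var) var __attribute__((aligned(16)))
#define SIMD_ALIGNED32(var) var __attribute__((aligned(32)))
#else
#define SIMD_ALIGNED(var) var      /* no alignment guarantee on other compilers */
#define SIMD_ALIGNED32(var) var
#endif
```

With definitions like these, a declaration such as `SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]);` expands to an aligned stack array, which lets the SSE2/AVX2 row kernels use aligned loads and stores on the intermediate ARGB buffer.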