Index: source/planar_functions.cc |
diff --git a/source/planar_functions.cc b/source/planar_functions.cc |
index 811ee5b72ccfa5eaf719f785444ad1ef04140a44..f89838c110f71a7917f42bd62615aa14739836ba 100644 |
--- a/source/planar_functions.cc |
+++ b/source/planar_functions.cc |
@@ -31,6 +31,12 @@ void CopyPlane(const uint8* src_y, int src_stride_y, |
int width, int height) { |
int y; |
void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C; |
+ // Negative height means invert the image. |
[Review comment — fbarchard1, 2016/08/24 23:04:11: "remove" (drop this added negative-height block from CopyPlane)] |
+ if (height < 0) { |
+ height = -height; |
+ dst_y = dst_y + (height - 1) * dst_stride_y; |
+ dst_stride_y = -dst_stride_y; |
+ } |
// Coalesce rows. |
if (src_stride_y == width && |
dst_stride_y == width) { |
@@ -224,6 +230,77 @@ int I420ToI400(const uint8* src_y, int src_stride_y, |
return 0; |
} |
+LIBYUV_API |
+int SplitUVPlane(const uint8* src_uv, int src_stride_uv, |
+ uint8* dst_u, int dst_stride_u, |
+ uint8* dst_v, int dst_stride_v, |
+ int width, int height) { |
+ if (!src_uv || !dst_u || !dst_v || width <= 0 || height == 0) { |
+ return -1; |
+ } |
+ // Negative height means invert the image. |
+ if (height < 0) { |
+ height = -height; |
+ dst_u = dst_u + (height - 1) * dst_stride_u; |
+ dst_v = dst_v + (height - 1) * dst_stride_v; |
+ dst_stride_u = -dst_stride_u; |
+ dst_stride_v = -dst_stride_v; |
+ } |
+ // Coalesce rows. |
+ if (src_stride_uv == width * 2 && |
+ dst_stride_u == width && |
+ dst_stride_v == width) { |
+ width *= height; |
+ height = 1; |
+ src_stride_uv = dst_stride_u = dst_stride_v = 0; |
+ } |
+ SplitUVRowFunction SplitUVRow = GetOptimizedSplitUVRowFunction( |
+ src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, dst_stride_v, width); |
+ |
+ for (int y = 0; y < height; ++y) { |
+ // Copy a row of UV. |
+ SplitUVRow(src_uv, dst_u, dst_v, width); |
+ dst_u += dst_stride_u; |
+ dst_v += dst_stride_v; |
+ src_uv += src_stride_uv; |
+ } |
+ return 0; |
+} |
+ |
+LIBYUV_API |
+int MergeUVPlanes(const uint8* src_u, int src_stride_u, |
fbarchard1
2016/08/24 23:04:11
consider void return
|
+ const uint8* src_v, int src_stride_v, |
+ uint8* dst_uv, int dst_stride_uv, |
+ int width, int height) { |
+ if (!src_u || !src_v || !dst_uv || width <= 0 || height == 0) { |
+ return -1; |
+ } |
+ // Negative height means invert the image. |
+ if (height < 0) { |
+ height = -height; |
+ dst_uv = dst_uv + (height - 1) * dst_stride_uv; |
+ dst_stride_uv = -dst_stride_uv; |
+ } |
+ // Coalesce rows. |
+ if (src_stride_u == width && |
+ src_stride_v == width && |
+ dst_stride_uv == width * 2) { |
+ width *= height; |
+ height = 1; |
+ src_stride_u = src_stride_v = dst_stride_uv = 0; |
+ } |
+ MergeUVRowFunction MergeUVRow_ = GetOptimizedMergeUVRowFunction(width); |
+ |
+ for (int y = 0; y < height; ++y) { |
+ // Merge a row of U and V into a row of UV. |
+ MergeUVRow_(src_u, src_v, dst_uv, width); |
+ src_u += src_stride_u; |
+ src_v += src_stride_v; |
+ dst_uv += dst_stride_uv; |
+ } |
+ return 0; |
+} |
+ |
// Mirror a plane of data. |
void MirrorPlane(const uint8* src_y, int src_stride_y, |
uint8* dst_y, int dst_stride_y, |
@@ -2482,8 +2559,6 @@ int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2, |
int width, int height) { |
int y; |
int halfwidth = (width + 1) >> 1; |
- void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, |
[Review comment — fbarchard1, 2016/08/24 23:04:11: "change not needed for yuy2" (keep the local SplitUVRow pointer in YUY2ToNV12)] |
- int width) = SplitUVRow_C; |
void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr, |
ptrdiff_t src_stride, int dst_width, |
int source_y_fraction) = InterpolateRow_C; |
@@ -2498,30 +2573,6 @@ int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2, |
src_yuy2 = src_yuy2 + (height - 1) * src_stride_yuy2; |
src_stride_yuy2 = -src_stride_yuy2; |
} |
-#if defined(HAS_SPLITUVROW_SSE2) |
- if (TestCpuFlag(kCpuHasSSE2)) { |
- SplitUVRow = SplitUVRow_Any_SSE2; |
- if (IS_ALIGNED(width, 16)) { |
- SplitUVRow = SplitUVRow_SSE2; |
- } |
- } |
-#endif |
-#if defined(HAS_SPLITUVROW_AVX2) |
- if (TestCpuFlag(kCpuHasAVX2)) { |
- SplitUVRow = SplitUVRow_Any_AVX2; |
- if (IS_ALIGNED(width, 32)) { |
- SplitUVRow = SplitUVRow_AVX2; |
- } |
- } |
-#endif |
-#if defined(HAS_SPLITUVROW_NEON) |
- if (TestCpuFlag(kCpuHasNEON)) { |
- SplitUVRow = SplitUVRow_Any_NEON; |
- if (IS_ALIGNED(width, 16)) { |
- SplitUVRow = SplitUVRow_NEON; |
- } |
- } |
-#endif |
#if defined(HAS_INTERPOLATEROW_SSSE3) |
if (TestCpuFlag(kCpuHasSSSE3)) { |
InterpolateRow = InterpolateRow_Any_SSSE3; |
@@ -2552,6 +2603,12 @@ int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2, |
// row of y and 2 rows of uv |
align_buffer_64(rows, awidth * 3); |
+ SplitUVRowFunction SplitUVRow = GetOptimizedSplitUVRowFunction( |
+ src_yuy2, src_stride_yuy2, |
+ rows, awidth, |
+ rows, awidth, |
+ awidth); |
+ |
for (y = 0; y < height - 1; y += 2) { |
// Split Y from UV. |
SplitUVRow(src_yuy2, rows, rows + awidth, awidth); |
@@ -2565,8 +2622,9 @@ int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2, |
} |
if (height & 1) { |
// Split Y from UV. |
- SplitUVRow(src_yuy2, rows, dst_uv, awidth); |
+ SplitUVRow(src_yuy2, rows, rows + awidth, awidth); |
memcpy(dst_y, rows, width); |
+ memcpy(dst_uv, rows + awidth, awidth); |
} |
free_aligned_buffer_64(rows); |
} |
@@ -2580,8 +2638,6 @@ int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy, |
int width, int height) { |
int y; |
int halfwidth = (width + 1) >> 1; |
- void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, |
- int width) = SplitUVRow_C; |
void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr, |
ptrdiff_t src_stride, int dst_width, |
int source_y_fraction) = InterpolateRow_C; |
@@ -2596,30 +2652,6 @@ int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy, |
src_uyvy = src_uyvy + (height - 1) * src_stride_uyvy; |
src_stride_uyvy = -src_stride_uyvy; |
} |
-#if defined(HAS_SPLITUVROW_SSE2) |
- if (TestCpuFlag(kCpuHasSSE2)) { |
- SplitUVRow = SplitUVRow_Any_SSE2; |
- if (IS_ALIGNED(width, 16)) { |
- SplitUVRow = SplitUVRow_SSE2; |
- } |
- } |
-#endif |
-#if defined(HAS_SPLITUVROW_AVX2) |
- if (TestCpuFlag(kCpuHasAVX2)) { |
- SplitUVRow = SplitUVRow_Any_AVX2; |
- if (IS_ALIGNED(width, 32)) { |
- SplitUVRow = SplitUVRow_AVX2; |
- } |
- } |
-#endif |
-#if defined(HAS_SPLITUVROW_NEON) |
- if (TestCpuFlag(kCpuHasNEON)) { |
- SplitUVRow = SplitUVRow_Any_NEON; |
- if (IS_ALIGNED(width, 16)) { |
- SplitUVRow = SplitUVRow_NEON; |
- } |
- } |
-#endif |
#if defined(HAS_INTERPOLATEROW_SSSE3) |
if (TestCpuFlag(kCpuHasSSSE3)) { |
InterpolateRow = InterpolateRow_Any_SSSE3; |
@@ -2650,6 +2682,12 @@ int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy, |
// row of y and 2 rows of uv |
align_buffer_64(rows, awidth * 3); |
+ SplitUVRowFunction SplitUVRow = GetOptimizedSplitUVRowFunction( |
+ src_uyvy, src_stride_uyvy, |
+ rows, awidth, |
+ rows, awidth, |
+ awidth); |
+ |
for (y = 0; y < height - 1; y += 2) { |
// Split Y from UV. |
SplitUVRow(src_uyvy, rows + awidth, rows, awidth); |
@@ -2663,8 +2701,9 @@ int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy, |
} |
if (height & 1) { |
// Split Y from UV. |
- SplitUVRow(src_uyvy, dst_uv, rows, awidth); |
+ SplitUVRow(src_uyvy, rows + awidth, rows, awidth); |
memcpy(dst_y, rows, width); |
+ memcpy(dst_uv, rows + awidth, awidth); |
} |
free_aligned_buffer_64(rows); |
} |