Index: third_party/qcms/transform-sse2.c |
diff --git a/third_party/qcms/transform-sse2.c b/third_party/qcms/transform-sse2.c |
new file mode 100644 |
index 0000000000000000000000000000000000000000..6a5faf989014f4a00c6cf3df8b59842f016a4336 |
--- /dev/null |
+++ b/third_party/qcms/transform-sse2.c |
@@ -0,0 +1,262 @@ |
+// qcms |
+// Copyright (C) 2009 Mozilla Foundation |
+// |
+// Permission is hereby granted, free of charge, to any person obtaining |
+// a copy of this software and associated documentation files (the "Software"), |
+// to deal in the Software without restriction, including without limitation |
+// the rights to use, copy, modify, merge, publish, distribute, sublicense, |
+// and/or sell copies of the Software, and to permit persons to whom the Software |
+// is furnished to do so, subject to the following conditions: |
+// |
+// The above copyright notice and this permission notice shall be included in |
+// all copies or substantial portions of the Software. |
+// |
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO |
+// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE |
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION |
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
+ |
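+/* SSE2 versions of the qcms transform inner loops, written with the <emmintrin.h> intrinsics. */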
+#include <emmintrin.h> |
+ |
+#include "qcmsint.h" |
+ |
+/* pre-shuffled: load these directly into an XMM register instead of using a load-scalar/shufps sequence */
+#define FLOATSCALE (float)(PRECACHE_OUTPUT_SIZE) |
+#define CLAMPMAXVAL ( ((float) (PRECACHE_OUTPUT_SIZE - 1)) / PRECACHE_OUTPUT_SIZE ) |
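+/* Clamping to [0, CLAMPMAXVAL] and then scaling by FLOATSCALE yields an index
+ * in [0, PRECACHE_OUTPUT_SIZE - 1], so the output-table lookups below never
+ * read past the end of the precache tables. */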
+static const ALIGN float floatScaleX4[4] = |
+ { FLOATSCALE, FLOATSCALE, FLOATSCALE, FLOATSCALE}; |
+static const ALIGN float clampMaxValueX4[4] = |
+ { CLAMPMAXVAL, CLAMPMAXVAL, CLAMPMAXVAL, CLAMPMAXVAL}; |
+ |
+void qcms_transform_data_rgb_out_lut_sse2(qcms_transform *transform, |
+ unsigned char *src, |
+ unsigned char *dest, |
+ size_t length) |
+{ |
+	size_t i;
+ float (*mat)[4] = transform->matrix; |
+ char input_back[32]; |
+	/* Ensure we have a buffer that's 16-byte aligned regardless of the original
+	 * stack alignment. We can't use __attribute__((aligned(16))) or __declspec(align(16))
+	 * because they aren't reliably honored on stack variables. gcc 4.4 does do the right
+	 * thing on x86 but that's too new for us right now. For more info: gcc bug #16660 */
+ float const * input = (float*)(((uintptr_t)&input_back[16]) & ~0xf); |
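+	/* Rounding &input_back[16] down to a 16-byte boundary stays within input_back
+	 * and leaves at least 16 bytes after it, enough for one __m128i store. */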
+ /* share input and output locations to save having to keep the |
+ * locations in separate registers */ |
+ uint32_t const * output = (uint32_t*)input; |
+ |
+	/* dereference *transform now to avoid doing it inside the loop */
+ const float *igtbl_r = transform->input_gamma_table_r; |
+ const float *igtbl_g = transform->input_gamma_table_g; |
+ const float *igtbl_b = transform->input_gamma_table_b; |
+ |
+	/* dereference *transform now to avoid doing it inside the loop */
+ const uint8_t *otdata_r = &transform->output_table_r->data[0]; |
+ const uint8_t *otdata_g = &transform->output_table_g->data[0]; |
+ const uint8_t *otdata_b = &transform->output_table_b->data[0]; |
+ |
+ /* input matrix values never change */ |
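+	/* mat[0], mat[1] and mat[2] hold the coefficients applied to the r, g and b
+	 * channels respectively: the transformed pixel is r*mat0 + g*mat1 + b*mat2. */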
+ const __m128 mat0 = _mm_load_ps(mat[0]); |
+ const __m128 mat1 = _mm_load_ps(mat[1]); |
+ const __m128 mat2 = _mm_load_ps(mat[2]); |
+ |
+ /* these values don't change, either */ |
+ const __m128 max = _mm_load_ps(clampMaxValueX4); |
+ const __m128 min = _mm_setzero_ps(); |
+ const __m128 scale = _mm_load_ps(floatScaleX4); |
+ |
+ /* working variables */ |
+ __m128 vec_r, vec_g, vec_b, result; |
+ |
+	/* nothing to do for an empty buffer */
+ if (!length) |
+ return; |
+ |
+ /* one pixel is handled outside of the loop */ |
+ length--; |
+ |
+ /* setup for transforming 1st pixel */ |
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]); |
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]); |
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]); |
+ src += 3; |
+ |
+ /* transform all but final pixel */ |
+ |
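+	/* The loop is software-pipelined: the gamma lookups for the next pixel are
+	 * issued while the store of the current pixel's indices completes. The first
+	 * pixel's lookups were done above; the final pixel is finished after the loop. */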
+ for (i=0; i<length; i++) |
+ { |
+		/* broadcast each gamma-corrected channel value to all four lanes */
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0); |
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0); |
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0); |
+ |
+ /* gamma * matrix */ |
+ vec_r = _mm_mul_ps(vec_r, mat0); |
+ vec_g = _mm_mul_ps(vec_g, mat1); |
+ vec_b = _mm_mul_ps(vec_b, mat2); |
+ |
+		/* sum the channel contributions, clamp to [0, CLAMPMAXVAL] and scale to a table index */
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b)); |
+ vec_r = _mm_max_ps(min, vec_r); |
+ vec_r = _mm_min_ps(max, vec_r); |
+ result = _mm_mul_ps(vec_r, scale); |
+ |
+		/* store the computed output table indices */
+ _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result)); |
+ |
+		/* load the next pixel's gamma values while the store completes */
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]); |
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]); |
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]); |
+ src += 3; |
+ |
+		/* use the computed indices to look up the output RGB values */
+ dest[0] = otdata_r[output[0]]; |
+ dest[1] = otdata_g[output[1]]; |
+ dest[2] = otdata_b[output[2]]; |
+ dest += 3; |
+ } |
+ |
+ /* handle final (maybe only) pixel */ |
+ |
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0); |
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0); |
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0); |
+ |
+ vec_r = _mm_mul_ps(vec_r, mat0); |
+ vec_g = _mm_mul_ps(vec_g, mat1); |
+ vec_b = _mm_mul_ps(vec_b, mat2); |
+ |
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b)); |
+ vec_r = _mm_max_ps(min, vec_r); |
+ vec_r = _mm_min_ps(max, vec_r); |
+ result = _mm_mul_ps(vec_r, scale); |
+ |
+ _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result)); |
+ |
+ dest[0] = otdata_r[output[0]]; |
+ dest[1] = otdata_g[output[1]]; |
+ dest[2] = otdata_b[output[2]]; |
+} |
+ |
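+/* Same as qcms_transform_data_rgb_out_lut_sse2 above, except that the pixel
+ * stride is 4 bytes and the alpha byte is copied through unchanged. */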
+void qcms_transform_data_rgba_out_lut_sse2(qcms_transform *transform, |
+ unsigned char *src, |
+ unsigned char *dest, |
+ size_t length) |
+{ |
+	size_t i;
+ float (*mat)[4] = transform->matrix; |
+ char input_back[32]; |
+	/* Ensure we have a buffer that's 16-byte aligned regardless of the original
+	 * stack alignment. We can't use __attribute__((aligned(16))) or __declspec(align(16))
+	 * because they aren't reliably honored on stack variables. gcc 4.4 does do the right
+	 * thing on x86 but that's too new for us right now. For more info: gcc bug #16660 */
+ float const * input = (float*)(((uintptr_t)&input_back[16]) & ~0xf); |
+ /* share input and output locations to save having to keep the |
+ * locations in separate registers */ |
+ uint32_t const * output = (uint32_t*)input; |
+ |
+	/* dereference *transform now to avoid doing it inside the loop */
+ const float *igtbl_r = transform->input_gamma_table_r; |
+ const float *igtbl_g = transform->input_gamma_table_g; |
+ const float *igtbl_b = transform->input_gamma_table_b; |
+ |
+	/* dereference *transform now to avoid doing it inside the loop */
+ const uint8_t *otdata_r = &transform->output_table_r->data[0]; |
+ const uint8_t *otdata_g = &transform->output_table_g->data[0]; |
+ const uint8_t *otdata_b = &transform->output_table_b->data[0]; |
+ |
+ /* input matrix values never change */ |
+ const __m128 mat0 = _mm_load_ps(mat[0]); |
+ const __m128 mat1 = _mm_load_ps(mat[1]); |
+ const __m128 mat2 = _mm_load_ps(mat[2]); |
+ |
+ /* these values don't change, either */ |
+ const __m128 max = _mm_load_ps(clampMaxValueX4); |
+ const __m128 min = _mm_setzero_ps(); |
+ const __m128 scale = _mm_load_ps(floatScaleX4); |
+ |
+ /* working variables */ |
+ __m128 vec_r, vec_g, vec_b, result; |
+ unsigned char alpha; |
+ |
+	/* nothing to do for an empty buffer */
+ if (!length) |
+ return; |
+ |
+ /* one pixel is handled outside of the loop */ |
+ length--; |
+ |
+ /* setup for transforming 1st pixel */ |
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]); |
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]); |
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]); |
+ alpha = src[3]; |
+ src += 4; |
+ |
+ /* transform all but final pixel */ |
+ |
+ for (i=0; i<length; i++) |
+ { |
+		/* broadcast each gamma-corrected channel value to all four lanes */
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0); |
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0); |
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0); |
+ |
+ /* gamma * matrix */ |
+ vec_r = _mm_mul_ps(vec_r, mat0); |
+ vec_g = _mm_mul_ps(vec_g, mat1); |
+ vec_b = _mm_mul_ps(vec_b, mat2); |
+ |
+ /* store alpha for this pixel; load alpha for next */ |
+ dest[3] = alpha; |
+ alpha = src[3]; |
+ |
+		/* sum the channel contributions, clamp to [0, CLAMPMAXVAL] and scale to a table index */
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b)); |
+ vec_r = _mm_max_ps(min, vec_r); |
+ vec_r = _mm_min_ps(max, vec_r); |
+ result = _mm_mul_ps(vec_r, scale); |
+ |
+		/* store the computed output table indices */
+ _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result)); |
+ |
+ /* load gamma values for next loop while store completes */ |
+ vec_r = _mm_load_ss(&igtbl_r[src[0]]); |
+ vec_g = _mm_load_ss(&igtbl_g[src[1]]); |
+ vec_b = _mm_load_ss(&igtbl_b[src[2]]); |
+ src += 4; |
+ |
+		/* use the computed indices to look up the output RGB values */
+ dest[0] = otdata_r[output[0]]; |
+ dest[1] = otdata_g[output[1]]; |
+ dest[2] = otdata_b[output[2]]; |
+ dest += 4; |
+ } |
+ |
+ /* handle final (maybe only) pixel */ |
+ |
+ vec_r = _mm_shuffle_ps(vec_r, vec_r, 0); |
+ vec_g = _mm_shuffle_ps(vec_g, vec_g, 0); |
+ vec_b = _mm_shuffle_ps(vec_b, vec_b, 0); |
+ |
+ vec_r = _mm_mul_ps(vec_r, mat0); |
+ vec_g = _mm_mul_ps(vec_g, mat1); |
+ vec_b = _mm_mul_ps(vec_b, mat2); |
+ |
+ dest[3] = alpha; |
+ |
+ vec_r = _mm_add_ps(vec_r, _mm_add_ps(vec_g, vec_b)); |
+ vec_r = _mm_max_ps(min, vec_r); |
+ vec_r = _mm_min_ps(max, vec_r); |
+ result = _mm_mul_ps(vec_r, scale); |
+ |
+ _mm_store_si128((__m128i*)output, _mm_cvtps_epi32(result)); |
+ |
+ dest[0] = otdata_r[output[0]]; |
+ dest[1] = otdata_g[output[1]]; |
+ dest[2] = otdata_b[output[2]]; |
+} |