OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The LibYuv project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The LibYuv project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 697 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
708 [width] "+r" (width), | 708 [width] "+r" (width), |
709 [rgb_buf] "+r" (rgb_buf) | 709 [rgb_buf] "+r" (rgb_buf) |
710 : | 710 : |
711 : "t0", "t1", "t2", "t3", "t4", "t5", | 711 : "t0", "t1", "t2", "t3", "t4", "t5", |
712 "t6", "t7", "t8", "t9", | 712 "t6", "t7", "t8", "t9", |
713 "s0", "s1", "s2", "s3", | 713 "s0", "s1", "s2", "s3", |
714 "s4", "s5", "s6" | 714 "s4", "s5", "s6" |
715 ); | 715 ); |
716 } | 716 } |
717 | 717 |
718 void I422ToBGRARow_MIPS_DSPR2(const uint8* y_buf, | |
719 const uint8* u_buf, | |
720 const uint8* v_buf, | |
721 uint8* rgb_buf, | |
722 const struct YuvConstants* yuvconstants, | |
723 int width) { | |
724 __asm__ __volatile__ ( | |
725 ".set push \n" | |
726 ".set noreorder \n" | |
727 "beqz %[width], 2f \n" // skip loop entirely when width == 0 | |
728 " repl.ph $s0, 74 \n" // |YG|YG| = |74 |74 | (branch delay slot) | |
729 "repl.ph $s1, -25 \n" // |UG|UG| = |-25|-25| | |
730 "repl.ph $s2, -52 \n" // |VG|VG| = |-52|-52| | |
731 "repl.ph $s3, 102 \n" // |VR|VR| = |102|102| | |
732 "repl.ph $s4, 16 \n" // |0|16|0|16| | |
733 "repl.ph $s5, 128 \n" // |128|128| | |
734 "lui $s6, 0xff \n" | |
735 "ori $s6, 0xff \n" // |00|ff|00|ff| alpha mask | |
736 | |
737 "1: \n" // loop: 4 pixels per iteration | |
738 YUVTORGB // macro defined earlier in this file (outside this view) | |
739 // Arranging into bgra format | |
740 "precr.qb.ph $t4, $t4, $t8 \n" // |B1|b1|G1|g1| | |
741 "precr.qb.ph $t5, $t5, $t9 \n" // |B0|b0|G0|g0| | |
742 "precrq.qb.ph $t8, $t4, $t5 \n" // |B1|G1|B0|G0| | |
743 "precr.qb.ph $t9, $t4, $t5 \n" // |b1|g1|b0|g0| | |
744 | |
745 "precr.qb.ph $t2, $t1, $t2 \n" // |R1|r1|R0|r0| | |
746 "addiu %[width], -4 \n" // consumed 4 pixels | |
747 "addiu %[y_buf], 4 \n" // advance past 4 luma bytes | |
748 "preceu.ph.qbla $t1, $t2 \n" // |0 |R1|0 |R0| | |
749 "preceu.ph.qbra $t2, $t2 \n" // |0 |r1|0 |r0| | |
750 "sll $t1, $t1, 8 \n" // |R1|0 |R0|0 | | |
751 "sll $t2, $t2, 8 \n" // |r1|0 |r0|0 | | |
752 "or $t1, $t1, $s6 \n" // |R1|ff|R0|ff| | |
753 "or $t2, $t2, $s6 \n" // |r1|ff|r0|ff| | |
754 "precrq.ph.w $t0, $t9, $t2 \n" // |b1|g1|r1|ff| | |
755 "precrq.ph.w $t3, $t8, $t1 \n" // |B1|G1|R1|ff| | |
756 "sll $t1, $t1, 16 \n" | |
757 "sll $t2, $t2, 16 \n" | |
758 "packrl.ph $t2, $t9, $t2 \n" // |b0|g0|r0|ff| | |
759 "packrl.ph $t1, $t8, $t1 \n" // |B0|G0|R0|ff| | |
760 // Store results. | |
761 "sw $t2, 0(%[rgb_buf]) \n" | |
762 "sw $t0, 4(%[rgb_buf]) \n" | |
763 "sw $t1, 8(%[rgb_buf]) \n" | |
764 "sw $t3, 12(%[rgb_buf]) \n" | |
765 "bnez %[width], 1b \n" // loop while pixels remain | |
766 " addiu %[rgb_buf], 16 \n" // 4 BGRA pixels = 16 bytes (branch delay slot) | |
767 "2: \n" | |
768 ".set pop \n" | |
769 :[y_buf] "+r" (y_buf), | |
770 [u_buf] "+r" (u_buf), | |
771 [v_buf] "+r" (v_buf), | |
772 [width] "+r" (width), | |
773 [rgb_buf] "+r" (rgb_buf) | |
774 : | |
775 : "t0", "t1", "t2", "t3", "t4", "t5", | |
776 "t6", "t7", "t8", "t9", | |
777 "s0", "s1", "s2", "s3", | |
778 "s4", "s5", "s6" | |
779 ); | |
780 } | |
781 | |
782 // Bilinear filter 8x2 -> 8x1 | 718 // Bilinear filter 8x2 -> 8x1 |
783 void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr, | 719 void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr, |
784 ptrdiff_t src_stride, int dst_width, | 720 ptrdiff_t src_stride, int dst_width, |
785 int source_y_fraction) { | 721 int source_y_fraction) { |
786 int y0_fraction = 256 - source_y_fraction; | 722 int y0_fraction = 256 - source_y_fraction; |
787 const uint8* src_ptr1 = src_ptr + src_stride; | 723 const uint8* src_ptr1 = src_ptr + src_stride; |
788 | 724 |
789 __asm__ __volatile__ ( | 725 __asm__ __volatile__ ( |
790 ".set push \n" | 726 ".set push \n" |
791 ".set noreorder \n" | 727 ".set noreorder \n" |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
837 ); | 773 ); |
838 } | 774 } |
839 #endif // __mips_dsp_rev >= 2 | 775 #endif // __mips_dsp_rev >= 2 |
840 | 776 |
841 #endif // defined(__mips__) | 777 #endif // defined(__mips__) |
842 | 778 |
843 #ifdef __cplusplus | 779 #ifdef __cplusplus |
844 } // extern "C" | 780 } // extern "C" |
845 } // namespace libyuv | 781 } // namespace libyuv |
846 #endif | 782 #endif |
OLD | NEW |