OLD | NEW |
(Empty) | |
| 1 // Copyright 2014 Google Inc. All Rights Reserved. |
| 2 // |
| 3 // Use of this source code is governed by a BSD-style license |
| 4 // that can be found in the COPYING file in the root of the source |
| 5 // tree. An additional intellectual property rights grant can be found |
| 6 // in the file PATENTS. All contributing project authors may |
| 7 // be found in the AUTHORS file in the root of the source tree. |
| 8 // ----------------------------------------------------------------------------- |
| 9 // |
| 10 // MIPS version of speed-critical encoding functions. |
| 11 // |
| 12 // Author(s): Djordje Pesut (djordje.pesut@imgtec.com) |
| 13 // Jovan Zelincevic (jovan.zelincevic@imgtec.com) |
| 14 // Slobodan Prijic (slobodan.prijic@imgtec.com) |
| 15 |
| 16 #include "./dsp.h" |
| 17 |
| 18 #if defined(WEBP_USE_MIPS32) |
| 19 |
| 20 #include "../enc/vp8enci.h" |
| 21 #include "../enc/cost.h" |
| 22 |
| 23 #if defined(__GNUC__) && defined(__ANDROID__) && LOCAL_GCC_VERSION == 0x409 |
| 24 #define WORK_AROUND_GCC |
| 25 #endif |
| 26 |
| 27 static const int kC1 = 20091 + (1 << 16); |
| 28 static const int kC2 = 35468; |
| 29 |
| 30 // macro for one vertical pass in ITransformOne |
| 31 // MUL macro inlined |
| 32 // temp0..temp15 hold tmp[0]..tmp[15]
| 33 // A..D - offsets in bytes to load from the in buffer
| 34 // TEMP0..TEMP3 - registers for the corresponding tmp elements
| 35 // TEMP4 - temporary register
| 36 #define VERTICAL_PASS(A, B, C, D, TEMP4, TEMP0, TEMP1, TEMP2, TEMP3) \ |
| 37 "lh %[temp16], "#A"(%[temp20]) \n\t" \ |
| 38 "lh %[temp18], "#B"(%[temp20]) \n\t" \ |
| 39 "lh %[temp17], "#C"(%[temp20]) \n\t" \ |
| 40 "lh %[temp19], "#D"(%[temp20]) \n\t" \ |
| 41 "addu %["#TEMP4"], %[temp16], %[temp18] \n\t" \ |
| 42 "subu %[temp16], %[temp16], %[temp18] \n\t" \ |
| 43 "mul %["#TEMP0"], %[temp17], %[kC2] \n\t" \ |
| 44 "mul %[temp18], %[temp19], %[kC1] \n\t" \ |
| 45 "mul %[temp17], %[temp17], %[kC1] \n\t" \ |
| 46 "mul %[temp19], %[temp19], %[kC2] \n\t" \ |
| 47 "sra %["#TEMP0"], %["#TEMP0"], 16 \n\n" \ |
| 48 "sra %[temp18], %[temp18], 16 \n\n" \ |
| 49 "sra %[temp17], %[temp17], 16 \n\n" \ |
| 50 "sra %[temp19], %[temp19], 16 \n\n" \ |
| 51 "subu %["#TEMP2"], %["#TEMP0"], %[temp18] \n\t" \ |
| 52 "addu %["#TEMP3"], %[temp17], %[temp19] \n\t" \ |
| 53 "addu %["#TEMP0"], %["#TEMP4"], %["#TEMP3"] \n\t" \ |
| 54 "addu %["#TEMP1"], %[temp16], %["#TEMP2"] \n\t" \ |
| 55 "subu %["#TEMP2"], %[temp16], %["#TEMP2"] \n\t" \ |
| 56 "subu %["#TEMP3"], %["#TEMP4"], %["#TEMP3"] \n\t" |
| 57 |
| 58 // macro for one horizontal pass in ITransformOne |
| 59 // MUL and STORE macros inlined |
| 60 // a = clip_8b(a) is replaced with: a = max(a, 0); a = min(a, 255) |
| 61 // temp0..temp15 hold tmp[0]..tmp[15]
| 62 // A..D - offsets in bytes to load from ref and store to dst buffer |
| 63 // TEMP0, TEMP4, TEMP8 and TEMP12 - registers for corresponding tmp elements |
| 64 #define HORIZONTAL_PASS(A, B, C, D, TEMP0, TEMP4, TEMP8, TEMP12) \ |
| 65 "addiu %["#TEMP0"], %["#TEMP0"], 4 \n\t" \ |
| 66 "addu %[temp16], %["#TEMP0"], %["#TEMP8"] \n\t" \ |
| 67 "subu %[temp17], %["#TEMP0"], %["#TEMP8"] \n\t" \ |
| 68 "mul %["#TEMP0"], %["#TEMP4"], %[kC2] \n\t" \ |
| 69 "mul %["#TEMP8"], %["#TEMP12"], %[kC1] \n\t" \ |
| 70 "mul %["#TEMP4"], %["#TEMP4"], %[kC1] \n\t" \ |
| 71 "mul %["#TEMP12"], %["#TEMP12"], %[kC2] \n\t" \ |
| 72 "sra %["#TEMP0"], %["#TEMP0"], 16 \n\t" \ |
| 73 "sra %["#TEMP8"], %["#TEMP8"], 16 \n\t" \ |
| 74 "sra %["#TEMP4"], %["#TEMP4"], 16 \n\t" \ |
| 75 "sra %["#TEMP12"], %["#TEMP12"], 16 \n\t" \ |
| 76 "subu %[temp18], %["#TEMP0"], %["#TEMP8"] \n\t" \ |
| 77 "addu %[temp19], %["#TEMP4"], %["#TEMP12"] \n\t" \ |
| 78 "addu %["#TEMP0"], %[temp16], %[temp19] \n\t" \ |
| 79 "addu %["#TEMP4"], %[temp17], %[temp18] \n\t" \ |
| 80 "subu %["#TEMP8"], %[temp17], %[temp18] \n\t" \ |
| 81 "subu %["#TEMP12"], %[temp16], %[temp19] \n\t" \ |
| 82 "lw %[temp20], 0(%[args]) \n\t" \ |
| 83 "sra %["#TEMP0"], %["#TEMP0"], 3 \n\t" \ |
| 84 "sra %["#TEMP4"], %["#TEMP4"], 3 \n\t" \ |
| 85 "sra %["#TEMP8"], %["#TEMP8"], 3 \n\t" \ |
| 86 "sra %["#TEMP12"], %["#TEMP12"], 3 \n\t" \ |
| 87 "lbu %[temp16], "#A"(%[temp20]) \n\t" \ |
| 88 "lbu %[temp17], "#B"(%[temp20]) \n\t" \ |
| 89 "lbu %[temp18], "#C"(%[temp20]) \n\t" \ |
| 90 "lbu %[temp19], "#D"(%[temp20]) \n\t" \ |
| 91 "addu %["#TEMP0"], %[temp16], %["#TEMP0"] \n\t" \ |
| 92 "addu %["#TEMP4"], %[temp17], %["#TEMP4"] \n\t" \ |
| 93 "addu %["#TEMP8"], %[temp18], %["#TEMP8"] \n\t" \ |
| 94 "addu %["#TEMP12"], %[temp19], %["#TEMP12"] \n\t" \ |
| 95 "slt %[temp16], %["#TEMP0"], $zero \n\t" \ |
| 96 "slt %[temp17], %["#TEMP4"], $zero \n\t" \ |
| 97 "slt %[temp18], %["#TEMP8"], $zero \n\t" \ |
| 98 "slt %[temp19], %["#TEMP12"], $zero \n\t" \ |
| 99 "movn %["#TEMP0"], $zero, %[temp16] \n\t" \ |
| 100 "movn %["#TEMP4"], $zero, %[temp17] \n\t" \ |
| 101 "movn %["#TEMP8"], $zero, %[temp18] \n\t" \ |
| 102 "movn %["#TEMP12"], $zero, %[temp19] \n\t" \ |
| 103 "addiu %[temp20], $zero, 255 \n\t" \ |
| 104 "slt %[temp16], %["#TEMP0"], %[temp20] \n\t" \ |
| 105 "slt %[temp17], %["#TEMP4"], %[temp20] \n\t" \ |
| 106 "slt %[temp18], %["#TEMP8"], %[temp20] \n\t" \ |
| 107 "slt %[temp19], %["#TEMP12"], %[temp20] \n\t" \ |
| 108 "movz %["#TEMP0"], %[temp20], %[temp16] \n\t" \ |
| 109 "movz %["#TEMP4"], %[temp20], %[temp17] \n\t" \ |
| 110 "lw %[temp16], 8(%[args]) \n\t" \ |
| 111 "movz %["#TEMP8"], %[temp20], %[temp18] \n\t" \ |
| 112 "movz %["#TEMP12"], %[temp20], %[temp19] \n\t" \ |
| 113 "sb %["#TEMP0"], "#A"(%[temp16]) \n\t" \ |
| 114 "sb %["#TEMP4"], "#B"(%[temp16]) \n\t" \ |
| 115 "sb %["#TEMP8"], "#C"(%[temp16]) \n\t" \ |
| 116 "sb %["#TEMP12"], "#D"(%[temp16]) \n\t" |
| 117 |
| 118 // Does one inverse transform (ITransform() below does one or two).
| 119 static WEBP_INLINE void ITransformOne(const uint8_t* ref, const int16_t* in, |
| 120 uint8_t* dst) { |
| 121 int temp0, temp1, temp2, temp3, temp4, temp5, temp6; |
| 122 int temp7, temp8, temp9, temp10, temp11, temp12, temp13; |
| 123 int temp14, temp15, temp16, temp17, temp18, temp19, temp20; |
| 124 const int* args[3] = {(const int*)ref, (const int*)in, (const int*)dst}; |
| 125 |
| 126 __asm__ volatile( |
| 127 "lw %[temp20], 4(%[args]) \n\t" |
| 128 VERTICAL_PASS(0, 16, 8, 24, temp4, temp0, temp1, temp2, temp3) |
| 129 VERTICAL_PASS(2, 18, 10, 26, temp8, temp4, temp5, temp6, temp7) |
| 130 VERTICAL_PASS(4, 20, 12, 28, temp12, temp8, temp9, temp10, temp11) |
| 131 VERTICAL_PASS(6, 22, 14, 30, temp20, temp12, temp13, temp14, temp15) |
| 132 |
| 133 HORIZONTAL_PASS( 0, 1, 2, 3, temp0, temp4, temp8, temp12) |
| 134 HORIZONTAL_PASS(16, 17, 18, 19, temp1, temp5, temp9, temp13) |
| 135 HORIZONTAL_PASS(32, 33, 34, 35, temp2, temp6, temp10, temp14) |
| 136 HORIZONTAL_PASS(48, 49, 50, 51, temp3, temp7, temp11, temp15) |
| 137 |
| 138 : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), |
| 139 [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), |
| 140 [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), |
| 141 [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11), |
| 142 [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14), |
| 143 [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17), |
| 144 [temp18]"=&r"(temp18), [temp19]"=&r"(temp19), [temp20]"=&r"(temp20) |
| 145 : [args]"r"(args), [kC1]"r"(kC1), [kC2]"r"(kC2) |
| 146 : "memory", "hi", "lo" |
| 147 ); |
| 148 } |
| 149 |
| 150 static void ITransform(const uint8_t* ref, const int16_t* in, |
| 151 uint8_t* dst, int do_two) { |
| 152 ITransformOne(ref, in, dst); |
| 153 if (do_two) { |
| 154 ITransformOne(ref + 4, in + 16, dst + 4); |
| 155 } |
| 156 } |
| 157 |
| 158 #undef VERTICAL_PASS |
| 159 #undef HORIZONTAL_PASS |
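// A scalar sketch, for orientation, of what the VERTICAL_PASS/HORIZONTAL_PASS
// macros above compute; it mirrors the generic C implementation, with the
// local helpers MUL_SK() and CLIP_SK() standing in for the usual MUL()/clip_8b().
#if 0
#define MUL_SK(a, b) (((a) * (b)) >> 16)
#define CLIP_SK(v) ((v) < 0 ? 0 : (v) > 255 ? 255 : (v))
static void ITransformOneSketch(const uint8_t* ref, const int16_t* in,
                                uint8_t* dst) {
  int C[16], *tmp = C;
  int i;
  for (i = 0; i < 4; ++i, ++in, tmp += 4) {   // vertical pass (columns)
    const int a = in[0] + in[8];
    const int b = in[0] - in[8];
    const int c = MUL_SK(in[4], kC2) - MUL_SK(in[12], kC1);
    const int d = MUL_SK(in[4], kC1) + MUL_SK(in[12], kC2);
    tmp[0] = a + d;  tmp[1] = b + c;  tmp[2] = b - c;  tmp[3] = a - d;
  }
  for (i = 0, tmp = C; i < 4; ++i, ++tmp, ref += BPS, dst += BPS) {
    const int dc = tmp[0] + 4;                // horizontal pass (rows) + rounding
    const int a = dc + tmp[8];
    const int b = dc - tmp[8];
    const int c = MUL_SK(tmp[4], kC2) - MUL_SK(tmp[12], kC1);
    const int d = MUL_SK(tmp[4], kC1) + MUL_SK(tmp[12], kC2);
    dst[0] = CLIP_SK(ref[0] + ((a + d) >> 3));
    dst[1] = CLIP_SK(ref[1] + ((b + c) >> 3));
    dst[2] = CLIP_SK(ref[2] + ((b - c) >> 3));
    dst[3] = CLIP_SK(ref[3] + ((a - d) >> 3));
  }
}
#endif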
| 160 |
| 161 // macro for one pass through the for loop in QuantizeBlock
| 162 // QUANTDIV macro inlined |
| 163 // J - offset in bytes (kZigzag[n] * 2) |
| 164 // K - offset in bytes (kZigzag[n] * 4) |
| 165 // N - offset in bytes (n * 2) |
| 166 #define QUANTIZE_ONE(J, K, N) \ |
| 167 "lh %[temp0], "#J"(%[ppin]) \n\t" \ |
| 168 "lhu %[temp1], "#J"(%[ppsharpen]) \n\t" \ |
| 169 "lw %[temp2], "#K"(%[ppzthresh]) \n\t" \ |
| 170 "sra %[sign], %[temp0], 15 \n\t" \ |
| 171 "xor %[coeff], %[temp0], %[sign] \n\t" \ |
| 172 "subu %[coeff], %[coeff], %[sign] \n\t" \ |
| 173 "addu %[coeff], %[coeff], %[temp1] \n\t" \ |
| 174 "slt %[temp4], %[temp2], %[coeff] \n\t" \ |
| 175 "addiu %[temp5], $zero, 0 \n\t" \ |
| 176 "addiu %[level], $zero, 0 \n\t" \ |
| 177 "beqz %[temp4], 2f \n\t" \ |
| 178 "lhu %[temp1], "#J"(%[ppiq]) \n\t" \ |
| 179 "lw %[temp2], "#K"(%[ppbias]) \n\t" \ |
| 180 "lhu %[temp3], "#J"(%[ppq]) \n\t" \ |
| 181 "mul %[level], %[coeff], %[temp1] \n\t" \ |
| 182 "addu %[level], %[level], %[temp2] \n\t" \ |
| 183 "sra %[level], %[level], 17 \n\t" \ |
| 184 "slt %[temp4], %[max_level], %[level] \n\t" \ |
| 185 "movn %[level], %[max_level], %[temp4] \n\t" \ |
| 186 "xor %[level], %[level], %[sign] \n\t" \ |
| 187 "subu %[level], %[level], %[sign] \n\t" \ |
| 188 "mul %[temp5], %[level], %[temp3] \n\t" \ |
| 189 "2: \n\t" \ |
| 190 "sh %[temp5], "#J"(%[ppin]) \n\t" \ |
| 191 "sh %[level], "#N"(%[pout]) \n\t" |
| 192 |
| 193 static int QuantizeBlock(int16_t in[16], int16_t out[16], |
| 194 const VP8Matrix* const mtx) { |
| 195 int temp0, temp1, temp2, temp3, temp4, temp5; |
| 196 int sign, coeff, level, i; |
| 197 int max_level = MAX_LEVEL; |
| 198 |
| 199 int16_t* ppin = &in[0]; |
| 200 int16_t* pout = &out[0]; |
| 201 const uint16_t* ppsharpen = &mtx->sharpen_[0]; |
| 202 const uint32_t* ppzthresh = &mtx->zthresh_[0]; |
| 203 const uint16_t* ppq = &mtx->q_[0]; |
| 204 const uint16_t* ppiq = &mtx->iq_[0]; |
| 205 const uint32_t* ppbias = &mtx->bias_[0]; |
| 206 |
| 207 __asm__ volatile( |
| 208 QUANTIZE_ONE( 0, 0, 0) |
| 209 QUANTIZE_ONE( 2, 4, 2) |
| 210 QUANTIZE_ONE( 8, 16, 4) |
| 211 QUANTIZE_ONE(16, 32, 6) |
| 212 QUANTIZE_ONE(10, 20, 8) |
| 213 QUANTIZE_ONE( 4, 8, 10) |
| 214 QUANTIZE_ONE( 6, 12, 12) |
| 215 QUANTIZE_ONE(12, 24, 14) |
| 216 QUANTIZE_ONE(18, 36, 16) |
| 217 QUANTIZE_ONE(24, 48, 18) |
| 218 QUANTIZE_ONE(26, 52, 20) |
| 219 QUANTIZE_ONE(20, 40, 22) |
| 220 QUANTIZE_ONE(14, 28, 24) |
| 221 QUANTIZE_ONE(22, 44, 26) |
| 222 QUANTIZE_ONE(28, 56, 28) |
| 223 QUANTIZE_ONE(30, 60, 30) |
| 224 |
| 225 : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), |
| 226 [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), |
| 227 [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), |
| 228 [sign]"=&r"(sign), [coeff]"=&r"(coeff), |
| 229 [level]"=&r"(level) |
| 230 : [pout]"r"(pout), [ppin]"r"(ppin), |
| 231 [ppiq]"r"(ppiq), [max_level]"r"(max_level), |
| 232 [ppbias]"r"(ppbias), [ppzthresh]"r"(ppzthresh), |
| 233 [ppsharpen]"r"(ppsharpen), [ppq]"r"(ppq) |
| 234 : "memory", "hi", "lo" |
| 235 ); |
| 236 |
| 237 // moved out of the macro to improve the chances of an early exit
| 238 for (i = 15; i >= 0; i--) { |
| 239 if (out[i]) return 1; |
| 240 } |
| 241 return 0; |
| 242 } |
| 243 |
| 244 #undef QUANTIZE_ONE |
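// A scalar sketch, for orientation, of the per-coefficient work that each
// QUANTIZE_ONE(J, K, N) expansion performs, with QUANTDIV inlined as
// (coeff * iq + bias) >> 17; kZigzag[] stands for the encoder's zigzag table
// from which the J/K/N byte offsets above are derived.
#if 0
static int QuantizeBlockSketch(int16_t in[16], int16_t out[16],
                               const VP8Matrix* const mtx) {
  int n, nz = 0;
  for (n = 0; n < 16; ++n) {
    const int j = kZigzag[n];
    const int sign = (in[j] < 0);
    const int coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
    int level = 0, new_coeff = 0;
    if (coeff > (int)mtx->zthresh_[j]) {
      level = (int)((coeff * mtx->iq_[j] + mtx->bias_[j]) >> 17);
      if (level > MAX_LEVEL) level = MAX_LEVEL;
      if (sign) level = -level;
      new_coeff = level * mtx->q_[j];
    }
    in[j] = new_coeff;
    out[n] = level;
    nz |= (level != 0);
  }
  return nz;   // same "any non-zero level?" result as the loop after the asm
}
#endif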
| 245 |
| 246 // macro for one horizontal pass in Disto4x4 (TTransform) |
| 247 // two calls of the TTransform function are merged into a single one
| 248 // A..D - offsets in bytes to load from a and b buffers |
| 249 // E..H - offsets in bytes to store first results to tmp buffer |
| 250 // E1..H1 - offsets in bytes to store second results to tmp buffer |
| 251 #define HORIZONTAL_PASS(A, B, C, D, E, F, G, H, E1, F1, G1, H1) \ |
| 252 "lbu %[temp0], "#A"(%[a]) \n\t" \ |
| 253 "lbu %[temp1], "#B"(%[a]) \n\t" \ |
| 254 "lbu %[temp2], "#C"(%[a]) \n\t" \ |
| 255 "lbu %[temp3], "#D"(%[a]) \n\t" \ |
| 256 "lbu %[temp4], "#A"(%[b]) \n\t" \ |
| 257 "lbu %[temp5], "#B"(%[b]) \n\t" \ |
| 258 "lbu %[temp6], "#C"(%[b]) \n\t" \ |
| 259 "lbu %[temp7], "#D"(%[b]) \n\t" \ |
| 260 "addu %[temp8], %[temp0], %[temp2] \n\t" \ |
| 261 "subu %[temp0], %[temp0], %[temp2] \n\t" \ |
| 262 "addu %[temp2], %[temp1], %[temp3] \n\t" \ |
| 263 "subu %[temp1], %[temp1], %[temp3] \n\t" \ |
| 264 "addu %[temp3], %[temp4], %[temp6] \n\t" \ |
| 265 "subu %[temp4], %[temp4], %[temp6] \n\t" \ |
| 266 "addu %[temp6], %[temp5], %[temp7] \n\t" \ |
| 267 "subu %[temp5], %[temp5], %[temp7] \n\t" \ |
| 268 "addu %[temp7], %[temp8], %[temp2] \n\t" \ |
| 269 "subu %[temp2], %[temp8], %[temp2] \n\t" \ |
| 270 "addu %[temp8], %[temp0], %[temp1] \n\t" \ |
| 271 "subu %[temp0], %[temp0], %[temp1] \n\t" \ |
| 272 "addu %[temp1], %[temp3], %[temp6] \n\t" \ |
| 273 "subu %[temp3], %[temp3], %[temp6] \n\t" \ |
| 274 "addu %[temp6], %[temp4], %[temp5] \n\t" \ |
| 275 "subu %[temp4], %[temp4], %[temp5] \n\t" \ |
| 276 "sw %[temp7], "#E"(%[tmp]) \n\t" \ |
| 277 "sw %[temp2], "#H"(%[tmp]) \n\t" \ |
| 278 "sw %[temp8], "#F"(%[tmp]) \n\t" \ |
| 279 "sw %[temp0], "#G"(%[tmp]) \n\t" \ |
| 280 "sw %[temp1], "#E1"(%[tmp]) \n\t" \ |
| 281 "sw %[temp3], "#H1"(%[tmp]) \n\t" \ |
| 282 "sw %[temp6], "#F1"(%[tmp]) \n\t" \ |
| 283 "sw %[temp4], "#G1"(%[tmp]) \n\t" |
| 284 |
| 285 // macro for one vertical pass in Disto4x4 (TTransform) |
| 286 // two calls of the TTransform function are merged into a single one;
| 287 // since only one accumulator is available in the MIPS32r1 instruction
| 288 // set, the second TTransform call is evaluated first, followed by
| 289 // the first one.
| 290 // const int sum1 = TTransform(a, w); |
| 291 // const int sum2 = TTransform(b, w); |
| 292 // return abs(sum2 - sum1) >> 5; |
| 293 // (sum2 - sum1) is calculated with madds (for sum2) and msubs (for sum1)
| 294 // A..D - offsets in bytes to load first results from tmp buffer |
| 295 // A1..D1 - offsets in bytes to load second results from tmp buffer |
| 296 // E..H - offsets in bytes to load from w buffer |
| 297 #define VERTICAL_PASS(A, B, C, D, A1, B1, C1, D1, E, F, G, H) \ |
| 298 "lw %[temp0], "#A1"(%[tmp]) \n\t" \ |
| 299 "lw %[temp1], "#C1"(%[tmp]) \n\t" \ |
| 300 "lw %[temp2], "#B1"(%[tmp]) \n\t" \ |
| 301 "lw %[temp3], "#D1"(%[tmp]) \n\t" \ |
| 302 "addu %[temp8], %[temp0], %[temp1] \n\t" \ |
| 303 "subu %[temp0], %[temp0], %[temp1] \n\t" \ |
| 304 "addu %[temp1], %[temp2], %[temp3] \n\t" \ |
| 305 "subu %[temp2], %[temp2], %[temp3] \n\t" \ |
| 306 "addu %[temp3], %[temp8], %[temp1] \n\t" \ |
| 307 "subu %[temp8], %[temp8], %[temp1] \n\t" \ |
| 308 "addu %[temp1], %[temp0], %[temp2] \n\t" \ |
| 309 "subu %[temp0], %[temp0], %[temp2] \n\t" \ |
| 310 "sra %[temp4], %[temp3], 31 \n\t" \ |
| 311 "sra %[temp5], %[temp1], 31 \n\t" \ |
| 312 "sra %[temp6], %[temp0], 31 \n\t" \ |
| 313 "sra %[temp7], %[temp8], 31 \n\t" \ |
| 314 "xor %[temp3], %[temp3], %[temp4] \n\t" \ |
| 315 "xor %[temp1], %[temp1], %[temp5] \n\t" \ |
| 316 "xor %[temp0], %[temp0], %[temp6] \n\t" \ |
| 317 "xor %[temp8], %[temp8], %[temp7] \n\t" \ |
| 318 "subu %[temp3], %[temp3], %[temp4] \n\t" \ |
| 319 "subu %[temp1], %[temp1], %[temp5] \n\t" \ |
| 320 "subu %[temp0], %[temp0], %[temp6] \n\t" \ |
| 321 "subu %[temp8], %[temp8], %[temp7] \n\t" \ |
| 322 "lhu %[temp4], "#E"(%[w]) \n\t" \ |
| 323 "lhu %[temp5], "#F"(%[w]) \n\t" \ |
| 324 "lhu %[temp6], "#G"(%[w]) \n\t" \ |
| 325 "lhu %[temp7], "#H"(%[w]) \n\t" \ |
| 326 "madd %[temp4], %[temp3] \n\t" \ |
| 327 "madd %[temp5], %[temp1] \n\t" \ |
| 328 "madd %[temp6], %[temp0] \n\t" \ |
| 329 "madd %[temp7], %[temp8] \n\t" \ |
| 330 "lw %[temp0], "#A"(%[tmp]) \n\t" \ |
| 331 "lw %[temp1], "#C"(%[tmp]) \n\t" \ |
| 332 "lw %[temp2], "#B"(%[tmp]) \n\t" \ |
| 333 "lw %[temp3], "#D"(%[tmp]) \n\t" \ |
| 334 "addu %[temp8], %[temp0], %[temp1] \n\t" \ |
| 335 "subu %[temp0], %[temp0], %[temp1] \n\t" \ |
| 336 "addu %[temp1], %[temp2], %[temp3] \n\t" \ |
| 337 "subu %[temp2], %[temp2], %[temp3] \n\t" \ |
| 338 "addu %[temp3], %[temp8], %[temp1] \n\t" \ |
| 339 "subu %[temp1], %[temp8], %[temp1] \n\t" \ |
| 340 "addu %[temp8], %[temp0], %[temp2] \n\t" \ |
| 341 "subu %[temp0], %[temp0], %[temp2] \n\t" \ |
| 342 "sra %[temp2], %[temp3], 31 \n\t" \ |
| 343 "xor %[temp3], %[temp3], %[temp2] \n\t" \ |
| 344 "subu %[temp3], %[temp3], %[temp2] \n\t" \ |
| 345 "msub %[temp4], %[temp3] \n\t" \ |
| 346 "sra %[temp2], %[temp8], 31 \n\t" \ |
| 347 "sra %[temp3], %[temp0], 31 \n\t" \ |
| 348 "sra %[temp4], %[temp1], 31 \n\t" \ |
| 349 "xor %[temp8], %[temp8], %[temp2] \n\t" \ |
| 350 "xor %[temp0], %[temp0], %[temp3] \n\t" \ |
| 351 "xor %[temp1], %[temp1], %[temp4] \n\t" \ |
| 352 "subu %[temp8], %[temp8], %[temp2] \n\t" \ |
| 353 "subu %[temp0], %[temp0], %[temp3] \n\t" \ |
| 354 "subu %[temp1], %[temp1], %[temp4] \n\t" \ |
| 355 "msub %[temp5], %[temp8] \n\t" \ |
| 356 "msub %[temp6], %[temp0] \n\t" \ |
| 357 "msub %[temp7], %[temp1] \n\t" |
| 358 |
| 359 static int Disto4x4(const uint8_t* const a, const uint8_t* const b, |
| 360 const uint16_t* const w) { |
| 361 int tmp[32]; |
| 362 int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8; |
| 363 |
| 364 __asm__ volatile( |
| 365 HORIZONTAL_PASS( 0, 1, 2, 3, 0, 4, 8, 12, 64, 68, 72, 76) |
| 366 HORIZONTAL_PASS(16, 17, 18, 19, 16, 20, 24, 28, 80, 84, 88, 92) |
| 367 HORIZONTAL_PASS(32, 33, 34, 35, 32, 36, 40, 44, 96, 100, 104, 108) |
| 368 HORIZONTAL_PASS(48, 49, 50, 51, 48, 52, 56, 60, 112, 116, 120, 124) |
| 369 "mthi $zero \n\t" |
| 370 "mtlo $zero \n\t" |
| 371 VERTICAL_PASS( 0, 16, 32, 48, 64, 80, 96, 112, 0, 8, 16, 24) |
| 372 VERTICAL_PASS( 4, 20, 36, 52, 68, 84, 100, 116, 2, 10, 18, 26) |
| 373 VERTICAL_PASS( 8, 24, 40, 56, 72, 88, 104, 120, 4, 12, 20, 28) |
| 374 VERTICAL_PASS(12, 28, 44, 60, 76, 92, 108, 124, 6, 14, 22, 30) |
| 375 "mflo %[temp0] \n\t" |
| 376 "sra %[temp1], %[temp0], 31 \n\t" |
| 377 "xor %[temp0], %[temp0], %[temp1] \n\t" |
| 378 "subu %[temp0], %[temp0], %[temp1] \n\t" |
| 379 "sra %[temp0], %[temp0], 5 \n\t" |
| 380 |
| 381 : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), |
| 382 [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), |
| 383 [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8) |
| 384 : [a]"r"(a), [b]"r"(b), [w]"r"(w), [tmp]"r"(tmp) |
| 385 : "memory", "hi", "lo" |
| 386 ); |
| 387 |
| 388 return temp0; |
| 389 } |
| 390 |
| 391 #undef VERTICAL_PASS |
| 392 #undef HORIZONTAL_PASS |
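// A scalar sketch, for orientation, of the TTransform pair that Disto4x4
// merges above: the b-buffer terms are accumulated with madd and the a-buffer
// terms with msub, so HI/LO end up holding sum2 - sum1.
#if 0
static int TTransformSketch(const uint8_t* in, const uint16_t* w) {
  int sum = 0;
  int tmp[16];
  int i;
  for (i = 0; i < 4; ++i, in += BPS) {        // horizontal pass
    const int a0 = in[0] + in[2];
    const int a1 = in[1] + in[3];
    const int a2 = in[1] - in[3];
    const int a3 = in[0] - in[2];
    tmp[0 + i * 4] = a0 + a1;
    tmp[1 + i * 4] = a3 + a2;
    tmp[2 + i * 4] = a3 - a2;
    tmp[3 + i * 4] = a0 - a1;
  }
  for (i = 0; i < 4; ++i, ++w) {              // vertical pass
    const int a0 = tmp[0 + i] + tmp[8 + i];
    const int a1 = tmp[4 + i] + tmp[12 + i];
    const int a2 = tmp[4 + i] - tmp[12 + i];
    const int a3 = tmp[0 + i] - tmp[8 + i];
    sum += w[0]  * abs(a0 + a1);
    sum += w[4]  * abs(a3 + a2);
    sum += w[8]  * abs(a3 - a2);
    sum += w[12] * abs(a0 - a1);
  }
  return sum;  // Disto4x4(a, b, w) == abs(TTransform(b, w) - TTransform(a, w)) >> 5
}
#endif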
| 393 |
| 394 static int Disto16x16(const uint8_t* const a, const uint8_t* const b, |
| 395 const uint16_t* const w) { |
| 396 int D = 0; |
| 397 int x, y; |
| 398 for (y = 0; y < 16 * BPS; y += 4 * BPS) { |
| 399 for (x = 0; x < 16; x += 4) { |
| 400 D += Disto4x4(a + x + y, b + x + y, w); |
| 401 } |
| 402 } |
| 403 return D; |
| 404 } |
| 405 |
| 406 // macro for one horizontal pass in FTransform |
| 407 // temp0..temp15 hold tmp[0]..tmp[15]
| 408 // A..D - offsets in bytes to load from src and ref buffers |
| 409 // TEMP0..TEMP3 - registers for corresponding tmp elements |
| 410 #define HORIZONTAL_PASS(A, B, C, D, TEMP0, TEMP1, TEMP2, TEMP3) \ |
| 411 "lw %["#TEMP1"], 0(%[args]) \n\t" \ |
| 412 "lw %["#TEMP2"], 4(%[args]) \n\t" \ |
| 413 "lbu %[temp16], "#A"(%["#TEMP1"]) \n\t" \ |
| 414 "lbu %[temp17], "#A"(%["#TEMP2"]) \n\t" \ |
| 415 "lbu %[temp18], "#B"(%["#TEMP1"]) \n\t" \ |
| 416 "lbu %[temp19], "#B"(%["#TEMP2"]) \n\t" \ |
| 417 "subu %[temp20], %[temp16], %[temp17] \n\t" \ |
| 418 "lbu %[temp16], "#C"(%["#TEMP1"]) \n\t" \ |
| 419 "lbu %[temp17], "#C"(%["#TEMP2"]) \n\t" \ |
| 420 "subu %["#TEMP0"], %[temp18], %[temp19] \n\t" \ |
| 421 "lbu %[temp18], "#D"(%["#TEMP1"]) \n\t" \ |
| 422 "lbu %[temp19], "#D"(%["#TEMP2"]) \n\t" \ |
| 423 "subu %["#TEMP1"], %[temp16], %[temp17] \n\t" \ |
| 424 "subu %["#TEMP2"], %[temp18], %[temp19] \n\t" \ |
| 425 "addu %["#TEMP3"], %[temp20], %["#TEMP2"] \n\t" \ |
| 426 "subu %["#TEMP2"], %[temp20], %["#TEMP2"] \n\t" \ |
| 427 "addu %[temp20], %["#TEMP0"], %["#TEMP1"] \n\t" \ |
| 428 "subu %["#TEMP0"], %["#TEMP0"], %["#TEMP1"] \n\t" \ |
| 429 "mul %[temp16], %["#TEMP2"], %[c5352] \n\t" \ |
| 430 "mul %[temp17], %["#TEMP2"], %[c2217] \n\t" \ |
| 431 "mul %[temp18], %["#TEMP0"], %[c5352] \n\t" \ |
| 432 "mul %[temp19], %["#TEMP0"], %[c2217] \n\t" \ |
| 433 "addu %["#TEMP1"], %["#TEMP3"], %[temp20] \n\t" \ |
| 434 "subu %[temp20], %["#TEMP3"], %[temp20] \n\t" \ |
| 435 "sll %["#TEMP0"], %["#TEMP1"], 3 \n\t" \ |
| 436 "sll %["#TEMP2"], %[temp20], 3 \n\t" \ |
| 437 "addiu %[temp16], %[temp16], 1812 \n\t" \ |
| 438 "addiu %[temp17], %[temp17], 937 \n\t" \ |
| 439 "addu %[temp16], %[temp16], %[temp19] \n\t" \ |
| 440 "subu %[temp17], %[temp17], %[temp18] \n\t" \ |
| 441 "sra %["#TEMP1"], %[temp16], 9 \n\t" \ |
| 442 "sra %["#TEMP3"], %[temp17], 9 \n\t" |
| 443 |
| 444 // macro for one vertical pass in FTransform |
| 445 // temp0..temp15 hold tmp[0]..tmp[15]
| 446 // A..D - offsets in bytes to store to out buffer |
| 447 // TEMP0, TEMP4, TEMP8 and TEMP12 - registers for corresponding tmp elements |
| 448 #define VERTICAL_PASS(A, B, C, D, TEMP0, TEMP4, TEMP8, TEMP12) \ |
| 449 "addu %[temp16], %["#TEMP0"], %["#TEMP12"] \n\t" \ |
| 450 "subu %[temp19], %["#TEMP0"], %["#TEMP12"] \n\t" \ |
| 451 "addu %[temp17], %["#TEMP4"], %["#TEMP8"] \n\t" \ |
| 452 "subu %[temp18], %["#TEMP4"], %["#TEMP8"] \n\t" \ |
| 453 "mul %["#TEMP8"], %[temp19], %[c2217] \n\t" \ |
| 454 "mul %["#TEMP12"], %[temp18], %[c2217] \n\t" \ |
| 455 "mul %["#TEMP4"], %[temp19], %[c5352] \n\t" \ |
| 456 "mul %[temp18], %[temp18], %[c5352] \n\t" \ |
| 457 "addiu %[temp16], %[temp16], 7 \n\t" \ |
| 458 "addu %["#TEMP0"], %[temp16], %[temp17] \n\t" \ |
| 459 "sra %["#TEMP0"], %["#TEMP0"], 4 \n\t" \ |
| 460 "addu %["#TEMP12"], %["#TEMP12"], %["#TEMP4"] \n\t" \ |
| 461 "subu %["#TEMP4"], %[temp16], %[temp17] \n\t" \ |
| 462 "sra %["#TEMP4"], %["#TEMP4"], 4 \n\t" \ |
| 463 "addiu %["#TEMP8"], %["#TEMP8"], 30000 \n\t" \ |
| 464 "addiu %["#TEMP12"], %["#TEMP12"], 12000 \n\t" \ |
| 465 "addiu %["#TEMP8"], %["#TEMP8"], 21000 \n\t" \ |
| 466 "subu %["#TEMP8"], %["#TEMP8"], %[temp18] \n\t" \ |
| 467 "sra %["#TEMP12"], %["#TEMP12"], 16 \n\t" \ |
| 468 "sra %["#TEMP8"], %["#TEMP8"], 16 \n\t" \ |
| 469 "addiu %[temp16], %["#TEMP12"], 1 \n\t" \ |
| 470 "movn %["#TEMP12"], %[temp16], %[temp19] \n\t" \ |
| 471 "sh %["#TEMP0"], "#A"(%[temp20]) \n\t" \ |
| 472 "sh %["#TEMP4"], "#C"(%[temp20]) \n\t" \ |
| 473 "sh %["#TEMP8"], "#D"(%[temp20]) \n\t" \ |
| 474 "sh %["#TEMP12"], "#B"(%[temp20]) \n\t" |
| 475 |
| 476 static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) { |
| 477 int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8; |
| 478 int temp9, temp10, temp11, temp12, temp13, temp14, temp15, temp16; |
| 479 int temp17, temp18, temp19, temp20; |
| 480 const int c2217 = 2217; |
| 481 const int c5352 = 5352; |
| 482 const int* const args[3] = |
| 483 { (const int*)src, (const int*)ref, (const int*)out }; |
| 484 |
| 485 __asm__ volatile( |
| 486 HORIZONTAL_PASS( 0, 1, 2, 3, temp0, temp1, temp2, temp3) |
| 487 HORIZONTAL_PASS(16, 17, 18, 19, temp4, temp5, temp6, temp7) |
| 488 HORIZONTAL_PASS(32, 33, 34, 35, temp8, temp9, temp10, temp11) |
| 489 HORIZONTAL_PASS(48, 49, 50, 51, temp12, temp13, temp14, temp15) |
| 490 "lw %[temp20], 8(%[args]) \n\t" |
| 491 VERTICAL_PASS(0, 8, 16, 24, temp0, temp4, temp8, temp12) |
| 492 VERTICAL_PASS(2, 10, 18, 26, temp1, temp5, temp9, temp13) |
| 493 VERTICAL_PASS(4, 12, 20, 28, temp2, temp6, temp10, temp14) |
| 494 VERTICAL_PASS(6, 14, 22, 30, temp3, temp7, temp11, temp15) |
| 495 |
| 496 : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), |
| 497 [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), |
| 498 [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), |
| 499 [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11), |
| 500 [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14), |
| 501 [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17), |
| 502 [temp18]"=&r"(temp18), [temp19]"=&r"(temp19), [temp20]"=&r"(temp20) |
| 503 : [args]"r"(args), [c2217]"r"(c2217), [c5352]"r"(c5352) |
| 504 : "memory", "hi", "lo" |
| 505 ); |
| 506 } |
| 507 |
| 508 #undef VERTICAL_PASS |
| 509 #undef HORIZONTAL_PASS |
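// A scalar sketch, for orientation, of the forward transform the two macro
// passes above implement; it mirrors the generic C version (the 51000 bias
// appears in the asm as 30000 + 21000 because addiu immediates are 16-bit).
#if 0
static void FTransformSketch(const uint8_t* src, const uint8_t* ref,
                             int16_t* out) {
  int i;
  int tmp[16];
  for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {   // horizontal pass
    const int d0 = src[0] - ref[0];
    const int d1 = src[1] - ref[1];
    const int d2 = src[2] - ref[2];
    const int d3 = src[3] - ref[3];
    const int a0 = d0 + d3;
    const int a1 = d1 + d2;
    const int a2 = d1 - d2;
    const int a3 = d0 - d3;
    tmp[0 + i * 4] = (a0 + a1) * 8;
    tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9;
    tmp[2 + i * 4] = (a0 - a1) * 8;
    tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 +  937) >> 9;
  }
  for (i = 0; i < 4; ++i) {                           // vertical pass
    const int a0 = tmp[0 + i] + tmp[12 + i];
    const int a1 = tmp[4 + i] + tmp[ 8 + i];
    const int a2 = tmp[4 + i] - tmp[ 8 + i];
    const int a3 = tmp[0 + i] - tmp[12 + i];
    out[0 + i]  = (a0 + a1 + 7) >> 4;
    out[4 + i]  = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
    out[8 + i]  = (a0 - a1 + 7) >> 4;
    out[12 + i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16);
  }
}
#endif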
| 510 |
| 511 // Forward declaration. |
| 512 extern int VP8GetResidualCostMIPS32(int ctx0, const VP8Residual* const res); |
| 513 |
| 514 int VP8GetResidualCostMIPS32(int ctx0, const VP8Residual* const res) { |
| 515 int n = res->first; |
| 516 // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1 |
| 517 int p0 = res->prob[n][ctx0][0]; |
| 518 const uint16_t* t = res->cost[n][ctx0]; |
| 519 int cost; |
| 520 const int const_2 = 2; |
| 521 const int const_255 = 255; |
| 522 const int const_max_level = MAX_VARIABLE_LEVEL; |
| 523 int res_cost; |
| 524 int res_prob; |
| 525 int res_coeffs; |
| 526 int res_last; |
| 527 int v_reg; |
| 528 int b_reg; |
| 529 int ctx_reg; |
| 530 int cost_add, temp_1, temp_2, temp_3; |
| 531 |
| 532 if (res->last < 0) { |
| 533 return VP8BitCost(0, p0); |
| 534 } |
| 535 |
| 536 cost = (ctx0 == 0) ? VP8BitCost(1, p0) : 0; |
| 537 |
| 538 res_cost = (int)res->cost; |
| 539 res_prob = (int)res->prob; |
| 540 res_coeffs = (int)res->coeffs; |
| 541 res_last = (int)res->last; |
| 542 |
| 543 __asm__ volatile( |
| 544 ".set push \n\t" |
| 545 ".set noreorder \n\t" |
| 546 |
| 547 "sll %[temp_1], %[n], 1 \n\t" |
| 548 "addu %[res_coeffs], %[res_coeffs], %[temp_1] \n\t" |
| 549 "slt %[temp_2], %[n], %[res_last] \n\t" |
| 550 "bnez %[temp_2], 1f \n\t" |
| 551 " li %[cost_add], 0 \n\t" |
| 552 "b 2f \n\t" |
| 553 " nop \n\t" |
| 554 "1: \n\t" |
| 555 "lh %[v_reg], 0(%[res_coeffs]) \n\t" |
| 556 "addu %[b_reg], %[n], %[VP8EncBands] \n\t" |
| 557 "move %[temp_1], %[const_max_level] \n\t" |
| 558 "addu %[cost], %[cost], %[cost_add] \n\t" |
| 559 "negu %[temp_2], %[v_reg] \n\t" |
| 560 "slti %[temp_3], %[v_reg], 0 \n\t" |
| 561 "movn %[v_reg], %[temp_2], %[temp_3] \n\t" |
| 562 "lbu %[b_reg], 1(%[b_reg]) \n\t" |
| 563 "li %[cost_add], 0 \n\t" |
| 564 |
| 565 "sltiu %[temp_3], %[v_reg], 2 \n\t" |
| 566 "move %[ctx_reg], %[v_reg] \n\t" |
| 567 "movz %[ctx_reg], %[const_2], %[temp_3] \n\t" |
| 568 // cost += VP8LevelCost(t, v); |
| 569 "slt %[temp_3], %[v_reg], %[const_max_level] \n\t" |
| 570 "movn %[temp_1], %[v_reg], %[temp_3] \n\t" |
| 571 "sll %[temp_2], %[v_reg], 1 \n\t" |
| 572 "addu %[temp_2], %[temp_2], %[VP8LevelFixedCosts] \n\t" |
| 573 "lhu %[temp_2], 0(%[temp_2]) \n\t" |
| 574 "sll %[temp_1], %[temp_1], 1 \n\t" |
| 575 "addu %[temp_1], %[temp_1], %[t] \n\t" |
| 576 "lhu %[temp_3], 0(%[temp_1]) \n\t" |
| 577 "addu %[cost], %[cost], %[temp_2] \n\t" |
| 578 |
| 579 // t = res->cost[b][ctx]; |
| 580 "sll %[temp_1], %[ctx_reg], 7 \n\t" |
| 581 "sll %[temp_2], %[ctx_reg], 3 \n\t" |
| 582 "addu %[cost], %[cost], %[temp_3] \n\t" |
| 583 "addu %[temp_1], %[temp_1], %[temp_2] \n\t" |
| 584 "sll %[temp_2], %[b_reg], 3 \n\t" |
| 585 "sll %[temp_3], %[b_reg], 5 \n\t" |
| 586 "sub %[temp_2], %[temp_3], %[temp_2] \n\t" |
| 587 "sll %[temp_3], %[temp_2], 4 \n\t" |
| 588 "addu %[temp_1], %[temp_1], %[temp_3] \n\t" |
| 589 "addu %[temp_2], %[temp_2], %[res_cost] \n\t" |
| 590 "addiu %[n], %[n], 1 \n\t" |
| 591 "addu %[t], %[temp_1], %[temp_2] \n\t" |
| 592 "slt %[temp_1], %[n], %[res_last] \n\t" |
| 593 "bnez %[temp_1], 1b \n\t" |
| 594 " addiu %[res_coeffs], %[res_coeffs], 2 \n\t" |
| 595 "2: \n\t" |
| 596 |
| 597 ".set pop \n\t" |
| 598 : [cost]"+r"(cost), [t]"+r"(t), [n]"+r"(n), [v_reg]"=&r"(v_reg), |
| 599 [ctx_reg]"=&r"(ctx_reg), [b_reg]"=&r"(b_reg), [cost_add]"=&r"(cost_add), |
| 600 [temp_1]"=&r"(temp_1), [temp_2]"=&r"(temp_2), [temp_3]"=&r"(temp_3) |
| 601 : [const_2]"r"(const_2), [const_255]"r"(const_255), [res_last]"r"(res_last), |
| 602 [VP8EntropyCost]"r"(VP8EntropyCost), [VP8EncBands]"r"(VP8EncBands), |
| 603 [const_max_level]"r"(const_max_level), [res_prob]"r"(res_prob), |
| 604 [VP8LevelFixedCosts]"r"(VP8LevelFixedCosts), [res_coeffs]"r"(res_coeffs), |
| 605 [res_cost]"r"(res_cost) |
| 606 : "memory" |
| 607 ); |
| 608 |
| 609 // Last coefficient is always non-zero |
| 610 { |
| 611 const int v = abs(res->coeffs[n]); |
| 612 assert(v != 0); |
| 613 cost += VP8LevelCost(t, v); |
| 614 if (n < 15) { |
| 615 const int b = VP8EncBands[n + 1]; |
| 616 const int ctx = (v == 1) ? 1 : 2; |
| 617 const int last_p0 = res->prob[b][ctx][0]; |
| 618 cost += VP8BitCost(0, last_p0); |
| 619 } |
| 620 } |
| 621 return cost; |
| 622 } |
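// For orientation, the loop that the assembly above hand-schedules is, in
// scalar form, the one from the generic VP8GetResidualCost (VP8LevelCost(t, v)
// being VP8LevelFixedCosts[v] + t[v > MAX_VARIABLE_LEVEL ? MAX_VARIABLE_LEVEL : v]):
//   while (n < res->last) {
//     const int v = abs(res->coeffs[n]);
//     const int ctx = (v >= 2) ? 2 : v;
//     cost += VP8LevelCost(t, v);
//     t = res->cost[VP8EncBands[n + 1]][ctx];
//     ++n;
//   }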
| 623 |
| 624 #define GET_SSE_INNER(A, B, C, D) \ |
| 625 "lbu %[temp0], "#A"(%[a]) \n\t" \ |
| 626 "lbu %[temp1], "#A"(%[b]) \n\t" \ |
| 627 "lbu %[temp2], "#B"(%[a]) \n\t" \ |
| 628 "lbu %[temp3], "#B"(%[b]) \n\t" \ |
| 629 "lbu %[temp4], "#C"(%[a]) \n\t" \ |
| 630 "lbu %[temp5], "#C"(%[b]) \n\t" \ |
| 631 "lbu %[temp6], "#D"(%[a]) \n\t" \ |
| 632 "lbu %[temp7], "#D"(%[b]) \n\t" \ |
| 633 "subu %[temp0], %[temp0], %[temp1] \n\t" \ |
| 634 "subu %[temp2], %[temp2], %[temp3] \n\t" \ |
| 635 "subu %[temp4], %[temp4], %[temp5] \n\t" \ |
| 636 "subu %[temp6], %[temp6], %[temp7] \n\t" \ |
| 637 "madd %[temp0], %[temp0] \n\t" \ |
| 638 "madd %[temp2], %[temp2] \n\t" \ |
| 639 "madd %[temp4], %[temp4] \n\t" \ |
| 640 "madd %[temp6], %[temp6] \n\t" |
| 641 |
| 642 #define GET_SSE(A, B, C, D) \ |
| 643 GET_SSE_INNER(A, A + 1, A + 2, A + 3) \ |
| 644 GET_SSE_INNER(B, B + 1, B + 2, B + 3) \ |
| 645 GET_SSE_INNER(C, C + 1, C + 2, C + 3) \ |
| 646 GET_SSE_INNER(D, D + 1, D + 2, D + 3) |
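// A scalar sketch, for orientation: each GET_SSE_INNER expansion accumulates
// four squared byte differences into HI/LO via madd, so GET_SSE(A, B, C, D)
// covers 16 consecutive bytes of one row. The whole computation is, in scalar
// form (width/height in pixels, BPS the row stride):
#if 0
static int SSESketch(const uint8_t* a, const uint8_t* b, int w, int h) {
  int x, y, count = 0;
  for (y = 0; y < h; ++y, a += BPS, b += BPS) {
    for (x = 0; x < w; ++x) {
      const int diff = a[x] - b[x];
      count += diff * diff;
    }
  }
  return count;   // e.g. SSE16x16 == SSESketch(a, b, 16, 16)
}
#endif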
| 647 |
| 648 #if !defined(WORK_AROUND_GCC) |
| 649 static int SSE16x16(const uint8_t* a, const uint8_t* b) { |
| 650 int count; |
| 651 int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; |
| 652 |
| 653 __asm__ volatile( |
| 654 "mult $zero, $zero \n\t" |
| 655 |
| 656 GET_SSE( 0, 4, 8, 12) |
| 657 GET_SSE( 16, 20, 24, 28) |
| 658 GET_SSE( 32, 36, 40, 44) |
| 659 GET_SSE( 48, 52, 56, 60) |
| 660 GET_SSE( 64, 68, 72, 76) |
| 661 GET_SSE( 80, 84, 88, 92) |
| 662 GET_SSE( 96, 100, 104, 108) |
| 663 GET_SSE(112, 116, 120, 124) |
| 664 GET_SSE(128, 132, 136, 140) |
| 665 GET_SSE(144, 148, 152, 156) |
| 666 GET_SSE(160, 164, 168, 172) |
| 667 GET_SSE(176, 180, 184, 188) |
| 668 GET_SSE(192, 196, 200, 204) |
| 669 GET_SSE(208, 212, 216, 220) |
| 670 GET_SSE(224, 228, 232, 236) |
| 671 GET_SSE(240, 244, 248, 252) |
| 672 |
| 673 "mflo %[count] \n\t" |
| 674 : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), |
| 675 [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), |
| 676 [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count) |
| 677 : [a]"r"(a), [b]"r"(b) |
| 678 : "memory", "hi" , "lo" |
| 679 ); |
| 680 return count; |
| 681 } |
| 682 |
| 683 static int SSE16x8(const uint8_t* a, const uint8_t* b) { |
| 684 int count; |
| 685 int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; |
| 686 |
| 687 __asm__ volatile( |
| 688 "mult $zero, $zero \n\t" |
| 689 |
| 690 GET_SSE( 0, 4, 8, 12) |
| 691 GET_SSE( 16, 20, 24, 28) |
| 692 GET_SSE( 32, 36, 40, 44) |
| 693 GET_SSE( 48, 52, 56, 60) |
| 694 GET_SSE( 64, 68, 72, 76) |
| 695 GET_SSE( 80, 84, 88, 92) |
| 696 GET_SSE( 96, 100, 104, 108) |
| 697 GET_SSE(112, 116, 120, 124) |
| 698 |
| 699 "mflo %[count] \n\t" |
| 700 : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), |
| 701 [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), |
| 702 [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count) |
| 703 : [a]"r"(a), [b]"r"(b) |
| 704 : "memory", "hi" , "lo" |
| 705 ); |
| 706 return count; |
| 707 } |
| 708 |
| 709 static int SSE8x8(const uint8_t* a, const uint8_t* b) { |
| 710 int count; |
| 711 int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; |
| 712 |
| 713 __asm__ volatile( |
| 714 "mult $zero, $zero \n\t" |
| 715 |
| 716 GET_SSE( 0, 4, 16, 20) |
| 717 GET_SSE(32, 36, 48, 52) |
| 718 GET_SSE(64, 68, 80, 84) |
| 719 GET_SSE(96, 100, 112, 116) |
| 720 |
| 721 "mflo %[count] \n\t" |
| 722 : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), |
| 723 [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), |
| 724 [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count) |
| 725 : [a]"r"(a), [b]"r"(b) |
| 726 : "memory", "hi" , "lo" |
| 727 ); |
| 728 return count; |
| 729 } |
| 730 |
| 731 static int SSE4x4(const uint8_t* a, const uint8_t* b) { |
| 732 int count; |
| 733 int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; |
| 734 |
| 735 __asm__ volatile( |
| 736 "mult $zero, $zero \n\t" |
| 737 |
| 738 GET_SSE(0, 16, 32, 48) |
| 739 |
| 740 "mflo %[count] \n\t" |
| 741 : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), |
| 742 [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), |
| 743 [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count) |
| 744 : [a]"r"(a), [b]"r"(b) |
| 745 : "memory", "hi" , "lo" |
| 746 ); |
| 747 return count; |
| 748 } |
| 749 |
| 750 #endif // WORK_AROUND_GCC |
| 751 |
| 752 #undef GET_SSE
| 753 #undef GET_SSE_INNER
| 754 |
| 755 #endif // WEBP_USE_MIPS32 |
| 756 |
| 757 //------------------------------------------------------------------------------ |
| 758 // Entry point |
| 759 |
| 760 extern void VP8EncDspInitMIPS32(void); |
| 761 |
| 762 void VP8EncDspInitMIPS32(void) { |
| 763 #if defined(WEBP_USE_MIPS32) |
| 764 VP8ITransform = ITransform; |
| 765 VP8EncQuantizeBlock = QuantizeBlock; |
| 766 VP8TDisto4x4 = Disto4x4; |
| 767 VP8TDisto16x16 = Disto16x16; |
| 768 VP8FTransform = FTransform; |
| 769 #if !defined(WORK_AROUND_GCC) |
| 770 VP8SSE16x16 = SSE16x16; |
| 771 VP8SSE8x8 = SSE8x8; |
| 772 VP8SSE16x8 = SSE16x8; |
| 773 VP8SSE4x4 = SSE4x4; |
| 774 #endif |
| 775 #endif // WEBP_USE_MIPS32 |
| 776 } |