Chromium Code Reviews

Side by Side Diff: source/row_msa.cc

Issue 2600713002: Add MSA optimized RAW/RGB/ARGB to ARGB/Y/UV row functions (Closed)
Patch Set: Resolved merge conflicts. Created 3 years, 11 months ago.
/*
 *  Copyright 2016 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

(...skipping 1287 matching lines...)

    dst0 = (v16u8)__msa_ilvr_b((v16i8)vec2, (v16i8)vec0);
    dst1 = (v16u8)__msa_ilvl_b((v16i8)vec2, (v16i8)vec0);
    dst2 = (v16u8)__msa_ilvr_b((v16i8)vec3, (v16i8)vec1);
    dst3 = (v16u8)__msa_ilvl_b((v16i8)vec3, (v16i8)vec1);
    ST_UB4(dst0, dst1, dst2, dst3, dst_argb, 16);
    src_argb4444 += 32;
    dst_argb += 64;
  }
}

void ARGB1555ToARGBRow_MSA(const uint8* src_argb1555,
                           uint8* dst_argb,
                           int width) {
  int x;
  v8u16 src0, src1;
  v8u16 vec0, vec1, vec2, vec3, vec4, vec5;
  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6;
  v16u8 dst0, dst1, dst2, dst3;
  v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F);

  for (x = 0; x < width; x += 16) {
    src0 = (v8u16)__msa_ld_h((v8u16*)src_argb1555, 0);
    src1 = (v8u16)__msa_ld_h((v8u16*)src_argb1555, 16);
    // Extract the 5-bit B, G and R fields; what remains after the third
    // shift is the 1-bit alpha.
    vec0 = src0 & const_0x1F;
    vec1 = src1 & const_0x1F;
    src0 = (v8u16)__msa_srli_h((v8i16)src0, 5);
    src1 = (v8u16)__msa_srli_h((v8i16)src1, 5);
    vec2 = src0 & const_0x1F;
    vec3 = src1 & const_0x1F;
    src0 = (v8u16)__msa_srli_h((v8i16)src0, 5);
    src1 = (v8u16)__msa_srli_h((v8i16)src1, 5);
    vec4 = src0 & const_0x1F;
    vec5 = src1 & const_0x1F;
    src0 = (v8u16)__msa_srli_h((v8i16)src0, 5);
    src1 = (v8u16)__msa_srli_h((v8i16)src1, 5);
    // Pack each channel down to bytes: reg0 = B, reg1 = G, reg2 = R,
    // reg3 = alpha bit (0 or 1 per byte).
    reg0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
    reg1 = (v16u8)__msa_pckev_b((v16i8)vec3, (v16i8)vec2);
    reg2 = (v16u8)__msa_pckev_b((v16i8)vec5, (v16i8)vec4);
    reg3 = (v16u8)__msa_pckev_b((v16i8)src1, (v16i8)src0);
    // Expand 5 bits to 8: (x << 3) | (x >> 2).
    reg4 = (v16u8)__msa_slli_b((v16i8)reg0, 3);
    reg5 = (v16u8)__msa_slli_b((v16i8)reg1, 3);
    reg6 = (v16u8)__msa_slli_b((v16i8)reg2, 3);
    reg4 |= (v16u8)__msa_srai_b((v16i8)reg0, 2);
    reg5 |= (v16u8)__msa_srai_b((v16i8)reg1, 2);
    reg6 |= (v16u8)__msa_srai_b((v16i8)reg2, 2);
    // Negating the 0/1 alpha byte yields 0x00 or 0xFF.
    reg3 = -reg3;
    // Interleave B, G, R, A into 4-byte ARGB pixels.
    reg0 = (v16u8)__msa_ilvr_b((v16i8)reg6, (v16i8)reg4);
    reg1 = (v16u8)__msa_ilvl_b((v16i8)reg6, (v16i8)reg4);
    reg2 = (v16u8)__msa_ilvr_b((v16i8)reg3, (v16i8)reg5);
    reg3 = (v16u8)__msa_ilvl_b((v16i8)reg3, (v16i8)reg5);
    dst0 = (v16u8)__msa_ilvr_b((v16i8)reg2, (v16i8)reg0);
    dst1 = (v16u8)__msa_ilvl_b((v16i8)reg2, (v16i8)reg0);
    dst2 = (v16u8)__msa_ilvr_b((v16i8)reg3, (v16i8)reg1);
    dst3 = (v16u8)__msa_ilvl_b((v16i8)reg3, (v16i8)reg1);
    ST_UB4(dst0, dst1, dst2, dst3, dst_argb, 16);
    src_argb1555 += 32;
    dst_argb += 64;
  }
}

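For reference, a scalar sketch of the per-pixel math the vector loop above implements (not part of this patch; the helper name is hypothetical, and it leans on libyuv's uint8/uint16 typedefs):

// Hypothetical scalar reference for one ARGB1555 pixel.
static void ARGB1555ToARGBPixel(uint16 v, uint8* dst) {
  uint8 b = v & 0x1F;
  uint8 g = (v >> 5) & 0x1F;
  uint8 r = (v >> 10) & 0x1F;
  uint8 a = (v >> 15) & 0x1;
  dst[0] = (b << 3) | (b >> 2);  // 5 -> 8 bits by replicating top bits.
  dst[1] = (g << 3) | (g >> 2);
  dst[2] = (r << 3) | (r >> 2);
  dst[3] = (uint8)(0 - a);       // Alpha bit 0 -> 0x00, 1 -> 0xFF.
}
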
void RGB565ToARGBRow_MSA(const uint8* src_rgb565, uint8* dst_argb, int width) {
  int x;
  v8u16 src0, src1, vec0, vec1, vec2, vec3, vec4, vec5;
  v8u16 reg0, reg1, reg2, reg3, reg4, reg5;
  v16u8 res0, res1, res2, res3, dst0, dst1, dst2, dst3;
  v16u8 const_0xFF = (v16u8)__msa_ldi_b(0xFF);
  v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F);
  v8u16 const_0x7E0 = (v8u16)__msa_fill_h(0x7E0);
  v8u16 const_0xF800 = (v8u16)__msa_fill_h(0xF800);

  for (x = 0; x < width; x += 16) {
    src0 = (v8u16)__msa_ld_h((v8u16*)src_rgb565, 0);
    src1 = (v8u16)__msa_ld_h((v8u16*)src_rgb565, 16);
    // Mask out the 5-bit B, 6-bit G and 5-bit R fields in place.
    vec0 = src0 & const_0x1F;
    vec1 = src0 & const_0x7E0;
    vec2 = src0 & const_0xF800;
    vec3 = src1 & const_0x1F;
    vec4 = src1 & const_0x7E0;
    vec5 = src1 & const_0xF800;
    // Move each field down to bits 7..0 and widen it to 8 bits by
    // replicating its top bits.
    reg0 = (v8u16)__msa_slli_h((v8i16)vec0, 3);
    reg1 = (v8u16)__msa_srli_h((v8i16)vec1, 3);
    reg2 = (v8u16)__msa_srli_h((v8i16)vec2, 8);
    reg3 = (v8u16)__msa_slli_h((v8i16)vec3, 3);
    reg4 = (v8u16)__msa_srli_h((v8i16)vec4, 3);
    reg5 = (v8u16)__msa_srli_h((v8i16)vec5, 8);
    reg0 |= (v8u16)__msa_srli_h((v8i16)vec0, 2);
    reg1 |= (v8u16)__msa_srli_h((v8i16)vec1, 9);
    reg2 |= (v8u16)__msa_srli_h((v8i16)vec2, 13);
    reg3 |= (v8u16)__msa_srli_h((v8i16)vec3, 2);
    reg4 |= (v8u16)__msa_srli_h((v8i16)vec4, 9);
    reg5 |= (v8u16)__msa_srli_h((v8i16)vec5, 13);
    // Interleave B, G, R with 0xFF alpha into 4-byte ARGB pixels.
    res0 = (v16u8)__msa_ilvev_b((v16i8)reg2, (v16i8)reg0);
    res1 = (v16u8)__msa_ilvev_b((v16i8)const_0xFF, (v16i8)reg1);
    res2 = (v16u8)__msa_ilvev_b((v16i8)reg5, (v16i8)reg3);
    res3 = (v16u8)__msa_ilvev_b((v16i8)const_0xFF, (v16i8)reg4);
    dst0 = (v16u8)__msa_ilvr_b((v16i8)res1, (v16i8)res0);
    dst1 = (v16u8)__msa_ilvl_b((v16i8)res1, (v16i8)res0);
    dst2 = (v16u8)__msa_ilvr_b((v16i8)res3, (v16i8)res2);
    dst3 = (v16u8)__msa_ilvl_b((v16i8)res3, (v16i8)res2);
    ST_UB4(dst0, dst1, dst2, dst3, dst_argb, 16);
    src_rgb565 += 32;
    dst_argb += 64;
  }
}

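The same per-pixel math as a scalar sketch (again not part of the patch; the helper name is made up):

// Hypothetical scalar reference for one RGB565 pixel.
static void RGB565ToARGBPixel(uint16 v, uint8* dst) {
  uint8 b = v & 0x1F;
  uint8 g = (v >> 5) & 0x3F;
  uint8 r = (v >> 11) & 0x1F;
  dst[0] = (b << 3) | (b >> 2);  // 5 -> 8 bits.
  dst[1] = (g << 2) | (g >> 4);  // 6 -> 8 bits.
  dst[2] = (r << 3) | (r >> 2);  // 5 -> 8 bits.
  dst[3] = 0xFF;                 // Opaque alpha.
}
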
void RGB24ToARGBRow_MSA(const uint8* src_rgb24, uint8* dst_argb, int width) {
  int x;
  v16u8 src0, src1, src2;
  v16u8 vec0, vec1, vec2;
  v16u8 dst0, dst1, dst2, dst3;
  v16u8 const_0xFF = (v16u8)__msa_ldi_b(0xFF);
  // Indices 0-15 pick the packed B,G,R bytes from the source vector;
  // indices 16-19 pull 0xFF alpha from const_0xFF.
  v16i8 shuffler = {0, 1, 2, 16, 3, 4, 5, 17, 6, 7, 8, 18, 9, 10, 11, 19};

  for (x = 0; x < width; x += 16) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_rgb24, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_rgb24, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)src_rgb24, 32);
    // Slide the next 12-byte group of four pixels to the front.
    vec0 = (v16u8)__msa_sldi_b((v16i8)src1, (v16i8)src0, 12);
    vec1 = (v16u8)__msa_sldi_b((v16i8)src2, (v16i8)src1, 8);
    vec2 = (v16u8)__msa_sldi_b((v16i8)src2, (v16i8)src2, 4);
    dst0 = (v16u8)__msa_vshf_b(shuffler, (v16i8)const_0xFF, (v16i8)src0);
    dst1 = (v16u8)__msa_vshf_b(shuffler, (v16i8)const_0xFF, (v16i8)vec0);
    dst2 = (v16u8)__msa_vshf_b(shuffler, (v16i8)const_0xFF, (v16i8)vec1);
    dst3 = (v16u8)__msa_vshf_b(shuffler, (v16i8)const_0xFF, (v16i8)vec2);
    ST_UB4(dst0, dst1, dst2, dst3, dst_argb, 16);
    src_rgb24 += 48;
    dst_argb += 64;
  }
}

void RAWToARGBRow_MSA(const uint8* src_raw, uint8* dst_argb, int width) {
  int x;
  v16u8 src0, src1, src2;
  v16u8 vec0, vec1, vec2;
  v16u8 dst0, dst1, dst2, dst3;
  v16u8 const_0xFF = (v16u8)__msa_ldi_b(0xFF);
  // Same as the RGB24 shuffler, but with R and B swapped: RAW pixels are
  // R,G,B in memory.
  v16i8 mask = {2, 1, 0, 16, 5, 4, 3, 17, 8, 7, 6, 18, 11, 10, 9, 19};

  for (x = 0; x < width; x += 16) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_raw, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_raw, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)src_raw, 32);
    vec0 = (v16u8)__msa_sldi_b((v16i8)src1, (v16i8)src0, 12);
    vec1 = (v16u8)__msa_sldi_b((v16i8)src2, (v16i8)src1, 8);
    vec2 = (v16u8)__msa_sldi_b((v16i8)src2, (v16i8)src2, 4);
    dst0 = (v16u8)__msa_vshf_b(mask, (v16i8)const_0xFF, (v16i8)src0);
    dst1 = (v16u8)__msa_vshf_b(mask, (v16i8)const_0xFF, (v16i8)vec0);
    dst2 = (v16u8)__msa_vshf_b(mask, (v16i8)const_0xFF, (v16i8)vec1);
    dst3 = (v16u8)__msa_vshf_b(mask, (v16i8)const_0xFF, (v16i8)vec2);
    ST_UB4(dst0, dst1, dst2, dst3, dst_argb, 16);
    src_raw += 48;
    dst_argb += 64;
  }
}

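The two functions above differ only in byte order; a scalar sketch of both (hypothetical helpers, for illustration only). RGB24 stores B,G,R per pixel; RAW stores R,G,B:

// Hypothetical scalar references for one RGB24 / RAW pixel.
static void RGB24ToARGBPixel(const uint8* src, uint8* dst) {
  dst[0] = src[0];  // B
  dst[1] = src[1];  // G
  dst[2] = src[2];  // R
  dst[3] = 0xFF;    // A
}

static void RAWToARGBPixel(const uint8* src, uint8* dst) {
  dst[0] = src[2];  // B
  dst[1] = src[1];  // G
  dst[2] = src[0];  // R
  dst[3] = 0xFF;    // A
}
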
void ARGB1555ToYRow_MSA(const uint8* src_argb1555, uint8* dst_y, int width) {
  int x;
  v8u16 src0, src1, vec0, vec1, vec2, vec3, vec4, vec5;
  v8u16 reg0, reg1, reg2, reg3, reg4, reg5;
  v16u8 dst0;
  // Y = (25 * B + 129 * G + 66 * R + 0x1080) >> 8.
  v8u16 const_0x19 = (v8u16)__msa_ldi_h(0x19);
  v8u16 const_0x81 = (v8u16)__msa_ldi_h(0x81);
  v8u16 const_0x42 = (v8u16)__msa_ldi_h(0x42);
  v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F);
  v8u16 const_0x1080 = (v8u16)__msa_fill_h(0x1080);

  for (x = 0; x < width; x += 16) {
    src0 = (v8u16)__msa_ld_b((v8i16*)src_argb1555, 0);
    src1 = (v8u16)__msa_ld_b((v8i16*)src_argb1555, 16);
    // Extract the 5-bit B, G and R fields.
    vec0 = src0 & const_0x1F;
    vec1 = src1 & const_0x1F;
    src0 = (v8u16)__msa_srai_h((v8i16)src0, 5);
    src1 = (v8u16)__msa_srai_h((v8i16)src1, 5);
    vec2 = src0 & const_0x1F;
    vec3 = src1 & const_0x1F;
    src0 = (v8u16)__msa_srai_h((v8i16)src0, 5);
    src1 = (v8u16)__msa_srai_h((v8i16)src1, 5);
    vec4 = src0 & const_0x1F;
    vec5 = src1 & const_0x1F;
    // Expand 5 bits to 8 bits.
    reg0 = (v8u16)__msa_slli_h((v8i16)vec0, 3);
    reg1 = (v8u16)__msa_slli_h((v8i16)vec1, 3);
    reg0 |= (v8u16)__msa_srai_h((v8i16)vec0, 2);
    reg1 |= (v8u16)__msa_srai_h((v8i16)vec1, 2);
    reg2 = (v8u16)__msa_slli_h((v8i16)vec2, 3);
    reg3 = (v8u16)__msa_slli_h((v8i16)vec3, 3);
    reg2 |= (v8u16)__msa_srai_h((v8i16)vec2, 2);
    reg3 |= (v8u16)__msa_srai_h((v8i16)vec3, 2);
    reg4 = (v8u16)__msa_slli_h((v8i16)vec4, 3);
    reg5 = (v8u16)__msa_slli_h((v8i16)vec5, 3);
    reg4 |= (v8u16)__msa_srai_h((v8i16)vec4, 2);
    reg5 |= (v8u16)__msa_srai_h((v8i16)vec5, 2);
    // Weighted sum, bias, and scale back to 8 bits.
    reg0 *= const_0x19;
    reg1 *= const_0x19;
    reg2 *= const_0x81;
    reg3 *= const_0x81;
    reg4 *= const_0x42;
    reg5 *= const_0x42;
    reg0 += reg2;
    reg1 += reg3;
    reg0 += reg4;
    reg1 += reg5;
    reg0 += const_0x1080;
    reg1 += const_0x1080;
    reg0 = (v8u16)__msa_srai_h((v8i16)reg0, 8);
    reg1 = (v8u16)__msa_srai_h((v8i16)reg1, 8);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
    ST_UB(dst0, dst_y);
    src_argb1555 += 32;
    dst_y += 16;
  }
}

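The coefficients above are libyuv's BT.601 studio-swing luma weights in 8-bit fixed point; the 0x1080 bias folds in the +16 luma offset and +0.5 rounding, both pre-scaled by 256. A scalar sketch of the same computation (hypothetical helper):

// Hypothetical scalar reference for one Y sample from 8-bit channels.
static uint8 RGBToYPixel(uint8 r, uint8 g, uint8 b) {
  // 66/129/25 are BT.601 coefficients scaled by 256; 0x1080 = 16.5 * 256.
  return (uint8)((66 * r + 129 * g + 25 * b + 0x1080) >> 8);
}
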
void RGB565ToYRow_MSA(const uint8* src_rgb565, uint8* dst_y, int width) {
  int x;
  v8u16 src0, src1, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v8u16 reg0, reg1, reg2, reg3, reg4, reg5;
  v4u32 res0, res1, res2, res3;
  v16u8 dst0;
  // Packed dot-product weights: each 32-bit lane of const_0x810019 holds
  // the halfword pair (0x19, 0x81) for (B, G); const_0x010042 holds
  // (0x42, 0x01) for (R, bias), where the bias halfword is const_0x1080.
  v4u32 const_0x810019 = (v4u32)__msa_fill_w(0x810019);
  v4u32 const_0x010042 = (v4u32)__msa_fill_w(0x010042);
  v8i16 const_0x1080 = __msa_fill_h(0x1080);
  v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F);
  v8u16 const_0x7E0 = (v8u16)__msa_fill_h(0x7E0);
  v8u16 const_0xF800 = (v8u16)__msa_fill_h(0xF800);

  for (x = 0; x < width; x += 16) {
    src0 = (v8u16)__msa_ld_b((v8i16*)src_rgb565, 0);
    src1 = (v8u16)__msa_ld_b((v8i16*)src_rgb565, 16);
    // Extract and widen B, G, R to 8 bits, as in RGB565ToARGBRow_MSA.
    vec0 = src0 & const_0x1F;
    vec1 = src0 & const_0x7E0;
    vec2 = src0 & const_0xF800;
    vec3 = src1 & const_0x1F;
    vec4 = src1 & const_0x7E0;
    vec5 = src1 & const_0xF800;
    reg0 = (v8u16)__msa_slli_h((v8i16)vec0, 3);
    reg1 = (v8u16)__msa_srli_h((v8i16)vec1, 3);
    reg2 = (v8u16)__msa_srli_h((v8i16)vec2, 8);
    reg3 = (v8u16)__msa_slli_h((v8i16)vec3, 3);
    reg4 = (v8u16)__msa_srli_h((v8i16)vec4, 3);
    reg5 = (v8u16)__msa_srli_h((v8i16)vec5, 8);
    reg0 |= (v8u16)__msa_srli_h((v8i16)vec0, 2);
    reg1 |= (v8u16)__msa_srli_h((v8i16)vec1, 9);
    reg2 |= (v8u16)__msa_srli_h((v8i16)vec2, 13);
    reg3 |= (v8u16)__msa_srli_h((v8i16)vec3, 2);
    reg4 |= (v8u16)__msa_srli_h((v8i16)vec4, 9);
    reg5 |= (v8u16)__msa_srli_h((v8i16)vec5, 13);
    // Pair up (B, G) and (R, 0x1080) halfwords for the dot products.
    vec0 = (v8u16)__msa_ilvr_h((v8i16)reg1, (v8i16)reg0);
    vec1 = (v8u16)__msa_ilvl_h((v8i16)reg1, (v8i16)reg0);
    vec2 = (v8u16)__msa_ilvr_h((v8i16)reg4, (v8i16)reg3);
    vec3 = (v8u16)__msa_ilvl_h((v8i16)reg4, (v8i16)reg3);
    vec4 = (v8u16)__msa_ilvr_h(const_0x1080, (v8i16)reg2);
    vec5 = (v8u16)__msa_ilvl_h(const_0x1080, (v8i16)reg2);
    vec6 = (v8u16)__msa_ilvr_h(const_0x1080, (v8i16)reg5);
    vec7 = (v8u16)__msa_ilvl_h(const_0x1080, (v8i16)reg5);
    res0 = __msa_dotp_u_w(vec0, (v8u16)const_0x810019);
    res1 = __msa_dotp_u_w(vec1, (v8u16)const_0x810019);
    res2 = __msa_dotp_u_w(vec2, (v8u16)const_0x810019);
    res3 = __msa_dotp_u_w(vec3, (v8u16)const_0x810019);
    res0 = __msa_dpadd_u_w(res0, vec4, (v8u16)const_0x010042);
    res1 = __msa_dpadd_u_w(res1, vec5, (v8u16)const_0x010042);
    res2 = __msa_dpadd_u_w(res2, vec6, (v8u16)const_0x010042);
    res3 = __msa_dpadd_u_w(res3, vec7, (v8u16)const_0x010042);
    res0 = (v4u32)__msa_srai_w((v4i32)res0, 8);
    res1 = (v4u32)__msa_srai_w((v4i32)res1, 8);
    res2 = (v4u32)__msa_srai_w((v4i32)res2, 8);
    res3 = (v4u32)__msa_srai_w((v4i32)res3, 8);
    vec0 = (v8u16)__msa_pckev_h((v8i16)res1, (v8i16)res0);
    vec1 = (v8u16)__msa_pckev_h((v8i16)res3, (v8i16)res2);
    dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
    ST_UB(dst0, dst_y);
    src_rgb565 += 32;
    dst_y += 16;
  }
}

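Per pixel, those word dot products evaluate the same BT.601 sum as above. A scalar model of one lane pair (hypothetical helper, assuming the lane layout described in the comments):

// Hypothetical scalar model of one (B,G)/(R,bias) dot-product pair.
static uint8 RGB565LanesToY(uint8 b, uint8 g, uint8 r) {
  uint32 y = b * 0x19 + g * 0x81;  // __msa_dotp_u_w with const_0x810019.
  y += r * 0x42 + 0x1080 * 0x01;   // __msa_dpadd_u_w with const_0x010042.
  return (uint8)(y >> 8);
}
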
void RGB24ToYRow_MSA(const uint8* src_argb0, uint8* dst_y, int width) {
  int x;
  v16u8 src0, src1, src2, reg0, reg1, reg2, reg3, dst0;
  v8u16 vec0, vec1, vec2, vec3;
  // Byte-pair dot-product weights: (0x19, 0x81) for (B, G) and
  // (0x42, 0x00) for (R, spare byte).
  v8u16 const_0x8119 = (v8u16)__msa_fill_h(0x8119);
  v8u16 const_0x42 = (v8u16)__msa_fill_h(0x42);
  v8u16 const_0x1080 = (v8u16)__msa_fill_h(0x1080);
  // Regroup the packed 3-byte pixels into 4-byte (B,G,R,nextB) groups.
  v16i8 mask0 = {0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12};
  v16i8 mask1 = {12, 13, 14, 15, 15, 16, 17, 18,
                 18, 19, 20, 21, 21, 22, 23, 24};
  v16i8 mask2 = {8, 9, 10, 11, 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20};
  v16i8 mask3 = {4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16};
  v16i8 zero = {0};

  for (x = 0; x < width; x += 16) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_argb0, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_argb0, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)src_argb0, 32);
    reg0 = (v16u8)__msa_vshf_b(mask0, zero, (v16i8)src0);
    reg1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
    reg2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src2, (v16i8)src1);
    reg3 = (v16u8)__msa_vshf_b(mask3, zero, (v16i8)src2);
    // vec0/vec1 collect the (B,G) byte pairs, vec2/vec3 the (R,nextB)
    // pairs; the stray byte gets a zero weight below.
    vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
    vec1 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2);
    vec2 = (v8u16)__msa_pckod_h((v8i16)reg1, (v8i16)reg0);
    vec3 = (v8u16)__msa_pckod_h((v8i16)reg3, (v8i16)reg2);
    // Y = (25 * B + 129 * G + 66 * R + 0x1080) >> 8.
    vec0 = __msa_dotp_u_h((v16u8)vec0, (v16u8)const_0x8119);
    vec1 = __msa_dotp_u_h((v16u8)vec1, (v16u8)const_0x8119);
    vec0 = __msa_dpadd_u_h(vec0, (v16u8)vec2, (v16u8)const_0x42);
    vec1 = __msa_dpadd_u_h(vec1, (v16u8)vec3, (v16u8)const_0x42);
    vec0 += const_0x1080;
    vec1 += const_0x1080;
    vec0 = (v8u16)__msa_srai_h((v8i16)vec0, 8);
    vec1 = (v8u16)__msa_srai_h((v8i16)vec1, 8);
    dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
    ST_UB(dst0, dst_y);
    src_argb0 += 48;
    dst_y += 16;
  }
}

void RAWToYRow_MSA(const uint8* src_argb0, uint8* dst_y, int width) {
  int x;
  v16u8 src0, src1, src2, reg0, reg1, reg2, reg3, dst0;
  v8u16 vec0, vec1, vec2, vec3;
  // Byte-pair dot-product weights: (0x42, 0x81) for (R, G) and
  // (0x19, 0x00) for (B, spare byte).
  v8u16 const_0x8142 = (v8u16)__msa_fill_h(0x8142);
  v8u16 const_0x19 = (v8u16)__msa_fill_h(0x19);
  v8u16 const_0x1080 = (v8u16)__msa_fill_h(0x1080);
  // Regroup the packed 3-byte pixels into 4-byte (R,G,B,nextR) groups.
  v16i8 mask0 = {0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12};
  v16i8 mask1 = {12, 13, 14, 15, 15, 16, 17, 18,
                 18, 19, 20, 21, 21, 22, 23, 24};
  v16i8 mask2 = {8, 9, 10, 11, 11, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20};
  v16i8 mask3 = {4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16};
  v16i8 zero = {0};

  for (x = 0; x < width; x += 16) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_argb0, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_argb0, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)src_argb0, 32);
    reg0 = (v16u8)__msa_vshf_b(mask0, zero, (v16i8)src0);
    reg1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
    reg2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src2, (v16i8)src1);
    reg3 = (v16u8)__msa_vshf_b(mask3, zero, (v16i8)src2);
    vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
    vec1 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2);
    vec2 = (v8u16)__msa_pckod_h((v8i16)reg1, (v8i16)reg0);
    vec3 = (v8u16)__msa_pckod_h((v8i16)reg3, (v8i16)reg2);
    // Y = (66 * R + 129 * G + 25 * B + 0x1080) >> 8.
    vec0 = __msa_dotp_u_h((v16u8)vec0, (v16u8)const_0x8142);
    vec1 = __msa_dotp_u_h((v16u8)vec1, (v16u8)const_0x8142);
    vec0 = __msa_dpadd_u_h(vec0, (v16u8)vec2, (v16u8)const_0x19);
    vec1 = __msa_dpadd_u_h(vec1, (v16u8)vec3, (v16u8)const_0x19);
    vec0 += const_0x1080;
    vec1 += const_0x1080;
    vec0 = (v8u16)__msa_srai_h((v8i16)vec0, 8);
    vec1 = (v8u16)__msa_srai_h((v8i16)vec1, 8);
    dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
    ST_UB(dst0, dst_y);
    src_argb0 += 48;
    dst_y += 16;
  }
}

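RGB24ToYRow_MSA and RAWToYRow_MSA reduce to the same weighted sum; only the byte pairing follows the source channel order. A scalar model of the RAW variant (hypothetical helper, assuming the lane layout in the comments above):

// Hypothetical scalar model of the RAWToYRow_MSA dot products.
static uint8 RAWLanesToY(uint8 r, uint8 g, uint8 b, uint8 next_r) {
  uint16 y = r * 0x42 + g * 0x81;  // __msa_dotp_u_h with const_0x8142.
  y += b * 0x19 + next_r * 0x00;   // __msa_dpadd_u_h with const_0x19.
  return (uint8)((y + 0x1080) >> 8);
}
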
void ARGB1555ToUVRow_MSA(const uint8* src_argb1555,
                         int src_stride_argb1555,
                         uint8* dst_u,
                         uint8* dst_v,
                         int width) {
  int x;
  const uint16* s = (const uint16*)src_argb1555;
  const uint16* t = (const uint16*)(src_argb1555 + src_stride_argb1555);
  int64_t res0, res1;
  v8u16 src0, src1, src2, src3, reg0, reg1, reg2, reg3;
  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6;
  v16u8 dst0;
  // U = (112 * B - 74 * G - 38 * R + 0x8080) >> 8 and
  // V = (112 * R - 94 * G - 18 * B + 0x8080) >> 8, on 2x2 averages.
  v8u16 const_0x70 = (v8u16)__msa_ldi_h(0x70);
  v8u16 const_0x4A = (v8u16)__msa_ldi_h(0x4A);
  v8u16 const_0x26 = (v8u16)__msa_ldi_h(0x26);
  v8u16 const_0x5E = (v8u16)__msa_ldi_h(0x5E);
  v8u16 const_0x12 = (v8u16)__msa_ldi_h(0x12);
  v8u16 const_0x8080 = (v8u16)__msa_fill_h(0x8080);
  v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F);

  for (x = 0; x < width; x += 16) {
    src0 = (v8u16)__msa_ld_b((v8i16*)s, 0);
    src1 = (v8u16)__msa_ld_b((v8i16*)s, 16);
    src2 = (v8u16)__msa_ld_b((v8i16*)t, 0);
    src3 = (v8u16)__msa_ld_b((v8i16*)t, 16);
    // Extract each 5-bit channel and add the two rows: vertical sums.
    vec0 = src0 & const_0x1F;
    vec1 = src1 & const_0x1F;
    vec0 += src2 & const_0x1F;
    vec1 += src3 & const_0x1F;
    vec0 = (v8u16)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
    src0 = (v8u16)__msa_srai_h((v8i16)src0, 5);
    src1 = (v8u16)__msa_srai_h((v8i16)src1, 5);
    src2 = (v8u16)__msa_srai_h((v8i16)src2, 5);
    src3 = (v8u16)__msa_srai_h((v8i16)src3, 5);
    vec2 = src0 & const_0x1F;
    vec3 = src1 & const_0x1F;
    vec2 += src2 & const_0x1F;
    vec3 += src3 & const_0x1F;
    vec2 = (v8u16)__msa_pckev_b((v16i8)vec3, (v16i8)vec2);
    src0 = (v8u16)__msa_srai_h((v8i16)src0, 5);
    src1 = (v8u16)__msa_srai_h((v8i16)src1, 5);
    src2 = (v8u16)__msa_srai_h((v8i16)src2, 5);
    src3 = (v8u16)__msa_srai_h((v8i16)src3, 5);
    vec4 = src0 & const_0x1F;
    vec5 = src1 & const_0x1F;
    vec4 += src2 & const_0x1F;
    vec5 += src3 & const_0x1F;
    vec4 = (v8u16)__msa_pckev_b((v16i8)vec5, (v16i8)vec4);
    // Add horizontal neighbours: each lane is now a 2x2 block sum.
    vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
    vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
    vec4 = __msa_hadd_u_h((v16u8)vec4, (v16u8)vec4);
    // Scale the 4-pixel sums of 5-bit values up to 8-bit averages.
    vec6 = (v8u16)__msa_slli_h((v8i16)vec0, 1);
    vec6 |= (v8u16)__msa_srai_h((v8i16)vec0, 6);
    vec0 = (v8u16)__msa_slli_h((v8i16)vec2, 1);
    vec0 |= (v8u16)__msa_srai_h((v8i16)vec2, 6);
    vec2 = (v8u16)__msa_slli_h((v8i16)vec4, 1);
    vec2 |= (v8u16)__msa_srai_h((v8i16)vec4, 6);
    // Here vec6 = B, vec0 = G, vec2 = R.
    reg0 = vec6 * const_0x70;
    reg1 = vec0 * const_0x4A;
    reg2 = vec2 * const_0x70;
    reg3 = vec0 * const_0x5E;
    reg0 += const_0x8080;
    reg1 += vec2 * const_0x26;
    reg2 += const_0x8080;
    reg3 += vec6 * const_0x12;
    reg0 -= reg1;  // U
    reg2 -= reg3;  // V
    reg0 = (v8u16)__msa_srai_h((v8i16)reg0, 8);
    reg2 = (v8u16)__msa_srai_h((v8i16)reg2, 8);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg2, (v16i8)reg0);
    res0 = __msa_copy_u_d((v2i64)dst0, 0);
    res1 = __msa_copy_u_d((v2i64)dst0, 1);
    SD(res0, dst_u);
    SD(res1, dst_v);
    s += 16;
    t += 16;
    dst_u += 8;
    dst_v += 8;
  }
}

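A scalar sketch of the chroma math shared by the four *ToUVRow functions in this patch (hypothetical helper; b, g, r stand for the 8-bit channel averages of one 2x2 block):

// Hypothetical scalar reference for one U/V sample.
static void RGBToUVPixel(uint8 b, uint8 g, uint8 r, uint8* u, uint8* v) {
  *u = (uint8)((112 * b - 74 * g - 38 * r + 0x8080) >> 8);
  *v = (uint8)((112 * r - 94 * g - 18 * b + 0x8080) >> 8);
}
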
void RGB565ToUVRow_MSA(const uint8* src_rgb565,
                       int src_stride_rgb565,
                       uint8* dst_u,
                       uint8* dst_v,
                       int width) {
  int x;
  const uint16* s = (const uint16*)src_rgb565;
  const uint16* t = (const uint16*)(src_rgb565 + src_stride_rgb565);
  int64_t res0, res1;
  v8u16 src0, src1, src2, src3, reg0, reg1, reg2, reg3;
  v8u16 vec0, vec1, vec2, vec3, vec4, vec5;
  v16u8 dst0;
  v8u16 const_0x70 = (v8u16)__msa_ldi_h(0x70);
  v8u16 const_0x4A = (v8u16)__msa_ldi_h(0x4A);
  v8u16 const_0x26 = (v8u16)__msa_ldi_h(0x26);
  v8u16 const_0x5E = (v8u16)__msa_ldi_h(0x5E);
  v8u16 const_0x12 = (v8u16)__msa_ldi_h(0x12);
  v8u16 const_32896 = (v8u16)__msa_fill_h(0x8080);
  v8u16 const_0x1F = (v8u16)__msa_ldi_h(0x1F);
  v8u16 const_0x3F = (v8u16)__msa_fill_h(0x3F);

  for (x = 0; x < width; x += 16) {
    src0 = (v8u16)__msa_ld_b((v8i16*)s, 0);
    src1 = (v8u16)__msa_ld_b((v8i16*)s, 16);
    src2 = (v8u16)__msa_ld_b((v8i16*)t, 0);
    src3 = (v8u16)__msa_ld_b((v8i16*)t, 16);
    vec0 = src0 & const_0x1F;
    vec1 = src1 & const_0x1F;
    vec0 += src2 & const_0x1F;
    vec1 += src3 & const_0x1F;
    vec0 = (v8u16)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
    src0 = (v8u16)__msa_srai_h((v8i16)src0, 5);
    src1 = (v8u16)__msa_srai_h((v8i16)src1, 5);
    src2 = (v8u16)__msa_srai_h((v8i16)src2, 5);
    src3 = (v8u16)__msa_srai_h((v8i16)src3, 5);
    vec2 = src0 & const_0x3F;
    vec3 = src1 & const_0x3F;
    vec2 += src2 & const_0x3F;
    vec3 += src3 & const_0x3F;
    vec1 = (v8u16)__msa_pckev_b((v16i8)vec3, (v16i8)vec2);
    src0 = (v8u16)__msa_srai_h((v8i16)src0, 6);
    src1 = (v8u16)__msa_srai_h((v8i16)src1, 6);
    src2 = (v8u16)__msa_srai_h((v8i16)src2, 6);
    src3 = (v8u16)__msa_srai_h((v8i16)src3, 6);
    vec4 = src0 & const_0x1F;
    vec5 = src1 & const_0x1F;
    vec4 += src2 & const_0x1F;
    vec5 += src3 & const_0x1F;
    vec2 = (v8u16)__msa_pckev_b((v16i8)vec5, (v16i8)vec4);
    vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
    vec1 = __msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
    vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
    vec3 = (v8u16)__msa_slli_h((v8i16)vec0, 1);
    vec3 |= (v8u16)__msa_srai_h((v8i16)vec0, 6);
    vec4 = (v8u16)__msa_slli_h((v8i16)vec2, 1);
    vec4 |= (v8u16)__msa_srai_h((v8i16)vec2, 6);
    reg0 = vec3 * const_0x70;
    reg1 = vec1 * const_0x4A;
    reg2 = vec4 * const_0x70;
    reg3 = vec1 * const_0x5E;
    reg0 += const_32896;
    reg1 += vec4 * const_0x26;
    reg2 += const_32896;
    reg3 += vec3 * const_0x12;
    reg0 -= reg1;
    reg2 -= reg3;
    reg0 = (v8u16)__msa_srai_h((v8i16)reg0, 8);
    reg2 = (v8u16)__msa_srai_h((v8i16)reg2, 8);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg2, (v16i8)reg0);
    res0 = __msa_copy_u_d((v2i64)dst0, 0);
    res1 = __msa_copy_u_d((v2i64)dst0, 1);
    SD(res0, dst_u);
    SD(res1, dst_v);
    s += 16;
    t += 16;
    dst_u += 8;
    dst_v += 8;
  }
}

void RGB24ToUVRow_MSA(const uint8* src_rgb0,
                      int src_stride_rgb,
                      uint8* dst_u,
                      uint8* dst_v,
                      int width) {
  int x;
  const uint8* s = src_rgb0;
  const uint8* t = src_rgb0 + src_stride_rgb;
  int64_t res0, res1;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;
  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v8i16 reg0, reg1, reg2, reg3;
  v16u8 dst0;
  v8u16 const_0x70 = (v8u16)__msa_fill_h(0x70);
  v8u16 const_0x4A = (v8u16)__msa_fill_h(0x4A);
  v8u16 const_0x26 = (v8u16)__msa_fill_h(0x26);
  v8u16 const_0x5E = (v8u16)__msa_fill_h(0x5E);
  v8u16 const_0x12 = (v8u16)__msa_fill_h(0x12);
  v8u16 const_0x8080 = (v8u16)__msa_fill_h(0x8080);
  // Expands the packed 3-byte pixels to 4-byte (B,G,R,0) groups.
  v16i8 mask = {0, 1, 2, 16, 3, 4, 5, 17, 6, 7, 8, 18, 9, 10, 11, 19};
  v16i8 zero = {0};

  for (x = 0; x < width; x += 16) {
    inp0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    inp1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    inp2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
    inp3 = (v16u8)__msa_ld_b((v16i8*)t, 0);
    inp4 = (v16u8)__msa_ld_b((v16i8*)t, 16);
    inp5 = (v16u8)__msa_ld_b((v16i8*)t, 32);
    src1 = (v16u8)__msa_sldi_b((v16i8)inp1, (v16i8)inp0, 12);
    src5 = (v16u8)__msa_sldi_b((v16i8)inp4, (v16i8)inp3, 12);
    src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);
    src6 = (v16u8)__msa_sldi_b((v16i8)inp5, (v16i8)inp4, 8);
    src3 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp2, 4);
    src7 = (v16u8)__msa_sldi_b((v16i8)inp5, (v16i8)inp5, 4);
    src0 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)inp0);
    src1 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src1);
    src2 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src2);
    src3 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src3);
    src4 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)inp3);
    src5 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src5);
    src6 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src6);
    src7 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src7);
    // Interleave the two rows and add byte pairs: per-channel vertical sums.
    vec0 = (v8u16)__msa_ilvr_b((v16i8)src4, (v16i8)src0);
    vec1 = (v8u16)__msa_ilvl_b((v16i8)src4, (v16i8)src0);
    vec2 = (v8u16)__msa_ilvr_b((v16i8)src5, (v16i8)src1);
    vec3 = (v8u16)__msa_ilvl_b((v16i8)src5, (v16i8)src1);
    vec4 = (v8u16)__msa_ilvr_b((v16i8)src6, (v16i8)src2);
    vec5 = (v8u16)__msa_ilvl_b((v16i8)src6, (v16i8)src2);
    vec6 = (v8u16)__msa_ilvr_b((v16i8)src7, (v16i8)src3);
    vec7 = (v8u16)__msa_ilvl_b((v16i8)src7, (v16i8)src3);
    vec0 = (v8u16)__msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
    vec1 = (v8u16)__msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
    vec2 = (v8u16)__msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
    vec3 = (v8u16)__msa_hadd_u_h((v16u8)vec3, (v16u8)vec3);
    vec4 = (v8u16)__msa_hadd_u_h((v16u8)vec4, (v16u8)vec4);
    vec5 = (v8u16)__msa_hadd_u_h((v16u8)vec5, (v16u8)vec5);
    vec6 = (v8u16)__msa_hadd_u_h((v16u8)vec6, (v16u8)vec6);
    vec7 = (v8u16)__msa_hadd_u_h((v16u8)vec7, (v16u8)vec7);
    // Add horizontally adjacent pixels and divide by 4: 2x2 averages.
    reg0 = (v8i16)__msa_pckev_d((v2i64)vec1, (v2i64)vec0);
    reg1 = (v8i16)__msa_pckev_d((v2i64)vec3, (v2i64)vec2);
    reg2 = (v8i16)__msa_pckev_d((v2i64)vec5, (v2i64)vec4);
    reg3 = (v8i16)__msa_pckev_d((v2i64)vec7, (v2i64)vec6);
    reg0 += (v8i16)__msa_pckod_d((v2i64)vec1, (v2i64)vec0);
    reg1 += (v8i16)__msa_pckod_d((v2i64)vec3, (v2i64)vec2);
    reg2 += (v8i16)__msa_pckod_d((v2i64)vec5, (v2i64)vec4);
    reg3 += (v8i16)__msa_pckod_d((v2i64)vec7, (v2i64)vec6);
    reg0 = __msa_srai_h((v8i16)reg0, 2);
    reg1 = __msa_srai_h((v8i16)reg1, 2);
    reg2 = __msa_srai_h((v8i16)reg2, 2);
    reg3 = __msa_srai_h((v8i16)reg3, 2);
    // Deinterleave into planar averages: vec0 = B, vec1 = G, vec2 = R.
    vec4 = (v8u16)__msa_pckev_h(reg1, reg0);
    vec5 = (v8u16)__msa_pckev_h(reg3, reg2);
    vec6 = (v8u16)__msa_pckod_h(reg1, reg0);
    vec7 = (v8u16)__msa_pckod_h(reg3, reg2);
    vec0 = (v8u16)__msa_pckev_h((v8i16)vec5, (v8i16)vec4);
    vec1 = (v8u16)__msa_pckev_h((v8i16)vec7, (v8i16)vec6);
    vec2 = (v8u16)__msa_pckod_h((v8i16)vec5, (v8i16)vec4);
    // U = (112 * B - 74 * G - 38 * R + 0x8080) >> 8 and
    // V = (112 * R - 94 * G - 18 * B + 0x8080) >> 8.
    vec3 = vec0 * const_0x70;
    vec4 = vec1 * const_0x4A;
    vec5 = vec2 * const_0x26;
    vec2 *= const_0x70;
    vec1 *= const_0x5E;
    vec0 *= const_0x12;
    reg0 = __msa_subv_h((v8i16)vec3, (v8i16)vec4);
    reg1 = __msa_subv_h((v8i16)const_0x8080, (v8i16)vec5);
    reg2 = __msa_subv_h((v8i16)vec2, (v8i16)vec1);
    reg3 = __msa_subv_h((v8i16)const_0x8080, (v8i16)vec0);
    reg0 += reg1;
    reg2 += reg3;
    reg0 = __msa_srai_h(reg0, 8);
    reg2 = __msa_srai_h(reg2, 8);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg2, (v16i8)reg0);
    res0 = __msa_copy_u_d((v2i64)dst0, 0);
    res1 = __msa_copy_u_d((v2i64)dst0, 1);
    SD(res0, dst_u);
    SD(res1, dst_v);
    t += 48;
    s += 48;
    dst_u += 8;
    dst_v += 8;
  }
}

void RAWToUVRow_MSA(const uint8* src_rgb0,
                    int src_stride_rgb,
                    uint8* dst_u,
                    uint8* dst_v,
                    int width) {
  int x;
  const uint8* s = src_rgb0;
  const uint8* t = src_rgb0 + src_stride_rgb;
  int64_t res0, res1;
  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v8i16 reg0, reg1, reg2, reg3;
  v16u8 dst0;
  v8u16 const_0x70 = (v8u16)__msa_fill_h(0x70);
  v8u16 const_0x4A = (v8u16)__msa_fill_h(0x4A);
  v8u16 const_0x26 = (v8u16)__msa_fill_h(0x26);
  v8u16 const_0x5E = (v8u16)__msa_fill_h(0x5E);
  v8u16 const_0x12 = (v8u16)__msa_fill_h(0x12);
  v8u16 const_0x8080 = (v8u16)__msa_fill_h(0x8080);
  // Expands the packed 3-byte pixels to 4-byte (R,G,B,0) groups.
  v16i8 mask = {0, 1, 2, 16, 3, 4, 5, 17, 6, 7, 8, 18, 9, 10, 11, 19};
  v16i8 zero = {0};

  for (x = 0; x < width; x += 16) {
    inp0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    inp1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    inp2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
    inp3 = (v16u8)__msa_ld_b((v16i8*)t, 0);
    inp4 = (v16u8)__msa_ld_b((v16i8*)t, 16);
    inp5 = (v16u8)__msa_ld_b((v16i8*)t, 32);
    src1 = (v16u8)__msa_sldi_b((v16i8)inp1, (v16i8)inp0, 12);
    src5 = (v16u8)__msa_sldi_b((v16i8)inp4, (v16i8)inp3, 12);
    src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);
    src6 = (v16u8)__msa_sldi_b((v16i8)inp5, (v16i8)inp4, 8);
    src3 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp2, 4);
    src7 = (v16u8)__msa_sldi_b((v16i8)inp5, (v16i8)inp5, 4);
    src0 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)inp0);
    src1 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src1);
    src2 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src2);
    src3 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src3);
    src4 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)inp3);
    src5 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src5);
    src6 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src6);
    src7 = (v16u8)__msa_vshf_b(mask, (v16i8)zero, (v16i8)src7);
    // Interleave the two rows and add byte pairs: per-channel vertical sums.
    vec0 = (v8u16)__msa_ilvr_b((v16i8)src4, (v16i8)src0);
    vec1 = (v8u16)__msa_ilvl_b((v16i8)src4, (v16i8)src0);
    vec2 = (v8u16)__msa_ilvr_b((v16i8)src5, (v16i8)src1);
    vec3 = (v8u16)__msa_ilvl_b((v16i8)src5, (v16i8)src1);
    vec4 = (v8u16)__msa_ilvr_b((v16i8)src6, (v16i8)src2);
    vec5 = (v8u16)__msa_ilvl_b((v16i8)src6, (v16i8)src2);
    vec6 = (v8u16)__msa_ilvr_b((v16i8)src7, (v16i8)src3);
    vec7 = (v8u16)__msa_ilvl_b((v16i8)src7, (v16i8)src3);
    vec0 = (v8u16)__msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
    vec1 = (v8u16)__msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
    vec2 = (v8u16)__msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
    vec3 = (v8u16)__msa_hadd_u_h((v16u8)vec3, (v16u8)vec3);
    vec4 = (v8u16)__msa_hadd_u_h((v16u8)vec4, (v16u8)vec4);
    vec5 = (v8u16)__msa_hadd_u_h((v16u8)vec5, (v16u8)vec5);
    vec6 = (v8u16)__msa_hadd_u_h((v16u8)vec6, (v16u8)vec6);
    vec7 = (v8u16)__msa_hadd_u_h((v16u8)vec7, (v16u8)vec7);
    // Add horizontally adjacent pixels and divide by 4: 2x2 averages.
    reg0 = (v8i16)__msa_pckev_d((v2i64)vec1, (v2i64)vec0);
    reg1 = (v8i16)__msa_pckev_d((v2i64)vec3, (v2i64)vec2);
    reg2 = (v8i16)__msa_pckev_d((v2i64)vec5, (v2i64)vec4);
    reg3 = (v8i16)__msa_pckev_d((v2i64)vec7, (v2i64)vec6);
    reg0 += (v8i16)__msa_pckod_d((v2i64)vec1, (v2i64)vec0);
    reg1 += (v8i16)__msa_pckod_d((v2i64)vec3, (v2i64)vec2);
    reg2 += (v8i16)__msa_pckod_d((v2i64)vec5, (v2i64)vec4);
    reg3 += (v8i16)__msa_pckod_d((v2i64)vec7, (v2i64)vec6);
    reg0 = __msa_srai_h(reg0, 2);
    reg1 = __msa_srai_h(reg1, 2);
    reg2 = __msa_srai_h(reg2, 2);
    reg3 = __msa_srai_h(reg3, 2);
    vec4 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
    vec5 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2);
    vec6 = (v8u16)__msa_pckod_h((v8i16)reg1, (v8i16)reg0);
    vec7 = (v8u16)__msa_pckod_h((v8i16)reg3, (v8i16)reg2);
    // RAW pixels expand to (R,G,B,0), so the planar split differs from
    // RGB24ToUVRow_MSA: vec0 = B, vec1 = G, vec2 = R.
    vec0 = (v8u16)__msa_pckod_h((v8i16)vec5, (v8i16)vec4);
    vec1 = (v8u16)__msa_pckev_h((v8i16)vec7, (v8i16)vec6);
    vec2 = (v8u16)__msa_pckev_h((v8i16)vec5, (v8i16)vec4);
    // U = (112 * B - 74 * G - 38 * R + 0x8080) >> 8 and
    // V = (112 * R - 94 * G - 18 * B + 0x8080) >> 8.
    vec3 = vec0 * const_0x70;
    vec4 = vec1 * const_0x4A;
    vec5 = vec2 * const_0x26;
    vec2 *= const_0x70;
    vec1 *= const_0x5E;
    vec0 *= const_0x12;
    reg0 = __msa_subv_h((v8i16)vec3, (v8i16)vec4);
    reg1 = __msa_subv_h((v8i16)const_0x8080, (v8i16)vec5);
    reg2 = __msa_subv_h((v8i16)vec2, (v8i16)vec1);
    reg3 = __msa_subv_h((v8i16)const_0x8080, (v8i16)vec0);
    reg0 += reg1;
    reg2 += reg3;
    reg0 = __msa_srai_h(reg0, 8);
    reg2 = __msa_srai_h(reg2, 8);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg2, (v16i8)reg0);
    res0 = __msa_copy_u_d((v2i64)dst0, 0);
    res1 = __msa_copy_u_d((v2i64)dst0, 1);
    SD(res0, dst_u);
    SD(res1, dst_v);
    t += 48;
    s += 48;
    dst_u += 8;
    dst_v += 8;
  }
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif

#endif  // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)