Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(97)

Side by Side Diff: third_party/WebKit/Source/platform/transforms/TransformationMatrix.cpp

Issue 2392493002: Add MSA (MIPS SIMD Arch) optimized matrix transforms functions (Closed)
Patch Set: Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved. 2 * Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved.
3 * Copyright (C) 2009 Torch Mobile, Inc. 3 * Copyright (C) 2009 Torch Mobile, Inc.
4 * Copyright (C) 2013 Google Inc. All rights reserved. 4 * Copyright (C) 2013 Google Inc. All rights reserved.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY 15 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR 18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */ 26 */
27 27
28 #include "platform/transforms/TransformationMatrix.h" 28 #include "platform/transforms/TransformationMatrix.h"
29 #if HAVE(MIPS_MSA_INTRINSICS)
30 #include "platform/cpu/mips/CommonMacrosMSA.h"
31 #endif
29 32
30 #include "platform/geometry/FloatBox.h" 33 #include "platform/geometry/FloatBox.h"
31 #include "platform/geometry/FloatQuad.h" 34 #include "platform/geometry/FloatQuad.h"
32 #include "platform/geometry/FloatRect.h" 35 #include "platform/geometry/FloatRect.h"
33 #include "platform/geometry/IntRect.h" 36 #include "platform/geometry/IntRect.h"
34 #include "platform/geometry/LayoutRect.h" 37 #include "platform/geometry/LayoutRect.h"
35 #include "platform/transforms/AffineTransform.h" 38 #include "platform/transforms/AffineTransform.h"
36 #include "platform/transforms/Rotation.h" 39 #include "platform/transforms/Rotation.h"
37 40
38 #include "wtf/Assertions.h" 41 #include "wtf/Assertions.h"
(...skipping 298 matching lines...) Expand 10 before | Expand all | Expand 10 after
337 "fmul v24.2d, v24.2d, v30.d[0] \n\t" 340 "fmul v24.2d, v24.2d, v30.d[0] \n\t"
338 "fmul v25.2d, v25.2d, v30.d[0] \n\t" 341 "fmul v25.2d, v25.2d, v30.d[0] \n\t"
339 "fmul v26.2d, v26.2d, v30.d[0] \n\t" 342 "fmul v26.2d, v26.2d, v30.d[0] \n\t"
340 "fmul v27.2d, v27.2d, v30.d[0] \n\t" 343 "fmul v27.2d, v27.2d, v30.d[0] \n\t"
341 "st1 {v24.2d - v27.2d}, [%[pr]] \n\t" 344 "st1 {v24.2d - v27.2d}, [%[pr]] \n\t"
342 : [mat] "+r"(mat), [pr] "+r"(pr) 345 : [mat] "+r"(mat), [pr] "+r"(pr)
343 : [rdet] "r"(rdet) 346 : [rdet] "r"(rdet)
344 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", 347 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17",
345 "v18", "v19", "v20", "v21", "v22", "v23", "24", "25", "v26", "v27", 348 "v18", "v19", "v20", "v21", "v22", "v23", "24", "25", "v26", "v27",
346 "v28", "v29", "v30"); 349 "v28", "v29", "v30");
350 #elif HAVE(MIPS_MSA_INTRINSICS)
351 const double rDet = 1/det;
352 const double* mat = &(matrix[0][0]);
353 v2f64 mat0, mat1, mat2, mat3, mat4, mat5, mat6, mat7;
354 v2f64 rev2, rev3, rev4, rev5, rev6, rev7;
355 v2f64 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
356 v2f64 det0, det1, det2, tmp8, tmp9, tmp10, tmp11;
357 const v2f64 rdet = COPY_DOUBLE_TO_VECTOR(rDet);
358 // mat0 mat1 --> m00 m01 m02 m03
359 // mat2 mat3 --> m10 m11 m12 m13
360 // mat4 mat5 --> m20 m21 m22 m23
361 // mat6 mat7 --> m30 m31 m32 m33
362 LD_DP8(mat, 2, mat0, mat1, mat2, mat3, mat4, mat5, mat6, mat7);
363
364 // Right half
365 rev3 = SLDI_D(mat3, mat3, 8); // m13 m12
366 rev5 = SLDI_D(mat5, mat5, 8); // m23 m22
367 rev7 = SLDI_D(mat7, mat7, 8); // m33 m32
368
369 // 2*2 Determinants
370 // for A00 & A01
371 tmp0 = mat5 * rev7;
372 tmp1 = mat3 * rev7;
373 tmp2 = mat3 * rev5;
374 // for A10 & A11
375 tmp3 = mat1 * rev7;
376 tmp4 = mat1 * rev5;
377 // for A20 & A21
378 tmp5 = mat1 * rev3;
379 // for A30 & A31
380 tmp6 = (v2f64) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
381 tmp7 = (v2f64) __msa_ilvl_d((v2i64) tmp1, (v2i64) tmp0);
382 det0 = tmp6 - tmp7;
383 tmp6 = (v2f64) __msa_ilvr_d((v2i64) tmp3, (v2i64) tmp2);
384 tmp7 = (v2f64) __msa_ilvl_d((v2i64) tmp3, (v2i64) tmp2);
385 det1 = tmp6 - tmp7;
386 tmp6 = (v2f64) __msa_ilvr_d((v2i64) tmp5, (v2i64) tmp4);
387 tmp7 = (v2f64) __msa_ilvl_d((v2i64) tmp5, (v2i64) tmp4);
388 det2 = tmp6 - tmp7;
389
390 // Co-factors
391 tmp0 = mat0 * (v2f64) __msa_splati_d((v2i64) det0, 0);
392 tmp1 = mat0 * (v2f64) __msa_splati_d((v2i64) det0, 1);
393 tmp2 = mat0 * (v2f64) __msa_splati_d((v2i64) det1, 0);
394 tmp3 = mat2 * (v2f64) __msa_splati_d((v2i64) det0, 0);
395 tmp4 = mat2 * (v2f64) __msa_splati_d((v2i64) det1, 1);
396 tmp5 = mat2 * (v2f64) __msa_splati_d((v2i64) det2, 0);
397 tmp6 = mat4 * (v2f64) __msa_splati_d((v2i64) det0, 1);
398 tmp7 = mat4 * (v2f64) __msa_splati_d((v2i64) det1, 1);
399 tmp8 = mat4 * (v2f64) __msa_splati_d((v2i64) det2, 1);
400 tmp9 = mat6 * (v2f64) __msa_splati_d((v2i64) det1, 0);
401 tmp10 = mat6 * (v2f64) __msa_splati_d((v2i64) det2, 0);
402 tmp11 = mat6 * (v2f64) __msa_splati_d((v2i64) det2, 1);
403
404 tmp0 -= tmp7;
405 tmp1 -= tmp4;
406 tmp2 -= tmp5;
407 tmp3 -= tmp6;
408 tmp0 += tmp10;
409 tmp1 += tmp11;
410 tmp2 += tmp8;
411 tmp3 += tmp9;
412
413 // Multiply with 1/det
414 tmp0 *= rdet;
415 tmp1 *= rdet;
416 tmp2 *= rdet;
417 tmp3 *= rdet;
418
419 // Inverse: Upper half
420 result[0][0] = tmp3[1];
421 result[0][1] = -tmp0[1];
422 result[0][2] = tmp1[1];
423 result[0][3] = -tmp2[1];
424 result[1][0] = -tmp3[0];
425 result[1][1] = tmp0[0];
426 result[1][2] = -tmp1[0];
427 result[1][3] = tmp2[0];
428 // Left half
429 rev2 = SLDI_D(mat2, mat2, 8); // m11 m10
430 rev4 = SLDI_D(mat4, mat4, 8); // m21 m20
431 rev6 = SLDI_D(mat6, mat6, 8); // m31 m30
432
433 // 2*2 Determinants
434 // for A00 & A01
435 tmp0 = mat4 * rev6;
436 tmp1 = mat2 * rev6;
437 tmp2 = mat2 * rev4;
438 // for A10 & A11
439 tmp3 = mat0 * rev6;
440 tmp4 = mat0 * rev4;
441 // for A20 & A21
442 tmp5 = mat0 * rev2;
443 // for A30 & A31
444 tmp6 = (v2f64) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
445 tmp7 = (v2f64) __msa_ilvl_d((v2i64) tmp1, (v2i64) tmp0);
446 det0 = tmp6 - tmp7;
447 tmp6 = (v2f64) __msa_ilvr_d((v2i64) tmp3, (v2i64) tmp2);
448 tmp7 = (v2f64) __msa_ilvl_d((v2i64) tmp3, (v2i64) tmp2);
449 det1 = tmp6 - tmp7;
450 tmp6 = (v2f64) __msa_ilvr_d((v2i64) tmp5, (v2i64) tmp4);
451 tmp7 = (v2f64) __msa_ilvl_d((v2i64) tmp5, (v2i64) tmp4);
452 det2 = tmp6 - tmp7;
453
454 // Co-factors
455 tmp0 = mat3 * (v2f64) __msa_splati_d((v2i64) det0, 0);
456 tmp1 = mat1 * (v2f64) __msa_splati_d((v2i64) det0, 1);
457 tmp2 = mat1 * (v2f64) __msa_splati_d((v2i64) det0, 0);
458 tmp3 = mat1 * (v2f64) __msa_splati_d((v2i64) det1, 0);
459 tmp4 = mat3 * (v2f64) __msa_splati_d((v2i64) det1, 1);
460 tmp5 = mat3 * (v2f64) __msa_splati_d((v2i64) det2, 0);
461 tmp6 = mat5 * (v2f64) __msa_splati_d((v2i64) det0, 1);
462 tmp7 = mat5 * (v2f64) __msa_splati_d((v2i64) det1, 1);
463 tmp8 = mat5 * (v2f64) __msa_splati_d((v2i64) det2, 1);
464 tmp9 = mat7 * (v2f64) __msa_splati_d((v2i64) det1, 0);
465 tmp10 = mat7 * (v2f64) __msa_splati_d((v2i64) det2, 0);
466 tmp11 = mat7 * (v2f64) __msa_splati_d((v2i64) det2, 1);
467 tmp0 -= tmp6;
468 tmp1 -= tmp4;
469 tmp2 -= tmp7;
470 tmp3 -= tmp5;
471 tmp0 += tmp9;
472 tmp1 += tmp11;
473 tmp2 += tmp10;
474 tmp3 += tmp8;
475
476 // Multiply with 1/det
477 tmp0 *= rdet;
478 tmp1 *= rdet;
479 tmp2 *= rdet;
480 tmp3 *= rdet;
481
482 // Inverse: Lower half
483 result[2][0] = tmp0[1];
484 result[2][1] = -tmp2[1];
485 result[2][2] = tmp1[1];
486 result[2][3] = -tmp3[1];
487 result[3][0] = -tmp0[0];
488 result[3][1] = tmp2[0];
489 result[3][2] = -tmp1[0];
490 result[3][3] = tmp3[0];
347 #else 491 #else
348 // Calculate the adjoint matrix 492 // Calculate the adjoint matrix
349 adjoint(matrix, result); 493 adjoint(matrix, result);
350 494
351 // Scale the adjoint matrix to get the inverse 495 // Scale the adjoint matrix to get the inverse
352 for (int i = 0; i < 4; i++) 496 for (int i = 0; i < 4; i++)
353 for (int j = 0; j < 4; j++) 497 for (int j = 0; j < 4; j++)
354 result[i][j] = result[i][j] / det; 498 result[i][j] = result[i][j] / det;
355 #endif 499 #endif
356 return true; 500 return true;
(...skipping 821 matching lines...) Expand 10 before | Expand all | Expand 10 after
1178 "fmla v6.2d, v30.2d, v23.d[1] \t\n" 1322 "fmla v6.2d, v30.2d, v23.d[1] \t\n"
1179 "fmla v7.2d, v31.2d, v23.d[1] \t\n" 1323 "fmla v7.2d, v31.2d, v23.d[1] \t\n"
1180 1324
1181 "st1 {v0.2d - v3.2d}, [x9], 64 \t\n" 1325 "st1 {v0.2d - v3.2d}, [x9], 64 \t\n"
1182 "st1 {v4.2d - v7.2d}, [x9] \t\n" 1326 "st1 {v4.2d - v7.2d}, [x9] \t\n"
1183 : [leftMatrix] "+r"(leftMatrix), [rightMatrix] "+r"(rightMatrix) 1327 : [leftMatrix] "+r"(leftMatrix), [rightMatrix] "+r"(rightMatrix)
1184 : 1328 :
1185 : "memory", "x9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", 1329 : "memory", "x9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
1186 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "v0", "v1", 1330 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "v0", "v1",
1187 "v2", "v3", "v4", "v5", "v6", "v7"); 1331 "v2", "v3", "v4", "v5", "v6", "v7");
1332 #elif HAVE(MIPS_MSA_INTRINSICS)
1333 v2f64 vleftM0, vleftM1, vleftM2, vleftM3, vleftM4, vleftM5, vleftM6, vleftM7;
1334 v2f64 vRightM0, vRightM1, vRightM2, vRightM3, vRightM4, vRightM5, vRightM6, vRightM7;
1335 v2f64 vTmpM0, vTmpM1, vTmpM2, vTmpM3;
1336
1337 vRightM0 = LD_DP(&(m_matrix[0][0]));
1338 vRightM1 = LD_DP(&(m_matrix[0][2]));
1339 vRightM2 = LD_DP(&(m_matrix[1][0]));
1340 vRightM3 = LD_DP(&(m_matrix[1][2]));
1341 vRightM4 = LD_DP(&(m_matrix[2][0]));
1342 vRightM5 = LD_DP(&(m_matrix[2][2]));
1343 vRightM6 = LD_DP(&(m_matrix[3][0]));
1344 vRightM7 = LD_DP(&(m_matrix[3][2]));
1345
1346 vleftM0 = LD_DP(&(mat.m_matrix[0][0]));
1347 vleftM2 = LD_DP(&(mat.m_matrix[0][2]));
1348 vleftM4 = LD_DP(&(mat.m_matrix[1][0]));
1349 vleftM6 = LD_DP(&(mat.m_matrix[1][2]));
1350
1351 vleftM1 = (v2f64)__msa_splati_d((v2i64)vleftM0, 1);
1352 vleftM0 = (v2f64)__msa_splati_d((v2i64)vleftM0, 0);
1353 vleftM3 = (v2f64)__msa_splati_d((v2i64)vleftM2, 1);
1354 vleftM2 = (v2f64)__msa_splati_d((v2i64)vleftM2, 0);
1355 vleftM5 = (v2f64)__msa_splati_d((v2i64)vleftM4, 1);
1356 vleftM4 = (v2f64)__msa_splati_d((v2i64)vleftM4, 0);
1357 vleftM7 = (v2f64)__msa_splati_d((v2i64)vleftM6, 1);
1358 vleftM6 = (v2f64)__msa_splati_d((v2i64)vleftM6, 0);
1359
1360 vTmpM0 = vleftM0 * vRightM0;
1361 vTmpM1 = vleftM0 * vRightM1;
1362 vTmpM0 += vleftM1 * vRightM2;
1363 vTmpM1 += vleftM1 * vRightM3;
1364 vTmpM0 += vleftM2 * vRightM4;
1365 vTmpM1 += vleftM2 * vRightM5;
1366 vTmpM0 += vleftM3 * vRightM6;
1367 vTmpM1 += vleftM3 * vRightM7;
1368
1369 vTmpM2 = vleftM4 * vRightM0;
1370 vTmpM3 = vleftM4 * vRightM1;
1371 vTmpM2 += vleftM5 * vRightM2;
1372 vTmpM3 += vleftM5 * vRightM3;
1373 vTmpM2 += vleftM6 * vRightM4;
1374 vTmpM3 += vleftM6 * vRightM5;
1375 vTmpM2 += vleftM7 * vRightM6;
1376 vTmpM3 += vleftM7 * vRightM7;
1377
1378 vleftM0 = LD_DP(&(mat.m_matrix[2][0]));
1379 vleftM2 = LD_DP(&(mat.m_matrix[2][2]));
1380 vleftM4 = LD_DP(&(mat.m_matrix[3][0]));
1381 vleftM6 = LD_DP(&(mat.m_matrix[3][2]));
1382
1383 ST_DP(vTmpM0, &(m_matrix[0][0]));
1384 ST_DP(vTmpM1, &(m_matrix[0][2]));
1385 ST_DP(vTmpM2, &(m_matrix[1][0]));
1386 ST_DP(vTmpM3, &(m_matrix[1][2]));
1387
1388 vleftM1 = (v2f64)__msa_splati_d((v2i64)vleftM0, 1);
1389 vleftM0 = (v2f64)__msa_splati_d((v2i64)vleftM0, 0);
1390 vleftM3 = (v2f64)__msa_splati_d((v2i64)vleftM2, 1);
1391 vleftM2 = (v2f64)__msa_splati_d((v2i64)vleftM2, 0);
1392 vleftM5 = (v2f64)__msa_splati_d((v2i64)vleftM4, 1);
1393 vleftM4 = (v2f64)__msa_splati_d((v2i64)vleftM4, 0);
1394 vleftM7 = (v2f64)__msa_splati_d((v2i64)vleftM6, 1);
1395 vleftM6 = (v2f64)__msa_splati_d((v2i64)vleftM6, 0);
1396
1397 vTmpM0 = vleftM0 * vRightM0;
1398 vTmpM1 = vleftM0 * vRightM1;
1399 vTmpM0 += vleftM1 * vRightM2;
1400 vTmpM1 += vleftM1 * vRightM3;
1401 vTmpM0 += vleftM2 * vRightM4;
1402 vTmpM1 += vleftM2 * vRightM5;
1403 vTmpM0 += vleftM3 * vRightM6;
1404 vTmpM1 += vleftM3 * vRightM7;
1405
1406 vTmpM2 = vleftM4 * vRightM0;
1407 vTmpM3 = vleftM4 * vRightM1;
1408 vTmpM2 += vleftM5 * vRightM2;
1409 vTmpM3 += vleftM5 * vRightM3;
1410 vTmpM2 += vleftM6 * vRightM4;
1411 vTmpM3 += vleftM6 * vRightM5;
1412 vTmpM2 += vleftM7 * vRightM6;
1413 vTmpM3 += vleftM7 * vRightM7;
1414
1415 ST_DP(vTmpM0, &(m_matrix[2][0]));
1416 ST_DP(vTmpM1, &(m_matrix[2][2]));
1417 ST_DP(vTmpM2, &(m_matrix[3][0]));
1418 ST_DP(vTmpM3, &(m_matrix[3][2]));
1188 #elif defined(TRANSFORMATION_MATRIX_USE_X86_64_SSE2) 1419 #elif defined(TRANSFORMATION_MATRIX_USE_X86_64_SSE2)
1189 // x86_64 has 16 XMM registers which is enough to do the multiplication fully in registers. 1420 // x86_64 has 16 XMM registers which is enough to do the multiplication fully in registers.
1190 __m128d matrixBlockA = _mm_load_pd(&(m_matrix[0][0])); 1421 __m128d matrixBlockA = _mm_load_pd(&(m_matrix[0][0]));
1191 __m128d matrixBlockC = _mm_load_pd(&(m_matrix[1][0])); 1422 __m128d matrixBlockC = _mm_load_pd(&(m_matrix[1][0]));
1192 __m128d matrixBlockE = _mm_load_pd(&(m_matrix[2][0])); 1423 __m128d matrixBlockE = _mm_load_pd(&(m_matrix[2][0]));
1193 __m128d matrixBlockG = _mm_load_pd(&(m_matrix[3][0])); 1424 __m128d matrixBlockG = _mm_load_pd(&(m_matrix[3][0]));
1194 1425
1195 // First row. 1426 // First row.
1196 __m128d otherMatrixFirstParam = _mm_set1_pd(mat.m_matrix[0][0]); 1427 __m128d otherMatrixFirstParam = _mm_set1_pd(mat.m_matrix[0][0]);
1197 __m128d otherMatrixSecondParam = _mm_set1_pd(mat.m_matrix[0][1]); 1428 __m128d otherMatrixSecondParam = _mm_set1_pd(mat.m_matrix[0][1]);
(...skipping 482 matching lines...) Expand 10 before | Expand all | Expand 10 after
1680 decomposition.translateZ, decomposition.scaleX, decomposition.scaleY, 1911 decomposition.translateZ, decomposition.scaleX, decomposition.scaleY,
1681 decomposition.scaleZ, decomposition.skewXY, decomposition.skewXZ, 1912 decomposition.scaleZ, decomposition.skewXY, decomposition.skewXZ,
1682 decomposition.skewYZ, decomposition.quaternionX, 1913 decomposition.skewYZ, decomposition.quaternionX,
1683 decomposition.quaternionY, decomposition.quaternionZ, 1914 decomposition.quaternionY, decomposition.quaternionZ,
1684 decomposition.quaternionW, decomposition.perspectiveX, 1915 decomposition.quaternionW, decomposition.perspectiveX,
1685 decomposition.perspectiveY, decomposition.perspectiveZ, 1916 decomposition.perspectiveY, decomposition.perspectiveZ,
1686 decomposition.perspectiveW); 1917 decomposition.perspectiveW);
1687 } 1918 }
1688 1919
1689 } // namespace blink 1920 } // namespace blink
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698