OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2006 The Android Open Source Project | 2 * Copyright 2006 The Android Open Source Project |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkMatrix.h" | 8 #include "SkMatrix.h" |
9 #include "Sk64.h" | 9 #include "Sk64.h" |
10 #include "SkFloatBits.h" | 10 #include "SkFloatBits.h" |
11 #include "SkOnce.h" | 11 #include "SkOnce.h" |
12 #include "SkString.h" | 12 #include "SkString.h" |
13 | 13 |
14 #ifdef SK_SCALAR_IS_FLOAT | 14 #define kMatrix22Elem SK_Scalar1 |
15 #define kMatrix22Elem SK_Scalar1 | |
16 | 15 |
17 static inline float SkDoubleToFloat(double x) { | 16 static inline float SkDoubleToFloat(double x) { |
18 return static_cast<float>(x); | 17 return static_cast<float>(x); |
19 } | 18 } |
20 #else | |
21 #define kMatrix22Elem SK_Fract1 | |
22 #endif | |
23 | 19 |
24 /* [scale-x skew-x trans-x] [X] [X'] | 20 /* [scale-x skew-x trans-x] [X] [X'] |
25 [skew-y scale-y trans-y] * [Y] = [Y'] | 21 [skew-y scale-y trans-y] * [Y] = [Y'] |
26 [persp-0 persp-1 persp-2] [1] [1 ] | 22 [persp-0 persp-1 persp-2] [1] [1 ] |
27 */ | 23 */ |
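The comment above is the whole contract for the 3x3 matrix: a point goes through the top two rows and is then divided by the homogeneous w from the bottom row. A minimal sketch of that mapping, using plain floats rather than the SkScalar machinery in this file (map_point_sketch is a hypothetical name, not part of this CL):

    // mat layout: [scaleX skewX transX | skewY scaleY transY | persp0 persp1 persp2]
    static void map_point_sketch(const float mat[9], float x, float y,
                                 float* mappedX, float* mappedY) {
        float xp = mat[0] * x + mat[1] * y + mat[2];   // X'
        float yp = mat[3] * x + mat[4] * y + mat[5];   // Y'
        float w  = mat[6] * x + mat[7] * y + mat[8];   // homogeneous weight
        if (w != 0) {                                  // perspective divide (w == 1 for affine)
            xp /= w;
            yp /= w;
        }
        *mappedX = xp;
        *mappedY = yp;
    }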
28 | 24 |
29 void SkMatrix::reset() { | 25 void SkMatrix::reset() { |
30 fMat[kMScaleX] = fMat[kMScaleY] = SK_Scalar1; | 26 fMat[kMScaleX] = fMat[kMScaleY] = SK_Scalar1; |
31 fMat[kMSkewX] = fMat[kMSkewY] = | 27 fMat[kMSkewX] = fMat[kMSkewY] = |
32 fMat[kMTransX] = fMat[kMTransY] = | 28 fMat[kMTransX] = fMat[kMTransY] = |
33 fMat[kMPersp0] = fMat[kMPersp1] = 0; | 29 fMat[kMPersp0] = fMat[kMPersp1] = 0; |
34 fMat[kMPersp2] = kMatrix22Elem; | 30 fMat[kMPersp2] = kMatrix22Elem; |
35 | 31 |
36 this->setTypeMask(kIdentity_Mask | kRectStaysRect_Mask); | 32 this->setTypeMask(kIdentity_Mask | kRectStaysRect_Mask); |
37 } | 33 } |
38 | 34 |
39 // this guy aligns with the masks, so we can compute a mask from a variable 0/1 | 35 // this guy aligns with the masks, so we can compute a mask from a variable 0/1 |
40 enum { | 36 enum { |
41 kTranslate_Shift, | 37 kTranslate_Shift, |
42 kScale_Shift, | 38 kScale_Shift, |
43 kAffine_Shift, | 39 kAffine_Shift, |
44 kPerspective_Shift, | 40 kPerspective_Shift, |
45 kRectStaysRect_Shift | 41 kRectStaysRect_Shift |
46 }; | 42 }; |
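Keeping these shift values aligned with the TypeMask bits lets the code below turn a 0/1 result into a mask bit with a shift instead of a branch. A small illustration (translate_bit_sketch is hypothetical; it only assumes kTranslate_Mask == 1 << kTranslate_Shift, which is what the comment above promises):

    static int translate_bit_sketch(float tx, float ty) {
        int hasTranslate = (tx != 0) | (ty != 0);   // 0 or 1, computed without branching
        return hasTranslate << kTranslate_Shift;    // equals kTranslate_Mask iff a translation is present
    }

The same trick shows up further down, where (m00 & m11) << kRectStaysRect_Shift folds the rect-stays-rect test straight into the mask.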
47 | 43 |
48 #ifdef SK_SCALAR_IS_FLOAT | 44 static const int32_t kScalar1Int = 0x3f800000; |
49 static const int32_t kScalar1Int = 0x3f800000; | |
50 #else | |
51 #define scalarAsInt(x) (x) | |
52 static const int32_t kScalar1Int = (1 << 16); | |
53 static const int32_t kPersp1Int = (1 << 30); | |
54 #endif | |
55 | 45 |
56 uint8_t SkMatrix::computePerspectiveTypeMask() const { | 46 uint8_t SkMatrix::computePerspectiveTypeMask() const { |
57 // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment | 47 // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment |
58 // is a win, but replacing those below is not. We don't yet understand | 48 // is a win, but replacing those below is not. We don't yet understand |
59 // that result. | 49 // that result. |
60 if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || | 50 if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || |
61 fMat[kMPersp2] != kMatrix22Elem) { | 51 fMat[kMPersp2] != kMatrix22Elem) { |
62 // If this is a perspective transform, we return true for all other | 52 // If this is a perspective transform, we return true for all other |
63 // transform flags - this does not disable any optimizations, respects | 53 // transform flags - this does not disable any optimizations, respects |
64 // the rule that the type mask must be conservative, and speeds up | 54 // the rule that the type mask must be conservative, and speeds up |
(...skipping 61 matching lines...)
126 | 116 |
127 // record if the (p)rimary diagonal is all non-zero | 117 // record if the (p)rimary diagonal is all non-zero |
128 mask |= (m00 & m11) << kRectStaysRect_Shift; | 118 mask |= (m00 & m11) << kRectStaysRect_Shift; |
129 } | 119 } |
130 | 120 |
131 return SkToU8(mask); | 121 return SkToU8(mask); |
132 } | 122 } |
133 | 123 |
134 /////////////////////////////////////////////////////////////////////////////// | 124 /////////////////////////////////////////////////////////////////////////////// |
135 | 125 |
136 #ifdef SK_SCALAR_IS_FLOAT | |
137 | |
138 bool operator==(const SkMatrix& a, const SkMatrix& b) { | 126 bool operator==(const SkMatrix& a, const SkMatrix& b) { |
139 const SkScalar* SK_RESTRICT ma = a.fMat; | 127 const SkScalar* SK_RESTRICT ma = a.fMat; |
140 const SkScalar* SK_RESTRICT mb = b.fMat; | 128 const SkScalar* SK_RESTRICT mb = b.fMat; |
141 | 129 |
142 return ma[0] == mb[0] && ma[1] == mb[1] && ma[2] == mb[2] && | 130 return ma[0] == mb[0] && ma[1] == mb[1] && ma[2] == mb[2] && |
143 ma[3] == mb[3] && ma[4] == mb[4] && ma[5] == mb[5] && | 131 ma[3] == mb[3] && ma[4] == mb[4] && ma[5] == mb[5] && |
144 ma[6] == mb[6] && ma[7] == mb[7] && ma[8] == mb[8]; | 132 ma[6] == mb[6] && ma[7] == mb[7] && ma[8] == mb[8]; |
145 } | 133 } |
146 | 134 |
147 #endif | |
148 | |
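One property of the exact element-wise compare that is easy to forget (illustrative only, not a change requested here): it inherits IEEE float semantics, so signed zeros compare equal and NaNs never do. Assuming the public set(index, value) accessor:

    SkMatrix a, b;
    a.reset();
    b.reset();
    b.set(SkMatrix::kMTransX, -0.0f);        // store a negative zero directly
    SkASSERT(a == b);                        // 0.0f == -0.0f under IEEE rules
    b.set(SkMatrix::kMTransX, SK_ScalarNaN);
    SkASSERT(!(b == b));                     // NaN != NaN, so b no longer equals itself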
149 /////////////////////////////////////////////////////////////////////////////// | 135 /////////////////////////////////////////////////////////////////////////////// |
150 | 136 |
151 // helper function to determine if upper-left 2x2 of matrix is degenerate | 137 // helper function to determine if upper-left 2x2 of matrix is degenerate |
152 static inline bool is_degenerate_2x2(SkScalar scaleX, SkScalar skewX, | 138 static inline bool is_degenerate_2x2(SkScalar scaleX, SkScalar skewX, |
153 SkScalar skewY, SkScalar scaleY) { | 139 SkScalar skewY, SkScalar scaleY) { |
154 SkScalar perp_dot = scaleX*scaleY - skewX*skewY; | 140 SkScalar perp_dot = scaleX*scaleY - skewX*skewY; |
155     return SkScalarNearlyZero(perp_dot, SK_ScalarNearlyZero*SK_ScalarNearlyZero); | 141     return SkScalarNearlyZero(perp_dot, SK_ScalarNearlyZero*SK_ScalarNearlyZero); |
156 } | 142 } |
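perp_dot is just the determinant of the upper-left 2x2, so the helper reports true exactly when the two column vectors are (nearly) parallel. Two quick sanity checks, purely illustrative:

    SkASSERT(!is_degenerate_2x2(0, -1, 1, 0));   // 90-degree rotation: 0*0 - (-1)*1 = 1
    SkASSERT( is_degenerate_2x2(2,  4, 1, 2));   // rank-1 matrix:      2*2 -  4 *1 = 0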
157 | 143 |
158 /////////////////////////////////////////////////////////////////////////////// | 144 /////////////////////////////////////////////////////////////////////////////// |
(...skipping 446 matching lines...)
605 } | 591 } |
606 this->setTypeMask(mask); | 592 this->setTypeMask(mask); |
607 } | 593 } |
608 // shared cleanup | 594 // shared cleanup |
609 fMat[kMPersp2] = kMatrix22Elem; | 595 fMat[kMPersp2] = kMatrix22Elem; |
610 return true; | 596 return true; |
611 } | 597 } |
612 | 598 |
613 /////////////////////////////////////////////////////////////////////////////// | 599 /////////////////////////////////////////////////////////////////////////////// |
614 | 600 |
615 #ifdef SK_SCALAR_IS_FLOAT | 601 static inline int fixmuladdmul(float a, float b, float c, float d, |
616 static inline int fixmuladdmul(float a, float b, float c, float d, | 602 float* result) { |
617 float* result) { | 603 *result = SkDoubleToFloat((double)a * b + (double)c * d); |
618 *result = SkDoubleToFloat((double)a * b + (double)c * d); | 604 return true; |
619 return true; | 605 } |
620 } | |
621 | 606 |
622 static inline bool rowcol3(const float row[], const float col[], | 607 static inline bool rowcol3(const float row[], const float col[], |
623 float* result) { | 608 float* result) { |
624 *result = row[0] * col[0] + row[1] * col[3] + row[2] * col[6]; | 609 *result = row[0] * col[0] + row[1] * col[3] + row[2] * col[6]; |
625 return true; | 610 return true; |
626 } | 611 } |
627 | 612 |
628 static inline int negifaddoverflows(float& result, float a, float b) { | 613 static inline int negifaddoverflows(float& result, float a, float b) { |
629 result = a + b; | 614 result = a + b; |
630 return 0; | 615 return 0; |
631 } | 616 } |
632 #else | |
633 static inline bool fixmuladdmul(SkFixed a, SkFixed b, SkFixed c, SkFixed d, | |
634 SkFixed* result) { | |
635 Sk64 tmp1, tmp2; | |
636 tmp1.setMul(a, b); | |
637 tmp2.setMul(c, d); | |
638 tmp1.add(tmp2); | |
639 if (tmp1.isFixed()) { | |
640 *result = tmp1.getFixed(); | |
641 return true; | |
642 } | |
643 return false; | |
644 } | |
645 | |
646 static inline SkFixed fracmuladdmul(SkFixed a, SkFract b, SkFixed c, | |
647 SkFract d) { | |
648 Sk64 tmp1, tmp2; | |
649 tmp1.setMul(a, b); | |
650 tmp2.setMul(c, d); | |
651 tmp1.add(tmp2); | |
652 return tmp1.getFract(); | |
653 } | |
654 | |
655 static inline bool rowcol3(const SkFixed row[], const SkFixed col[], | |
656 SkFixed* result) { | |
657 Sk64 tmp1, tmp2; | |
658 | |
659 tmp1.setMul(row[0], col[0]); // N * fixed | |
660 tmp2.setMul(row[1], col[3]); // N * fixed | |
661 tmp1.add(tmp2); | |
662 | |
663 tmp2.setMul(row[2], col[6]); // N * fract | |
664 tmp2.roundRight(14); // make it fixed | |
665 tmp1.add(tmp2); | |
666 | |
667 if (tmp1.isFixed()) { | |
668 *result = tmp1.getFixed(); | |
669 return true; | |
670 } | |
671 return false; | |
672 } | |
673 | |
674 static inline int negifaddoverflows(SkFixed& result, SkFixed a, SkFixed b) { | |
675 SkFixed c = a + b; | |
676 result = c; | |
677 return (c ^ a) & (c ^ b); | |
678 } | |
679 #endif | |
680 | 617 |
681 static void normalize_perspective(SkScalar mat[9]) { | 618 static void normalize_perspective(SkScalar mat[9]) { |
682 if (SkScalarAbs(mat[SkMatrix::kMPersp2]) > kMatrix22Elem) { | 619 if (SkScalarAbs(mat[SkMatrix::kMPersp2]) > kMatrix22Elem) { |
683 for (int i = 0; i < 9; i++) | 620 for (int i = 0; i < 9; i++) |
684 mat[i] = SkScalarHalf(mat[i]); | 621 mat[i] = SkScalarHalf(mat[i]); |
685 } | 622 } |
686 } | 623 } |
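Halving every entry is legitimate because a projective matrix is only defined up to a non-zero scale: the perspective divide cancels any common factor, so the mapping is unchanged while the stored values are pulled back toward a safer range. The cancellation, written out for an arbitrary non-zero k:

    //   mapped X = (k*sx*x + k*kx*y + k*tx) / (k*p0*x + k*p1*y + k*p2)
    //            = (  sx*x +   kx*y +   tx) / (  p0*x +   p1*y +   p2)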
687 | 624 |
688 bool SkMatrix::setConcat(const SkMatrix& a, const SkMatrix& b) { | 625 bool SkMatrix::setConcat(const SkMatrix& a, const SkMatrix& b) { |
689 TypeMask aType = a.getPerspectiveTypeMaskOnly(); | 626 TypeMask aType = a.getPerspectiveTypeMaskOnly(); |
(...skipping 96 matching lines...)
786 // to ourselves inside setConcat() | 723 // to ourselves inside setConcat() |
787 return mat.isIdentity() || this->setConcat(mat, *this); | 724 return mat.isIdentity() || this->setConcat(mat, *this); |
788 } | 725 } |
789 | 726 |
790 /////////////////////////////////////////////////////////////////////////////// | 727 /////////////////////////////////////////////////////////////////////////////// |
791 | 728 |
792 /* Matrix inversion is very expensive, but also the place where keeping | 729 /* Matrix inversion is very expensive, but also the place where keeping |
793 precision may be most important (here and matrix concat). Hence to avoid | 730 precision may be most important (here and matrix concat). Hence to avoid |
794 bitmap blitting artifacts when walking the inverse, we use doubles for | 731 bitmap blitting artifacts when walking the inverse, we use doubles for |
795 the intermediate math, even though we know that is more expensive. | 732 the intermediate math, even though we know that is more expensive. |
796 The fixed counter part is us using Sk64 for temp calculations. | |
797 */ | 733 */ |
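A concrete illustration of why the doubles matter (numbers chosen for illustration, not from this CL): when the two products of a 2x2 determinant nearly cancel, single precision rounds them to the same value before the subtraction ever happens. On standard IEEE single-precision arithmetic:

    float  a = 4097.f, b = 4096.f, c = 4098.f;
    float  detF = a * a - b * c;                   // 0.0f: both products round to 16785408
    double detD = (double)a * a - (double)b * c;   // 1.0: the exact value of 4097^2 - 4096*4098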
798 | 734 |
799 #ifdef SK_SCALAR_IS_FLOAT | 735 typedef double SkDetScalar; |
800 typedef double SkDetScalar; | 736 #define SkPerspMul(a, b) SkScalarMul(a, b) |
801 #define SkPerspMul(a, b) SkScalarMul(a, b) | 737 #define SkScalarMulShift(a, b, s) SkDoubleToFloat((a) * (b)) |
802 #define SkScalarMulShift(a, b, s) SkDoubleToFloat((a) * (b)) | 738 static double sk_inv_determinant(const float mat[9], int isPerspective, |
803 static double sk_inv_determinant(const float mat[9], int isPerspective, | 739 int* /* (only used in Fixed case) */) { |
804 int* /* (only used in Fixed case) */) { | 740 double det; |
805 double det; | |
806 | 741 |
807 if (isPerspective) { | 742 if (isPerspective) { |
808         det = mat[SkMatrix::kMScaleX] * ((double)mat[SkMatrix::kMScaleY] * mat[SkMatrix::kMPersp2] - (double)mat[SkMatrix::kMTransY] * mat[SkMatrix::kMPersp1]) + | 743         det = mat[SkMatrix::kMScaleX] * ((double)mat[SkMatrix::kMScaleY] * mat[SkMatrix::kMPersp2] - (double)mat[SkMatrix::kMTransY] * mat[SkMatrix::kMPersp1]) + |
809               mat[SkMatrix::kMSkewX] * ((double)mat[SkMatrix::kMTransY] * mat[SkMatrix::kMPersp0] - (double)mat[SkMatrix::kMSkewY] * mat[SkMatrix::kMPersp2]) + | 744               mat[SkMatrix::kMSkewX] * ((double)mat[SkMatrix::kMTransY] * mat[SkMatrix::kMPersp0] - (double)mat[SkMatrix::kMSkewY] * mat[SkMatrix::kMPersp2]) + |
810               mat[SkMatrix::kMTransX] * ((double)mat[SkMatrix::kMSkewY] * mat[SkMatrix::kMPersp1] - (double)mat[SkMatrix::kMScaleY] * mat[SkMatrix::kMPersp0]); | 745               mat[SkMatrix::kMTransX] * ((double)mat[SkMatrix::kMSkewY] * mat[SkMatrix::kMPersp1] - (double)mat[SkMatrix::kMScaleY] * mat[SkMatrix::kMPersp0]); |
811 } else { | 746 } else { |
812         det = (double)mat[SkMatrix::kMScaleX] * mat[SkMatrix::kMScaleY] - (double)mat[SkMatrix::kMSkewX] * mat[SkMatrix::kMSkewY]; | 747         det = (double)mat[SkMatrix::kMScaleX] * mat[SkMatrix::kMScaleY] - (double)mat[SkMatrix::kMSkewX] * mat[SkMatrix::kMSkewY]; |
813 } | |
814 | |
815     // Since the determinant is on the order of the cube of the matrix members, | |
816     // compare to the cube of the default nearly-zero constant (although an | |
817     // estimate of the condition number would be better if it wasn't so expensive). | |
818     if (SkScalarNearlyZero((float)det, SK_ScalarNearlyZero * SK_ScalarNearlyZero * SK_ScalarNearlyZero)) { | |
819 return 0; | |
820 } | |
821 return 1.0 / det; | |
822 } | |
823     // we declare a,b,c,d to all be doubles, because we want to perform | |
824 // double-precision muls and subtract, even though the original values are | |
825 // from the matrix, which are floats. | |
826 static float inline mul_diff_scale(double a, double b, double c, double d, | |
827 double scale) { | |
828 return SkDoubleToFloat((a * b - c * d) * scale); | |
829 } | |
830 #else | |
831 typedef SkFixed SkDetScalar; | |
832 #define SkPerspMul(a, b) SkFractMul(a, b) | |
833 #define SkScalarMulShift(a, b, s) SkMulShift(a, b, s) | |
834 static void set_muladdmul(Sk64* dst, int32_t a, int32_t b, int32_t c, | |
835 int32_t d) { | |
836 Sk64 tmp; | |
837 dst->setMul(a, b); | |
838 tmp.setMul(c, d); | |
839 dst->add(tmp); | |
840 } | 748 } |
841 | 749 |
842 static SkFixed sk_inv_determinant(const SkFixed mat[9], int isPerspective, | 750 // Since the determinant is on the order of the cube of the matrix members, |
843 int* shift) { | 751 // compare to the cube of the default nearly-zero constant (although an |
844     Sk64 tmp1, tmp2; | 752     // estimate of the condition number would be better if it wasn't so expensive). |
845 | 753     if (SkScalarNearlyZero((float)det, SK_ScalarNearlyZero * SK_ScalarNearlyZero * SK_ScalarNearlyZero)) { |
846     if (isPerspective) { | 754         return 0; |
847         tmp1.setMul(mat[SkMatrix::kMScaleX], fracmuladdmul(mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp2], -mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp1])); | |
848         tmp2.setMul(mat[SkMatrix::kMSkewX], fracmuladdmul(mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp0], -mat[SkMatrix::kMSkewY], mat[SkMatrix::kMPersp2])); | |
849         tmp1.add(tmp2); | |
850         tmp2.setMul(mat[SkMatrix::kMTransX], fracmuladdmul(mat[SkMatrix::kMSkewY], mat[SkMatrix::kMPersp1], -mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp0])); | |
851 tmp1.add(tmp2); | |
852 } else { | |
853 tmp1.setMul(mat[SkMatrix::kMScaleX], mat[SkMatrix::kMScaleY]); | |
854 tmp2.setMul(mat[SkMatrix::kMSkewX], mat[SkMatrix::kMSkewY]); | |
855 tmp1.sub(tmp2); | |
856 } | |
857 | |
858 int s = tmp1.getClzAbs(); | |
859 *shift = s; | |
860 | |
861 SkFixed denom; | |
862 if (s <= 32) { | |
863 denom = tmp1.getShiftRight(33 - s); | |
864 } else { | |
865 denom = (int32_t)tmp1.fLo << (s - 33); | |
866 } | |
867 | |
868 if (denom == 0) { | |
869 return 0; | |
870 } | |
871 /** This could perhaps be a special fractdiv function, since both of its | |
872 arguments are known to have bit 31 clear and bit 30 set (when they | |
873 are made positive), thus eliminating the need for calling clz() | |
874 */ | |
875 return SkFractDiv(SK_Fract1, denom); | |
876 } | 755 } |
877 #endif | 756 return 1.0 / det; |
| 757 } |
 | 758 // we declare a,b,c,d to all be doubles, because we want to perform |
| 759 // double-precision muls and subtract, even though the original values are |
| 760 // from the matrix, which are floats. |
| 761 static float inline mul_diff_scale(double a, double b, double c, double d, |
| 762 double scale) { |
| 763 return SkDoubleToFloat((a * b - c * d) * scale); |
| 764 } |
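For context, mul_diff_scale is the building block the inverse uses per cofactor: both products and the subtraction happen in double, and scale is expected to be the reciprocal determinant from sk_inv_determinant. A hedged sketch of a typical call site (inv[] is hypothetical; the real call sites sit in the lines this view elides):

    double invDet = sk_inv_determinant(mat, isPersp, NULL);
    // (0,0) cofactor of the inverse, i.e. the new kMScaleX entry:
    inv[SkMatrix::kMScaleX] = mul_diff_scale(mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp2],
                                             mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp1],
                                             invDet);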
878 | 765 |
879 void SkMatrix::SetAffineIdentity(SkScalar affine[6]) { | 766 void SkMatrix::SetAffineIdentity(SkScalar affine[6]) { |
880 affine[kAScaleX] = SK_Scalar1; | 767 affine[kAScaleX] = SK_Scalar1; |
881 affine[kASkewY] = 0; | 768 affine[kASkewY] = 0; |
882 affine[kASkewX] = 0; | 769 affine[kASkewX] = 0; |
883 affine[kAScaleY] = SK_Scalar1; | 770 affine[kAScaleY] = SK_Scalar1; |
884 affine[kATransX] = 0; | 771 affine[kATransX] = 0; |
885 affine[kATransY] = 0; | 772 affine[kATransY] = 0; |
886 } | 773 } |
887 | 774 |
(...skipping 1060 matching lines...)
1948 | 1835 |
1949 #ifdef SK_DEVELOPER | 1836 #ifdef SK_DEVELOPER |
1950 void SkMatrix::dump() const { | 1837 void SkMatrix::dump() const { |
1951 SkString str; | 1838 SkString str; |
1952 this->toString(&str); | 1839 this->toString(&str); |
1953 SkDebugf("%s\n", str.c_str()); | 1840 SkDebugf("%s\n", str.c_str()); |
1954 } | 1841 } |
1955 | 1842 |
1956 void SkMatrix::toString(SkString* str) const { | 1843 void SkMatrix::toString(SkString* str) const { |
1957 str->appendf("[%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f]", | 1844 str->appendf("[%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f]", |
1958 #ifdef SK_SCALAR_IS_FLOAT | |
1959 fMat[0], fMat[1], fMat[2], fMat[3], fMat[4], fMat[5], | 1845 fMat[0], fMat[1], fMat[2], fMat[3], fMat[4], fMat[5], |
1960 fMat[6], fMat[7], fMat[8]); | 1846 fMat[6], fMat[7], fMat[8]); |
1961 #else | |
1962 SkFixedToFloat(fMat[0]), SkFixedToFloat(fMat[1]), SkFixedToFloat(fMat[2]), | |
1963 SkFixedToFloat(fMat[3]), SkFixedToFloat(fMat[4]), SkFixedToFloat(fMat[5]), | |
1964 SkFractToFloat(fMat[6]), SkFractToFloat(fMat[7]), SkFractToFloat(fMat[8])); | |
1965 #endif | |
1966 } | 1847 } |
1967 #endif | 1848 #endif |
1968 | 1849 |
1969 /////////////////////////////////////////////////////////////////////////////// | 1850 /////////////////////////////////////////////////////////////////////////////// |
1970 | 1851 |
1971 #include "SkMatrixUtils.h" | 1852 #include "SkMatrixUtils.h" |
1972 | 1853 |
1973 bool SkTreatAsSprite(const SkMatrix& mat, int width, int height, | 1854 bool SkTreatAsSprite(const SkMatrix& mat, int width, int height, |
1974 unsigned subpixelBits) { | 1855 unsigned subpixelBits) { |
1975 // quick reject on affine or perspective | 1856 // quick reject on affine or perspective |
(...skipping 136 matching lines...)
2112 rotation1->fX = cos1; | 1993 rotation1->fX = cos1; |
2113 rotation1->fY = sin1; | 1994 rotation1->fY = sin1; |
2114 } | 1995 } |
2115 if (NULL != rotation2) { | 1996 if (NULL != rotation2) { |
2116 rotation2->fX = cos2; | 1997 rotation2->fX = cos2; |
2117 rotation2->fY = sin2; | 1998 rotation2->fY = sin2; |
2118 } | 1999 } |
2119 | 2000 |
2120 return true; | 2001 return true; |
2121 } | 2002 } |