| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "config.h" | 5 #include "config.h" |
| 6 #include "core/css/parser/CSSTokenizer.h" | 6 #include "core/css/parser/CSSTokenizer.h" |
| 7 | 7 |
| 8 #include "core/css/parser/MediaQueryBlockWatcher.h" | 8 #include "core/css/parser/MediaQueryBlockWatcher.h" |
| 9 #include "wtf/PassOwnPtr.h" | |
| 10 #include <gtest/gtest.h> | 9 #include <gtest/gtest.h> |
| 11 | 10 |
| 12 namespace blink { | 11 namespace blink { |
| 13 | 12 |
| 13 // This lets us see the line numbers of failing tests |
| 14 #define TEST_TOKENS(string, ...) { \ |
| 15 String s = string; \ |
| 16 SCOPED_TRACE(s.ascii().data()); \ |
| 17 testTokens(string, __VA_ARGS__); \ |
| 18 } |
| 19 |
| 20 void compareTokens(const CSSParserToken& expected, const CSSParserToken& actual) |
| 21 { |
| 22 ASSERT_EQ(expected.type(), actual.type()); |
| 23 switch (expected.type()) { |
| 24 case DelimiterToken: |
| 25 ASSERT_EQ(expected.delimiter(), actual.delimiter()); |
| 26 break; |
| 27 case IdentToken: |
| 28 case FunctionToken: |
| 29 case StringToken: |
| 30 ASSERT_EQ(expected.value(), actual.value()); |
| 31 break; |
| 32 case DimensionToken: |
| 33 ASSERT_EQ(expected.value(), actual.value()); |
| 34 // fallthrough |
| 35 case NumberToken: |
| 36 case PercentageToken: |
| 37 ASSERT_EQ(expected.numericValueType(), actual.numericValueType()); |
| 38 ASSERT_DOUBLE_EQ(expected.numericValue(), actual.numericValue()); |
| 39 break; |
| 40 default: |
| 41 break; |
| 42 } |
| 43 } |
| 44 |
| 45 void testTokens(const String& string, const CSSParserToken& token1, const CSSParserToken& token2 = CSSParserToken(EOFToken), const CSSParserToken& token3 = CSSParserToken(EOFToken)) |
| 46 { |
| 47 Vector<CSSParserToken> expectedTokens; |
| 48 expectedTokens.append(token1); |
| 49 if (token2.type() != EOFToken) { |
| 50 expectedTokens.append(token2); |
| 51 if (token3.type() != EOFToken) |
| 52 expectedTokens.append(token3); |
| 53 } |
| 54 |
| 55 Vector<CSSParserToken> actualTokens; |
| 56 CSSTokenizer::tokenize(string, actualTokens); |
| 57 ASSERT_FALSE(actualTokens.isEmpty()); |
| 58 ASSERT_EQ(EOFToken, actualTokens.last().type()); |
| 59 actualTokens.removeLast(); |
| 60 |
| 61 ASSERT_EQ(expectedTokens.size(), actualTokens.size()); |
| 62 for (size_t i = 0; i < expectedTokens.size(); ++i) |
| 63 compareTokens(expectedTokens[i], actualTokens[i]); |
| 64 } |
| 65 |
| 66 static CSSParserToken ident(const String& string) { return CSSParserToken(IdentToken, string); } |
| 67 static CSSParserToken string(const String& string) { return CSSParserToken(StringToken, string); } |
| 68 static CSSParserToken function(const String& string) { return CSSParserToken(FunctionToken, string); } |
| 69 static CSSParserToken delim(char c) { return CSSParserToken(DelimiterToken, c); } |
| 70 |
| 71 static CSSParserToken number(NumericValueType type, double value) |
| 72 { |
| 73 return CSSParserToken(NumberToken, value, type); |
| 74 } |
| 75 |
| 76 static CSSParserToken dimension(NumericValueType type, double value, const String& string) |
| 77 { |
| 78 CSSParserToken token = number(type, value); |
| 79 token.convertToDimensionWithUnit(string); |
| 80 return token; |
| 81 } |
| 82 |
| 83 static CSSParserToken percentage(NumericValueType type, double value) |
| 84 { |
| 85 CSSParserToken token = number(type, value); |
| 86 token.convertToPercentage(); |
| 87 return token; |
| 88 } |
| 89 |
| 90 DEFINE_STATIC_LOCAL(CSSParserToken, whitespace, (WhitespaceToken)); |
| 91 DEFINE_STATIC_LOCAL(CSSParserToken, colon, (ColonToken)); |
| 92 DEFINE_STATIC_LOCAL(CSSParserToken, semicolon, (SemicolonToken)); |
| 93 DEFINE_STATIC_LOCAL(CSSParserToken, comma, (CommaToken)); |
| 94 DEFINE_STATIC_LOCAL(CSSParserToken, leftParenthesis, (LeftParenthesisToken)); |
| 95 DEFINE_STATIC_LOCAL(CSSParserToken, rightParenthesis, (RightParenthesisToken)); |
| 96 DEFINE_STATIC_LOCAL(CSSParserToken, leftBracket, (LeftBracketToken)); |
| 97 DEFINE_STATIC_LOCAL(CSSParserToken, rightBracket, (RightBracketToken)); |
| 98 DEFINE_STATIC_LOCAL(CSSParserToken, leftBrace, (LeftBraceToken)); |
| 99 DEFINE_STATIC_LOCAL(CSSParserToken, rightBrace, (RightBraceToken)); |
| 100 DEFINE_STATIC_LOCAL(CSSParserToken, badString, (BadStringToken)); |
| 101 DEFINE_STATIC_LOCAL(CSSParserToken, comment, (CommentToken)); |
| 102 |
| 103 String fromUChar32(UChar32 c) |
| 104 { |
| 105 StringBuilder input; |
| 106 input.append(c); |
| 107 return input.toString(); |
| 108 } |
| 109 |
| 110 TEST(CSSTokenizerTest, SingleCharacterTokens) |
| 111 { |
| 112 TEST_TOKENS("(", leftParenthesis); |
| 113 TEST_TOKENS(")", rightParenthesis); |
| 114 TEST_TOKENS("[", leftBracket); |
| 115 TEST_TOKENS("]", rightBracket); |
| 116 TEST_TOKENS(",", comma); |
| 117 TEST_TOKENS(":", colon); |
| 118 TEST_TOKENS(";", semicolon); |
| 119 TEST_TOKENS(")[", rightParenthesis, leftBracket); |
| 120 TEST_TOKENS("[)", leftBracket, rightParenthesis); |
| 121 TEST_TOKENS("{}", leftBrace, rightBrace); |
| 122 TEST_TOKENS(",,", comma, comma); |
| 123 } |
| 124 |
| 125 TEST(CSSTokenizerTest, DelimiterToken) |
| 126 { |
| 127 TEST_TOKENS("*", delim('*')); |
| 128 TEST_TOKENS("%", delim('%')); |
| 129 TEST_TOKENS("~", delim('~')); |
| 130 TEST_TOKENS("&", delim('&')); |
| 131 TEST_TOKENS("\x7f", delim('\x7f')); |
| 132 TEST_TOKENS("\1", delim('\x1')); |
| 133 } |
| 134 |
| 135 TEST(CSSTokenizerTest, WhitespaceTokens) |
| 136 { |
| 137 TEST_TOKENS(" ", whitespace); |
| 138 TEST_TOKENS("\n\rS", whitespace, ident("S")); |
| 139 TEST_TOKENS(" *", whitespace, delim('*')); |
| 140 TEST_TOKENS("\r\n\f\t2", whitespace, number(IntegerValueType, 2)); |
| 141 } |
| 142 |
| 143 TEST(CSSTokenizerTest, Escapes) |
| 144 { |
| 145 TEST_TOKENS("hel\\6Co", ident("hello")); |
| 146 TEST_TOKENS("\\26 B", ident("&B")); |
| 147 TEST_TOKENS("'hel\\6c o'", string("hello")); |
| 148 TEST_TOKENS("'spac\\65\r\ns'", string("spaces")); |
| 149 TEST_TOKENS("spac\\65\r\ns", ident("spaces")); |
| 150 TEST_TOKENS("spac\\65\n\rs", ident("space"), whitespace, ident("s")); |
| 151 TEST_TOKENS("sp\\61\tc\\65\fs", ident("spaces")); |
| 152 TEST_TOKENS("hel\\6c o", ident("hell"), whitespace, ident("o")); |
| 153 TEST_TOKENS("test\\\n", ident("test"), delim('\\'), whitespace); |
| 154 TEST_TOKENS("eof\\", ident("eof"), delim('\\')); |
| 155 TEST_TOKENS("test\\D799", ident("test" + fromUChar32(0xD799))); |
| 156 TEST_TOKENS("\\E000", ident(fromUChar32(0xE000))); |
| 157 TEST_TOKENS("te\\s\\t", ident("test")); |
| 158 TEST_TOKENS("spaces\\ in\\\tident", ident("spaces in\tident")); |
| 159 TEST_TOKENS("\\.\\,\\:\\!", ident(".,:!")); |
| 160 // FIXME: We don't correctly return replacement characters |
| 161 // String replacement = fromUChar32(0xFFFD); |
| 162 // TEST_TOKENS("null\\0", ident("null" + replacement)); |
| 163 // TEST_TOKENS("null\\0000", ident("null" + replacement)); |
| 164 // TEST_TOKENS("large\\110000", ident("large" + replacement)); |
| 165 // TEST_TOKENS("surrogate\\D800", ident("surrogate" + replacement)); |
| 166 // TEST_TOKENS("surrogate\\0DABC", ident("surrogate" + replacement)); |
| 167 // TEST_TOKENS("\\00DFFFsurrogate", ident(replacement + "surrogate")); |
| 168 // FIXME: We don't correctly return supplementary plane characters |
| 169 // TEST_TOKENS("\\10fFfF", ident(fromUChar32(0x10ffff) + "0")); |
| 170 // TEST_TOKENS("\\10000000", ident(fromUChar32(0x100000) + "000")); |
| 171 // FIXME: We don't correctly match newlines (normally handled in preprocessing) |
| 172 // TEST_TOKENS("\\\r", delim('\\'), whitespace); |
| 173 // TEST_TOKENS("\\\f", delim('\\'), whitespace); |
| 174 // TEST_TOKENS("\\\r\n", delim('\\'), whitespace); |
| 175 } |
| 176 |
| 177 TEST(CSSTokenizerTest, IdentToken) |
| 178 { |
| 179 TEST_TOKENS("simple-ident", ident("simple-ident")); |
| 180 TEST_TOKENS("testing123", ident("testing123")); |
| 181 TEST_TOKENS("hello!", ident("hello"), delim('!')); |
| 182 TEST_TOKENS("world\5", ident("world"), delim('\5')); |
| 183 TEST_TOKENS("_under score", ident("_under"), whitespace, ident("score")); |
| 184 TEST_TOKENS("-_underscore", ident("-_underscore")); |
| 185 TEST_TOKENS("-text", ident("-text")); |
| 186 TEST_TOKENS("-\\6d", ident("-m")); |
| 187 TEST_TOKENS(fromUChar32(0x2003), ident(fromUChar32(0x2003))); // em-space |
| 188 TEST_TOKENS(fromUChar32(0xA0), ident(fromUChar32(0xA0))); // non-breaking space |
| 189 TEST_TOKENS(fromUChar32(0x1234), ident(fromUChar32(0x1234))); |
| 190 TEST_TOKENS(fromUChar32(0x12345), ident(fromUChar32(0x12345))); |
| 191 // FIXME: These are idents in the editor's draft |
| 192 // TEST_TOKENS("--abc", ident("--abc")); |
| 193 // TEST_TOKENS("--", ident("--")); |
| 194 // TEST_TOKENS("---", ident("---")); |
| 195 // FIXME: Preprocessing is supposed to replace U+0000 with U+FFFD |
| 196 // TEST_TOKENS("\0", ident(fromUChar32(0xFFFD))); |
| 197 } |
| 198 |
| 199 TEST(CSSTokenizerTest, FunctionToken) |
| 200 { |
| 201 TEST_TOKENS("scale(2)", function("scale"), number(IntegerValueType, 2), righ
tParenthesis); |
| 202 TEST_TOKENS("foo-bar\\ baz(", function("foo-bar baz")); |
| 203 TEST_TOKENS("fun\\(ction(", function("fun(ction")); |
| 204 TEST_TOKENS("-foo(", function("-foo")); |
| 205 } |
| 206 |
| 207 TEST(CSSTokenizerTest, StringToken) |
| 208 { |
| 209 TEST_TOKENS("'text'", string("text")); |
| 210 TEST_TOKENS("\"text\"", string("text")); |
| 211 TEST_TOKENS("'testing, 123!'", string("testing, 123!")); |
| 212 TEST_TOKENS("'es\\'ca\\\"pe'", string("es'ca\"pe")); |
| 213 TEST_TOKENS("'\"quotes\"'", string("\"quotes\"")); |
| 214 TEST_TOKENS("\"'quotes'\"", string("'quotes'")); |
| 215 TEST_TOKENS("\"mismatch'", string("mismatch'")); |
| 216 TEST_TOKENS("'text\5\t\13'", string("text\5\t\13")); |
| 217 TEST_TOKENS("\"end on eof", string("end on eof")); |
| 218 TEST_TOKENS("'esca\\\nped'", string("escaped")); |
| 219 TEST_TOKENS("\"esc\\\faped\"", string("escaped")); |
| 220 TEST_TOKENS("'new\\\rline'", string("newline")); |
| 221 TEST_TOKENS("'bad\nstring", badString, whitespace, ident("string")); |
| 222 TEST_TOKENS("'bad\rstring", badString, whitespace, ident("string")); |
| 223 TEST_TOKENS("'bad\r\nstring", badString, whitespace, ident("string")); |
| 224 TEST_TOKENS("'bad\fstring", badString, whitespace, ident("string")); |
| 225 // FIXME: Preprocessing is supposed to replace U+0000 with U+FFFD |
| 226 // TEST_TOKENS("'\0'", string(fromUChar32(0xFFFD))); |
| 227 // FIXME: We don't correctly match newlines (normally handled in preprocessing) |
| 228 // TEST_TOKENS("\"new\\\r\nline\"", string("newline")); |
| 229 } |
| 230 |
| 231 TEST(CSSTokenizerTest, NumberToken) |
| 232 { |
| 233 TEST_TOKENS("10", number(IntegerValueType, 10)); |
| 234 TEST_TOKENS("12.0", number(NumberValueType, 12)); |
| 235 TEST_TOKENS("+45.6", number(NumberValueType, 45.6)); |
| 236 TEST_TOKENS("-7", number(IntegerValueType, -7)); |
| 237 TEST_TOKENS("010", number(IntegerValueType, 10)); |
| 238 TEST_TOKENS("10e0", number(NumberValueType, 10)); |
| 239 TEST_TOKENS("12e3", number(NumberValueType, 12000)); |
| 240 TEST_TOKENS("3e+1", number(NumberValueType, 30)); |
| 241 TEST_TOKENS("12E-1", number(NumberValueType, 1.2)); |
| 242 TEST_TOKENS(".7", number(NumberValueType, 0.7)); |
| 243 TEST_TOKENS("-.3", number(NumberValueType, -0.3)); |
| 244 TEST_TOKENS("+637.54e-2", number(NumberValueType, 6.3754)); |
| 245 TEST_TOKENS("-12.34E+2", number(NumberValueType, -1234)); |
| 246 |
| 247 TEST_TOKENS("+ 5", delim('+'), whitespace, number(IntegerValueType, 5)); |
| 248 TEST_TOKENS("--11", delim('-'), number(IntegerValueType, -11)); |
| 249 TEST_TOKENS("-+12", delim('-'), number(IntegerValueType, 12)); |
| 250 TEST_TOKENS("+-21", delim('+'), number(IntegerValueType, -21)); |
| 251 TEST_TOKENS("++22", delim('+'), number(IntegerValueType, 22)); |
| 252 TEST_TOKENS("13.", number(IntegerValueType, 13), delim('.')); |
| 253 TEST_TOKENS("1.e2", number(IntegerValueType, 1), delim('.'), ident("e2")); |
| 254 TEST_TOKENS("2e3.5", number(NumberValueType, 2000), number(NumberValueType,
0.5)); |
| 255 TEST_TOKENS("2e3.", number(NumberValueType, 2000), delim('.')); |
| 256 } |
| 257 |
| 258 TEST(CSSTokenizerTest, DimensionToken) |
| 259 { |
| 260 TEST_TOKENS("10px", dimension(IntegerValueType, 10, "px")); |
| 261 TEST_TOKENS("12.0em", dimension(NumberValueType, 12, "em")); |
| 262 TEST_TOKENS("-12.0em", dimension(NumberValueType, -12, "em")); |
| 263 TEST_TOKENS("+45.6__qem", dimension(NumberValueType, 45.6, "__qem")); |
| 264 TEST_TOKENS("5e", dimension(IntegerValueType, 5, "e")); |
| 265 TEST_TOKENS("5px-2px", dimension(IntegerValueType, 5, "px-2px")); |
| 266 TEST_TOKENS("5e-", dimension(IntegerValueType, 5, "e-")); |
| 267 TEST_TOKENS("5\\ ", dimension(IntegerValueType, 5, " ")); |
| 268 TEST_TOKENS("40\\70\\78", dimension(IntegerValueType, 40, "px")); |
| 269 TEST_TOKENS("4e3e2", dimension(NumberValueType, 4000, "e2")); |
| 270 TEST_TOKENS("0x10px", dimension(IntegerValueType, 0, "x1px")); |
| 271 TEST_TOKENS("4unit ", dimension(IntegerValueType, 4, "unit"), whitespace); |
| 272 TEST_TOKENS("5e+", dimension(IntegerValueType, 5, "e"), delim('+')); |
| 273 TEST_TOKENS("2e.5", dimension(IntegerValueType, 2, "e"), number(NumberValueT
ype, 0.5)); |
| 274 TEST_TOKENS("2e+.5", dimension(IntegerValueType, 2, "e"), number(NumberValue
Type, 0.5)); |
| 275 } |
| 276 |
| 277 TEST(CSSTokenizerTest, PercentageToken) |
| 278 { |
| 279 TEST_TOKENS("10%", percentage(IntegerValueType, 10)); |
| 280 TEST_TOKENS("+12.0%", percentage(NumberValueType, 12)); |
| 281 TEST_TOKENS("-48.99%", percentage(NumberValueType, -48.99)); |
| 282 TEST_TOKENS("6e-1%", percentage(NumberValueType, 0.6)); |
| 283 TEST_TOKENS("5%%", percentage(IntegerValueType, 5), delim('%')); |
| 284 } |
| 285 |
| 286 TEST(CSSTokenizerTest, CommentToken) |
| 287 { |
| 288 TEST_TOKENS("/*comment*/", comment); |
| 289 TEST_TOKENS("/**\\2f**/", comment); |
| 290 TEST_TOKENS("/**y*a*y**/", comment); |
| 291 TEST_TOKENS("/* \n :) \n */", comment); |
| 292 TEST_TOKENS("/*/*/", comment); |
| 293 TEST_TOKENS("/**/*", comment, delim('*')); |
| 294 // FIXME: Should an EOF-terminated comment get a token? |
| 295 // TEST_TOKENS("/******", comment); |
| 296 } |
| 297 |
| 298 |
| 14 typedef struct { | 299 typedef struct { |
| 15 const char* input; | 300 const char* input; |
| 16 const char* output; | |
| 17 } TestCase; | |
| 18 | |
| 19 typedef struct { | |
| 20 const char* input; | |
| 21 const unsigned maxLevel; | 301 const unsigned maxLevel; |
| 22 const unsigned finalLevel; | 302 const unsigned finalLevel; |
| 23 } BlockTestCase; | 303 } BlockTestCase; |
| 24 | 304 |
| 25 TEST(CSSTokenizerTest, Basic) | |
| 26 { | |
| 27 TestCase testCases[] = { | |
| 28 { "(max-width: 50px)", "(max-width: 50px)" }, | |
| 29 { "(max-width: 1e+2px)", "(max-width: 100.000000px)" }, | |
| 30 { "(max-width: 1e2px)", "(max-width: 100.000000px)" }, | |
| 31 { "(max-width: 1000e-1px)", "(max-width: 100.000000px)" }, | |
| 32 { "(max-width: 50\\70\\78)", "(max-width: 50px)" }, | |
| 33 { "(max-width: /* comment */50px)", "(max-width: 50px)" }, | |
| 34 { "(max-width: /** *commen*t */60px)", "(max-width: 60px)" }, | |
| 35 { "(max-width: /** *commen*t **/70px)", "(max-width: 70px)" }, | |
| 36 { "(max-width: /** *commen*t **//**/80px)", "(max-width: 80px)" }, | |
| 37 { "(max-width: /*/ **/90px)", "(max-width: 90px)" }, | |
| 38 { "(max-width: /*/ **/*100px)", "(max-width: '*'100px)" }, | |
| 39 { "(max-width: 110px/*)", "(max-width: 110px" }, | |
| 40 { "(max-width: 120px)/*", "(max-width: 120px)" }, | |
| 41 { "(max-width: 130px)/**", "(max-width: 130px)" }, | |
| 42 { "(max-width: /***/140px)/**/", "(max-width: 140px)" }, | |
| 43 { "(max-width: '40px')", "(max-width: 40px)" }, | |
| 44 { "(max-width: '40px", "(max-width: 40px" }, | |
| 45 { "(max-width: '40px\n", "(max-width: " }, | |
| 46 { "(max-width: '40px\\", "(max-width: 40px" }, | |
| 47 { "(max-width: '40px\\\n", "(max-width: 40px" }, | |
| 48 { "(max-width: '40px\\\n')", "(max-width: 40px)" }, | |
| 49 { "(max-width: '40\\70\\78')", "(max-width: 40px)" }, | |
| 50 { "(max-width: '40\\\npx')", "(max-width: 40px)" }, | |
| 51 { "(max-aspect-ratio: 5)", "(max-aspect-ratio: 5)" }, | |
| 52 { "(max-aspect-ratio: +5)", "(max-aspect-ratio: 5)" }, | |
| 53 { "(max-aspect-ratio: -5)", "(max-aspect-ratio: -5)" }, | |
| 54 { "(max-aspect-ratio: -+5)", "(max-aspect-ratio: '-'5)" }, | |
| 55 { "(max-aspect-ratio: +-5)", "(max-aspect-ratio: '+'-5)" }, | |
| 56 { "(max-aspect-ratio: +bla5)", "(max-aspect-ratio: '+'bla5)" }, | |
| 57 { "(max-aspect-ratio: +5bla)", "(max-aspect-ratio: 5other)" }, | |
| 58 { "(max-aspect-ratio: -bla)", "(max-aspect-ratio: -bla)" }, | |
| 59 { "(max-aspect-ratio: --bla)", "(max-aspect-ratio: '-'-bla)" }, | |
| 60 { "5e0", "5.000000" }, | |
| 61 { "5.0", "5.000000" }, | |
| 62 { "5.", "5'.'" }, | |
| 63 { "5.0e-1", "0.500000" }, | |
| 64 { "5.e-1", "5'.'e-1" }, | |
| 65 { "hel\\6co", "hello" }, | |
| 66 { "wor\\6c d", "world" }, | |
| 67 { "wor\\6c\r\nd wor\\6c\n\rd", "world worl d" }, | |
| 68 { "cod\\65point esca\\70\fe \\74\test", "codepoint escape test" }, | |
| 69 { "esca\\70\f\te \\74 \nest", "escap e t est" }, | |
| 70 { 0, 0 } // Do not remove the terminator line. | |
| 71 }; | |
| 72 | |
| 73 for (int i = 0; testCases[i].input; ++i) { | |
| 74 Vector<CSSParserToken> tokens; | |
| 75 CSSTokenizer::tokenize(testCases[i].input, tokens); | |
| 76 StringBuilder output; | |
| 77 for (size_t j = 0; j < tokens.size(); ++j) | |
| 78 output.append(tokens[j].textForUnitTests()); | |
| 79 ASSERT_STREQ(testCases[i].output, output.toString().ascii().data()); | |
| 80 } | |
| 81 } | |
| 82 | |
| 83 TEST(CSSTokenizerBlockTest, Basic) | 305 TEST(CSSTokenizerBlockTest, Basic) |
| 84 { | 306 { |
| 85 BlockTestCase testCases[] = { | 307 BlockTestCase testCases[] = { |
| 86 {"(max-width: 800px()), (max-width: 800px)", 2, 0}, | 308 {"(max-width: 800px()), (max-width: 800px)", 2, 0}, |
| 87 {"(max-width: 900px(()), (max-width: 900px)", 3, 1}, | 309 {"(max-width: 900px(()), (max-width: 900px)", 3, 1}, |
| 88 {"(max-width: 600px(())))), (max-width: 600px)", 3, 0}, | 310 {"(max-width: 600px(())))), (max-width: 600px)", 3, 0}, |
| 89 {"(max-width: 500px(((((((((())))), (max-width: 500px)", 11, 6}, | 311 {"(max-width: 500px(((((((((())))), (max-width: 500px)", 11, 6}, |
| 90 {"(max-width: 800px[]), (max-width: 800px)", 2, 0}, | 312 {"(max-width: 800px[]), (max-width: 800px)", 2, 0}, |
| 91 {"(max-width: 900px[[]), (max-width: 900px)", 3, 2}, | 313 {"(max-width: 900px[[]), (max-width: 900px)", 3, 2}, |
| 92 {"(max-width: 600px[[]]]]), (max-width: 600px)", 3, 0}, | 314 {"(max-width: 600px[[]]]]), (max-width: 600px)", 3, 0}, |
| (...skipping 28 matching lines...) |
| 121 for (size_t j = 0; j < tokens.size(); ++j) { | 343 for (size_t j = 0; j < tokens.size(); ++j) { |
| 122 blockWatcher.handleToken(tokens[j]); | 344 blockWatcher.handleToken(tokens[j]); |
| 123 level = blockWatcher.blockLevel(); | 345 level = blockWatcher.blockLevel(); |
| 124 maxLevel = std::max(level, maxLevel); | 346 maxLevel = std::max(level, maxLevel); |
| 125 } | 347 } |
| 126 ASSERT_EQ(testCases[i].maxLevel, maxLevel); | 348 ASSERT_EQ(testCases[i].maxLevel, maxLevel); |
| 127 ASSERT_EQ(testCases[i].finalLevel, level); | 349 ASSERT_EQ(testCases[i].finalLevel, level); |
| 128 } | 350 } |
| 129 } | 351 } |
| 130 | 352 |
| 131 void testToken(UChar c, CSSParserTokenType tokenType) | |
| 132 { | |
| 133 Vector<CSSParserToken> tokens; | |
| 134 StringBuilder input; | |
| 135 input.append(c); | |
| 136 CSSTokenizer::tokenize(input.toString(), tokens); | |
| 137 ASSERT_EQ(tokens[0].type(), tokenType); | |
| 138 } | |
| 139 | |
| 140 TEST(CSSTokenizerCodepointsTest, Basic) | |
| 141 { | |
| 142 for (UChar c = 0; c <= 1000; ++c) { | |
| 143 if (isASCIIDigit(c)) | |
| 144 testToken(c, NumberToken); | |
| 145 else if (isASCIIAlpha(c)) | |
| 146 testToken(c, IdentToken); | |
| 147 else if (c == '_') | |
| 148 testToken(c, IdentToken); | |
| 149 else if (c == '\r' || c == ' ' || c == '\n' || c == '\t' || c == '\f') | |
| 150 testToken(c, WhitespaceToken); | |
| 151 else if (c == '(') | |
| 152 testToken(c, LeftParenthesisToken); | |
| 153 else if (c == ')') | |
| 154 testToken(c, RightParenthesisToken); | |
| 155 else if (c == '[') | |
| 156 testToken(c, LeftBracketToken); | |
| 157 else if (c == ']') | |
| 158 testToken(c, RightBracketToken); | |
| 159 else if (c == '{') | |
| 160 testToken(c, LeftBraceToken); | |
| 161 else if (c == '}') | |
| 162 testToken(c, RightBraceToken); | |
| 163 else if (c == '.' || c == '+' || c == '-' || c == '/' || c == '\\') | |
| 164 testToken(c, DelimiterToken); | |
| 165 else if (c == '\'' || c == '"') | |
| 166 testToken(c, StringToken); | |
| 167 else if (c == ',') | |
| 168 testToken(c, CommaToken); | |
| 169 else if (c == ':') | |
| 170 testToken(c, ColonToken); | |
| 171 else if (c == ';') | |
| 172 testToken(c, SemicolonToken); | |
| 173 else if (!c) | |
| 174 testToken(c, EOFToken); | |
| 175 else if (c > SCHAR_MAX) | |
| 176 testToken(c, IdentToken); | |
| 177 else | |
| 178 testToken(c, DelimiterToken); | |
| 179 } | |
| 180 testToken(USHRT_MAX, IdentToken); | |
| 181 } | |
| 182 | |
| 183 } // namespace | 353 } // namespace |
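
As a rough illustration of how the helpers added in this patch compose, here is a minimal sketch of an extra token-expectation test. It is not part of the diff above; the test name and inputs are hypothetical, but TEST_TOKENS, dimension, whitespace, and the value-type enums are all defined in the new file as shown.

// Hypothetical sketch, not part of the patch: another dimension case built
// from the helpers introduced above.
TEST(CSSTokenizerTest, DimensionTokenSketch)
{
    // A fractional value keeps NumberValueType and carries its unit.
    TEST_TOKENS("1.5em", dimension(NumberValueType, 1.5, "em"));
    // Two dimensions separated by a space tokenize as dimension, whitespace, dimension.
    TEST_TOKENS("3px 4px", dimension(IntegerValueType, 3, "px"), whitespace, dimension(IntegerValueType, 4, "px"));
}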