Chromium Code Reviews| OLD | NEW | 
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "config.h" | 5 #include "config.h" | 
| 6 #include "core/css/parser/CSSTokenizer.h" | 6 #include "core/css/parser/CSSTokenizer.h" | 
| 7 | 7 | 
| 8 #include "core/css/parser/MediaQueryBlockWatcher.h" | 8 #include "core/css/parser/MediaQueryBlockWatcher.h" | 
| 9 #include "wtf/PassOwnPtr.h" | |
| 10 #include <gtest/gtest.h> | 9 #include <gtest/gtest.h> | 
| 11 | 10 | 
| 12 namespace blink { | 11 namespace blink { | 
| 13 | 12 | 
| 13 // This let's us see the line numbers of failing tests | |
| 14 #define TEST_TOKENS(string, ...) { \ | |
| 15 String s = string; \ | |
| 16 SCOPED_TRACE(s.ascii().data()); \ | |
| 17 testTokens(string, __VA_ARGS__); \ | |
| 18 } | |
| 19 | |
| 20 void compareTokens(const CSSParserToken& expected, const CSSParserToken& actual) | |
| 21 { | |
| 22 ASSERT_EQ(expected.type(), actual.type()); | |
| 23 switch (expected.type()) { | |
| 24 case DelimiterToken: | |
| 25 ASSERT_EQ(expected.delimiter(), actual.delimiter()); | |
| 26 break; | |
| 27 case IdentToken: | |
| 28 case FunctionToken: | |
| 29 case StringToken: | |
| 30 ASSERT_EQ(expected.value(), actual.value()); | |
| 31 break; | |
| 32 case DimensionToken: | |
| 33 ASSERT_EQ(expected.value(), actual.value()); | |
| 34 // fallthrough | |
| 35 case NumberToken: | |
| 36 case PercentageToken: | |
| 37 ASSERT_EQ(expected.numericValueType(), actual.numericValueType()); | |
| 38 ASSERT_DOUBLE_EQ(expected.numericValue(), actual.numericValue()); | |
| 39 break; | |
| 40 default: | |
| 41 break; | |
| 42 } | |
| 43 } | |
| 44 | |
| 45 void testTokens(const String& string, const CSSParserToken& token1, const CSSParserToken& token2 = CSSParserToken(EOFToken), const CSSParserToken& token3 = CSSParserToken(EOFToken)) | |
| 46 { | |
| 47 Vector<CSSParserToken> expectedTokens; | |
| 48 expectedTokens.append(token1); | |
| 49 if (token2.type() != EOFToken) { | |
| 50 expectedTokens.append(token2); | |
| 51 if (token3.type() != EOFToken) | |
| 52 expectedTokens.append(token3); | |
| 53 } | |
| 54 | |
| 55 Vector<CSSParserToken> actualTokens; | |
| 56 CSSTokenizer::tokenize(string, actualTokens); | |
| 57 ASSERT_FALSE(actualTokens.isEmpty()); | |
| 58 ASSERT_EQ(EOFToken, actualTokens.last().type()); | |
| 59 actualTokens.removeLast(); | |
| 60 | |
| 61 ASSERT_EQ(expectedTokens.size(), actualTokens.size()); | |
| 62 for (size_t i = 0; i < expectedTokens.size(); ++i) | |
| 63 compareTokens(expectedTokens[i], actualTokens[i]); | |
| 64 } | |
| 65 | |
| 66 static CSSParserToken ident(const String& string) { return CSSParserToken(IdentToken, string); } | |
| 67 static CSSParserToken string(const String& string) { return CSSParserToken(StringToken, string); } | |
| 68 static CSSParserToken function(const String& string) { return CSSParserToken(FunctionToken, string); } | |
| 69 static CSSParserToken delim(char c) { return CSSParserToken(DelimiterToken, c); } | |
| 70 | |
| 71 static CSSParserToken number(NumericValueType type, double value) | |
| 72 { | |
| 73 return CSSParserToken(NumberToken, value, type); | |
| 74 } | |
| 75 | |
| 76 static CSSParserToken dimension(NumericValueType type, double value, const String& string) | |
| 77 { | |
| 78 CSSParserToken token = number(type, value); | |
| 79 token.convertToDimensionWithUnit(string); | |
| 80 return token; | |
| 81 } | |
| 82 | |
| 83 static CSSParserToken percentage(NumericValueType type, double value) | |
| 84 { | |
| 85 CSSParserToken token = number(type, value); | |
| 86 token.convertToPercentage(); | |
| 87 return token; | |
| 88 } | |
| 89 | |
| 90 DEFINE_STATIC_LOCAL(CSSParserToken, whitespace, (WhitespaceToken)); | |
| 91 DEFINE_STATIC_LOCAL(CSSParserToken, colon, (ColonToken)); | |
| 92 DEFINE_STATIC_LOCAL(CSSParserToken, semicolon, (SemicolonToken)); | |
| 93 DEFINE_STATIC_LOCAL(CSSParserToken, comma, (CommaToken)); | |
| 94 DEFINE_STATIC_LOCAL(CSSParserToken, leftParenthesis, (LeftParenthesisToken)); | |
| 95 DEFINE_STATIC_LOCAL(CSSParserToken, rightParenthesis, (RightParenthesisToken)); | |
| 96 DEFINE_STATIC_LOCAL(CSSParserToken, leftBracket, (LeftBracketToken)); | |
| 97 DEFINE_STATIC_LOCAL(CSSParserToken, rightBracket, (RightBracketToken)); | |
| 98 DEFINE_STATIC_LOCAL(CSSParserToken, leftBrace, (LeftBraceToken)); | |
| 99 DEFINE_STATIC_LOCAL(CSSParserToken, rightBrace, (RightBraceToken)); | |
| 100 DEFINE_STATIC_LOCAL(CSSParserToken, badString, (BadStringToken)); | |
| 101 DEFINE_STATIC_LOCAL(CSSParserToken, comment, (CommentToken)); | |
| 102 | |
| 103 String fromUChar32(UChar32 c) | |
| 104 { | |
| 105 StringBuilder input; | |
| 106 input.append(c); | |
| 107 return input.toString(); | |
| 108 } | |
| 109 | |
| 110 TEST(CSSTokenizerTest, SingleCharacterTokens) | |
| 111 { | |
| 112 TEST_TOKENS("(", leftParenthesis); | |
| 113 TEST_TOKENS(")", rightParenthesis); | |
| 114 TEST_TOKENS("[", leftBracket); | |
| 115 TEST_TOKENS("]", rightBracket); | |
| 116 TEST_TOKENS(",", comma); | |
| 117 TEST_TOKENS(":", colon); | |
| 118 TEST_TOKENS(";", semicolon); | |
| 119 TEST_TOKENS(")[", rightParenthesis, leftBracket); | |
| 120 TEST_TOKENS("[)", leftBracket, rightParenthesis); | |
| 121 TEST_TOKENS("{}", leftBrace, rightBrace); | |
| 122 TEST_TOKENS(",,", comma, comma); | |
| 123 } | |
| 124 | |
| 125 TEST(CSSTokenizerTest, DelimiterToken) | |
| 126 { | |
| 127 TEST_TOKENS("*", delim('*')); | |
| 128 TEST_TOKENS("%", delim('%')); | |
| 129 TEST_TOKENS("~", delim('~')); | |
| 130 TEST_TOKENS("&", delim('&')); | |
| 131 TEST_TOKENS("\x7f", delim('\x7f')); | |
| 132 TEST_TOKENS("\1", delim('\x1')); | |
| 133 } | |
| 134 | |
| 135 TEST(CSSTokenizerTest, WhitespaceTokens) | |
| 136 { | |
| 137 TEST_TOKENS(" ", whitespace); | |
| 138 TEST_TOKENS("\n\rS", whitespace, ident("S")); | |
| 139 TEST_TOKENS(" *", whitespace, delim('*')); | |
| 140 TEST_TOKENS("\r\n\f\t2", whitespace, number(IntegerValueType, 2)); | |
| 141 } | |
| 142 | |
| 143 TEST(CSSTokenizerTest, Escapes) | |
| 144 { | |
| 145 TEST_TOKENS("hel\\6Co", ident("hello")); | |
| 146 TEST_TOKENS("\\26 B", ident("&B")); | |
| 147 TEST_TOKENS("'hel\\6c o'", string("hello")); | |
| 148 TEST_TOKENS("'spac\\65\r\ns'", string("spaces")); | |
| 149 TEST_TOKENS("spac\\65\r\ns", ident("spaces")); | |
| 150 TEST_TOKENS("spac\\65\n\rs", ident("space"), whitespace, ident("s")); | |
| 151 TEST_TOKENS("sp\\61\tc\\65\fs", ident("spaces")); | |
| 152 TEST_TOKENS("hel\\6c o", ident("hell"), whitespace, ident("o")); | |
| 153 TEST_TOKENS("test\\\n", ident("test"), delim('\\'), whitespace); | |
| 154 TEST_TOKENS("eof\\", ident("eof"), delim('\\')); | |
| 155 TEST_TOKENS("test\\D799", ident("test" + fromUChar32(0xD799))); | |
| 156 TEST_TOKENS("\\E000", ident(fromUChar32(0xE000))); | |
| 157 TEST_TOKENS("te\\s\\t", ident("test")); | |
| 158 TEST_TOKENS("spaces\\ in\\\tident", ident("spaces in\tident")); | |
| 159 TEST_TOKENS("\\.\\,\\:\\!", ident(".,:!")); | |
| 160 // FIXME: We don't correctly return replacement characters | |
| 161 // String replacement = fromUChar32(0xFFFD); | |
| 162 // TEST_TOKENS("null\\0", ident("null" + replacement)); | |
| 163 // TEST_TOKENS("null\\0000", ident("null" + replacement)); | |
| 164 // TEST_TOKENS("large\\110000", ident("large" + replacement)); | |
| 165 // TEST_TOKENS("surrogate\\D800", ident("surrogate" + replacement)); | |
| 166 // TEST_TOKENS("surrogate\\0DABC", ident("surrogate" + replacement)); | |
| 167 // TEST_TOKENS("\\00DFFFsurrogate", ident(replacement + "surrogate")); | |
| 168 // FIXME: We don't correctly return supplementary plane characters | |
| 169 // TEST_TOKENS("\\10fFfF", ident(fromUChar32(0x10ffff) + "0")); | |
| 170 // TEST_TOKENS("\\10000000", ident(fromUChar32(0x100000) + "000")); | |
| 171 // FIXME: We don't correctly match newlines (normally handled in preprocessing) | |
| 172 // TEST_TOKENS("\\\r", delim('\\'), whitespace); | |
| 173 // TEST_TOKENS("\\\f", delim('\\'), whitespace); | |
| 174 // TEST_TOKENS("\\\r\n", delim('\\'), whitespace); | |
| 175 } | |
| 176 | |
| 177 TEST(CSSTokenizerTest, IdentToken) | |
| 178 { | |
| 179 TEST_TOKENS("simple-ident", ident("simple-ident")); | |
| 180 TEST_TOKENS("testing123", ident("testing123")); | |
| 181 TEST_TOKENS("hello!", ident("hello"), delim('!')); | |
| 182 TEST_TOKENS("world\5", ident("world"), delim('\5')); | |
| 183 TEST_TOKENS("_under score", ident("_under"), whitespace, ident("score")); | |
| 184 TEST_TOKENS("-_underscore", ident("-_underscore")); | |
| 185 TEST_TOKENS("-text", ident("-text")); | |
| 186 TEST_TOKENS("-\\6d", ident("-m")); | |
| 187 TEST_TOKENS(fromUChar32(0x2003), ident(fromUChar32(0x2003))); // em-space | |
| 188 TEST_TOKENS(fromUChar32(0xA0), ident(fromUChar32(0xA0))); // non-breaking space | |
| 189 TEST_TOKENS(fromUChar32(0x1234), ident(fromUChar32(0x1234))); | |
| 190 TEST_TOKENS(fromUChar32(0x12345), ident(fromUChar32(0x12345))); | |
| 191 // FIXME: These are idents in the editor's draft | |
| 192 // TEST_TOKENS("--abc", ident("--abc")); | |
| 193 // TEST_TOKENS("--", ident("--")); | |
| 194 // TEST_TOKENS("---", ident("---")); | |
| 195 // FIXME: Preprocessing is supposed to replace U+0000 with U+FFFD | |
| 196 // TEST_TOKENS("\0", ident(fromUChar32(0xFFFD))); | |
| 197 } | |
| 198 | |
| 199 TEST(CSSTokenizerTest, FunctionToken) | |
| 200 { | |
| 201 TEST_TOKENS("scale(2)", function("scale"), number(IntegerValueType, 2), rightParenthesis); | |
| 202 TEST_TOKENS("foo-bar\\ baz(", function("foo-bar baz")); | |
| 203 TEST_TOKENS("fun\\(ction(", function("fun(ction")); | |
| 
 
ikilpatrick
2014/10/23 22:24:02
worth testing more esoteric names like: "--bar("
 
Timothy Loh
2014/10/24 00:29:00
This doesn't really add any coverage.
 
 | |
| 204 } | |
| 205 | |
| 206 TEST(CSSTokenizerTest, StringToken) | |
| 207 { | |
| 208 TEST_TOKENS("'text'", string("text")); | |
| 209 TEST_TOKENS("\"text\"", string("text")); | |
| 210 TEST_TOKENS("'testing, 123!'", string("testing, 123!")); | |
| 211 TEST_TOKENS("'es\\'ca\\\"pe'", string("es'ca\"pe")); | |
| 212 TEST_TOKENS("'\"quotes\"'", string("\"quotes\"")); | |
| 213 TEST_TOKENS("\"'quotes'\"", string("'quotes'")); | |
| 214 TEST_TOKENS("'text\5\t\13'", string("text\5\t\13")); | |
| 215 TEST_TOKENS("\"end on eof", string("end on eof")); | |
| 216 TEST_TOKENS("'esca\\\nped'", string("escaped")); | |
| 217 TEST_TOKENS("\"esc\\\faped\"", string("escaped")); | |
| 218 TEST_TOKENS("'new\\\rline'", string("newline")); | |
| 219 TEST_TOKENS("'bad\nstring", badString, whitespace, ident("string")); | |
| 220 TEST_TOKENS("'bad\rstring", badString, whitespace, ident("string")); | |
| 221 TEST_TOKENS("'bad\r\nstring", badString, whitespace, ident("string")); | |
| 222 TEST_TOKENS("'bad\fstring", badString, whitespace, ident("string")); | |
| 
 
ikilpatrick
2014/10/23 22:24:02
Worth adding a mismatched start/end char string here
 
Timothy Loh
2014/10/24 00:29:00
Done (but basically tested by "'quotes'" and '"quo
 
 | |
| 223 // FIXME: Preprocessing is supposed to replace U+0000 with U+FFFD | |
| 224 // TEST_TOKENS("'\0'", string(fromUChar32(0xFFFD))); | |
| 225 // FIXME: We don't correctly match newlines (normally handled in preprocessing) | |
| 226 // TEST_TOKENS("\"new\\\r\nline\"", string("newline")); | |
| 227 } | |
| 228 | |
| 229 TEST(CSSTokenizerTest, NumberToken) | |
| 230 { | |
| 231 TEST_TOKENS("10", number(IntegerValueType, 10)); | |
| 232 TEST_TOKENS("12.0", number(NumberValueType, 12)); | |
| 233 TEST_TOKENS("+45.6", number(NumberValueType, 45.6)); | |
| 234 TEST_TOKENS("-7", number(IntegerValueType, -7)); | |
| 235 TEST_TOKENS("010", number(IntegerValueType, 10)); | |
| 236 TEST_TOKENS("10e0", number(NumberValueType, 10)); | |
| 237 TEST_TOKENS("12e3", number(NumberValueType, 12000)); | |
| 238 TEST_TOKENS("3e+1", number(NumberValueType, 30)); | |
| 239 TEST_TOKENS("12E-1", number(NumberValueType, 1.2)); | |
| 240 TEST_TOKENS(".7", number(NumberValueType, 0.7)); | |
| 241 TEST_TOKENS("-.3", number(NumberValueType, -0.3)); | |
| 242 TEST_TOKENS("+637.54e-2", number(NumberValueType, 6.3754)); | |
| 243 TEST_TOKENS("-12.34E+2", number(NumberValueType, -1234)); | |
| 244 | |
| 245 TEST_TOKENS("+ 5", delim('+'), whitespace, number(IntegerValueType, 5)); | |
| 246 TEST_TOKENS("--11", delim('-'), number(IntegerValueType, -11)); | |
| 247 TEST_TOKENS("-+12", delim('-'), number(IntegerValueType, 12)); | |
| 248 TEST_TOKENS("+-21", delim('+'), number(IntegerValueType, -21)); | |
| 249 TEST_TOKENS("++22", delim('+'), number(IntegerValueType, 22)); | |
| 250 TEST_TOKENS("13.", number(IntegerValueType, 13), delim('.')); | |
| 251 TEST_TOKENS("1.e2", number(IntegerValueType, 1), delim('.'), ident("e2")); | |
| 
 
ikilpatrick
2014/10/23 22:24:02
test for:
"1e.5", "1e1.5", "1e5." etc.
 
Timothy Loh
2014/10/24 00:29:00
Done.
 
 | |
| 252 } | |
| 253 | |
| 254 TEST(CSSTokenizerTest, DimensionToken) | |
| 255 { | |
| 256 TEST_TOKENS("10px", dimension(IntegerValueType, 10, "px")); | |
| 257 TEST_TOKENS("12.0em", dimension(NumberValueType, 12, "em")); | |
| 258 TEST_TOKENS("-12.0em", dimension(NumberValueType, -12, "em")); | |
| 259 TEST_TOKENS("+45.6__qem", dimension(NumberValueType, 45.6, "__qem")); | |
| 260 TEST_TOKENS("5e", dimension(IntegerValueType, 5, "e")); | |
| 261 TEST_TOKENS("5px-2px", dimension(IntegerValueType, 5, "px-2px")); | |
| 262 TEST_TOKENS("5\\ ", dimension(IntegerValueType, 5, " ")); | |
| 263 TEST_TOKENS("40\\70\\78", dimension(IntegerValueType, 40, "px")); | |
| 264 TEST_TOKENS("4e3e2", dimension(NumberValueType, 4000, "e2")); | |
| 265 TEST_TOKENS("0x10px", dimension(IntegerValueType, 0, "x1px")); | |
| 266 TEST_TOKENS("4unit ", dimension(IntegerValueType, 4, "unit"), whitespace); | |
| 267 TEST_TOKENS("5e+", dimension(IntegerValueType, 5, "e"), delim('+')); | |
| 268 } | |
| 269 | |
| 270 TEST(CSSTokenizerTest, PercentageToken) | |
| 271 { | |
| 272 TEST_TOKENS("10%", percentage(IntegerValueType, 10)); | |
| 273 TEST_TOKENS("+12.0%", percentage(NumberValueType, 12)); | |
| 274 TEST_TOKENS("-48.99%", percentage(NumberValueType, -48.99)); | |
| 275 TEST_TOKENS("6e-1%", percentage(NumberValueType, 0.6)); | |
| 276 TEST_TOKENS("5%%", percentage(IntegerValueType, 5), delim('%')); | |
| 277 } | |
| 278 | |
| 279 TEST(CSSTokenizerTest, CommentToken) | |
| 280 { | |
| 281 TEST_TOKENS("/*comment*/", comment); | |
| 282 TEST_TOKENS("/**\\2f**/", comment); | |
| 283 TEST_TOKENS("/**y*a*y**/", comment); | |
| 284 TEST_TOKENS("/* \n :) \n */", comment); | |
| 285 TEST_TOKENS("/*/*/", comment); | |
| 286 TEST_TOKENS("/**/*", comment, delim('*')); | |
| 287 // FIXME: Should an EOF-terminated comment get a token? | |
| 288 // TEST_TOKENS("/******", comment); | |
| 289 } | |
| 290 | |
| 291 | |
| 14 typedef struct { | 292 typedef struct { | 
| 15 const char* input; | 293 const char* input; | 
| 16 const char* output; | |
| 17 } TestCase; | |
| 18 | |
| 19 typedef struct { | |
| 20 const char* input; | |
| 21 const unsigned maxLevel; | 294 const unsigned maxLevel; | 
| 22 const unsigned finalLevel; | 295 const unsigned finalLevel; | 
| 23 } BlockTestCase; | 296 } BlockTestCase; | 
| 24 | 297 | 
| 25 TEST(CSSTokenizerTest, Basic) | |
| 26 { | |
| 27 TestCase testCases[] = { | |
| 28 { "(max-width: 50px)", "(max-width: 50px)" }, | |
| 29 { "(max-width: 1e+2px)", "(max-width: 100.000000px)" }, | |
| 30 { "(max-width: 1e2px)", "(max-width: 100.000000px)" }, | |
| 31 { "(max-width: 1000e-1px)", "(max-width: 100.000000px)" }, | |
| 32 { "(max-width: 50\\70\\78)", "(max-width: 50px)" }, | |
| 33 { "(max-width: /* comment */50px)", "(max-width: 50px)" }, | |
| 34 { "(max-width: /** *commen*t */60px)", "(max-width: 60px)" }, | |
| 35 { "(max-width: /** *commen*t **/70px)", "(max-width: 70px)" }, | |
| 36 { "(max-width: /** *commen*t **//**/80px)", "(max-width: 80px)" }, | |
| 37 { "(max-width: /*/ **/90px)", "(max-width: 90px)" }, | |
| 38 { "(max-width: /*/ **/*100px)", "(max-width: '*'100px)" }, | |
| 39 { "(max-width: 110px/*)", "(max-width: 110px" }, | |
| 40 { "(max-width: 120px)/*", "(max-width: 120px)" }, | |
| 41 { "(max-width: 130px)/**", "(max-width: 130px)" }, | |
| 42 { "(max-width: /***/140px)/**/", "(max-width: 140px)" }, | |
| 43 { "(max-width: '40px')", "(max-width: 40px)" }, | |
| 44 { "(max-width: '40px", "(max-width: 40px" }, | |
| 45 { "(max-width: '40px\n", "(max-width: " }, | |
| 46 { "(max-width: '40px\\", "(max-width: 40px" }, | |
| 47 { "(max-width: '40px\\\n", "(max-width: 40px" }, | |
| 48 { "(max-width: '40px\\\n')", "(max-width: 40px)" }, | |
| 49 { "(max-width: '40\\70\\78')", "(max-width: 40px)" }, | |
| 50 { "(max-width: '40\\\npx')", "(max-width: 40px)" }, | |
| 51 { "(max-aspect-ratio: 5)", "(max-aspect-ratio: 5)" }, | |
| 52 { "(max-aspect-ratio: +5)", "(max-aspect-ratio: 5)" }, | |
| 53 { "(max-aspect-ratio: -5)", "(max-aspect-ratio: -5)" }, | |
| 54 { "(max-aspect-ratio: -+5)", "(max-aspect-ratio: '-'5)" }, | |
| 55 { "(max-aspect-ratio: +-5)", "(max-aspect-ratio: '+'-5)" }, | |
| 56 { "(max-aspect-ratio: +bla5)", "(max-aspect-ratio: '+'bla5)" }, | |
| 57 { "(max-aspect-ratio: +5bla)", "(max-aspect-ratio: 5other)" }, | |
| 58 { "(max-aspect-ratio: -bla)", "(max-aspect-ratio: -bla)" }, | |
| 59 { "(max-aspect-ratio: --bla)", "(max-aspect-ratio: '-'-bla)" }, | |
| 60 { "5e0", "5.000000" }, | |
| 61 { "5.0", "5.000000" }, | |
| 62 { "5.", "5'.'" }, | |
| 63 { "5.0e-1", "0.500000" }, | |
| 64 { "5.e-1", "5'.'e-1" }, | |
| 65 { "hel\\6co", "hello" }, | |
| 66 { "wor\\6c d", "world" }, | |
| 67 { "wor\\6c\r\nd wor\\6c\n\rd", "world worl d" }, | |
| 68 { "cod\\65point esca\\70\fe \\74\test", "codepoint escape test" }, | |
| 69 { "esca\\70\f\te \\74 \nest", "escap e t est" }, | |
| 70 { 0, 0 } // Do not remove the terminator line. | |
| 71 }; | |
| 72 | |
| 73 for (int i = 0; testCases[i].input; ++i) { | |
| 74 Vector<CSSParserToken> tokens; | |
| 75 CSSTokenizer::tokenize(testCases[i].input, tokens); | |
| 76 StringBuilder output; | |
| 77 for (size_t j = 0; j < tokens.size(); ++j) | |
| 78 output.append(tokens[j].textForUnitTests()); | |
| 79 ASSERT_STREQ(testCases[i].output, output.toString().ascii().data()); | |
| 80 } | |
| 81 } | |
| 82 | |
| 83 TEST(CSSTokenizerBlockTest, Basic) | 298 TEST(CSSTokenizerBlockTest, Basic) | 
| 84 { | 299 { | 
| 85 BlockTestCase testCases[] = { | 300 BlockTestCase testCases[] = { | 
| 86 {"(max-width: 800px()), (max-width: 800px)", 2, 0}, | 301 {"(max-width: 800px()), (max-width: 800px)", 2, 0}, | 
| 87 {"(max-width: 900px(()), (max-width: 900px)", 3, 1}, | 302 {"(max-width: 900px(()), (max-width: 900px)", 3, 1}, | 
| 88 {"(max-width: 600px(())))), (max-width: 600px)", 3, 0}, | 303 {"(max-width: 600px(())))), (max-width: 600px)", 3, 0}, | 
| 89 {"(max-width: 500px(((((((((())))), (max-width: 500px)", 11, 6}, | 304 {"(max-width: 500px(((((((((())))), (max-width: 500px)", 11, 6}, | 
| 90 {"(max-width: 800px[]), (max-width: 800px)", 2, 0}, | 305 {"(max-width: 800px[]), (max-width: 800px)", 2, 0}, | 
| 91 {"(max-width: 900px[[]), (max-width: 900px)", 3, 2}, | 306 {"(max-width: 900px[[]), (max-width: 900px)", 3, 2}, | 
| 92 {"(max-width: 600px[[]]]]), (max-width: 600px)", 3, 0}, | 307 {"(max-width: 600px[[]]]]), (max-width: 600px)", 3, 0}, | 
| (...skipping 28 matching lines...) Expand all Loading... | |
| 121 for (size_t j = 0; j < tokens.size(); ++j) { | 336 for (size_t j = 0; j < tokens.size(); ++j) { | 
| 122 blockWatcher.handleToken(tokens[j]); | 337 blockWatcher.handleToken(tokens[j]); | 
| 123 level = blockWatcher.blockLevel(); | 338 level = blockWatcher.blockLevel(); | 
| 124 maxLevel = std::max(level, maxLevel); | 339 maxLevel = std::max(level, maxLevel); | 
| 125 } | 340 } | 
| 126 ASSERT_EQ(testCases[i].maxLevel, maxLevel); | 341 ASSERT_EQ(testCases[i].maxLevel, maxLevel); | 
| 127 ASSERT_EQ(testCases[i].finalLevel, level); | 342 ASSERT_EQ(testCases[i].finalLevel, level); | 
| 128 } | 343 } | 
| 129 } | 344 } | 
| 130 | 345 | 
| 131 void testToken(UChar c, CSSParserTokenType tokenType) | |
| 132 { | |
| 133 Vector<CSSParserToken> tokens; | |
| 134 StringBuilder input; | |
| 135 input.append(c); | |
| 136 CSSTokenizer::tokenize(input.toString(), tokens); | |
| 137 ASSERT_EQ(tokens[0].type(), tokenType); | |
| 138 } | |
| 139 | |
| 140 TEST(CSSTokenizerCodepointsTest, Basic) | |
| 141 { | |
| 142 for (UChar c = 0; c <= 1000; ++c) { | |
| 143 if (isASCIIDigit(c)) | |
| 144 testToken(c, NumberToken); | |
| 145 else if (isASCIIAlpha(c)) | |
| 146 testToken(c, IdentToken); | |
| 147 else if (c == '_') | |
| 148 testToken(c, IdentToken); | |
| 149 else if (c == '\r' || c == ' ' || c == '\n' || c == '\t' || c == '\f') | |
| 150 testToken(c, WhitespaceToken); | |
| 151 else if (c == '(') | |
| 152 testToken(c, LeftParenthesisToken); | |
| 153 else if (c == ')') | |
| 154 testToken(c, RightParenthesisToken); | |
| 155 else if (c == '[') | |
| 156 testToken(c, LeftBracketToken); | |
| 157 else if (c == ']') | |
| 158 testToken(c, RightBracketToken); | |
| 159 else if (c == '{') | |
| 160 testToken(c, LeftBraceToken); | |
| 161 else if (c == '}') | |
| 162 testToken(c, RightBraceToken); | |
| 163 else if (c == '.' || c == '+' || c == '-' || c == '/' || c == '\\') | |
| 164 testToken(c, DelimiterToken); | |
| 165 else if (c == '\'' || c == '"') | |
| 166 testToken(c, StringToken); | |
| 167 else if (c == ',') | |
| 168 testToken(c, CommaToken); | |
| 169 else if (c == ':') | |
| 170 testToken(c, ColonToken); | |
| 171 else if (c == ';') | |
| 172 testToken(c, SemicolonToken); | |
| 173 else if (!c) | |
| 174 testToken(c, EOFToken); | |
| 175 else if (c > SCHAR_MAX) | |
| 176 testToken(c, IdentToken); | |
| 177 else | |
| 178 testToken(c, DelimiterToken); | |
| 179 } | |
| 180 testToken(USHRT_MAX, IdentToken); | |
| 181 } | |
| 182 | |
| 183 } // namespace | 346 } // namespace | 
| OLD | NEW |