| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "config.h" | 5 #include "config.h" |
| 6 #include "core/css/parser/CSSTokenizer.h" | 6 #include "core/css/parser/CSSTokenizer.h" |
| 7 | 7 |
| 8 #include "core/css/parser/MediaQueryBlockWatcher.h" | 8 #include "core/css/parser/MediaQueryBlockWatcher.h" |
| 9 #include <gtest/gtest.h> | 9 #include <gtest/gtest.h> |
| 10 | 10 |
| (...skipping 139 matching lines...) | |
| 150 TEST_TOKENS("spac\\65\n\rs", ident("space"), whitespace, ident("s")); | 150 TEST_TOKENS("spac\\65\n\rs", ident("space"), whitespace, ident("s")); |
| 151 TEST_TOKENS("sp\\61\tc\\65\fs", ident("spaces")); | 151 TEST_TOKENS("sp\\61\tc\\65\fs", ident("spaces")); |
| 152 TEST_TOKENS("hel\\6c o", ident("hell"), whitespace, ident("o")); | 152 TEST_TOKENS("hel\\6c o", ident("hell"), whitespace, ident("o")); |
| 153 TEST_TOKENS("test\\\n", ident("test"), delim('\\'), whitespace); | 153 TEST_TOKENS("test\\\n", ident("test"), delim('\\'), whitespace); |
| 154 TEST_TOKENS("eof\\", ident("eof"), delim('\\')); | 154 TEST_TOKENS("eof\\", ident("eof"), delim('\\')); |
| 155 TEST_TOKENS("test\\D799", ident("test" + fromUChar32(0xD799))); | 155 TEST_TOKENS("test\\D799", ident("test" + fromUChar32(0xD799))); |
| 156 TEST_TOKENS("\\E000", ident(fromUChar32(0xE000))); | 156 TEST_TOKENS("\\E000", ident(fromUChar32(0xE000))); |
| 157 TEST_TOKENS("te\\s\\t", ident("test")); | 157 TEST_TOKENS("te\\s\\t", ident("test")); |
| 158 TEST_TOKENS("spaces\\ in\\\tident", ident("spaces in\tident")); | 158 TEST_TOKENS("spaces\\ in\\\tident", ident("spaces in\tident")); |
| 159 TEST_TOKENS("\\.\\,\\:\\!", ident(".,:!")); | 159 TEST_TOKENS("\\.\\,\\:\\!", ident(".,:!")); |
| | 160 TEST_TOKENS("\\\r", delim('\\'), whitespace); |
| | 161 TEST_TOKENS("\\\f", delim('\\'), whitespace); |
| | 162 TEST_TOKENS("\\\r\n", delim('\\'), whitespace); |
| 160 // FIXME: We don't correctly return replacement characters | 163 // FIXME: We don't correctly return replacement characters |
| 161 // String replacement = fromUChar32(0xFFFD); | 164 // String replacement = fromUChar32(0xFFFD); |
| 162 // TEST_TOKENS("null\\0", ident("null" + replacement)); | 165 // TEST_TOKENS("null\\0", ident("null" + replacement)); |
| 163 // TEST_TOKENS("null\\0000", ident("null" + replacement)); | 166 // TEST_TOKENS("null\\0000", ident("null" + replacement)); |
| 164 // TEST_TOKENS("large\\110000", ident("large" + replacement)); | 167 // TEST_TOKENS("large\\110000", ident("large" + replacement)); |
| 165 // TEST_TOKENS("surrogate\\D800", ident("surrogate" + replacement)); | 168 // TEST_TOKENS("surrogate\\D800", ident("surrogate" + replacement)); |
| 166 // TEST_TOKENS("surrogate\\0DABC", ident("surrogate" + replacement)); | 169 // TEST_TOKENS("surrogate\\0DABC", ident("surrogate" + replacement)); |
| 167 // TEST_TOKENS("\\00DFFFsurrogate", ident(replacement + "surrogate")); | 170 // TEST_TOKENS("\\00DFFFsurrogate", ident(replacement + "surrogate")); |
| 168 // FIXME: We don't correctly return supplementary plane characters | 171 // FIXME: We don't correctly return supplementary plane characters |
| 169 // TEST_TOKENS("\\10fFfF", ident(fromUChar32(0x10ffff) + "0")); | 172 // TEST_TOKENS("\\10fFfF", ident(fromUChar32(0x10ffff) + "0")); |
| 170 // TEST_TOKENS("\\10000000", ident(fromUChar32(0x100000) + "000")); | 173 // TEST_TOKENS("\\10000000", ident(fromUChar32(0x100000) + "000")); |
| 171 // FIXME: We don't correctly match newlines (normally handled in preprocessing) | |
| 172 // TEST_TOKENS("\\\r", delim('\\'), whitespace); | |
| 173 // TEST_TOKENS("\\\f", delim('\\'), whitespace); | |
| 174 // TEST_TOKENS("\\\r\n", delim('\\'), whitespace); | |
| 175 } | 174 } |
| 176 | 175 |
| 177 TEST(CSSTokenizerTest, IdentToken) | 176 TEST(CSSTokenizerTest, IdentToken) |
| 178 { | 177 { |
| 179 TEST_TOKENS("simple-ident", ident("simple-ident")); | 178 TEST_TOKENS("simple-ident", ident("simple-ident")); |
| 180 TEST_TOKENS("testing123", ident("testing123")); | 179 TEST_TOKENS("testing123", ident("testing123")); |
| 181 TEST_TOKENS("hello!", ident("hello"), delim('!')); | 180 TEST_TOKENS("hello!", ident("hello"), delim('!')); |
| 182 TEST_TOKENS("world\5", ident("world"), delim('\5')); | 181 TEST_TOKENS("world\5", ident("world"), delim('\5')); |
| 183 TEST_TOKENS("_under score", ident("_under"), whitespace, ident("score")); | 182 TEST_TOKENS("_under score", ident("_under"), whitespace, ident("score")); |
| 184 TEST_TOKENS("-_underscore", ident("-_underscore")); | 183 TEST_TOKENS("-_underscore", ident("-_underscore")); |
| (...skipping 26 matching lines...) | |
| 211 TEST_TOKENS("'testing, 123!'", string("testing, 123!")); | 210 TEST_TOKENS("'testing, 123!'", string("testing, 123!")); |
| 212 TEST_TOKENS("'es\\'ca\\\"pe'", string("es'ca\"pe")); | 211 TEST_TOKENS("'es\\'ca\\\"pe'", string("es'ca\"pe")); |
| 213 TEST_TOKENS("'\"quotes\"'", string("\"quotes\"")); | 212 TEST_TOKENS("'\"quotes\"'", string("\"quotes\"")); |
| 214 TEST_TOKENS("\"'quotes'\"", string("'quotes'")); | 213 TEST_TOKENS("\"'quotes'\"", string("'quotes'")); |
| 215 TEST_TOKENS("\"mismatch'", string("mismatch'")); | 214 TEST_TOKENS("\"mismatch'", string("mismatch'")); |
| 216 TEST_TOKENS("'text\5\t\13'", string("text\5\t\13")); | 215 TEST_TOKENS("'text\5\t\13'", string("text\5\t\13")); |
| 217 TEST_TOKENS("\"end on eof", string("end on eof")); | 216 TEST_TOKENS("\"end on eof", string("end on eof")); |
| 218 TEST_TOKENS("'esca\\\nped'", string("escaped")); | 217 TEST_TOKENS("'esca\\\nped'", string("escaped")); |
| 219 TEST_TOKENS("\"esc\\\faped\"", string("escaped")); | 218 TEST_TOKENS("\"esc\\\faped\"", string("escaped")); |
| 220 TEST_TOKENS("'new\\\rline'", string("newline")); | 219 TEST_TOKENS("'new\\\rline'", string("newline")); |
| | 220 TEST_TOKENS("\"new\\\r\nline\"", string("newline")); |
| 221 TEST_TOKENS("'bad\nstring", badString, whitespace, ident("string")); | 221 TEST_TOKENS("'bad\nstring", badString, whitespace, ident("string")); |
| 222 TEST_TOKENS("'bad\rstring", badString, whitespace, ident("string")); | 222 TEST_TOKENS("'bad\rstring", badString, whitespace, ident("string")); |
| 223 TEST_TOKENS("'bad\r\nstring", badString, whitespace, ident("string")); | 223 TEST_TOKENS("'bad\r\nstring", badString, whitespace, ident("string")); |
| 224 TEST_TOKENS("'bad\fstring", badString, whitespace, ident("string")); | 224 TEST_TOKENS("'bad\fstring", badString, whitespace, ident("string")); |
| 225 // FIXME: Preprocessing is supposed to replace U+0000 with U+FFFD | 225 // FIXME: Preprocessing is supposed to replace U+0000 with U+FFFD |
| 226 // TEST_TOKENS("'\0'", string(fromUChar32(0xFFFD))); | 226 // TEST_TOKENS("'\0'", string(fromUChar32(0xFFFD))); |
| 227 // FIXME: We don't correctly match newlines (normally handled in preprocessing) | |
| 228 // TEST_TOKENS("\"new\\\r\nline\"", string("newline")); | |
| 229 } | 227 } |
| 230 | 228 |
| 231 TEST(CSSTokenizerTest, NumberToken) | 229 TEST(CSSTokenizerTest, NumberToken) |
| 232 { | 230 { |
| 233 TEST_TOKENS("10", number(IntegerValueType, 10)); | 231 TEST_TOKENS("10", number(IntegerValueType, 10)); |
| 234 TEST_TOKENS("12.0", number(NumberValueType, 12)); | 232 TEST_TOKENS("12.0", number(NumberValueType, 12)); |
| 235 TEST_TOKENS("+45.6", number(NumberValueType, 45.6)); | 233 TEST_TOKENS("+45.6", number(NumberValueType, 45.6)); |
| 236 TEST_TOKENS("-7", number(IntegerValueType, -7)); | 234 TEST_TOKENS("-7", number(IntegerValueType, -7)); |
| 237 TEST_TOKENS("010", number(IntegerValueType, 10)); | 235 TEST_TOKENS("010", number(IntegerValueType, 10)); |
| 238 TEST_TOKENS("10e0", number(NumberValueType, 10)); | 236 TEST_TOKENS("10e0", number(NumberValueType, 10)); |
| (...skipping 105 matching lines...) | |
| 344 blockWatcher.handleToken(tokens[j]); | 342 blockWatcher.handleToken(tokens[j]); |
| 345 level = blockWatcher.blockLevel(); | 343 level = blockWatcher.blockLevel(); |
| 346 maxLevel = std::max(level, maxLevel); | 344 maxLevel = std::max(level, maxLevel); |
| 347 } | 345 } |
| 348 ASSERT_EQ(testCases[i].maxLevel, maxLevel); | 346 ASSERT_EQ(testCases[i].maxLevel, maxLevel); |
| 349 ASSERT_EQ(testCases[i].finalLevel, level); | 347 ASSERT_EQ(testCases[i].finalLevel, level); |
| 350 } | 348 } |
| 351 } | 349 } |
| 352 | 350 |
| 353 } // namespace | 351 } // namespace |
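
For context, the newline-escape cases that this change moves out of the FIXME block rely on the input preprocessing step defined in CSS Syntax Level 3 (section 3.3): U+000D (CR), U+000C (FF), and the pair U+000D U+000A (CR LF) are each folded into a single U+000A (LF), and U+0000 is supposed to become U+FFFD. After that transform, an input like "\\\r" is seen by the tokenizer as a backslash followed by a newline, which is not a valid escape, so it produces delim('\\') followed by whitespace, exactly as the newly enabled tests expect. The following is a minimal standalone sketch of that preprocessing rule, not Blink's actual implementation; the helper name preprocessInput and the use of std::string (with the replacement character emitted as UTF-8) are assumptions made for this example.

#include <string>

// Sketch of the CSS Syntax Level 3 input preprocessing step (assumed helper,
// not the CSSTokenizer API): normalize newlines and replace NUL characters.
std::string preprocessInput(const std::string& input)
{
    std::string output;
    output.reserve(input.size());
    for (size_t i = 0; i < input.size(); ++i) {
        char c = input[i];
        if (c == '\r') {
            // CR and CR LF both collapse to a single LF.
            if (i + 1 < input.size() && input[i + 1] == '\n')
                ++i;
            output += '\n';
        } else if (c == '\f') {
            // FF also becomes LF.
            output += '\n';
        } else if (c == '\0') {
            // NUL becomes U+FFFD (REPLACEMENT CHARACTER), encoded here as UTF-8.
            output += "\xEF\xBF\xBD";
        } else {
            output += c;
        }
    }
    return output;
}

With this rule applied, preprocessInput("\\\r\n") yields "\\\n", and preprocessInput("'new\\\rline'") yields "'new\\\nline'", where the escaped newline inside a string is then dropped by the tokenizer, matching the string("newline") expectations above.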