Index: third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc
diff --git a/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc b/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc
index de096fb9d82a4789a9406571bd0c5eccb931181c..dbb5be4f601a77f2aa2ad7d52eebf71fd83d8ebf 100644
--- a/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc
+++ b/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc
@@ -1,6 +1,6 @@
 // Protocol Buffers - Google's data interchange format
 // Copyright 2008 Google Inc.  All rights reserved.
-// https://developers.google.com/protocol-buffers/
+// http://code.google.com/p/protobuf/
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -411,6 +411,12 @@ MultiTokenCase kMultiTokenCases[] = {
     { Tokenizer::TYPE_END       , ""    , 1, 3, 3 },
   }},
 
+  // Bytes with the high-order bit set should not be seen as control characters.
+  { "\300", {
+    { Tokenizer::TYPE_SYMBOL, "\300", 0, 0, 1 },
+    { Tokenizer::TYPE_END   , ""    , 0, 1, 1 },
+  }},
+
   // Test all whitespace chars
   { "foo\n\t\r\v\fbar", {
     { Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3 },
@@ -735,7 +741,7 @@ TEST_F(TokenizerTest, ParseInteger) {
   EXPECT_EQ(0, ParseInteger("0x"));
 
   uint64 i;
-#ifdef PROTOBUF_HAS_DEATH_TEST  // death tests do not work on Windows yet
+#ifdef PROTOBUF_HASDEATH_TEST  // death tests do not work on Windows yet
   // Test invalid integers that will never be tokenized as integers.
   EXPECT_DEBUG_DEATH(Tokenizer::ParseInteger("zxy", kuint64max, &i),
     "passed text that could not have been tokenized as an integer");
@@ -747,7 +753,7 @@ TEST_F(TokenizerTest, ParseInteger) {
     "passed text that could not have been tokenized as an integer");
   EXPECT_DEBUG_DEATH(Tokenizer::ParseInteger("-1", kuint64max, &i),
     "passed text that could not have been tokenized as an integer");
-#endif  // PROTOBUF_HAS_DEATH_TEST
+#endif  // PROTOBUF_HASDEATH_TEST
 
   // Test overflows.
   EXPECT_TRUE (Tokenizer::ParseInteger("0", 0, &i));
@@ -790,7 +796,7 @@ TEST_F(TokenizerTest, ParseFloat) {
   EXPECT_EQ(     0.0, Tokenizer::ParseFloat("1e-9999999999999999999999999999"));
   EXPECT_EQ(HUGE_VAL, Tokenizer::ParseFloat("1e+9999999999999999999999999999"));
 
-#ifdef PROTOBUF_HAS_DEATH_TEST  // death tests do not work on Windows yet
+#ifdef PROTOBUF_HASDEATH_TEST  // death tests do not work on Windows yet
   // Test invalid integers that will never be tokenized as integers.
   EXPECT_DEBUG_DEATH(Tokenizer::ParseFloat("zxy"),
     "passed text that could not have been tokenized as a float");
@@ -798,7 +804,7 @@ TEST_F(TokenizerTest, ParseFloat) {
     "passed text that could not have been tokenized as a float");
   EXPECT_DEBUG_DEATH(Tokenizer::ParseFloat("-1.0"),
     "passed text that could not have been tokenized as a float");
-#endif  // PROTOBUF_HAS_DEATH_TEST
+#endif  // PROTOBUF_HASDEATH_TEST
 }
 
 TEST_F(TokenizerTest, ParseString) {
@@ -837,10 +843,10 @@ TEST_F(TokenizerTest, ParseString) {
   EXPECT_EQ("u0", output);
 
   // Test invalid strings that will never be tokenized as strings.
-#ifdef PROTOBUF_HAS_DEATH_TEST  // death tests do not work on Windows yet
+#ifdef PROTOBUF_HASDEATH_TEST  // death tests do not work on Windows yet
   EXPECT_DEBUG_DEATH(Tokenizer::ParseString("", &output),
     "passed text that could not have been tokenized as a string");
-#endif  // PROTOBUF_HAS_DEATH_TEST
+#endif  // PROTOBUF_HASDEATH_TEST
 }
 
 TEST_F(TokenizerTest, ParseStringAppend) {
@@ -877,7 +883,7 @@ ErrorCase kErrorCases[] = {
   { "'\\x' foo", true,
     "0:3: Expected hex digits for escape sequence.\n" },
   { "'foo", false,
-    "0:4: Unexpected end of string.\n" },
+    "0:4: String literals cannot cross line boundaries.\n" },
   { "'bar\nfoo", true,
     "0:4: String literals cannot cross line boundaries.\n" },
   { "'\\u01' foo", true,
@@ -945,10 +951,6 @@ ErrorCase kErrorCases[] = {
     "0:0: Invalid control characters encountered in text.\n" },
   { string("\0\0foo", 5), true,
     "0:0: Invalid control characters encountered in text.\n" },
-
-  // Check error from high order bits set
-  { "\300foo", true,
-    "0:0: Interpreting non ascii codepoint 192.\n" },
 };
 
 TEST_2D(TokenizerTest, Errors, kErrorCases, kBlockSizes) {
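
For reference, a minimal sketch (not part of the patch) of the tokenizer behavior the test edits above pin down: a byte with the high-order bit set, such as '\300' (0xC0), comes back as an ordinary TYPE_SYMBOL token rather than triggering the removed "Interpreting non ascii codepoint 192." error, and an unterminated string literal at end of input reports "String literals cannot cross line boundaries." instead of "Unexpected end of string.". It uses the public google::protobuf::io::Tokenizer API as of the protobuf revision this patch targets; the CollectingErrorCollector helper is hypothetical.

#include <iostream>
#include <string>

#include <google/protobuf/io/tokenizer.h>
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>

namespace io = google::protobuf::io;

// Hypothetical helper: records errors in the "line:column: message" shape
// used by kErrorCases instead of aborting the program.
class CollectingErrorCollector : public io::ErrorCollector {
 public:
  std::string errors;
  void AddError(int line, int column, const std::string& message) {
    errors += std::to_string(line) + ":" + std::to_string(column) + ": " +
              message + "\n";
  }
};

// Tokenizes |text| and prints every token plus any recorded errors.
void Tokenize(const std::string& text) {
  io::ArrayInputStream input(text.data(), static_cast<int>(text.size()));
  CollectingErrorCollector collector;
  io::Tokenizer tokenizer(&input, &collector);
  while (tokenizer.Next()) {
    std::cout << "token: '" << tokenizer.current().text << "'\n";
  }
  std::cout << "errors: "
            << (collector.errors.empty() ? "(none)\n" : collector.errors);
}

int main() {
  Tokenize("\300");  // one TYPE_SYMBOL token, no errors (per the new case)
  Tokenize("'foo");  // "0:4: String literals cannot cross line boundaries."
  return 0;
}

Note that control characters such as embedded NUL bytes still produce "Invalid control characters encountered in text.", as the untouched kErrorCases entries show; only bytes with the high-order bit set (>= 0x80) stop being treated as errors.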
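Similarly, a short sketch (same assumptions) of the static helpers guarded by the PROTOBUF_HASDEATH_TEST blocks. Tokenizer::ParseInteger, ParseFloat, and ParseString require text that the tokenizer could actually have produced as the corresponding token type; anything else hits a GOOGLE_LOG(DFATAL) path, which is why the tests wrap such calls in EXPECT_DEBUG_DEATH and compile them out where death tests are unavailable (e.g. Windows). Out-of-range values, by contrast, are ordinary recoverable failures:

#include <iostream>
#include <string>

#include <google/protobuf/io/tokenizer.h>
#include <google/protobuf/stubs/common.h>

using google::protobuf::kuint64max;
using google::protobuf::uint64;
using google::protobuf::io::Tokenizer;

int main() {
  uint64 i = 0;

  // Well-formed integer text parses and reports success.
  std::cout << Tokenizer::ParseInteger("0x12345", kuint64max, &i)
            << " " << i << "\n";                        // prints: 1 74565

  // Overflow returns false instead of dying (cf. the "Test overflows" block).
  std::cout << Tokenizer::ParseInteger("18446744073709551616", kuint64max, &i)
            << "\n";                                    // prints: 0

  // ParseFloat and ParseString follow the same contract for their token types.
  std::cout << Tokenizer::ParseFloat("1.5e3") << "\n";  // prints: 1500
  std::string out;
  Tokenizer::ParseString("'hello'", &out);
  std::cout << out << "\n";                             // prints: hello
  return 0;
}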