Chromium Code Reviews

Unified Diff: third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc

Issue 1842653006: Update //third_party/protobuf to version 3. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: merge | Created 4 years, 8 months ago
Index: third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc
diff --git a/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc b/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc
index dbb5be4f601a77f2aa2ad7d52eebf71fd83d8ebf..20d50a2c2ff0deb0515f6942514514aa46ba1105 100644
--- a/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc
+++ b/third_party/protobuf/src/google/protobuf/io/tokenizer_unittest.cc
@@ -1,6 +1,6 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
-// http://code.google.com/p/protobuf/
+// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -41,6 +41,7 @@
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/logging.h>
#include <google/protobuf/stubs/strutil.h>
#include <google/protobuf/stubs/substitute.h>
#include <google/protobuf/testing/googletest.h>
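Note: the added include appears to track a protobuf 3 refactoring in which the GOOGLE_LOG / GOOGLE_CHECK macro families were split out of <google/protobuf/stubs/common.h> into <google/protobuf/stubs/logging.h>, so files that use them now include the new header directly. A minimal sketch of what the new header provides (GOOGLE_LOG and GOOGLE_CHECK are real protobuf macros; the function is illustrative):

    #include <google/protobuf/stubs/logging.h>

    void LoggingDemo() {
      // Both macro families live in stubs/logging.h in protobuf 3, so
      // translation units that use them should include it directly
      // rather than relying on stubs/common.h pulling it in.
      GOOGLE_LOG(INFO) << "hello from stubs/logging.h";
      GOOGLE_CHECK(1 + 1 == 2) << "the CHECK macros live here too";
    }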
@@ -411,12 +412,6 @@ MultiTokenCase kMultiTokenCases[] = {
{ Tokenizer::TYPE_END , "" , 1, 3, 3 },
}},
- // Bytes with the high-order bit set should not be seen as control characters.
- { "\300", {
- { Tokenizer::TYPE_SYMBOL, "\300", 0, 0, 1 },
- { Tokenizer::TYPE_END , "" , 0, 1, 1 },
- }},
-
// Test all whitespace chars
{ "foo\n\t\r\v\fbar", {
{ Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3 },
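Note: this removed MultiTokenCase is paired with a new kErrorCases entry at the end of this patch. Bytes with the high-order bit set (e.g. \300) are no longer tokenized as TYPE_SYMBOL; the tokenizer now reports them through the error collector instead. See the sketch after the final hunk.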
@@ -741,7 +736,7 @@ TEST_F(TokenizerTest, ParseInteger) {
EXPECT_EQ(0, ParseInteger("0x"));
uint64 i;
-#ifdef PROTOBUF_HASDEATH_TEST // death tests do not work on Windows yet
+#ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet
// Test invalid integers that will never be tokenized as integers.
EXPECT_DEBUG_DEATH(Tokenizer::ParseInteger("zxy", kuint64max, &i),
"passed text that could not have been tokenized as an integer");
@@ -753,7 +748,7 @@ TEST_F(TokenizerTest, ParseInteger) {
"passed text that could not have been tokenized as an integer");
EXPECT_DEBUG_DEATH(Tokenizer::ParseInteger("-1", kuint64max, &i),
"passed text that could not have been tokenized as an integer");
-#endif // PROTOBUF_HASDEATH_TEST
+#endif // PROTOBUF_HAS_DEATH_TEST
// Test overflows.
EXPECT_TRUE (Tokenizer::ParseInteger("0", 0, &i));
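Note: the old spelling PROTOBUF_HASDEATH_TEST (missing underscore) was presumably never defined anywhere, so every block it guarded was silently compiled out and these EXPECT_DEBUG_DEATH checks never ran; the rename to PROTOBUF_HAS_DEATH_TEST restores them. The same rename is applied in the ParseFloat and ParseString hunks below. A minimal sketch of the guard pattern, using gtest's real GTEST_HAS_DEATH_TEST feature macro (the test itself is illustrative, not part of the patch; PROTOBUF_HAS_DEATH_TEST is assumed to be derived from something similar, platform-gated per the "death tests do not work on Windows yet" comment):

    #include <cassert>
    #include <gtest/gtest.h>

    TEST(GuardDemoTest, DebugDeath) {
    #ifdef GTEST_HAS_DEATH_TEST
      // In debug builds the assert fires and gtest verifies the process
      // dies with a message matching the regex (here: match anything);
      // in NDEBUG builds the statement simply executes and assert is a
      // no-op, so the test still passes. A misspelled guard macro
      // removes this whole block with no compile error, which is
      // exactly what happened with PROTOBUF_HASDEATH_TEST.
      EXPECT_DEBUG_DEATH(assert(false), "");
    #endif
    }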
@@ -796,7 +791,7 @@ TEST_F(TokenizerTest, ParseFloat) {
EXPECT_EQ( 0.0, Tokenizer::ParseFloat("1e-9999999999999999999999999999"));
EXPECT_EQ(HUGE_VAL, Tokenizer::ParseFloat("1e+9999999999999999999999999999"));
-#ifdef PROTOBUF_HASDEATH_TEST // death tests do not work on Windows yet
+#ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet
// Test invalid integers that will never be tokenized as integers.
EXPECT_DEBUG_DEATH(Tokenizer::ParseFloat("zxy"),
"passed text that could not have been tokenized as a float");
@@ -804,7 +799,7 @@ TEST_F(TokenizerTest, ParseFloat) {
"passed text that could not have been tokenized as a float");
EXPECT_DEBUG_DEATH(Tokenizer::ParseFloat("-1.0"),
"passed text that could not have been tokenized as a float");
-#endif // PROTOBUF_HASDEATH_TEST
+#endif // PROTOBUF_HAS_DEATH_TEST
}
TEST_F(TokenizerTest, ParseString) {
@@ -843,10 +838,10 @@ TEST_F(TokenizerTest, ParseString) {
EXPECT_EQ("u0", output);
// Test invalid strings that will never be tokenized as strings.
-#ifdef PROTOBUF_HASDEATH_TEST // death tests do not work on Windows yet
+#ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet
EXPECT_DEBUG_DEATH(Tokenizer::ParseString("", &output),
"passed text that could not have been tokenized as a string");
-#endif // PROTOBUF_HASDEATH_TEST
+#endif // PROTOBUF_HAS_DEATH_TEST
}
TEST_F(TokenizerTest, ParseStringAppend) {
@@ -880,10 +875,12 @@ ErrorCase kErrorCases[] = {
// String errors.
{ "'\\l' foo", true,
"0:2: Invalid escape sequence in string literal.\n" },
+ { "'\\X' foo", true,
+ "0:2: Invalid escape sequence in string literal.\n" },
{ "'\\x' foo", true,
"0:3: Expected hex digits for escape sequence.\n" },
{ "'foo", false,
- "0:4: String literals cannot cross line boundaries.\n" },
+ "0:4: Unexpected end of string.\n" },
{ "'bar\nfoo", true,
"0:4: String literals cannot cross line boundaries.\n" },
{ "'\\u01' foo", true,
@@ -951,6 +948,10 @@ ErrorCase kErrorCases[] = {
"0:0: Invalid control characters encountered in text.\n" },
{ string("\0\0foo", 5), true,
"0:0: Invalid control characters encountered in text.\n" },
+
+ // Check error from high order bits set
+ { "\300foo", true,
+ "0:0: Interpreting non ascii codepoint 192.\n" },
};
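Note: this new error case is the counterpart of the MultiTokenCase removal earlier in the patch. A byte with the high-order bit set (here \300, i.e. 0xC0 = 192) is no longer returned as a TYPE_SYMBOL token and instead produces a non-ASCII error. A self-contained sketch of the new behavior, assuming only real protobuf types (Tokenizer, ArrayInputStream, io::ErrorCollector); RecordingErrorCollector and main are illustrative:

    #include <iostream>
    #include <string>

    #include <google/protobuf/io/tokenizer.h>
    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>

    using google::protobuf::io::ArrayInputStream;
    using google::protobuf::io::ColumnNumber;
    using google::protobuf::io::ErrorCollector;
    using google::protobuf::io::Tokenizer;

    // Illustrative collector: prints every error the tokenizer reports
    // in the same "line:column: message" form used by kErrorCases.
    class RecordingErrorCollector : public ErrorCollector {
     public:
      void AddError(int line, ColumnNumber column,
                    const std::string& message) override {
        std::cout << line << ":" << column << ": " << message << "\n";
      }
    };

    int main() {
      const std::string text("\300foo", 4);
      ArrayInputStream input(text.data(), static_cast<int>(text.size()));
      RecordingErrorCollector errors;
      Tokenizer tokenizer(&input, &errors);
      // Before this patch, "\300" came back as a TYPE_SYMBOL token.
      // After it, the collector receives
      //   0:0: Interpreting non ascii codepoint 192.
      // and "foo" is still tokenized as an identifier.
      while (tokenizer.Next()) {}
      return 0;
    }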
TEST_2D(TokenizerTest, Errors, kErrorCases, kBlockSizes) {
