Index: tools/gn/tokenizer.cc
diff --git a/tools/gn/tokenizer.cc b/tools/gn/tokenizer.cc
index ea7c55ab8b95ea10892bcb39d672ea7e5c844961..318a310a4db4cd37e05c34c93c9dfe8822881408 100644
--- a/tools/gn/tokenizer.cc
+++ b/tools/gn/tokenizer.cc
@@ -128,10 +128,17 @@ std::vector<Token> Tokenizer::Run() {
       std::string trimmed;
       // TODO(scottmg): Should write TrimWhitespace for StringPiece.
       base::TrimWhitespace(to_newline.as_string(), base::TRIM_ALL, &trimmed);
-      if (trimmed.empty())
+      if (trimmed.empty() &&
+          // If it's a standalone comment, but is a continuation of a comment
+          // on a previous line, then instead make it a continued suffix
+          // comment.
+          (tokens_.empty() || tokens_.back().type() != Token::SUFFIX_COMMENT ||
+           tokens_.back().location().line_number() + 1 !=
+               location.line_number())) {
         type = Token::LINE_COMMENT;
-      else
+      } else {
         type = Token::SUFFIX_COMMENT;
+      }
     }
     tokens_.push_back(Token(location, type, token_value));
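
For reference, the rule the new condition implements can be exercised in isolation with the sketch below. This is a simplified model, not the real GN Token/Tokenizer API: CommentType, LastToken, and Classify are hypothetical names, and the |standalone| flag stands in for the trimmed.empty() check in the patch.

// Minimal sketch of the classification rule added above. Not the real GN
// code; the previous-token bookkeeping is reduced to the fields the rule
// actually consults.
#include <cassert>
#include <optional>

enum class CommentType { kLineComment, kSuffixComment };

// The only facts about the previously emitted token that the rule needs.
struct LastToken {
  CommentType type;
  int line_number;
};

// |standalone| is true when only whitespace precedes the comment on its own
// line (the trimmed.empty() case in the patch).
CommentType Classify(bool standalone,
                     int line_number,
                     const std::optional<LastToken>& last) {
  if (standalone &&
      (!last.has_value() || last->type != CommentType::kSuffixComment ||
       last->line_number + 1 != line_number)) {
    return CommentType::kLineComment;
  }
  // Either the comment trails code on its line, or it is a standalone comment
  // sitting directly under a suffix comment and so continues it.
  return CommentType::kSuffixComment;
}

int main() {
  // Line 1: "a = 1  # note" -- the comment follows code, so it is a suffix
  // comment.
  assert(Classify(false, 1, std::nullopt) == CommentType::kSuffixComment);
  // Line 2: "       # more" -- standalone, but immediately below the suffix
  // comment on line 1, so with this change it continues that suffix comment.
  assert(Classify(true, 2, LastToken{CommentType::kSuffixComment, 1}) ==
         CommentType::kSuffixComment);
  // Line 4: "# header" -- standalone and not adjacent to a suffix comment,
  // so it remains a line comment, as before.
  assert(Classify(true, 4, LastToken{CommentType::kSuffixComment, 2}) ==
         CommentType::kLineComment);
  return 0;
}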