Chromium Code Reviews| Index: mojom/mojom_parser/lexer/lexer_test.go |
| diff --git a/mojom/mojom_parser/lexer/lexer_test.go b/mojom/mojom_parser/lexer/lexer_test.go |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..52ad247bd6bf6dab92ddd8f4a1a87acfae1faea0 |
| --- /dev/null |
| +++ b/mojom/mojom_parser/lexer/lexer_test.go |
| @@ -0,0 +1,190 @@ |
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +package lexer |
| + |
| +import "testing" |
| + |
// checkEq fails the test immediately if expected != actual.
//
// t.Helper() marks this function as a test helper so that failure
// messages report the caller's line number rather than the line of
// the Fatalf call inside this helper.
func checkEq(t *testing.T, expected, actual interface{}) {
	t.Helper()
	if expected != actual {
		t.Fatalf("Failed check: Expected (%v), Actual (%v)", expected, actual)
	}
}
| + |
|
rudominer
2015/10/12 18:06:06
Can you add a test where the input string is empty?
azani
2015/10/13 00:23:46
Done.
|
| +// pumpTokens pumps all the tokens from a channel into a slice. |
| +func pumpTokens(tokensChan chan Token) []Token { |
| + tokens := []Token{} |
| + for token := range tokensChan { |
| + tokens = append(tokens, token) |
| + } |
| + return tokens |
| +} |
| + |
| +// TestAllSingleTokens tests for each token that a valid string is accepted as |
| +// the correct token. |
| +func TestAllSingleTokens(t *testing.T) { |
| + testData := []struct { |
| + source string |
| + token TokenKind |
| + }{ |
| + {"(", LPAREN}, |
| + {")", RPAREN}, |
| + {"[", LBRACKET}, |
| + {"]", RBRACKET}, |
| + {"{", LBRACE}, |
| + {"}", RBRACE}, |
| + {"<", LANGLE}, |
| + {">", RANGLE}, |
| + {";", SEMI}, |
| + {",", COMMA}, |
| + {".", DOT}, |
| + {"-", MINUS}, |
| + {"+", PLUS}, |
| + {"&", AMP}, |
| + {"?", QSTN}, |
| + {"=", EQUALS}, |
| + {"=>", RESPONSE}, |
| + {"somet_hi3ng", NAME}, |
| + {"import", IMPORT}, |
| + {"module", MODULE}, |
| + {"struct", STRUCT}, |
| + {"union", UNION}, |
| + {"interface", INTERFACE}, |
| + {"enum", ENUM}, |
| + {"const", CONST}, |
| + {"true", TRUE}, |
| + {"false", FALSE}, |
| + {"default", DEFAULT}, |
| + {"@10", ORDINAL}, |
| + {"10", INT_CONST_DEC}, |
| + {"0", INT_CONST_DEC}, |
| + {"0xA10", INT_CONST_HEX}, |
| + {"0xa10", INT_CONST_HEX}, |
| + {"0XA10", INT_CONST_HEX}, |
| + {"0Xa10", INT_CONST_HEX}, |
| + {"10.5", FLOAT_CONST}, |
| + {"10e5", FLOAT_CONST}, |
| + {"0.5", FLOAT_CONST}, |
| + {"0e5", FLOAT_CONST}, |
| + {"10e+5", FLOAT_CONST}, |
| + {"10e-5", FLOAT_CONST}, |
| + {"\"hello world\"", STRING_LITERAL}, |
| + {"\"hello \\\"real\\\" world\"", STRING_LITERAL}, |
| + } |
| + |
| + for i := range testData { |
| + l := lexer{source: testData[i].source, tokens: make(chan Token)} |
| + go l.run() |
| + tokens := pumpTokens(l.tokens) |
| + |
| + if len(tokens) != 1 { |
| + t.Fatalf("Source('%v'): Expected 1 token but got %v instead: %v", |
| + testData[i].source, len(tokens), tokens) |
| + } |
| + |
| + checkEq(t, testData[i].source, tokens[0].Text) |
| + checkEq(t, testData[i].token, tokens[0].Kind) |
| + } |
| +} |
| + |
| +// TestTokenPosition tests that the position in the source string, the line |
| +// number and the position in the line of the lexed token are correctly found. |
| +func TestTokenPosition(t *testing.T) { |
| + source := " \n ." |
| + l := lexer{source: source, tokens: make(chan Token)} |
| + go l.run() |
| + tokens := pumpTokens(l.tokens) |
| + token := tokens[0] |
| + |
| + checkEq(t, 5, token.CharPos) |
| + checkEq(t, 1, token.LineNo) |
| + checkEq(t, 2, token.LinePos) |
| +} |
| + |
| +// TestSkipSkippable tests that all skippable characters are skipped. |
| +func TestSkipSkippable(t *testing.T) { |
| + source := " \t \r \n ." |
| + l := lexer{source: source, tokens: make(chan Token)} |
| + go l.run() |
| + tokens := pumpTokens(l.tokens) |
| + |
| + checkEq(t, DOT, tokens[0].Kind) |
| +} |
| + |
| +// TestTokenize tests that a single token embedded in a larger string is |
| +// correctly lexed. |
| +func TestTokenize(t *testing.T) { |
| + ts := Tokenize(" \t . ") |
| + token := ts.PeekNext() |
| + checkEq(t, DOT, token.Kind) |
| + |
| + ts.ConsumeNext() |
| + token = ts.PeekNext() |
| + checkEq(t, EOF, token.Kind) |
| +} |
| + |
| +// TestTokenizeMoreThanOne tests that more than one token is correctly lexed. |
| +func TestTokenizeMoreThanOne(t *testing.T) { |
| + ts := Tokenize("()") |
| + checkEq(t, LPAREN, ts.PeekNext().Kind) |
| + ts.ConsumeNext() |
| + checkEq(t, RPAREN, ts.PeekNext().Kind) |
| + ts.ConsumeNext() |
| + checkEq(t, EOF, ts.PeekNext().Kind) |
| +} |
| + |
| +// TestIllegalChar tests that an illegal character is correctly spotted. |
| +func TestIllegalChar(t *testing.T) { |
| + ts := Tokenize(" \t $ ") |
| + checkEq(t, ERROR_ILLEGAL_CHAR, ts.PeekNext().Kind) |
| +} |
| + |
| +// TestUnterminatedStringLiteralEos tests that the correct error is emitted if |
| +// a quoted string is never closed. |
| +func TestUnterminatedStringLiteralEos(t *testing.T) { |
| + ts := Tokenize("\"hello world") |
| + checkEq(t, ERROR_UNTERMINATED_STRING_LITERAL, ts.PeekNext().Kind) |
| +} |
| + |
| +// TestUnterminatedStringLiteralEol tests that the correct error is emitted if |
| +// a quoted string is closed on a subsequent line. |
| +func TestUnterminatedStringLiteralEol(t *testing.T) { |
| + ts := Tokenize("\"hello\n world\"") |
| + checkEq(t, ERROR_UNTERMINATED_STRING_LITERAL, ts.PeekNext().Kind) |
| +} |
| + |
| +// TestSingleLineComment tests that single line comments are correctly skipped. |
| +func TestSingleLineComment(t *testing.T) { |
| + ts := Tokenize("( // some stuff\n)") |
| + checkEq(t, LPAREN, ts.PeekNext().Kind) |
| + ts.ConsumeNext() |
| + checkEq(t, RPAREN, ts.PeekNext().Kind) |
| +} |
| + |
| +// TestMultiLineComment tests that multi line comments are correctly skipped. |
| +func TestMultiLineComment(t *testing.T) { |
| + ts := Tokenize("( /* hello world/ * *\n */)") |
| + checkEq(t, LPAREN, ts.PeekNext().Kind) |
| + ts.ConsumeNext() |
| + checkEq(t, RPAREN, ts.PeekNext().Kind) |
| +} |
| + |
| +// TestUnterminatedMultiLineComment tests that unterminated multiline comments |
| +// emit the correct error. |
| +func TestUnterminatedMultiLineComment(t *testing.T) { |
| + ts := Tokenize("( /* hello world/ * *\n )") |
| + checkEq(t, LPAREN, ts.PeekNext().Kind) |
| + ts.ConsumeNext() |
| + checkEq(t, ERROR_UNTERMINATED_COMMENT, ts.PeekNext().Kind) |
| +} |
| + |
| +// TestUnterminatedMultiLineCommentAtStar tests that if the string ends at a * |
| +// (which could be the beginning of the close of a multiline comment) the right |
| +// error is emitted. |
| +func TestUnterminatedMultiLineCommentAtStar(t *testing.T) { |
| + ts := Tokenize("( /* hello world/ *") |
| + checkEq(t, LPAREN, ts.PeekNext().Kind) |
| + ts.ConsumeNext() |
| + checkEq(t, ERROR_UNTERMINATED_COMMENT, ts.PeekNext().Kind) |
| +} |