Index: pkg/compiler/lib/src/parser/parser.dart |
diff --git a/pkg/compiler/lib/src/parser/parser.dart b/pkg/compiler/lib/src/parser/parser.dart |
index f2e0577937e91946b398e1e8040ae978ff95a8ae..13e0be68debf521286df1145a7c7f2f7ae315a76 100644 |
--- a/pkg/compiler/lib/src/parser/parser.dart |
+++ b/pkg/compiler/lib/src/parser/parser.dart |
@@ -39,6 +39,8 @@ import '../tokens/token_constants.dart' show |
EOF_TOKEN, |
EQ_TOKEN, |
FUNCTION_TOKEN, |
+ GT_TOKEN, |
+ GT_GT_TOKEN, |
HASH_TOKEN, |
HEXADECIMAL_TOKEN, |
IDENTIFIER_TOKEN, |
@@ -99,11 +101,13 @@ class Parser { |
final Listener listener; |
bool mayParseFunctionExpressions = true; |
final bool enableConditionalDirectives; |
+ final bool enableGenericMethods; |
bool asyncAwaitKeywordsEnabled; |
Parser(this.listener, |
{this.enableConditionalDirectives: false, |
- this.asyncAwaitKeywordsEnabled: false}); |
+ this.asyncAwaitKeywordsEnabled: false, |
+ this.enableGenericMethods: false}); |
Token parseUnit(Token token) { |
listener.beginCompilationUnit(token); |
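
Generic-method parsing is gated behind the new `enableGenericMethods` flag, mirroring `enableConditionalDirectives`. A minimal sketch of how a caller would opt in; `listener` and `tokens` are hypothetical placeholders for an existing Listener implementation and the first Token produced by the scanner, not part of this CL:

    // Hypothetical call site: opt in to the experimental generic-method syntax.
    Parser parser = new Parser(listener, enableGenericMethods: true);
    parser.parseUnit(tokens);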
@@ -524,6 +528,66 @@ class Parser { |
return false; |
} |
+ bool isValidTypeArguments(Token token, {bool requireParenthesis: false}) { |
+ // To avoid changing the token stream during an investigation of whether |
+ // the parser is looking at a `typeArguments` construct, we do not replace |
+ // one `>>` token by two `>` tokens (which would destroy a correct |
+ // expression like `foo(a < 0, b >> 2)`). Instead, we count levels for the |
+ // '<'/'>' brackets directly. This makes sense because the type argument |
+ // sublanguage is so tiny: |
ahe (2016/02/23 08:05:27): The scanner should already have computed this for
eernst (2016/02/23 12:40:45): Ah, yes, I hadn't looked at that.
|
+ // |
+ // typeArguments ::= '<' typeList '>' |
+ // typeList ::= type (',' type)* |
+ // type ::= typeName typeArguments? |
+ // typeName ::= qualified |
+ // qualified ::= identifier ('.' identifier)? |
+ // |
+ // The point is that we check for parenthesis correctness for '<'/'>' plus |
+ // membership of the intersection between the following regular languages: |
+ // |
+ // anything starting with '<' and ending with '>' |
+ // (identifier | (identifier '.' identifier) | ',' | '<' | '>' | '>>')* |
+ // |
+ // Obviously, every correct `typeArguments` will pass this test. However, |
+ // some incorrect ones will also pass, e.g., `<K,<<><>>,>`. This should be |
+ // safe, but the real solution is of course the following: |
+ // |
+ // TODO(eernst): Prove that if a token sequence is accepted by this test |
+ // but is not a correct `typeArguments`, then it is a syntax error rather |
+ // than a correct continuation of the program that does not contain a |
+ // `typeArguments` construct at this point. |
+ // If that property does not hold, then we will reject some correct programs. |
+ if (optional('<', token)) { |
+ int angleBracketLevel = 1; |
+ token = token.next; |
+ while (!identical(token.kind, EOF_TOKEN) && angleBracketLevel > 0) { |
+ // Invariant: `angleBracketLevel` is #'<' - #'>' from initial token |
ahe (2016/02/23 08:05:27): What does #'<' mean?
eernst (2016/02/23 12:40:45): The number of '<' tokens. Added an explanation to
|
+ // to `token`, both included, and considering ['>>'] as ['>', '>']. |
ahe (2016/02/23 08:05:27): I don't think ['>>'] is a valid reference.
eernst (2016/02/23 12:40:45): Ah, of course not. It is intended to be the sequence
|
+ final int kind = token.kind; |
+ switch (kind) { |
+ case IDENTIFIER_TOKEN: |
+ if (optional('.', token.next)) { |
+ token = token.next.next; |
+ if (token.kind != IDENTIFIER_TOKEN) return false; |
+ } |
+ break; |
+ case COMMA_TOKEN: break; |
floitsch (2016/02/22 12:45:20): Couldn't you treat the "comma" the same way as the
eernst (2016/02/22 18:02:51): Not immediately: the comma separates complex constructs
|
+ case LT_TOKEN: angleBracketLevel++; break; |
+ case GT_TOKEN: angleBracketLevel--; break; |
+ case GT_GT_TOKEN: angleBracketLevel -= 2; break; |
+ default: return false; |
+ } |
+ token = token.next; |
+ } |
+ if (angleBracketLevel != 0) return false; |
+ return requireParenthesis |
+ ? token.kind == OPEN_PAREN_TOKEN |
+ : true; |
+ } else { |
+ return false; |
+ } |
+ } |
+ |
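
To make the accepted language concrete, here is a small standalone sketch (not part of this CL) of the same bracket-level counting over plain token strings. The function name is made up for illustration, and qualified identifiers ('.') are not handled, unlike the real `isValidTypeArguments`:

    bool looksLikeTypeArguments(List<String> tokens) {
      if (tokens.isEmpty || tokens[0] != '<') return false;
      // Number of '<' seen minus number of '>' seen; '>>' counts as two '>'.
      int level = 1;
      for (int i = 1; i < tokens.length && level > 0; i++) {
        String t = tokens[i];
        if (t == '<') {
          level++;
        } else if (t == '>') {
          level--;
        } else if (t == '>>') {
          level -= 2;
        } else if (t != ',' &&
            !new RegExp(r'^[A-Za-z_$][A-Za-z0-9_$]*$').hasMatch(t)) {
          // Numbers, operators, etc. cannot occur inside `typeArguments`.
          return false;
        }
      }
      return level == 0;
    }

    main() {
      // A plain `<K, V>` argument list: accepted.
      print(looksLikeTypeArguments(['<', 'K', ',', 'V', '>']));
      // The `foo(a < 0, b >> 2)` case from the comment: rejected at '0'.
      print(looksLikeTypeArguments(['<', '0', ',', 'b', '>>', '2', ')']));
    }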
Token parseQualified(Token token) { |
token = parseIdentifier(token); |
while (optional('.', token)) { |
@@ -540,6 +604,14 @@ class Parser { |
} |
} |
+ Token parseAndIgnoreQualifiedRestOpt(Token token) { |
+ if (optional('.', token)) { |
+ return parseAndIgnoreQualifiedRest(token); |
+ } else { |
+ return token; |
+ } |
+ } |
+ |
Token parseQualifiedRest(Token token) { |
assert(optional('.', token)); |
Token period = token; |
@@ -548,6 +620,12 @@ class Parser { |
return token; |
} |
+ Token parseAndIgnoreQualifiedRest(Token token) { |
+ assert(optional('.', token)); |
+ token = parseAndIgnoreIdentifier(token.next); |
+ return token; |
+ } |
+ |
Token skipBlock(Token token) { |
if (!optional('{', token)) { |
return listener.expectedBlockToSkip(token); |
@@ -679,6 +757,13 @@ class Parser { |
return token.next; |
} |
+ Token parseAndIgnoreIdentifier(Token token) { |
+ if (!token.isIdentifier()) { |
+ token = listener.expectedIdentifier(token); |
+ } |
+ return token.next; |
+ } |
+ |
Token expect(String string, Token token) { |
if (!identical(string, token.stringValue)) { |
return listener.expected(string, token); |
@@ -698,6 +783,14 @@ class Parser { |
return token; |
} |
+ Token parseAndIgnoreTypeParameter(Token token) { |
+ token = parseAndIgnoreIdentifier(token); |
+ if (optional('extends', token)) { |
+ token = parseAndIgnoreType(token.next); |
+ } |
+ return token; |
+ } |
+ |
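
What parseAndIgnoreTypeParameter steps over is an identifier optionally followed by an `extends` bound, the bound itself being a type that may carry type arguments. A hypothetical declaration exercising this, assuming `enableGenericMethods` is on:

    // `<T extends Comparable<T>>` is consumed (identifier, `extends`, then a
    // type with its own type arguments) without emitting listener events.
    T max2<T extends Comparable<T>>(T a, T b) => a.compareTo(b) >= 0 ? a : b;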
/** |
* Returns true if the stringValue of the [token] is [value]. |
*/ |
@@ -748,6 +841,17 @@ class Parser { |
return token; |
} |
+ Token parseAndIgnoreType(Token token) { |
+ if (isValidTypeReference(token)) { |
+ token = parseAndIgnoreIdentifier(token); |
+ token = parseAndIgnoreQualifiedRestOpt(token); |
+ } else { |
+ token = listener.expectedType(token); |
+ } |
+ token = parseAndIgnoreTypeArgumentsOpt(token); |
+ return token; |
+ } |
+ |
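
parseAndIgnoreType handles the types inside an ignored type argument list or bound, and accepts the `qualified` shape from the grammar comment above: identifier, optional '.' identifier, optional nested type arguments. A hypothetical example where such a type appears as an explicit type argument:

    import 'dart:collection' as coll;

    List<T> wrap<T>(T x) => <T>[x];

    main() {
      // `coll.HashSet<int>` is stepped over as identifier, '.', identifier,
      // and nested type arguments; the trailing '>>' of the outer list is a
      // single scanner token handled by the GT_GT_TOKEN case.
      print(wrap<coll.HashSet<int>>(new coll.HashSet<int>()));
    }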
Token parseTypeArgumentsOpt(Token token) { |
return parseStuff(token, |
(t) => listener.beginTypeArguments(t), |
@@ -756,6 +860,19 @@ class Parser { |
(t) => listener.handleNoTypeArguments(t)); |
} |
+ Token parseAndIgnoreTypeArgumentsOpt(Token token, |
+ {bool requireParenthesis: false}) { |
+ if (isValidTypeArguments(token, requireParenthesis: requireParenthesis)) { |
+ return parseStuff(token, |
+ (t) {}, |
+ (t) => parseAndIgnoreType(t), |
+ (c, bt, et) {}, |
+ (t) {}); |
+ } else { |
+ return token; |
+ } |
+ } |
+ |
Token parseTypeVariablesOpt(Token token) { |
return parseStuff(token, |
(t) => listener.beginTypeVariables(t), |
@@ -764,6 +881,14 @@ class Parser { |
(t) => listener.handleNoTypeVariables(t)); |
} |
+ Token parseAndIgnoreTypeParametersOpt(Token token) { |
floitsch (2016/02/22 12:45:20): All these duplicated parseAndIgnore make me think
eernst (2016/02/22 18:02:51): Will probably have to do that in the next step anyway
|
+ return parseStuff(token, |
+ (t) {}, |
+ (t) => parseAndIgnoreTypeParameter(t), |
+ (c, bt, et) {}, |
+ (t) {}); |
+ } |
+ |
// TODO(ahe): Clean this up. |
Token parseStuff(Token token, Function beginStuff, Function stuffParser, |
Function endStuff, Function handleNoStuff) { |
@@ -780,10 +905,6 @@ class Parser { |
token = new SymbolToken(GT_INFO, token.charOffset); |
token.next = new SymbolToken(GT_INFO, token.charOffset + 1); |
token.next.next = next; |
- } else if (identical(token.stringValue, '>>>')) { |
- token = new SymbolToken(GT_INFO, token.charOffset); |
- token.next = new SymbolToken(GT_GT_INFO, token.charOffset + 1); |
- token.next.next = next; |
} |
endStuff(count, begin, token); |
return expect('>', token); |
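
The retained branch above is what lets nested type argument lists close: the scanner emits a single '>>' token for two adjacent closers, and parseStuff splits it back into two '>' tokens. A small illustration:

    // The final '>>' closing both List<int> and the outer Map<...> is scanned
    // as one token and split into two '>' symbols by parseStuff.
    Map<String, List<int>> countsByName = <String, List<int>>{};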
@@ -1014,6 +1135,9 @@ class Parser { |
} |
Token token = parseIdentifier(name); |
+ if (enableGenericMethods && getOrSet == null) { |
+ token = parseAndIgnoreTypeParametersOpt(token); |
+ } |
token = parseFormalParametersOpt(token); |
bool previousAsyncAwaitKeywordsEnabled = asyncAwaitKeywordsEnabled; |
token = parseAsyncModifier(token); |
@@ -1322,7 +1446,8 @@ class Parser { |
// error handling. |
final String value = token.stringValue; |
if ((identical(value, '(')) || (identical(value, '.')) |
- || (identical(value, '{')) || (identical(value, '=>'))) { |
+ || (identical(value, '{')) || (identical(value, '=>')) |
+ || (enableGenericMethods && identical(value, '<'))) { |
isField = false; |
break; |
} else if (identical(value, ';')) { |
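
In the field-versus-method lookahead above, a '<' after the member name now also settles the question on the method side when the flag is on. A hypothetical class member pair:

    class Registry {
      // With enableGenericMethods, the '<' after the member name makes the
      // lookahead set isField = false, just as '(' or '=>' would.
      T lookup<T>(String key) => null;

      // The token after the name is '=', so this member stays a field.
      List<String> keys = <String>[];
    }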
@@ -1423,6 +1548,9 @@ class Parser { |
} |
token = parseQualifiedRestOpt(token); |
+ if (enableGenericMethods && getOrSet == null) { |
+ token = parseAndIgnoreTypeParametersOpt(token); |
+ } |
token = parseFormalParametersOpt(token); |
token = parseInitializersOpt(token); |
bool previousAsyncAwaitKeywordsEnabled = asyncAwaitKeywordsEnabled; |
@@ -1509,6 +1637,9 @@ class Parser { |
} |
token = parseQualifiedRestOpt(token); |
listener.endFunctionName(token); |
+ if (enableGenericMethods && getOrSet == null) { |
+ token = parseAndIgnoreTypeParametersOpt(token); |
+ } |
token = parseFormalParametersOpt(token); |
token = parseInitializersOpt(token); |
bool previousAsyncAwaitKeywordsEnabled = asyncAwaitKeywordsEnabled; |
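
The declaration sites changed above all guard on `getOrSet == null`, so a type parameter list is only attempted for ordinary methods and functions, never for getters or setters. A hypothetical class exercising both cases under the flag:

    class Cache {
      final Map<String, Object> _entries = <String, Object>{};

      // Ordinary method: with enableGenericMethods the `<T>` is consumed by
      // parseAndIgnoreTypeParametersOpt and discarded.
      T fetch<T>(String key) => _entries[key] as T;

      // Getter: getOrSet != null, so no type parameter list is attempted.
      int get size => _entries.length;
    }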
@@ -2338,6 +2469,9 @@ class Parser { |
Token parseSend(Token token) { |
listener.beginSend(token); |
token = parseIdentifier(token); |
+ if (enableGenericMethods) { |
+ token = parseAndIgnoreTypeArgumentsOpt(token, requireParenthesis: true); |
+ } |
token = parseArgumentsOpt(token); |
listener.endSend(token); |
return token; |
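
Because parseSend passes `requireParenthesis: true`, explicit type arguments on a call are only recognized when an argument list follows the closing '>'. A hypothetical example showing both sides of that rule:

    int f(bool x, bool y) => (x && y) ? 1 : 0;

    List<T> pairOf<T>(T x) => <T>[x, x];

    main() {
      int a = 1, b = 2, c = 3, d = 4;
      // The token after the '>' is 'd', not '(', so the '<' ... '>' sequence
      // is not taken as type arguments and these remain two comparisons.
      print(f(a < b, c > d));
      // Here '<int>' is immediately followed by '(', so with the flag on it
      // is read as a type argument list on the call to pairOf.
      print(pairOf<int>(a));
    }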