This repository was archived by the owner on Feb 25, 2025. It is now read-only.

Commit f7e6224

Authored and committed by Dart CI
Version 2.18.0-35.0.dev
Merge commit 'fec466e3fe726060d411bc0dc2ed5ae5977a2ab1' into 'dev'
2 parents: b040cc3 + fec466e · commit f7e6224

File tree: 23 files changed, +458 additions, -145 deletions


DEPS

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ vars = {

   # Checked-in SDK version. The checked-in SDK is a Dart SDK distribution in a
   # cipd package used to run Dart scripts in the build and test infrastructure.
-  "sdk_tag": "version:2.16.2",
+  "sdk_tag": "version:2.17.0-266.1.beta",

   # co19 is a cipd package. Use update.sh in tests/co19[_2] to update these
   # hashes. It requires access to the dart-build-access group, which EngProd

pkg/_fe_analyzer_shared/lib/src/scanner/abstract_scanner.dart

Lines changed: 17 additions & 14 deletions
@@ -9,7 +9,15 @@ import 'dart:collection' show ListMixin;
 import 'dart:typed_data' show Uint16List, Uint32List;

 import 'token.dart'
-    show BeginToken, Keyword, KeywordToken, SyntheticToken, Token, TokenType;
+    show
+        BeginToken,
+        CommentToken,
+        Keyword,
+        KeywordToken,
+        LanguageVersionToken,
+        SyntheticToken,
+        Token,
+        TokenType;

 import 'token.dart' as analyzer show StringToken;

@@ -34,8 +42,7 @@ import 'error_token.dart'

 import 'keyword_state.dart' show KeywordState;

-import 'token_impl.dart'
-    show CommentToken, DartDocToken, LanguageVersionToken, StringToken;
+import 'token_impl.dart' show DartDocToken, StringTokenImpl;

 import 'token_constants.dart';

@@ -570,11 +577,9 @@ abstract class AbstractScanner implements Scanner {
     {
       AbstractScanner option1 = createRecoveryOptionScanner();
       option1.insertSyntheticClosers(originalStack, groupingStack);
-      option1Recoveries =
-          option1.recoveryOptionTokenizer(option1.appendEndGroupInternal(
-              /* foundMatchingBrace = */ true,
-              type,
-              openKind));
+      option1Recoveries = option1.recoveryOptionTokenizer(
+          option1.appendEndGroupInternal(
+              /* foundMatchingBrace = */ true, type, openKind));
       option1Recoveries += option1.groupingStack.slowLength();
     }

@@ -583,11 +588,9 @@ abstract class AbstractScanner implements Scanner {
     {
       AbstractScanner option2 = createRecoveryOptionScanner();
       option2.groupingStack = originalStack;
-      option2Recoveries =
-          option2.recoveryOptionTokenizer(option2.appendEndGroupInternal(
-              /* foundMatchingBrace = */ false,
-              type,
-              openKind));
+      option2Recoveries = option2.recoveryOptionTokenizer(
+          option2.appendEndGroupInternal(
+              /* foundMatchingBrace = */ false, type, openKind));
       // We add 1 to make this option pay for ignoring this token.
       option2Recoveries += option2.groupingStack.slowLength() + 1;
     }

@@ -1914,7 +1917,7 @@ abstract class AbstractScanner implements Scanner {
       codeUnits.add(next);
       next = advance();
     }
-    appendToken(new StringToken.fromString(
+    appendToken(new StringTokenImpl.fromString(
         TokenType.IDENTIFIER, new String.fromCharCodes(codeUnits), charOffset,
         precedingComments: comments));
     return next;
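
The hunks above are part of a rename of the scanner's concrete token classes: the implementations in token_impl.dart now carry an Impl suffix (StringTokenImpl), while the un-suffixed names (StringToken, CommentToken, LanguageVersionToken) refer to the interfaces in token.dart. A minimal sketch of what a call site looks like after this commit; the package import paths and the makeIdentifier helper are illustrative assumptions, not part of the diff:

// Illustrative only: construct the renamed implementation class, but expose
// it through the analyzer-facing interface from token.dart.
import 'package:_fe_analyzer_shared/src/scanner/token.dart'
    show StringToken, TokenType;
import 'package:_fe_analyzer_shared/src/scanner/token_impl.dart'
    show StringTokenImpl;

StringToken makeIdentifier(String name, int charOffset) {
  // Same constructor as the appendToken call in the hunk above,
  // minus the scanner state (preceding comments, code units).
  return new StringTokenImpl.fromString(TokenType.IDENTIFIER, name, charOffset);
}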

pkg/_fe_analyzer_shared/lib/src/scanner/recover.dart

Lines changed: 2 additions & 2 deletions
@@ -6,7 +6,7 @@ library _fe_analyzer_shared.scanner.recover;

 import 'token.dart' show Token, TokenType;

-import 'token_impl.dart' show StringToken;
+import 'token_impl.dart' show StringTokenImpl;

 import 'error_token.dart' show ErrorToken;

@@ -49,7 +49,7 @@ Token scannerRecovery(List<int> bytes, Token tokens, List<int> lineStarts) {
 }

 Token synthesizeToken(int charOffset, String value, TokenType type) {
-  return new StringToken.fromString(type, value, charOffset);
+  return new StringTokenImpl.fromString(type, value, charOffset);
 }

 Token skipToEof(Token token) {

pkg/_fe_analyzer_shared/lib/src/scanner/scanner.dart

Lines changed: 2 additions & 3 deletions
@@ -22,8 +22,7 @@ export 'abstract_scanner.dart'

 export 'token_impl.dart'
     show
-        LanguageVersionToken,
-        StringToken,
+        StringTokenImpl,
         isBinaryOperator,
         isMinusOperator,
         isTernaryOperator,

@@ -32,7 +31,7 @@ export 'token_impl.dart'

 export 'error_token.dart' show ErrorToken, buildUnexpectedCharacterToken;

-export 'token_impl.dart' show LanguageVersionToken;
+export 'token.dart' show LanguageVersionToken;

 export 'token_constants.dart' show EOF_TOKEN;
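
Because scanner.dart now re-exports LanguageVersionToken from token.dart rather than token_impl.dart, code importing it through scanner.dart keeps seeing the same name; it simply resolves to the interface instead of the removed implementation class. A hypothetical consumer, for illustration only (not part of this commit):

// Hypothetical consumer: the import is unchanged, only the symbol's origin moved.
import 'package:_fe_analyzer_shared/src/scanner/scanner.dart'
    show LanguageVersionToken;

void reportLanguageVersion(LanguageVersionToken token) {
  // major/minor are declared on the interface (see the @override fields
  // on LanguageVersionTokenImpl in token_impl.dart below).
  print('// @dart = ${token.major}.${token.minor}');
}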

pkg/_fe_analyzer_shared/lib/src/scanner/string_scanner.dart

Lines changed: 15 additions & 5 deletions
@@ -4,15 +4,25 @@

 library dart2js.scanner.string_scanner;

-import 'token.dart' show Token, SyntheticStringToken, TokenType;
+import 'token.dart'
+    show
+        CommentToken,
+        LanguageVersionToken,
+        SyntheticStringToken,
+        Token,
+        TokenType;

 import 'token.dart' as analyzer show StringToken;

 import 'abstract_scanner.dart'
     show AbstractScanner, LanguageVersionChanged, ScannerConfiguration;

 import 'token_impl.dart'
-    show CommentToken, DartDocToken, LanguageVersionToken, StringToken;
+    show
+        CommentTokenImpl,
+        DartDocToken,
+        LanguageVersionTokenImpl,
+        StringTokenImpl;

 import 'error_token.dart' show ErrorToken;

@@ -69,7 +79,7 @@ class StringScanner extends AbstractScanner {
   analyzer.StringToken createSubstringToken(
       TokenType type, int start, bool asciiOnly,
       [int extraOffset = 0]) {
-    return new StringToken.fromSubstring(
+    return new StringTokenImpl.fromSubstring(
         type, string, start, scanOffset + extraOffset, tokenStart,
         canonicalize: true, precedingComments: comments);
   }

@@ -85,7 +95,7 @@ class StringScanner extends AbstractScanner {
   @override
   CommentToken createCommentToken(TokenType type, int start, bool asciiOnly,
       [int extraOffset = 0]) {
-    return new CommentToken.fromSubstring(
+    return new CommentTokenImpl.fromSubstring(
         type, string, start, scanOffset + extraOffset, tokenStart,
         canonicalize: true);
   }

@@ -101,7 +111,7 @@ class StringScanner extends AbstractScanner {
   @override
   LanguageVersionToken createLanguageVersionToken(
       int start, int major, int minor) {
-    return new LanguageVersionToken.fromSubstring(
+    return new LanguageVersionTokenImpl.fromSubstring(
         string, start, scanOffset, tokenStart, major, minor,
         canonicalize: true);
   }

pkg/_fe_analyzer_shared/lib/src/scanner/token_impl.dart

Lines changed: 29 additions & 40 deletions
@@ -4,8 +4,14 @@

 library _fe_analyzer_shared.scanner.token;

-import 'token.dart' as analyzer;
-import 'token.dart' show TokenType;
+import 'token.dart'
+    show
+        DocumentationCommentToken,
+        SimpleToken,
+        TokenType,
+        CommentToken,
+        StringToken,
+        LanguageVersionToken;

 import 'token_constants.dart' show IDENTIFIER_TOKEN;

@@ -16,7 +22,7 @@ import 'string_canonicalizer.dart';
  * number literals, comments, and error tokens, using the corresponding
  * precedence info.
  */
-class StringToken extends analyzer.SimpleToken implements analyzer.StringToken {
+class StringTokenImpl extends SimpleToken implements StringToken {
   /**
    * The length threshold above which substring tokens are computed lazily.
    *

@@ -33,8 +39,8 @@ class StringToken extends analyzer.SimpleToken implements analyzer.StringToken {
   * Creates a non-lazy string token. If [canonicalize] is true, the string
   * is canonicalized before the token is created.
   */
-  StringToken.fromString(TokenType type, String value, int charOffset,
-      {bool canonicalize: false, analyzer.CommentToken? precedingComments})
+  StringTokenImpl.fromString(TokenType type, String value, int charOffset,
+      {bool canonicalize: false, CommentToken? precedingComments})
       : valueOrLazySubstring = canonicalizedString(
             value, /* start = */ 0, value.length, canonicalize),
         super(type, charOffset, precedingComments);

@@ -43,9 +49,9 @@ class StringToken extends analyzer.SimpleToken implements analyzer.StringToken {
   * Creates a lazy string token. If [canonicalize] is true, the string
   * is canonicalized before the token is created.
   */
-  StringToken.fromSubstring(
+  StringTokenImpl.fromSubstring(
       TokenType type, String data, int start, int end, int charOffset,
-      {bool canonicalize: false, analyzer.CommentToken? precedingComments})
+      {bool canonicalize: false, CommentToken? precedingComments})
       : super(type, charOffset, precedingComments) {
     int length = end - start;
     if (length <= LAZY_THRESHOLD) {

@@ -61,9 +67,9 @@ class StringToken extends analyzer.SimpleToken implements analyzer.StringToken {
   * Creates a lazy string token. If [asciiOnly] is false, the byte array
   * is passed through a UTF-8 decoder.
   */
-  StringToken.fromUtf8Bytes(TokenType type, List<int> data, int start, int end,
-      bool asciiOnly, int charOffset,
-      {analyzer.CommentToken? precedingComments})
+  StringTokenImpl.fromUtf8Bytes(TokenType type, List<int> data, int start,
+      int end, bool asciiOnly, int charOffset,
+      {CommentToken? precedingComments})
       : super(type, charOffset, precedingComments) {
     int length = end - start;
     if (length <= LAZY_THRESHOLD) {

@@ -73,10 +79,6 @@ class StringToken extends analyzer.SimpleToken implements analyzer.StringToken {
     }
   }

-  StringToken._(TokenType type, this.valueOrLazySubstring, int charOffset,
-      [analyzer.CommentToken? precedingComments])
-      : super(type, charOffset, precedingComments);
-
   @override
   String get lexeme {
     if (valueOrLazySubstring is String) {

@@ -119,28 +121,15 @@ class StringToken extends analyzer.SimpleToken implements analyzer.StringToken {
   String value() => lexeme;
 }

-/**
- * A String-valued token that does not exist in the original source.
- */
-class SyntheticStringToken extends StringToken
-    implements analyzer.SyntheticStringToken {
-  SyntheticStringToken(TokenType type, String value, int offset,
-      [analyzer.CommentToken? precedingComments])
-      : super._(type, value, offset, precedingComments);
-
-  @override
-  int get length => 0;
-}
-
-class CommentToken extends StringToken implements analyzer.CommentToken {
+class CommentTokenImpl extends StringTokenImpl implements CommentToken {
   @override
-  analyzer.SimpleToken? parent;
+  SimpleToken? parent;

   /**
   * Creates a lazy comment token. If [canonicalize] is true, the string
   * is canonicalized before the token is created.
   */
-  CommentToken.fromSubstring(
+  CommentTokenImpl.fromSubstring(
       TokenType type, String data, int start, int end, int charOffset,
       {bool canonicalize: false})
       : super.fromSubstring(type, data, start, end, charOffset,

@@ -149,44 +138,44 @@ class CommentToken extends StringToken implements analyzer.CommentToken {
   /**
   * Creates a non-lazy comment token.
   */
-  CommentToken.fromString(TokenType type, String lexeme, int charOffset)
+  CommentTokenImpl.fromString(TokenType type, String lexeme, int charOffset)
       : super.fromString(type, lexeme, charOffset);

   /**
   * Creates a lazy string token. If [asciiOnly] is false, the byte array
   * is passed through a UTF-8 decoder.
   */
-  CommentToken.fromUtf8Bytes(TokenType type, List<int> data, int start, int end,
-      bool asciiOnly, int charOffset)
+  CommentTokenImpl.fromUtf8Bytes(TokenType type, List<int> data, int start,
+      int end, bool asciiOnly, int charOffset)
       : super.fromUtf8Bytes(type, data, start, end, asciiOnly, charOffset);
 }

-class LanguageVersionToken extends CommentToken
-    implements analyzer.LanguageVersionToken {
+class LanguageVersionTokenImpl extends CommentTokenImpl
+    implements LanguageVersionToken {
   @override
   int major;

   @override
   int minor;

-  LanguageVersionToken.from(String text, int offset, this.major, this.minor)
+  LanguageVersionTokenImpl.from(String text, int offset, this.major, this.minor)
       : super.fromString(TokenType.SINGLE_LINE_COMMENT, text, offset);

-  LanguageVersionToken.fromSubstring(
+  LanguageVersionTokenImpl.fromSubstring(
      String string, int start, int end, int tokenStart, this.major, this.minor,
      {bool canonicalize: false})
      : super.fromSubstring(
            TokenType.SINGLE_LINE_COMMENT, string, start, end, tokenStart,
            canonicalize: canonicalize);

-  LanguageVersionToken.fromUtf8Bytes(List<int> bytes, int start, int end,
+  LanguageVersionTokenImpl.fromUtf8Bytes(List<int> bytes, int start, int end,
      int tokenStart, this.major, this.minor)
      : super.fromUtf8Bytes(
            TokenType.SINGLE_LINE_COMMENT, bytes, start, end, true, tokenStart);
 }

-class DartDocToken extends CommentToken
-    implements analyzer.DocumentationCommentToken {
+class DartDocToken extends CommentTokenImpl
+    implements DocumentationCommentToken {
   /**
   * Creates a lazy comment token. If [canonicalize] is true, the string
   * is canonicalized before the token is created.
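
Taken together, the hunks above leave token_impl.dart with the following shape: StringTokenImpl implements StringToken, CommentTokenImpl extends StringTokenImpl and implements CommentToken, LanguageVersionTokenImpl and DartDocToken extend CommentTokenImpl, and the local SyntheticStringToken class is gone (the scanners now use the one from token.dart). A short sketch of constructing the renamed classes; assumed usage only, the import paths and helper names are not from the commit:

// Assumed usage, for illustration; constructor signatures match those above.
import 'package:_fe_analyzer_shared/src/scanner/token.dart'
    show CommentToken, LanguageVersionToken, TokenType;
import 'package:_fe_analyzer_shared/src/scanner/token_impl.dart'
    show CommentTokenImpl, LanguageVersionTokenImpl;

CommentToken makeComment(String lexeme, int charOffset) {
  // CommentTokenImpl replaces the CommentToken class formerly defined here.
  return new CommentTokenImpl.fromString(
      TokenType.SINGLE_LINE_COMMENT, lexeme, charOffset);
}

LanguageVersionToken makeLanguageVersionComment(int charOffset) {
  // A "// @dart = 2.17" style marker; major/minor are stored on the token.
  return new LanguageVersionTokenImpl.from('// @dart = 2.17', charOffset, 2, 17);
}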

pkg/_fe_analyzer_shared/lib/src/scanner/utf8_bytes_scanner.dart

Lines changed: 14 additions & 8 deletions
@@ -6,17 +6,21 @@ library _fe_analyzer_shared.scanner.utf8_bytes_scanner;

 import 'dart:convert' show unicodeBomCharacterRune, utf8;

-import 'token.dart' show SyntheticStringToken, TokenType;
+import 'token.dart' show LanguageVersionToken, SyntheticStringToken, TokenType;

-import 'token.dart' as analyzer show StringToken;
+import 'token.dart' as analyzer;

 import 'scanner.dart' show unicodeReplacementCharacter;

 import 'abstract_scanner.dart'
     show AbstractScanner, LanguageVersionChanged, ScannerConfiguration;

 import 'token_impl.dart'
-    show CommentToken, DartDocToken, LanguageVersionToken, StringToken;
+    show
+        CommentTokenImpl,
+        DartDocToken,
+        LanguageVersionTokenImpl,
+        StringTokenImpl;

 /**
  * Scanner that reads from a UTF-8 encoded list of bytes and creates tokens

@@ -227,23 +231,25 @@ class Utf8BytesScanner extends AbstractScanner {
   analyzer.StringToken createSubstringToken(
       TokenType type, int start, bool asciiOnly,
       [int extraOffset = 0]) {
-    return new StringToken.fromUtf8Bytes(
+    return new StringTokenImpl.fromUtf8Bytes(
         type, bytes, start, byteOffset + extraOffset, asciiOnly, tokenStart,
         precedingComments: comments);
   }

   @override
   analyzer.StringToken createSyntheticSubstringToken(
       TokenType type, int start, bool asciiOnly, String syntheticChars) {
-    String source = StringToken.decodeUtf8(bytes, start, byteOffset, asciiOnly);
+    String source =
+        StringTokenImpl.decodeUtf8(bytes, start, byteOffset, asciiOnly);
     return new SyntheticStringToken(
         type, source + syntheticChars, tokenStart, source.length);
   }

   @override
-  CommentToken createCommentToken(TokenType type, int start, bool asciiOnly,
+  analyzer.CommentToken createCommentToken(
+      TokenType type, int start, bool asciiOnly,
       [int extraOffset = 0]) {
-    return new CommentToken.fromUtf8Bytes(
+    return new CommentTokenImpl.fromUtf8Bytes(
         type, bytes, start, byteOffset + extraOffset, asciiOnly, tokenStart);
   }

@@ -257,7 +263,7 @@
   @override
   LanguageVersionToken createLanguageVersionToken(
       int start, int major, int minor) {
-    return new LanguageVersionToken.fromUtf8Bytes(
+    return new LanguageVersionTokenImpl.fromUtf8Bytes(
        bytes, start, byteOffset, tokenStart, major, minor);
   }
