Skip to content

Commit

Permalink
refactor(parser): remove lexer.current
Browse files Browse the repository at this point in the history
  • Loading branch information
overlookmotel committed Feb 4, 2024
1 parent 9ddf439 commit f2f0c42
Show file tree
Hide file tree
Showing 10 changed files with 34 additions and 47 deletions.
6 changes: 3 additions & 3 deletions crates/oxc_parser/src/lexer/byte_handlers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -104,14 +104,14 @@ ascii_byte_handler!(SPS(lexer) {
// <VT> <FF> Irregular Whitespace
ascii_byte_handler!(ISP(lexer) {
lexer.consume_char();
lexer.trivia_builder.add_irregular_whitespace(lexer.current.token.start, lexer.offset());
lexer.trivia_builder.add_irregular_whitespace(lexer.token.start, lexer.offset());
Kind::Skip
});

// '\r' '\n'
ascii_byte_handler!(LIN(lexer) {
lexer.consume_char();
lexer.current.token.is_on_new_line = true;
lexer.token.is_on_new_line = true;
Kind::Skip
});

Expand Down Expand Up @@ -144,7 +144,7 @@ ascii_byte_handler!(HAS(lexer) {
lexer.consume_char();
// HashbangComment ::
// `#!` SingleLineCommentChars?
if lexer.current.token.start == 0 && lexer.next_eq('!') {
if lexer.token.start == 0 && lexer.next_eq('!') {
lexer.read_hashbang_comment()
} else {
lexer.private_identifier()
Expand Down
10 changes: 5 additions & 5 deletions crates/oxc_parser/src/lexer/comment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ impl<'a> Lexer<'a> {
/// Section 12.4 Single Line Comment
#[allow(clippy::cast_possible_truncation)]
pub(super) fn skip_single_line_comment(&mut self) -> Kind {
let start = self.current.token.start;
let start = self.token.start;
while let Some(c) = self.next_char() {
if is_line_terminator(c) {
self.current.token.is_on_new_line = true;
self.token.is_on_new_line = true;
self.trivia_builder
.add_single_line_comment(start, self.offset() - c.len_utf8() as u32);
return Kind::Skip;
Expand All @@ -25,11 +25,11 @@ impl<'a> Lexer<'a> {
pub(super) fn skip_multi_line_comment(&mut self) -> Kind {
while let Some(c) = self.next_char() {
if c == '*' && self.next_eq('/') {
self.trivia_builder.add_multi_line_comment(self.current.token.start, self.offset());
self.trivia_builder.add_multi_line_comment(self.token.start, self.offset());
return Kind::Skip;
}
if is_line_terminator(c) {
self.current.token.is_on_new_line = true;
self.token.is_on_new_line = true;
}
}
self.error(diagnostics::UnterminatedMultiLineComment(self.unterminated_range()));
Expand All @@ -43,7 +43,7 @@ impl<'a> Lexer<'a> {
break;
}
}
self.current.token.is_on_new_line = true;
self.token.is_on_new_line = true;
Kind::HashbangComment
}
}
2 changes: 1 addition & 1 deletion crates/oxc_parser/src/lexer/jsx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ impl<'a> Lexer<'a> {
}

pub(crate) fn next_jsx_child(&mut self) -> Token {
self.current.token.start = self.offset();
self.token.start = self.offset();
let kind = self.read_jsx_child();
self.finish_next(kind)
}
Expand Down
35 changes: 14 additions & 21 deletions crates/oxc_parser/src/lexer/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,12 +63,6 @@ pub enum LexerContext {
JsxAttributeValue,
}

/// Wrapper around `Token`.
/// TODO: This serves no purpose and can be replaced with `Token`.
struct LexerCurrent {
token: Token,
}

#[derive(Debug, Clone, Copy)]
struct Lookahead<'a> {
position: SourcePosition<'a>,
Expand All @@ -83,7 +77,7 @@ pub struct Lexer<'a> {

source_type: SourceType,

current: LexerCurrent,
token: Token,

pub(crate) errors: Vec<Error>,

Expand All @@ -108,12 +102,11 @@ impl<'a> Lexer<'a> {

// The first token is at the start of file, so it is allowed to be on a new line
let token = Token::new_on_new_line();
let current = LexerCurrent { token };
Self {
allocator,
source,
source_type,
current,
token,
errors: vec![],
lookahead: VecDeque::with_capacity(4), // 4 is the maximum lookahead for TypeScript
context: LexerContext::Regular,
Expand All @@ -133,7 +126,7 @@ impl<'a> Lexer<'a> {
pub fn checkpoint(&self) -> LexerCheckpoint<'a> {
LexerCheckpoint {
position: self.source.position(),
token: self.current.token,
token: self.token,
errors_pos: self.errors.len(),
}
}
Expand All @@ -148,7 +141,7 @@ impl<'a> Lexer<'a> {
// SAFETY: Caller guarantees `checkpoint` was created from this `Lexer`,
// and therefore `checkpoint.position` was created from `self.source`.
self.source.set_position(checkpoint.position);
self.current.token = checkpoint.token;
self.token = checkpoint.token;
self.lookahead.clear();
}

Expand Down Expand Up @@ -176,11 +169,11 @@ impl<'a> Lexer<'a> {
self.lookahead.push_back(Lookahead { position: self.source.position(), token: peeked });
}

// Call to `finish_next` in loop above leaves `self.current.token = Token::default()`.
// Only circumstance in which `self.current.token` wouldn't have been default at start of this
// Call to `finish_next` in loop above leaves `self.token = Token::default()`.
// Only circumstance in which `self.token` wouldn't have been default at start of this
// function is if we were at very start of file, before any tokens have been read, when
// `token.is_on_new_line` is `true`. But `lookahead` isn't called before the first token is
// read, so that's not possible. So no need to restore `self.current.token` here.
// read, so that's not possible. So no need to restore `self.token` here.
// It's already in same state as it was at start of this function.

// SAFETY: `position` was created above from `self.source`. `self.source` never changes.
Expand Down Expand Up @@ -208,11 +201,11 @@ impl<'a> Lexer<'a> {
}

fn finish_next(&mut self, kind: Kind) -> Token {
self.current.token.kind = kind;
self.current.token.end = self.offset();
debug_assert!(self.current.token.start <= self.current.token.end);
let token = self.current.token;
self.current.token = Token::default();
self.token.kind = kind;
self.token.end = self.offset();
debug_assert!(self.token.start <= self.token.end);
let token = self.token;
self.token = Token::default();
token
}

Expand All @@ -230,7 +223,7 @@ impl<'a> Lexer<'a> {

/// Get the current unterminated token range
fn unterminated_range(&self) -> Span {
Span::new(self.current.token.start, self.offset())
Span::new(self.token.start, self.offset())
}

/// Consume the current char if not at EOF
Expand Down Expand Up @@ -286,7 +279,7 @@ impl<'a> Lexer<'a> {
fn read_next_token(&mut self) -> Kind {
loop {
let offset = self.offset();
self.current.token.start = offset;
self.token.start = offset;

let byte = if let Some(byte) = self.source.peek_byte() {
byte
Expand Down
5 changes: 1 addition & 4 deletions crates/oxc_parser/src/lexer/punctuation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,10 +40,7 @@ impl<'a> Lexer<'a> {
pub(super) fn read_minus(&mut self) -> Option<Kind> {
if self.next_eq('-') {
// SingleLineHTMLCloseComment `-->` in script mode
if self.current.token.is_on_new_line
&& self.source_type.is_script()
&& self.next_eq('>')
{
if self.token.is_on_new_line && self.source_type.is_script() && self.next_eq('>') {
None
} else {
Some(Kind::Minus2)
Expand Down
2 changes: 1 addition & 1 deletion crates/oxc_parser/src/lexer/regex.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ impl<'a> Lexer<'a> {
/// Which means the parser needs to re-tokenize on `PrimaryExpression`,
/// `RegularExpressionLiteral` only appear on the right hand side of `PrimaryExpression`
pub(crate) fn next_regex(&mut self, kind: Kind) -> (Token, u32, RegExpFlags) {
self.current.token.start = self.offset()
self.token.start = self.offset()
- match kind {
Kind::Slash => 1,
Kind::SlashEq => 2,
Expand Down
4 changes: 2 additions & 2 deletions crates/oxc_parser/src/lexer/string.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,8 @@ impl<'a> Lexer<'a> {
if !has_escape {
return;
}
self.escaped_strings.insert(self.current.token.start, s);
self.current.token.escaped = true;
self.escaped_strings.insert(self.token.start, s);
self.token.escaped = true;
}

pub(crate) fn get_string(&self, token: Token) -> &'a str {
Expand Down
7 changes: 3 additions & 4 deletions crates/oxc_parser/src/lexer/template.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ impl<'a> Lexer<'a> {
/// Re-tokenize the current `}` token for `TemplateSubstitutionTail`
/// See Section 12, the parser needs to re-tokenize on `TemplateSubstitutionTail`,
pub(crate) fn next_template_substitution_tail(&mut self) -> Token {
self.current.token.start = self.offset() - 1;
self.token.start = self.offset() - 1;
let kind = self.read_template_literal(Kind::TemplateMiddle, Kind::TemplateTail);
self.lookahead.clear();
self.finish_next(kind)
Expand All @@ -63,9 +63,8 @@ impl<'a> Lexer<'a> {
if !has_escape {
return;
}
self.escaped_templates
.insert(self.current.token.start, is_valid_escape_sequence.then(|| s));
self.current.token.escaped = true;
self.escaped_templates.insert(self.token.start, is_valid_escape_sequence.then(|| s));
self.token.escaped = true;
}

pub(crate) fn get_template_string(&self, token: Token) -> Option<&'a str> {
Expand Down
2 changes: 1 addition & 1 deletion crates/oxc_parser/src/lexer/typescript.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ impl<'a> Lexer<'a> {
Kind::ShiftLeftEq => 3,
_ => unreachable!(),
};
self.current.token.start = self.offset() - offset;
self.token.start = self.offset() - offset;
self.source.back(offset as usize - 1);
let kind = Kind::LAngle;
self.lookahead.clear();
Expand Down
8 changes: 3 additions & 5 deletions crates/oxc_parser/src/lexer/unicode.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,13 @@ impl<'a> Lexer<'a> {
}
c if is_irregular_whitespace(c) => {
self.consume_char();
self.trivia_builder
.add_irregular_whitespace(self.current.token.start, self.offset());
self.trivia_builder.add_irregular_whitespace(self.token.start, self.offset());
Kind::Skip
}
c if is_irregular_line_terminator(c) => {
self.consume_char();
self.current.token.is_on_new_line = true;
self.trivia_builder
.add_irregular_whitespace(self.current.token.start, self.offset());
self.token.is_on_new_line = true;
self.trivia_builder.add_irregular_whitespace(self.token.start, self.offset());
Kind::Skip
}
_ => {
Expand Down

0 comments on commit f2f0c42

Please sign in to comment.