GH-97973: Return all necessary information from the tokenizer #97975

Closed
wants to merge 7 commits
4 changes: 3 additions & 1 deletion Lib/asyncio/base_subprocess.py
@@ -216,7 +216,9 @@ def _process_exited(self, returncode):
         self._proc.returncode = returncode
         self._call(self._protocol.process_exited)
         for p in self._pipes.values():
-            p.pipe.close()
+            if p is not None:
+                p.pipe.close()
+
         self._try_finish()
 
     async def _wait(self):
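Note: BaseSubprocessTransport pre-registers each requested pipe as None and only
swaps in a real pipe transport once it is connected, so a child process that
exits very early can reach _process_exited() while some entries are still None.
A minimal sketch of the pattern this guard protects (a simplified stand-in, not
the actual class):

import subprocess

class TransportSketch:
    """Simplified stand-in for asyncio's BaseSubprocessTransport."""

    def __init__(self, stdin=subprocess.PIPE):
        self._pipes = {}
        if stdin == subprocess.PIPE:
            # Placeholder until the pipe transport is actually connected.
            self._pipes[0] = None

    def _process_exited(self):
        for p in self._pipes.values():
            # Without the guard, p.pipe would raise AttributeError for
            # entries that never finished connecting.
            if p is not None:
                p.pipe.close()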
22 changes: 21 additions & 1 deletion Lib/test/test_tokenize.py
@@ -3,7 +3,7 @@
 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
                      open as tokenize_open, Untokenizer, generate_tokens,
-                     NEWLINE, _generate_tokens_from_c_tokenizer)
+                     NEWLINE, _generate_tokens_from_c_tokenizer, DEDENT)
 from io import BytesIO, StringIO
 import unittest
 from textwrap import dedent
@@ -2512,6 +2512,26 @@ def get_tokens(string):
         self.assertRaises(SyntaxError, get_tokens, "("*1000+"a"+")"*1000)
         self.assertRaises(SyntaxError, get_tokens, "]")
 
+    def test_max_indent(self):
+        MAXINDENT = 100
+
+        def generate_source(indents):
+            source = ''.join((' ' * x) + 'if True:\n' for x in range(indents))
+            source += ' ' * indents + 'pass\n'
+            return source
+
+        valid = generate_source(MAXINDENT - 1)
+        tokens = list(_generate_tokens_from_c_tokenizer(valid))
+        self.assertEqual(tokens[-1].type, DEDENT)
+        compile(valid, "<string>", "exec")
+
+        invalid = generate_source(MAXINDENT)
+        tokens = list(_generate_tokens_from_c_tokenizer(invalid))
+        self.assertEqual(tokens[-1].type, NEWLINE)
+        self.assertRaises(
+            IndentationError, compile, invalid, "<string>", "exec"
+        )
+
     def test_continuation_lines_indentation(self):
         def get_tokens(string):
             return [(kind, string) for (kind, string, *_) in _generate_tokens_from_c_tokenizer(string)]
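The limit exercised by test_max_indent matches the tokenizer's MAXINDENT
constant (100 in CPython's Parser/tokenizer.h). An illustrative standalone
check of the behaviour the test asserts (not part of the diff):

# Build a source with `depth` nested indentation levels and compile it.
def nested(depth):
    src = ''.join('    ' * i + 'if True:\n' for i in range(depth))
    return src + '    ' * depth + 'pass\n'

compile(nested(99), '<string>', 'exec')       # one level under the limit: OK
try:
    compile(nested(100), '<string>', 'exec')  # at the limit: rejected
except IndentationError as exc:
    print(exc)  # "too many levels of indentation"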
@@ -0,0 +1 @@
+Modify the tokenizer to return all necessary information the parser needs to set location information in the AST nodes, so that the parser does not have to calculate it using pointer arithmetic.
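The tokenizer-side struct itself is not shown in this diff, but its shape can
be inferred from the fields the parser reads (start, end, level, lineno,
col_offset, end_lineno, end_col_offset). A sketch under that assumption; the
authoritative definition lives in Parser/tokenizer.h and may differ in detail:

/* Inferred from the accesses in this diff, not copied from the source. */
struct token {
    int level;            /* parenthesis nesting level when tokenized */
    int lineno;           /* first line of the token */
    int col_offset;       /* column at which the token starts */
    int end_lineno;       /* last line of the token */
    int end_col_offset;   /* column just past where the token ends */
    const char *start;    /* pointer to the token's first character */
    const char *end;      /* pointer one past the token's last character */
};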
54 changes: 24 additions & 30 deletions Parser/pegen.c
@@ -123,50 +123,45 @@ growable_comment_array_deallocate(growable_comment_array *arr) {
 }
 
 static int
-_get_keyword_or_name_type(Parser *p, const char *name, int name_len)
+_get_keyword_or_name_type(Parser *p, struct token new_token)
 {
+    int name_len = new_token.end_col_offset - new_token.col_offset;
+    assert(name_len > 0);
+
     if (name_len >= p->n_keyword_lists ||
         p->keywords[name_len] == NULL ||
         p->keywords[name_len]->type == -1) {
         return NAME;
     }
     for (KeywordToken *k = p->keywords[name_len]; k != NULL && k->type != -1; k++) {
-        if (strncmp(k->str, name, name_len) == 0) {
+        if (strncmp(k->str, new_token.start, name_len) == 0) {
             return k->type;
         }
     }
     return NAME;
 }
 
 static int
-initialize_token(Parser *p, Token *token, const char *start, const char *end, int token_type) {
-    assert(token != NULL);
+initialize_token(Parser *p, Token *parser_token, struct token new_token, int token_type) {
+    assert(parser_token != NULL);
 
-    token->type = (token_type == NAME) ? _get_keyword_or_name_type(p, start, (int)(end - start)) : token_type;
-    token->bytes = PyBytes_FromStringAndSize(start, end - start);
-    if (token->bytes == NULL) {
+    parser_token->type = (token_type == NAME) ? _get_keyword_or_name_type(p, new_token) : token_type;
+    parser_token->bytes = PyBytes_FromStringAndSize(new_token.start, new_token.end - new_token.start);
+    if (parser_token->bytes == NULL) {
         return -1;
     }
 
-    if (_PyArena_AddPyObject(p->arena, token->bytes) < 0) {
-        Py_DECREF(token->bytes);
+    if (_PyArena_AddPyObject(p->arena, parser_token->bytes) < 0) {
+        Py_DECREF(parser_token->bytes);
         return -1;
     }
 
-    token->level = p->tok->level;
-
-    const char *line_start = token_type == STRING ? p->tok->multi_line_start : p->tok->line_start;
-    int lineno = token_type == STRING ? p->tok->first_lineno : p->tok->lineno;
-    int end_lineno = p->tok->lineno;
-
-    int col_offset = (start != NULL && start >= line_start) ? (int)(start - line_start) : -1;
-    int end_col_offset = (end != NULL && end >= p->tok->line_start) ? (int)(end - p->tok->line_start) : -1;
-
-    token->lineno = lineno;
-    token->col_offset = p->tok->lineno == p->starting_lineno ? p->starting_col_offset + col_offset : col_offset;
-    token->end_lineno = end_lineno;
-    token->end_col_offset = p->tok->lineno == p->starting_lineno ? p->starting_col_offset + end_col_offset : end_col_offset;
+    parser_token->level = new_token.level;
+    parser_token->lineno = new_token.lineno;
+    parser_token->col_offset = p->tok->lineno == p->starting_lineno ? p->starting_col_offset + new_token.col_offset
+                                                                    : new_token.col_offset;
+    parser_token->end_lineno = new_token.end_lineno;
+    parser_token->end_col_offset = p->tok->lineno == p->starting_lineno ? p->starting_col_offset + new_token.end_col_offset
+                                                                        : new_token.end_col_offset;
 
     p->fill += 1;
 
@@ -202,26 +197,25 @@ _resize_tokens_array(Parser *p) {
 int
 _PyPegen_fill_token(Parser *p)
 {
-    const char *start;
-    const char *end;
-    int type = _PyTokenizer_Get(p->tok, &start, &end);
+    struct token new_token;
+    int type = _PyTokenizer_Get(p->tok, &new_token);
 
     // Record and skip '# type: ignore' comments
     while (type == TYPE_IGNORE) {
-        Py_ssize_t len = end - start;
+        Py_ssize_t len = new_token.end_col_offset - new_token.col_offset;
         char *tag = PyMem_Malloc(len + 1);
         if (tag == NULL) {
            PyErr_NoMemory();
            return -1;
        }
-        strncpy(tag, start, len);
+        strncpy(tag, new_token.start, len);
         tag[len] = '\0';
         // Ownership of tag passes to the growable array
         if (!growable_comment_array_add(&p->type_ignore_comments, p->tok->lineno, tag)) {
             PyErr_NoMemory();
             return -1;
         }
-        type = _PyTokenizer_Get(p->tok, &start, &end);
+        type = _PyTokenizer_Get(p->tok, &new_token);
     }
 
     // If we have reached the end and we are in single input mode we need to insert a newline and reset the parsing
@@ -244,7 +238,7 @@ _PyPegen_fill_token(Parser *p)
     }
 
     Token *t = p->tokens[p->fill];
-    return initialize_token(p, t, start, end, type);
+    return initialize_token(p, t, new_token, type);
 }
 
 #if defined(Py_DEBUG)
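Taken together, the pegen.c changes replace the old two-pointer out-parameters
with a single struct. A before/after sketch of the calling convention
(illustrative fragments based on the signatures visible in this diff, not a
compilable unit):

/* Before: raw buffer pointers came back, and callers derived line/column
 * information with pointer arithmetic against the tokenizer state. */
const char *start, *end;
int type = _PyTokenizer_Get(p->tok, &start, &end);
int col_offset = (int)(start - p->tok->line_start);   /* caller-side math */

/* After: one struct carries the pointers plus precomputed locations. */
struct token new_token;
type = _PyTokenizer_Get(p->tok, &new_token);
col_offset = new_token.col_offset;                    /* no arithmetic needed */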
5 changes: 2 additions & 3 deletions Parser/pegen_errors.c
@@ -164,11 +164,10 @@ _PyPegen_tokenize_full_source_to_check_for_errors(Parser *p) {
     Py_ssize_t current_err_line = current_token->lineno;
 
     int ret = 0;
+    struct token new_token;
 
     for (;;) {
-        const char *start;
-        const char *end;
-        switch (_PyTokenizer_Get(p->tok, &start, &end)) {
+        switch (_PyTokenizer_Get(p->tok, &new_token)) {
             case ERRORTOKEN:
                 if (p->tok->level != 0) {
                     int error_lineno = p->tok->parenlinenostack[p->tok->level-1];