Handle CRLF properly in the lexer
The lexer already ignores CRLF in between tokens, but it doesn't
properly handle carriage returns inside strings and doc comments. Teach
it to treat CRLF as LF inside these tokens, and to disallow carriage
returns that are not followed by linefeeds. This includes handling an
escaped CRLF inside a regular string token the same way it handles an
escaped LF.
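
As an illustration (this snippet is not part of the diff, and it assumes the file containing it is saved with CRLF line endings), string and doc-comment tokens now lex identically under both line-ending conventions; the new run-pass test at the bottom of this commit exercises exactly this:

// Illustration only: assumes this source file is saved with CRLF (\r\n) endings.
fn main() {
    let s = "string
literal";
    // the raw CRLF inside the literal is lexed as a single '\n'
    assert_eq!(s, "string\nliteral");

    let s = "literal with \
escaped newline";
    // an escaped CRLF is skipped, just like an escaped LF
    assert_eq!(s, "literal with escaped newline");
}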

This is technically a breaking change, as bare carriage returns are no
longer allowed, and CRLF sequences are now treated as LF inside strings
and doc comments, but it's very unlikely to actually affect any
real-world code.
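
Concretely (again as an illustration only), a carriage return that is not immediately followed by a line feed is now a lexer error; <CR> below stands for a raw U+000D byte that cannot be reproduced here:

let _s = "foo<CR>bar";  // error: bare CR not allowed in string, use \r instead
let _s = "foo\rbar";    // still accepted: the \r escape is unchanged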

This change is necessary to have Rust code compile on Windows the same
way it does on Unix. The mozilla/rust repository explicitly sets eol=lf
for Rust source files, but other Rust repositories don't. Notably,
rust-http cannot be compiled on Windows without converting the CRLF line
endings back to LF.
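
(For reference, not spelled out in this commit: the eol=lf behaviour comes from a .gitattributes rule. A pattern along these lines, though the exact pattern used by mozilla/rust may differ, forces LF endings for Rust sources on checkout:

*.rs text eol=lf

The new src/test/run-pass/.gitattributes file below does the opposite for the CRLF test file, marking it -text so that git leaves its line endings untouched.)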

[breaking-change]
lilyball committed Jun 19, 2014
1 parent d41058e commit 8a8e497
Showing 5 changed files with 215 additions and 23 deletions.
141 changes: 118 additions & 23 deletions src/libsyntax/parse/lexer/mod.rs
@@ -225,6 +225,47 @@ impl<'a> StringReader<'a> {
                self.byte_offset(end).to_uint()))
    }

    /// Converts CRLF to LF in the given string, raising an error on bare CR.
    fn translate_crlf<'a>(&self, start: BytePos,
                          s: &'a str, errmsg: &'a str) -> str::MaybeOwned<'a> {
        let mut i = 0u;
        while i < s.len() {
            let str::CharRange { ch, next } = s.char_range_at(i);
            if ch == '\r' {
                if next < s.len() && s.char_at(next) == '\n' {
                    return translate_crlf_(self, start, s, errmsg, i).into_maybe_owned();
                }
                let pos = start + BytePos(i as u32);
                let end_pos = start + BytePos(next as u32);
                self.err_span_(pos, end_pos, errmsg);
            }
            i = next;
        }
        return s.into_maybe_owned();

        fn translate_crlf_(rdr: &StringReader, start: BytePos,
                           s: &str, errmsg: &str, mut i: uint) -> String {
            let mut buf = String::with_capacity(s.len());
            let mut j = 0;
            while i < s.len() {
                let str::CharRange { ch, next } = s.char_range_at(i);
                if ch == '\r' {
                    if j < i { buf.push_str(s.slice(j, i)); }
                    j = next;
                    if next >= s.len() || s.char_at(next) != '\n' {
                        let pos = start + BytePos(i as u32);
                        let end_pos = start + BytePos(next as u32);
                        rdr.err_span_(pos, end_pos, errmsg);
                    }
                }
                i = next;
            }
            if j < s.len() { buf.push_str(s.slice_from(j)); }
            buf
        }
    }


    /// Advance the StringReader by one character. If a newline is
    /// discovered, add it to the FileMap's list of line start offsets.
    pub fn bump(&mut self) {
@@ -305,7 +346,20 @@ impl<'a> StringReader<'a> {
                    // line comments starting with "///" or "//!" are doc-comments
                    if self.curr_is('/') || self.curr_is('!') {
                        let start_bpos = self.pos - BytePos(3);
                        while !self.curr_is('\n') && !self.is_eof() {
                        while !self.is_eof() {
                            match self.curr.unwrap() {
                                '\n' => break,
                                '\r' => {
                                    if self.nextch_is('\n') {
                                        // CRLF
                                        break
                                    } else {
                                        self.err_span_(self.last_pos, self.pos,
                                                       "bare CR not allowed in doc-comment");
                                    }
                                }
                                _ => ()
                            }
                            self.bump();
                        }
                        let ret = self.with_str_from(start_bpos, |string| {
@@ -370,6 +424,7 @@ impl<'a> StringReader<'a> {
        let start_bpos = self.last_pos - BytePos(2);

        let mut level: int = 1;
        let mut has_cr = false;
        while level > 0 {
            if self.is_eof() {
                let msg = if is_doc_comment {
@@ -379,25 +434,35 @@
                };
                let last_bpos = self.last_pos;
                self.fatal_span_(start_bpos, last_bpos, msg);
            } else if self.curr_is('/') && self.nextch_is('*') {
                level += 1;
                self.bump();
                self.bump();
            } else if self.curr_is('*') && self.nextch_is('/') {
                level -= 1;
                self.bump();
                self.bump();
            } else {
                self.bump();
            }
            let n = self.curr.unwrap();
            match n {
                '/' if self.nextch_is('*') => {
                    level += 1;
                    self.bump();
                }
                '*' if self.nextch_is('/') => {
                    level -= 1;
                    self.bump();
                }
                '\r' => {
                    has_cr = true;
                }
                _ => ()
            }
            self.bump();
        }

        let res = if is_doc_comment {
            self.with_str_from(start_bpos, |string| {
                // but comments with only "*"s between two "/"s are not
                if !is_block_non_doc_comment(string) {
                    let string = if has_cr {
                        self.translate_crlf(start_bpos, string,
                                            "bare CR not allowed in block doc-comment")
                    } else { string.into_maybe_owned() };
                    Some(TokenAndSpan{
                        tok: token::DOC_COMMENT(str_to_ident(string)),
                        tok: token::DOC_COMMENT(str_to_ident(string.as_slice())),
                        sp: codemap::mk_sp(start_bpos, self.last_pos)
                    })
                } else {
@@ -675,6 +740,10 @@ impl<'a> StringReader<'a> {
                self.consume_whitespace();
                return None
            },
            '\r' if delim == '"' && self.curr_is('\n') => {
                self.consume_whitespace();
                return None
            }
            c => {
                let last_pos = self.last_pos;
                self.err_span_char(
@@ -696,6 +765,15 @@
else { "character constant must be escaped" },
first_source_char);
}
'\r' => {
if self.curr_is('\n') {
self.bump();
return Some('\n');
} else {
self.err_span_(start, self.last_pos,
"bare CR not allowed in string, use \\r instead");
}
}
_ => if ascii_only && first_source_char > '\x7F' {
let last_pos = self.last_pos;
self.err_span_char(
@@ -1042,28 +1120,45 @@ impl<'a> StringReader<'a> {
            self.bump();
            let content_start_bpos = self.last_pos;
            let mut content_end_bpos;
            let mut has_cr = false;
            'outer: loop {
                if self.is_eof() {
                    let last_bpos = self.last_pos;
                    self.fatal_span_(start_bpos, last_bpos, "unterminated raw string");
                }
                if self.curr_is('"') {
                    content_end_bpos = self.last_pos;
                    for _ in range(0, hash_count) {
                        self.bump();
                        if !self.curr_is('#') {
                            continue 'outer;
                //if self.curr_is('"') {
                    //content_end_bpos = self.last_pos;
                    //for _ in range(0, hash_count) {
                        //self.bump();
                        //if !self.curr_is('#') {
                            //continue 'outer;
                let c = self.curr.unwrap();
                match c {
                    '"' => {
                        content_end_bpos = self.last_pos;
                        for _ in range(0, hash_count) {
                            self.bump();
                            if !self.curr_is('#') {
                                continue 'outer;
                            }
                        }
                        break;
                    }
                    '\r' => {
                        has_cr = true;
                    }
                    break;
                    _ => ()
                }
                self.bump();
            }
            self.bump();
            let str_content = self.with_str_from_to(
                content_start_bpos,
                content_end_bpos,
                str_to_ident);
            let str_content = self.with_str_from_to(content_start_bpos, content_end_bpos, |string| {
                let string = if has_cr {
                    self.translate_crlf(content_start_bpos, string,
                                        "bare CR not allowed in raw string")
                } else { string.into_maybe_owned() };
                str_to_ident(string.as_slice())
            });
            return token::LIT_STR_RAW(str_content, hash_count);
        }
        '-' => {
22 changes: 22 additions & 0 deletions src/libsyntax/parse/mod.rs
@@ -288,6 +288,8 @@ mod test {
    use owned_slice::OwnedSlice;
    use ast;
    use abi;
    use attr;
    use attr::AttrMetaMethods;
    use parse::parser::Parser;
    use parse::token::{str_to_ident};
    use util::parser_testing::{string_to_tts, string_to_parser};
@@ -726,4 +728,24 @@ mod test {
}".to_string());
}

    #[test] fn crlf_doc_comments() {
        let sess = new_parse_sess();

        let name = "<source>".to_string();
        let source = "/// doc comment\r\nfn foo() {}".to_string();
        let item = parse_item_from_source_str(name.clone(), source, Vec::new(), &sess).unwrap();
        let doc = attr::first_attr_value_str_by_name(item.attrs.as_slice(), "doc").unwrap();
        assert_eq!(doc.get(), "/// doc comment");

        let source = "/// doc comment\r\n/// line 2\r\nfn foo() {}".to_string();
        let item = parse_item_from_source_str(name.clone(), source, Vec::new(), &sess).unwrap();
        let docs = item.attrs.iter().filter(|a| a.name().get() == "doc")
                       .map(|a| a.value_str().unwrap().get().to_string()).collect::<Vec<_>>();
        assert_eq!(docs.as_slice(), &["/// doc comment".to_string(), "/// line 2".to_string()]);

        let source = "/** doc comment\r\n * with CRLF */\r\nfn foo() {}".to_string();
        let item = parse_item_from_source_str(name, source, Vec::new(), &sess).unwrap();
        let doc = attr::first_attr_value_str_by_name(item.attrs.as_slice(), "doc").unwrap();
        assert_eq!(doc.get(), "/** doc comment\n * with CRLF */");
    }
}
30 changes: 30 additions & 0 deletions src/test/compile-fail/lex-bare-cr-string-literal-doc-comment.rs
@@ -0,0 +1,30 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// ignore-tidy-cr

/// doc comment with bare CR: ''
pub fn foo() {}
//~^^ ERROR: bare CR not allowed in doc-comment

/** block doc comment with bare CR: '' */
pub fn bar() {}
//~^^ ERROR: bare CR not allowed in block doc-comment

fn main() {
    // the following string literal has a bare CR in it
    let _s = "foobar"; //~ ERROR: bare CR not allowed in string

    // the following string literal has a bare CR in it
    let _s = r"barfoo"; //~ ERROR: bare CR not allowed in raw string

    // the following string literal has a bare CR in it
    let _s = "foo\bar"; //~ ERROR: unknown character escape: \r
}
1 change: 1 addition & 0 deletions src/test/run-pass/.gitattributes
@@ -0,0 +1 @@
lexer-crlf-line-endings-string-literal-doc-comment.rs -text
44 changes: 44 additions & 0 deletions src/test/run-pass/lexer-crlf-line-endings-string-literal-doc-comment.rs
@@ -0,0 +1,44 @@
// ignore-tidy-cr ignore-license
// ignore-tidy-cr (repeated again because of tidy bug)
// license is ignored because tidy can't handle the CRLF here properly.

// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NB: this file needs CRLF line endings. The .gitattributes file in
// this directory should enforce it.

// ignore-pretty

/// Doc comment that ends in CRLF
pub fn foo() {}

/** Block doc comment that
* contains CRLF characters
*/
pub fn bar() {}

fn main() {
let s = "string
literal";
assert_eq!(s, "string\nliteral");

let s = "literal with \
escaped newline";
assert_eq!(s, "literal with escaped newline");

let s = r"string
literal";
assert_eq!(s, "string\nliteral");

// validate that our source file has CRLF endings
let source = include_str!("lexer-crlf-line-endings-string-literal-doc-comment.rs");
assert!(source.contains("string\r\nliteral"));
}

5 comments on commit 8a8e497

@bors commented on 8a8e497 Jun 19, 2014

saw approval from cmr
at lilyball@8a8e497

@bors commented on 8a8e497 Jun 19, 2014

merging kballard/rust/lexer_crlf_handling = 8a8e497 into auto

@bors commented on 8a8e497 Jun 19, 2014

kballard/rust/lexer_crlf_handling = 8a8e497 merged ok, testing candidate = f8c9aec

@bors commented on 8a8e497 Jun 19, 2014

fast-forwarding master to auto = f8c9aec
