From 40e1a73461dbb72498beba7f71a289cdd285d0e5 Mon Sep 17 00:00:00 2001
From: Erik Hedvall
Date: Sat, 28 Mar 2015 14:07:33 +0100
Subject: [PATCH] Upgrade to rustc 1.0.0-nightly (27901849e 2015-03-25) (built
 2015-03-26)

---
 examples/noop-tokenize.rs     |  4 ++--
 examples/tokenize.rs          |  4 ++--
 macros/src/lib.rs             |  4 ++--
 macros/src/named_entities.rs  |  7 ++++---
 src/lib.rs                    |  2 +-
 src/sink/owned_dom.rs         | 12 ++++++------
 src/sink/rcdom.rs             | 13 +++++++------
 src/tokenizer/buffer_queue.rs |  8 ++++----
 src/tokenizer/char_ref/mod.rs | 12 ++++++------
 src/tokenizer/mod.rs          | 19 +++++++++----------
 src/tree_builder/actions.rs   |  2 +-
 src/tree_builder/data.rs      |  2 +-
 src/tree_builder/mod.rs       | 10 ++++------
 src/tree_builder/rules.rs     |  2 +-
 src/util/smallcharset.rs      |  4 ++--
 tests/tokenizer.rs            | 24 ++++++++++++------------
 tests/tree_builder.rs         | 32 ++++++++++++++++----------------
 17 files changed, 80 insertions(+), 81 deletions(-)

diff --git a/examples/noop-tokenize.rs b/examples/noop-tokenize.rs
index 34bde39b..0e7ce85e 100644
--- a/examples/noop-tokenize.rs
+++ b/examples/noop-tokenize.rs
@@ -9,7 +9,7 @@
 
 // Run a single benchmark once. For use with profiling tools.
 
-#![feature(core, test)]
+#![feature(test)]
 
 extern crate test;
 extern crate html5ever;
@@ -36,7 +36,7 @@ impl TokenSink for Sink {
 fn main() {
     let mut path = env::current_exe().unwrap();
     path.push("../data/bench/");
-    path.push(env::args().nth(1).unwrap().as_slice());
+    path.push(env::args().nth(1).unwrap());
 
     let mut file = fs::File::open(&path).unwrap();
     let mut file_input = String::new();
diff --git a/examples/tokenize.rs b/examples/tokenize.rs
index a81df7ac..5d379968 100644
--- a/examples/tokenize.rs
+++ b/examples/tokenize.rs
@@ -7,7 +7,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(core, collections)]
+#![feature(collections)]
 
 extern crate html5ever;
 
@@ -43,7 +43,7 @@ impl TokenSink for TokenPrinter {
     fn process_token(&mut self, token: Token) {
         match token {
             CharacterTokens(b) => {
-                for c in b.as_slice().chars() {
+                for c in b.chars() {
                     self.do_char(c);
                 }
             }
diff --git a/macros/src/lib.rs b/macros/src/lib.rs
index fd76e10e..62b1900f 100644
--- a/macros/src/lib.rs
+++ b/macros/src/lib.rs
@@ -11,12 +11,12 @@
 
 #![crate_type="dylib"]
 #![feature(plugin_registrar, quote)]
-#![feature(rustc_private, core, std_misc)]
+#![feature(rustc_private, convert)]
 #![deny(warnings)]
 
 extern crate syntax;
 extern crate rustc;
-extern crate "rustc-serialize" as rustc_serialize;
+extern crate rustc_serialize;
 
 #[macro_use]
 extern crate mac;
diff --git a/macros/src/named_entities.rs b/macros/src/named_entities.rs
index 0505b030..3fd44b60 100644
--- a/macros/src/named_entities.rs
+++ b/macros/src/named_entities.rs
@@ -13,6 +13,7 @@
 use std::path::PathBuf;
 use std::fs;
 use std::str::FromStr;
 use std::collections::HashMap;
+use std::convert::From;
 
 use rustc_serialize::json;
 use rustc_serialize::json::Json;
@@ -52,7 +53,7 @@ fn build_map(js: Json) -> Option<HashMap<String, [u32; 2]>> {
         }
 
         // Slice off the initial '&'
-        assert!(k.as_slice().char_at(0) == '&');
+        assert!(k.chars().next() == Some('&'));
         map.insert(k[1..].to_string(), codepoint_pair);
     }
 
@@ -95,7 +96,7 @@ pub fn expand(cx: &mut ExtCtxt, sp: Span, tt: &[TokenTree]) -> Box<MacResult + 'static> {
     //     phf_map!(k => v, k => v, ...)
     let toks: Vec<_> = map.into_iter().flat_map(|(k, [c0, c1])| {
-        let k = k.as_slice();
+        let k = &k[..];
         (quote_tokens!(&mut *cx, $k => [$c0, $c1],)).into_iter()
     }).collect();
     MacEager::expr(quote_expr!(&mut *cx, phf_map!($toks)))
diff --git a/src/lib.rs b/src/lib.rs
index 63f3fd3d..cdf0deba 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -10,7 +10,7 @@
 
 #![crate_name="html5ever"]
 #![crate_type="dylib"]
 
-#![feature(plugin, box_syntax, no_std, core, collections, alloc)]
+#![feature(plugin, box_syntax, no_std, core, collections, alloc, str_char)]
 #![deny(warnings)]
 #![allow(unused_parens)]
 
diff --git a/src/sink/owned_dom.rs b/src/sink/owned_dom.rs
index ce09d540..320edad5 100644
--- a/src/sink/owned_dom.rs
+++ b/src/sink/owned_dom.rs
@@ -223,7 +223,7 @@ impl TreeSink for Sink {
         // Append to an existing Text node if we have one.
         match child {
             AppendText(ref text) => match parent.children.last() {
-                Some(h) => if append_to_existing_text(*h, text.as_slice()) { return; },
+                Some(h) => if append_to_existing_text(*h, &text) { return; },
                 _ => (),
             },
             _ => (),
@@ -247,7 +247,7 @@ impl TreeSink for Sink {
             // Look for a text node before the insertion point.
             (AppendText(text), i) => {
                 let prev = parent.children[i-1];
-                if append_to_existing_text(prev, text.as_slice()) {
+                if append_to_existing_text(prev, &text) {
                     return Ok(());
                 }
                 self.new_node(Text(text))
@@ -363,7 +363,7 @@ impl Serializable for Node {
             (_, &Element(ref name, ref attrs)) => {
                 if traversal_scope == IncludeNode {
                     try!(serializer.start_elem(name.clone(),
-                        attrs.iter().map(|at| (&at.name, at.value.as_slice()))));
+                        attrs.iter().map(|at| (&at.name, &at.value[..]))));
                 }
 
                 for child in self.children.iter() {
@@ -385,9 +385,9 @@ impl Serializable for Node {
 
             (ChildrenOnly, _) => Ok(()),
 
-            (IncludeNode, &Doctype(ref name, _, _)) => serializer.write_doctype(name.as_slice()),
-            (IncludeNode, &Text(ref text)) => serializer.write_text(text.as_slice()),
-            (IncludeNode, &Comment(ref text)) => serializer.write_comment(text.as_slice()),
+            (IncludeNode, &Doctype(ref name, _, _)) => serializer.write_doctype(&name),
+            (IncludeNode, &Text(ref text)) => serializer.write_text(&text),
+            (IncludeNode, &Comment(ref text)) => serializer.write_comment(&text),
 
             (IncludeNode, &Document) => panic!("Can't serialize Document node itself"),
         }
diff --git a/src/sink/rcdom.rs b/src/sink/rcdom.rs
index 6e4eeb77..ed6ec00b 100644
--- a/src/sink/rcdom.rs
+++ b/src/sink/rcdom.rs
@@ -64,6 +64,7 @@ pub type Handle = Rc<RefCell<Node>>;
 /// Weak reference to a DOM node, used for parent pointers.
 pub type WeakHandle = Weak<RefCell<Node>>;
 
+#[allow(trivial_casts)]
 fn same_node(x: &Handle, y: &Handle) -> bool {
     // FIXME: This shouldn't really need to touch the borrow flags, right?
     (&*x.borrow() as *const Node) == (&*y.borrow() as *const Node)
@@ -164,7 +165,7 @@ impl TreeSink for RcDom {
         // Append to an existing Text node if we have one.
         match child {
             AppendText(ref text) => match parent.borrow().children.last() {
-                Some(h) => if append_to_existing_text(h, text.as_slice()) { return; },
+                Some(h) => if append_to_existing_text(h, &text) { return; },
                 _ => (),
             },
             _ => (),
@@ -189,7 +190,7 @@ impl TreeSink for RcDom {
             (AppendText(text), i) => {
                 let parent = parent.borrow();
                 let prev = &parent.children[i-1];
-                if append_to_existing_text(prev, text.as_slice()) {
+                if append_to_existing_text(prev, &text) {
                     return Ok(());
                 }
                 new_node(Text(text))
@@ -276,7 +277,7 @@ impl Serializable for Handle {
             (_, &Element(ref name, ref attrs)) => {
                 if traversal_scope == IncludeNode {
                     try!(serializer.start_elem(name.clone(),
-                        attrs.iter().map(|at| (&at.name, at.value.as_slice()))));
+                        attrs.iter().map(|at| (&at.name, &at.value[..]))));
                 }
 
                 for handle in node.children.iter() {
@@ -298,9 +299,9 @@ impl Serializable for Handle {
 
             (ChildrenOnly, _) => Ok(()),
 
-            (IncludeNode, &Doctype(ref name, _, _)) => serializer.write_doctype(name.as_slice()),
-            (IncludeNode, &Text(ref text)) => serializer.write_text(text.as_slice()),
-            (IncludeNode, &Comment(ref text)) => serializer.write_comment(text.as_slice()),
+            (IncludeNode, &Doctype(ref name, _, _)) => serializer.write_doctype(&name),
+            (IncludeNode, &Text(ref text)) => serializer.write_text(&text),
+            (IncludeNode, &Comment(ref text)) => serializer.write_comment(&text),
 
             (IncludeNode, &Document) => panic!("Can't serialize Document node itself"),
         }
diff --git a/src/tokenizer/buffer_queue.rs b/src/tokenizer/buffer_queue.rs
index d73253a8..b3efd478 100644
--- a/src/tokenizer/buffer_queue.rs
+++ b/src/tokenizer/buffer_queue.rs
@@ -74,7 +74,7 @@ impl BufferQueue {
     /// Look at the next available character, if any.
     pub fn peek(&mut self) -> Option<char> {
         match self.buffers.front() {
-            Some(&Buffer { pos, ref buf }) => Some(buf.as_slice().char_at(pos)),
+            Some(&Buffer { pos, ref buf }) => Some(buf.char_at(pos)),
             None => None,
         }
     }
@@ -84,7 +84,7 @@ impl BufferQueue {
         let (result, now_empty) = match self.buffers.front_mut() {
             None => (None, false),
             Some(&mut Buffer { ref mut pos, ref buf }) => {
-                let CharRange { ch, next } = buf.as_slice().char_range_at(*pos);
+                let CharRange { ch, next } = buf.char_range_at(*pos);
                 *pos = next;
                 (Some(ch), next >= buf.len())
             }
@@ -111,7 +111,7 @@ impl BufferQueue {
                     *pos = new_pos;
                     (Some(NotFromSet(out)), new_pos >= buf.len())
                 } else {
-                    let CharRange { ch, next } = buf.as_slice().char_range_at(*pos);
+                    let CharRange { ch, next } = buf.char_range_at(*pos);
                     *pos = next;
                     (Some(FromSet(ch)), next >= buf.len())
                 }
@@ -146,7 +146,7 @@ impl BufferQueue {
             }
 
             let ref buf = self.buffers[buffers_exhausted];
-            let d = buf.buf.as_slice().char_at(consumed_from_last);
+            let d = buf.buf.char_at(consumed_from_last);
             match (c.to_ascii_opt(), d.to_ascii_opt()) {
                 (Some(c), Some(d)) => if c.eq_ignore_case(d) { () } else { return Some(false) },
                 _ => return Some(false),
diff --git a/src/tokenizer/char_ref/mod.rs b/src/tokenizer/char_ref/mod.rs
index 619f3b79..e5bfa5ce 100644
--- a/src/tokenizer/char_ref/mod.rs
+++ b/src/tokenizer/char_ref/mod.rs
@@ -172,7 +172,7 @@ impl CharRefTokenizer {
         use std::num::wrapping::WrappingOps;
 
         let c = unwrap_or_return!(tokenizer.peek(), Stuck);
-        match c.to_digit(base as u32) {
+        match c.to_digit(base) {
            Some(n) => {
                 tokenizer.discard_char();
                 self.num = self.num.wrapping_mul(base);
@@ -181,7 +181,7 @@ impl CharRefTokenizer {
                     // We still parse digits and semicolon, but don't use the result.
                     self.num_too_big = true;
                 }
-                self.num = self.num.wrapping_add(n as u32);
+                self.num = self.num.wrapping_add(n);
                 self.seen_digit = true;
                 Progress
             }
@@ -251,7 +251,7 @@ impl CharRefTokenizer {
     fn do_named(&mut self, tokenizer: &mut Tokenizer) -> Status {
         let c = unwrap_or_return!(tokenizer.get_char(), Stuck);
         self.name_buf_mut().push(c);
-        match data::NAMED_ENTITIES.get(self.name_buf().as_slice()) {
+        match data::NAMED_ENTITIES.get(&self.name_buf()[..]) {
             // We have either a full match or a prefix of one.
             Some(m) => {
                 if m[0] != 0 {
@@ -271,7 +271,7 @@ impl CharRefTokenizer {
     fn emit_name_error(&mut self, tokenizer: &mut Tokenizer) {
         let msg = format_if!(tokenizer.opts.exact_errors,
             "Invalid character reference",
-            "Invalid character reference &{}", self.name_buf().as_slice());
+            "Invalid character reference &{}", self.name_buf());
         tokenizer.emit_error(msg);
     }
 
@@ -313,14 +313,14 @@ impl CharRefTokenizer {
         let name_len = self.name_len;
         assert!(name_len > 0);
-        let last_matched = self.name_buf().as_slice().char_at(name_len-1);
+        let last_matched = self.name_buf().char_at(name_len-1);
 
         // There might not be a next character after the match, if
         // we had a full match and then hit EOF.
         let next_after = if name_len == self.name_buf().len() {
             None
         } else {
-            Some(self.name_buf().as_slice().char_at(name_len))
+            Some(self.name_buf().char_at(name_len))
         };
 
         // "If the character reference is being consumed as part of an
diff --git a/src/tokenizer/mod.rs b/src/tokenizer/mod.rs
index 09e26182..42241f72 100644
--- a/src/tokenizer/mod.rs
+++ b/src/tokenizer/mod.rs
@@ -15,7 +15,6 @@
 use core::clone::Clone;
 use core::cmp::Ord;
 use core::iter::{range, IteratorExt};
 use core::option::Option::{self, Some, None};
-use core::str::Str;
 
 pub use self::interface::{Doctype, Attribute, TagKind, StartTag, EndTag, Tag};
 pub use self::interface::{Token, DoctypeToken, TagToken, CommentToken};
@@ -61,7 +60,7 @@ fn append_strings(lhs: &mut String, rhs: String) {
     if lhs.is_empty() {
         *lhs = rhs;
     } else {
-        lhs.push_str(rhs.as_slice());
+        lhs.push_str(&rhs);
     }
 }
 
@@ -182,7 +181,7 @@ impl Tokenizer {
         }
 
         let start_tag_name = opts.last_start_tag_name.take()
-            .map(|s| Atom::from_slice(s.as_slice()));
+            .map(|s| Atom::from_slice(&s));
         let state = opts.initial_state.unwrap_or(states::Data);
         let discard_bom = opts.discard_bom;
         Tokenizer {
@@ -229,7 +228,7 @@ impl Tokenizer {
             return;
         }
 
-        let pos = if self.discard_bom && input.as_slice().char_at(0) == '\u{feff}' {
+        let pos = if self.discard_bom && input.char_at(0) == '\u{feff}' {
            self.discard_bom = false;
            3 // length of BOM in UTF-8
         } else {
@@ -385,7 +384,7 @@ impl Tokenizer {
         self.finish_attribute();
 
         let name = replace(&mut self.current_tag_name, String::new());
-        let name = Atom::from_slice(name.as_slice());
+        let name = Atom::from_slice(&name);
 
         match self.current_tag_kind {
             StartTag => {
@@ -448,7 +447,7 @@ impl Tokenizer {
         match self.last_start_tag_name.as_ref() {
             Some(last) => (self.current_tag_kind == EndTag)
-                && (self.current_tag_name.as_slice() == last.as_slice()),
+                && (self.current_tag_name == last.as_slice()),
             None => false,
         }
     }
@@ -468,7 +467,7 @@ impl Tokenizer {
         // FIXME: the spec says we should error as soon as the name is finished.
         // FIXME: linear time search, do we care?
         let dup = {
-            let name = self.current_attr_name.as_slice();
+            let name = &self.current_attr_name[..];
             self.current_tag_attrs.iter().any(|a| a.name.local.as_slice() == name)
         };
 
@@ -481,7 +480,7 @@ impl Tokenizer {
             self.current_tag_attrs.push(Attribute {
                 // The tree builder will adjust the namespace if necessary.
                 // This only happens in foreign elements.
-                name: QualName::new(ns!(""), Atom::from_slice(name.as_slice())),
+                name: QualName::new(ns!(""), Atom::from_slice(&name)),
                 value: replace(&mut self.current_attr_value, empty_str()),
             });
         }
@@ -810,7 +809,7 @@ impl Tokenizer {
         let c = get_char!(self);
         match c {
             '\t' | '\n' | '\x0C' | ' ' | '/' | '>' => {
-                let esc = if self.temp_buf.as_slice() == "script" { DoubleEscaped } else { Escaped };
+                let esc = if self.temp_buf == "script" { DoubleEscaped } else { Escaped };
                 go!(self: emit c; to RawData ScriptDataEscaped esc);
             }
             _ => match lower_ascii_letter(c) {
@@ -860,7 +859,7 @@ impl Tokenizer {
         let c = get_char!(self);
         match c {
             '\t' | '\n' | '\x0C' | ' ' | '/' | '>' => {
-                let esc = if self.temp_buf.as_slice() == "script" { Escaped } else { DoubleEscaped };
+                let esc = if self.temp_buf == "script" { Escaped } else { DoubleEscaped };
                 go!(self: emit c; to RawData ScriptDataEscaped esc);
             }
             _ => match lower_ascii_letter(c) {
diff --git a/src/tree_builder/actions.rs b/src/tree_builder/actions.rs
index 66333147..794f6526 100644
--- a/src/tree_builder/actions.rs
+++ b/src/tree_builder/actions.rs
@@ -635,7 +635,7 @@ impl TreeBuilderActions
     fn is_type_hidden(&self, tag: &Tag) -> bool {
         match tag.attrs.iter().find(|&at| at.name == qualname!("", "type")) {
             None => false,
-            Some(at) => at.value.as_slice().eq_ignore_ascii_case("hidden"),
+            Some(at) => (&*at.value).eq_ignore_ascii_case("hidden"),
         }
     }
 
diff --git a/src/tree_builder/data.rs b/src/tree_builder/data.rs
index 586f7371..36818639 100644
--- a/src/tree_builder/data.rs
+++ b/src/tree_builder/data.rs
@@ -95,7 +95,7 @@ static HTML4_PUBLIC_PREFIXES: &'static [&'static str] = &[
 pub fn doctype_error_and_quirks(doctype: &Doctype, iframe_srcdoc: bool) -> (bool, QuirksMode) {
     fn opt_as_slice<'t>(x: &'t Option<String>) -> Option<&'t str> {
-        x.as_ref().map(|y| y.as_slice())
+        x.as_ref().map(|y| &y[..])
     }
 
     fn opt_to_ascii_lower(x: Option<&str>) -> Option<String> {
diff --git a/src/tree_builder/mod.rs b/src/tree_builder/mod.rs
index c89c8fdf..81f0a44c 100644
--- a/src/tree_builder/mod.rs
+++ b/src/tree_builder/mod.rs
@@ -290,7 +290,7 @@ impl TreeBuilder
         for node in self.open_elems.iter() {
             let QualName { ns, local } = self.sink.elem_name(node.clone());
             match ns {
-                ns!(HTML) => print!(" {}", local.as_slice()),
+                ns!(HTML) => print!(" {}", &local[..]),
                 _ => panic!(),
             }
         }
@@ -302,7 +302,7 @@ impl TreeBuilder
             &Element(ref h, _) => {
                 let QualName { ns, local } = self.sink.elem_name(h.clone());
                 match ns {
-                    ns!(HTML) => print!(" {}", local.as_slice()),
+                    ns!(HTML) => print!(" {}", &local[..]),
                     _ => panic!(),
                 }
             }
@@ -347,10 +347,8 @@ impl TreeBuilder
                     token = t;
                 }
                 SplitWhitespace(buf) => {
-                    let buf = buf.as_slice();
-
                     let (len, is_ws) = unwrap_or_return!(
-                        char_run(is_ascii_whitespace, buf), ());
+                        char_run(is_ascii_whitespace, &buf), ());
 
                     token = CharacterTokens(
                         if is_ws { Whitespace } else { NotWhitespace },
@@ -420,7 +418,7 @@ impl TokenSink
             tokenizer::EOFToken => EOFToken,
 
             tokenizer::CharacterTokens(mut x) => {
-                if ignore_lf && x.len() >= 1 && x.as_slice().char_at(0) == '\n' {
+                if ignore_lf && x.len() >= 1 && x.char_at(0) == '\n' {
                     x.remove(0);
                 }
                 if x.is_empty() {
diff --git a/src/tree_builder/rules.rs b/src/tree_builder/rules.rs
index 259f18af..c750bdf4 100644
--- a/src/tree_builder/rules.rs
+++ b/src/tree_builder/rules.rs
@@ -27,7 +27,7 @@ use std::borrow::Cow::Borrowed;
 
 fn any_not_whitespace(x: &String) -> bool {
     // FIXME: this might be much faster as a byte scan
-    x.as_slice().chars().any(|c| !is_ascii_whitespace(c))
+    x.chars().any(|c| !is_ascii_whitespace(c))
 }
 
 // This goes in a trait so that we can control visibility.
diff --git a/src/util/smallcharset.rs b/src/util/smallcharset.rs
index 52cb74a5..53f07e1d 100644
--- a/src/util/smallcharset.rs
+++ b/src/util/smallcharset.rs
@@ -56,10 +56,10 @@ mod test {
             for y in 0 .. 48usize {
                 let mut s = repeat("x").take(x).collect::<String>();
                 s.push(c);
-                s.push_str(repeat("x").take(y).collect::<String>().as_slice());
+                s.push_str(&repeat("x").take(y).collect::<String>());
 
                 let set = small_char_set!('&' '\0');
-                assert_eq!(x, set.nonmember_prefix_len(s.as_slice()));
+                assert_eq!(x, set.nonmember_prefix_len(&s));
             }
         }
     }
diff --git a/tests/tokenizer.rs b/tests/tokenizer.rs
index c0c45636..ca258d2e 100644
--- a/tests/tokenizer.rs
+++ b/tests/tokenizer.rs
@@ -12,7 +12,7 @@
 #![plugin(string_cache_plugin)]
 
 extern crate test;
-extern crate "rustc-serialize" as rustc_serialize;
+extern crate rustc_serialize;
 extern crate string_cache;
 extern crate html5ever;
@@ -102,7 +102,7 @@ impl TokenSink for TokenLogger {
     fn process_token(&mut self, token: Token) {
         match token {
             CharacterTokens(b) => {
-                self.current_str.push_str(b.as_slice());
+                self.current_str.push_str(&b);
             }
 
             NullCharacterToken => {
@@ -200,7 +200,7 @@ fn json_to_token(js: &Json) -> Token {
     let parts = js.get_list();
     // Collect refs here so we don't have to use "ref" in all the patterns below.
     let args: Vec<&Json> = parts[1..].iter().collect();
-    match (parts[0].get_str().as_slice(), args.as_slice()) {
+    match (&parts[0].get_str()[..], &args[..]) {
         ("DOCTYPE", [name, public_id, system_id, correct]) => DoctypeToken(Doctype {
             name: name.get_nullable_str(),
             public_id: public_id.get_nullable_str(),
@@ -210,10 +210,10 @@ fn json_to_token(js: &Json) -> Token {
 
         ("StartTag", [name, attrs, rest..]) => TagToken(Tag {
             kind: StartTag,
-            name: Atom::from_slice(name.get_str().as_slice()),
+            name: Atom::from_slice(&name.get_str()),
             attrs: attrs.get_obj().iter().map(|(k,v)| {
                 Attribute {
-                    name: QualName::new(ns!(""), Atom::from_slice(k.as_slice())),
+                    name: QualName::new(ns!(""), Atom::from_slice(&k)),
                     value: v.get_str()
                 }
             }).collect(),
@@ -225,7 +225,7 @@ fn json_to_token(js: &Json) -> Token {
 
         ("EndTag", [name]) => TagToken(Tag {
             kind: EndTag,
-            name: Atom::from_slice(name.get_str().as_slice()),
+            name: Atom::from_slice(&name.get_str()),
             attrs: vec!(),
             self_closing: false
         }),
@@ -249,7 +249,7 @@ fn json_to_tokens(js: &Json, exact_errors: bool) -> Vec<Token> {
     for tok in js.get_list().iter() {
         match *tok {
             Json::String(ref s)
-                if s.as_slice() == "ParseError" => sink.process_token(ParseError(Borrowed(""))),
+                if &s[..] == "ParseError" => sink.process_token(ParseError(Borrowed(""))),
             _ => sink.process_token(json_to_token(tok)),
         }
     }
@@ -269,7 +269,7 @@ fn unescape(s: &str) -> Option<String> {
         }
         drop(it.next());
         let hex: String = it.by_ref().take(4).collect();
-        match num::from_str_radix(hex.as_slice(), 16).ok()
+        match num::from_str_radix(&hex, 16).ok()
             .and_then(char::from_u32) {
             // Some of the tests use lone surrogates, but we have no
             // way to represent them in the UTF-8 input to our parser.
@@ -288,7 +288,7 @@ fn unescape_json(js: &Json) -> Json {
     match *js {
         // unwrap is OK here because the spec'd *output* of the tokenizer never
         // contains a lone surrogate.
-        Json::String(ref s) => Json::String(unescape(s.as_slice()).unwrap()),
+        Json::String(ref s) => Json::String(unescape(&s).unwrap()),
         Json::Array(ref xs) => Json::Array(xs.iter().map(unescape_json).collect()),
         Json::Object(ref obj) => {
             let mut new_obj = BTreeMap::new();
@@ -337,7 +337,7 @@ fn mk_tests(tests: &mut Vec<TestDescAndFn>, filename: &str, js: &Json) {
     // "Double-escaped" tests require additional processing of
     // the input and output.
     if obj.get(&"doubleEscaped".to_string()).map_or(false, |j| j.get_bool()) {
-        match unescape(input.as_slice()) {
+        match unescape(&input) {
             None => return,
             Some(i) => input = i,
         }
@@ -350,7 +350,7 @@ fn mk_tests(tests: &mut Vec<TestDescAndFn>, filename: &str, js: &Json) {
     // Some tests want to start in a state other than Data.
     let state_overrides = match obj.get(&"initialStates".to_string()) {
         Some(&Json::Array(ref xs)) => xs.iter().map(|s|
-            Some(match s.get_str().as_slice() {
+            Some(match &s.get_str()[..] {
                 "PLAINTEXT state" => Plaintext,
                 "RAWTEXT state" => RawData(Rawtext),
                 "RCDATA state" => RawData(Rcdata),
@@ -416,6 +416,6 @@ fn start(argc: isize, argv: *const *const u8) -> isize {
         rt::args::init(argc, argv);
     }
     let args: Vec<_> = env::args().collect();
-    test::test_main(args.as_slice(), tests(Path::new(env!("CARGO_MANIFEST_DIR"))));
+    test::test_main(&args, tests(Path::new(env!("CARGO_MANIFEST_DIR"))));
     0
 }
diff --git a/tests/tree_builder.rs b/tests/tree_builder.rs
index 4c64f392..be852c0d 100644
--- a/tests/tree_builder.rs
+++ b/tests/tree_builder.rs
@@ -7,7 +7,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#![feature(core, plugin, start, std_misc, test)]
+#![feature(plugin, start, std_misc, test)]
 
 #![plugin(string_cache_plugin)]
@@ -64,12 +64,12 @@ fn parse_tests>(mut lines: It) -> Vec {
         if line.starts_with("#") {
             finish_val!();
-            if line.as_slice() == "#data" {
+            if line == "#data" {
                 finish_test!();
             }
             key = Some(line[1..].to_string());
         } else {
-            val.push_str(line.as_slice());
+            val.push_str(&line);
             val.push('\n');
         }
     }
@@ -83,7 +83,7 @@ fn parse_tests>(mut lines: It) -> Vec {
 
 fn serialize(buf: &mut String, indent: usize, handle: Handle) {
     buf.push_str("|");
-    buf.push_str(repeat(" ").take(indent).collect::<String>().as_slice());
+    buf.push_str(&repeat(" ").take(indent).collect::<String>());
 
     let node = handle.borrow();
     match node.node {
@@ -91,22 +91,22 @@ fn serialize(buf: &mut String, indent: usize, handle: Handle) {
         Doctype(ref name, ref public, ref system) => {
             buf.push_str("\n");
         }
 
         Text(ref text) => {
             buf.push_str("\"");
-            buf.push_str(text.as_slice());
+            buf.push_str(&text);
             buf.push_str("\"\n");
         }
 
         Comment(ref text) => {
             buf.push_str("\n");
         }
@@ -123,9 +123,9 @@ fn serialize(buf: &mut String, indent: usize, handle: Handle) {
             for attr in attrs.into_iter() {
                 assert!(attr.name.ns == ns!(""));
                 buf.push_str("|");
-                buf.push_str(repeat(" ").take(indent+2).collect::<String>().as_slice());
-                buf.push_str(format!("{}=\"{}\"\n",
-                    attr.name.local.as_slice(), attr.value).as_slice());
+                buf.push_str(&repeat(" ").take(indent+2).collect::<String>());
+                buf.push_str(&format!("{}=\"{}\"\n",
+                    attr.name.local.as_slice(), attr.value));
             }
         }
     }
@@ -148,16 +148,16 @@ fn make_test(
     let get_field = |key| {
         let field = fields.get(key).expect("missing field");
-        field.as_slice().trim_right_matches('\n').to_string()
+        field.trim_right_matches('\n').to_string()
     };
 
     let data = get_field("data");
     let expected = get_field("document");
     let context = fields.get("document-fragment")
-        .map(|field| Atom::from_slice(field.as_slice().trim_right_matches('\n')));
+        .map(|field| Atom::from_slice(field.trim_right_matches('\n')));
     let name = format!("tb: {}-{}", filename, idx);
     let ignore = ignores.contains(&name)
-        || IGNORE_SUBSTRS.iter().any(|&ig| data.as_slice().contains(ig));
+        || IGNORE_SUBSTRS.iter().any(|&ig| data.contains(ig));
 
     tests.push(TestDescAndFn {
         desc: TestDesc {
@@ -229,10 +229,10 @@ fn start(argc: isize, argv: *const *const u8) -> isize {
         let f = fs::File::open(&src_dir.join("data/test/ignore")).unwrap();
         let r = io::BufReader::new(f);
         for ln in r.lines() {
-            ignores.insert(ln.unwrap().as_slice().trim_right().to_string());
+            ignores.insert(ln.unwrap().trim_right().to_string());
        }
     }
 
-    test::test_main(args.as_slice(), tests(src_dir, &ignores));
+    test::test_main(&args, tests(src_dir, &ignores));
     0
 }
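
The bulk of the hunks above apply one mechanical pattern: the deprecated `as_slice()` conversions from `String` to `&str` are replaced with deref coercion (`&s`) or full-range slicing (`&s[..]`). A minimal stand-alone sketch of that pattern, assuming only the standard library (`takes_str` is a hypothetical helper used purely for illustration, not part of html5ever):

    // Callers used to write takes_str(owned.as_slice()); after the upgrade the
    // same call sites pass &owned or &owned[..] instead.
    fn takes_str(s: &str) -> usize {
        s.len()
    }

    fn main() {
        let owned = String::from("hello");
        let a = takes_str(&owned);      // deref coercion: &String -> &str
        let b = takes_str(&owned[..]);  // explicit full-range slice
        assert_eq!(a, b);
    }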